% (dataset-export artifact removed)
\begin{document}
\title{{\Large \bf
Polynomials with exponents in compact convex sets and
associated weighted extremal functions -- \\
Characterization of polynomials by $L^2$-estimates }}
\begin{abstract} \noindent
The main result of this paper is that an entire function $f$
that is in $L^2(\C^n,\psi)$ with respect to the
weight $\psi(z)=2mH_S(z)+\gamma\log(1+|z|^2)$ is a
polynomial
with exponents in $m\widehat S_\Gamma$.
Here $H_S$ is the
logarithmic supporting function of a compact convex set $S\subset \R^n_+$
with $0\in S$, $\gamma\geq 0$ is small enough in terms of $m$,
and $\widehat S_\Gamma$
is the hull of $S$ with respect to a certain cone
$\Gamma$ depending on $S$, $m$ and $\gamma$.
An example showing that in general
$\widehat S_\Gamma$ cannot be replaced by $S$ is constructed.
\par
\noindent{\em Subject Classification (2020)}:
32U35. Secondary 32A08, 32A15, 32U15, 32W50.
\end{abstract}
\section{Introduction}
\label{sec:01}
\noindent
Let $S$ be a compact convex subset of $\R^n_+$
with $0\in S$. We let ${\mathcal P}^S_m(\C^n)$ denote the space
of all polynomials $p$ of the form
$$
p(z)=\sum_{\alpha\in (mS)\cap \N^n} a_\alpha z^\alpha, \qquad z\in \C^n,
$$
and set
${\mathcal P}^S(\C^n)=\cup_{m\in \N}{\mathcal P}^S_m(\C^n)$.
The \emph{$S$-Lelong class} $\L^S(\C^n)\subset \PSH(\C^n)$
is defined in the following way
in terms of the \emph{supporting function} $\varphi_S$ of $S$,
$$\varphi_S(\xi)=\sup_{s\in S}\scalar s\xi, \qquad \xi\in \R^n,
$$
and the map $\Log\colon \C^{*n}\to \R^n$,
$$\Log\, z=(\log|z_1|,\dots,\log|z_n|), \qquad z\in \C^{*n}.
$$
We define the \emph{logarithmic supporting function}
$H_S\in \PSH(\C^n)$ of $S$ by
$$H_S=\varphi_S\circ \Log$$
on $\C^{*n}$ and extend the definition to the coordinate
hyperplanes by the formula
$$
H_S(z)=\varlimsup_{\C^{*n}\ni w\to z}H_S(w), \qquad z\in \C^n\setminus
\C^{*n}.
$$
The $S$-Lelong class $\L^S(\C^n)$
is defined as the set of all $u\in \PSH(\C^n)$ satisfying a growth
estimate of the form $u\leq c_u+H_S$ for some constant $c_u$.
Let $N^S_s=\{\xi\in \R^n\,;\, \scalar s\xi=\varphi_S(\xi)\}$
denote the \emph{normal cone} of $S$ at the point $s$ and define
for every cone $\Gamma\subset (\R^n\setminus N^S_0)\cup\{0\}$
with $\Gamma\neq \{0\}$ the \emph{$\Gamma$-hull of $S$} by
\begin{equation}
\label{eq:1.1}
\widehat S_\Gamma
=\{x\in \R_+^ n\,;\, \scalar x\xi\leq \varphi_S(\xi),\ \xi\in \Gamma\}.
\end{equation}
We say that $S$ is \emph{$\Gamma$-convex} if $S=\widehat S_\Gamma$
and that $S$ is a \emph{lower set} if for every $s\in S$
the cube $[0,s_1]\times\cdots\times [0,s_n]$ is contained
in $S$. Every lower set is $\Gamma$-convex with respect to the cone
$\Gamma=\R^n_+$. (See \cite{MagSigSigSno:2023}, Section 4.)
Our main result is:
\begin{theorem} \label{thm:1.1}
Let $S$ be a compact convex subset of $\R_+^n$ with $0\in S$,
$m\in\N^*$, and $d_m=d(mS,\N^n\setminus mS)$ denote
the euclidean distance between
the sets $mS$ and $\N^n\setminus mS$.
Let $f\in \OO(\C^n)$, assume that
\begin{equation}
\label{eq:1.2}
\int_{\C^n}|f|^2(1+|\zeta|^2)^{-\gamma}e^{-2mH_S} \,
d\lambda <+\infty
\end{equation}
for some $0\leq \gamma < d_m$. Let $\Gamma$
be the cone consisting of all $\xi$ such that
the angle between the vectors ${\mathbf 1}=(1,\dots,1)$ and $\xi$ is
$\leq \arccos(-(d_m-\gamma)/\sqrt n)$ and let
$\widehat S_\Gamma$ be the hull of $S$ with respect to the cone
$\Gamma$
defined by (\ref{eq:1.1}).
Then $f\in {\mathcal P}_m^{\widehat S_\Gamma}(\C^n)$.
\end{theorem}
\noindent
Observe that the largest possible $d_m$ is $1$ and
the smallest possible $\gamma$ is $0$, which implies that
the largest possible opening angle of the cone $\Gamma$
is $\arccos(-1/\sqrt n)$.
If $\gamma_0$ is the infimum of $\gamma$ such that
\eqref{eq:1.2} holds, then $\Gamma(\gamma_0) =
\cup_{\gamma>\gamma_0} \Gamma(\gamma)$.
Therefore $f$ is a polynomial with exponents in
$m \widehat S_{\Gamma(\gamma_0)} = \cap_{\gamma>\gamma_0}
m\widehat S_{\Gamma(\gamma)}$.
We are interested in determining conditions on $S$
which ensure that $\widehat S_\Lambda =S$ for some cone
$\Lambda\subseteq \Gamma$.
\begin{corollary} \label{cor:1.2} The function $f$ in
Theorem~\ref{thm:1.1} is in ${\mathcal P}_m^{S}(\C^n)$
in the cases:
\begin{enumerate}
\item [{\bf(i)}] $S$ is a lower set.
\item [{\bf(ii)}] $S=\widehat S_{\Lambda}$ for some cone
$\Lambda$ contained in
$\{\xi\in \R^n \,;\, \scalar{\mathbf 1}\xi\geq 0\}$.
\item [{\bf(iii)}] $(mS)\cap \N^n=(m\widehat S_{\Gamma})\cap \N^n$.
\end{enumerate}
\end{corollary}
Bayraktar, Hussung, Levenberg, and Perera \cite{BayHusLevPer:2020}
stated in Proposition 4.3 that a function $f\in\OO(\C^n)$ satisfying
(\ref{eq:1.2}) is in ${\mathcal P}^S_m(\C^n)$ when $S$ is a
polytope.
This is true for example in the cases mentioned in
Corollary \ref{cor:1.2}. However this is not correct in general
as seen by Example \ref{ex:4.1}.
For an exposition of the general background to the results
in this paper see
\cite{MagSigSigSno:2023} and the references therein.
\subsection*{Acknowledgment}
The results of this paper are a part of a research project,
{\it Holomorphic Approximations and Pluripotential Theory},
with project grant
no.~207236-051 supported by the Icelandic Research Fund.
We would like to thank the Fund for its support and the
Mathematics Division, Science Institute, University of Iceland,
for hosting the project.
\section{Taylor expansions of entire functions}
\label{sec:02}
Let $f\in \OO(\C^n)$ be an entire function with Taylor expansion
$f(z)=\sum_{\alpha\in \N^n}a_\alpha z^\alpha$ at the origin.
Let us now derive an estimate of the coefficients
$a_\alpha=f^{(\alpha)}(0)/\alpha!$.
The Cauchy formula implies that
for every polycircle $C_r=\{z\in \C^n\,;\, |z_j|=r_j\}$ with
center $0$ and polyradius $r\in \R_+^{*n}$ we have
\begin{equation}
\label{eq:2.1}
a_\alpha =\dfrac{f^{(\alpha)}(0)}{\alpha !}
=\dfrac 1{(2\pi i)^n}\int_{C_r} \dfrac{f(\zeta)}{\zeta^\alpha}\cdot
\dfrac{d\zeta_1\cdots d\zeta_n}{\zeta_1\cdots\zeta_n}.
\end{equation}
We parametrize $C_r$ by $[-\pi,\pi]^n\ni \theta\mapsto
(r_1e^{i\theta_1},\dots,r_ne^{i\theta_n})$ and get
\begin{equation}
\label{eq:2.2}
a_\alpha
=\dfrac 1{(2\pi)^n}\int_{[-\pi,\pi]^n}
\dfrac{f(r_1e^{i\theta_1},\dots,r_ne^{i\theta_n})}{r^\alpha
e^{i\scalar\alpha\theta}}\, d\theta_1\cdots d\theta_n.
\end{equation}
We take $\sigma,\tau \in \R^n$ with $\sigma_j<\tau_j$ for
$j=1,\dots,n$ and define
\begin{gather}
\label{eq:2.3}
A_{\sigma,\tau}=\{\zeta\in \C^n\,;\, e^{\sigma_j}\leq
|\zeta_j|<e^{\tau_j}\}\subset \C^n,\\
L_{\sigma,\tau}=\prod_{j=1}^n\big([e^{\sigma_j},e^{\tau_j}]
\times[-\pi,\pi]\big)\subset
\R^{2n}=\big(\R^2\big)^n, \quad \text{ and } \nonumber \\
K_{\sigma,\tau}=\prod_{j=1}^n [\sigma_j,\tau_j] \subset \R^n. \nonumber
\end{gather}
We multiply both sides of (\ref{eq:2.2})
by $r_1\cdots r_n\, dr_1\cdots dr_n$, integrate with
respect to the variables $r_j$ over
$[e^{\sigma_j},e^{\tau_j}]$, observe that
$\int_{[e^{\sigma_j},e^{\tau_j}]}r_j\, dr_j=\tfrac
12(e^{2\tau_j}-e^{2\sigma_j})$, and get
\begin{equation}
\label{eq:2.4}
a_\alpha
=\dfrac 1{v(A_{\sigma,\tau})}
\int_{L_{\sigma,\tau}}
\dfrac{f(r_1e^{i\theta_1},\dots,r_ne^{i\theta_n})}{r^\alpha
e^{i\scalar\alpha\theta}}\, (r_1\, dr_1d\theta_1)\cdots (r_n\,
dr_nd\theta_n),
\end{equation}
where
$v(A_{\sigma,\tau})=\pi^n\prod_{j=1}^n(e^{2\tau_j}-e^{2\sigma_j})$
is the volume of the polyannulus. By switching back to the
original coordinates $\zeta_j=r_je^{i\theta_j}$ we get
\begin{equation}
\label{eq:2.5}
a_\alpha=\dfrac 1{v(A_{\sigma,\tau})} \int_{A_{\sigma,\tau}}
\dfrac{f(\zeta)}{\zeta^\alpha} \, d\lambda(\zeta).
\end{equation}
Now we assume that
$f\in L^2(\C^n,\psi)\cap\OO(\C^n)$ for some
measurable $\psi\colon \C^n\to \overline \R$, i.e.,
\begin{equation}
\label{eq:2.6}
\|f\|_\psi^2=\int_{\C^n}|f|^2e^{-\psi}\, d\lambda <+\infty.
\end{equation}
By (\ref{eq:2.5}) and the Cauchy-Schwarz inequality
\begin{align}
\label{eq:2.7}
|a_\alpha|&\leq
\dfrac 1{v(A_{\sigma,\tau})} \int_{A_{\sigma,\tau}}
|f(\zeta)|e^{-\psi(\zeta)/2} \cdot\dfrac{e^{\psi(\zeta)/2}}{|\zeta^\alpha|}
\, d\lambda(\zeta)\\
&\leq \dfrac {\|f\|_\psi}{v(A_{\sigma,\tau})}
\bigg(\int_{A_{\sigma,\tau}}
\dfrac{e^{\psi(\zeta)}}{|\zeta^\alpha|^2}
\, d\lambda(\zeta)\bigg)^{1/2}_.
\nonumber
\end{align}
If $\psi$ is rotationally invariant in each
variable $\zeta_j$, i.e.,
$\psi(r_1e^{i\theta_1},\dots,r_ne^{i\theta_n})=
\psi(r_1,\dots,r_n)$, then we introduce
logarithmic coordinates $\xi_j=\log r_j$ and set
$\chi(\xi)=\tfrac 12 \psi(e^{\xi_1},\dots,e^{\xi_n})$, and observe that
$r_j\, dr_j=e^{2\xi_j}\, d\xi_j$ and get
\begin{equation}
\label{eq:2.8}
\int_{A_{\sigma,\tau}}
\dfrac{e^{\psi(\zeta)}}{|\zeta^\alpha|^2}
\, d\lambda(\zeta)
=
(2\pi)^n
\int_{K_{\sigma,\tau}}
e^{2(\chi(\xi)-\scalar \alpha\xi+\scalar{\mathbf 1}\xi)} \, d\lambda(\xi).
\end{equation}
We have
$v(A_{\sigma,\tau})=\pi^n\prod_{j=1}^n(e^{2\tau_j}-e^{2\sigma_j})$, so
by combining (\ref{eq:2.7}) and (\ref{eq:2.8}) we get
\begin{equation}
\label{eq:2.9}
|a_\alpha| \leq
\dfrac {\|f\|_\psi}{\prod_{j=1}^n(1-e^{-2(\tau_j-\sigma_j)})}
e^{-2\scalar{\mathbf 1}\tau}
\bigg(
\int_{K_{\sigma,\tau}}
e^{2(\chi(\xi)-\scalar \alpha\xi+\scalar{\mathbf 1}\xi)} \,
d\lambda(\xi)\bigg)^{1/2}_.
\end{equation}
Since $\xi_j\leq \tau_j$ for every $\xi\in K_{\sigma,\tau}$ we arrive
at the estimate
\begin{equation}
\label{eq:2.10}
|a_\alpha| \leq
\dfrac {\|f\|_\psi}{\prod_{j=1}^n(1-e^{-2(\tau_j-\sigma_j)})}
e^{-\scalar{\mathbf 1}\tau}
\bigg(\int_{K_{\sigma,\tau}}
e^{2(\chi(\xi)-\scalar \alpha\xi)} \, d\lambda(\xi)\bigg)^{1/2}_.
\end{equation}
\section{Proof of Theorem \ref{thm:1.1}}
\label{sec:03}
\noindent
Let $f(z)=\sum_{\alpha\in \N^n} a_\alpha z^\alpha$ be
the Taylor expansion of $f$ at the origin.
We need to show
that $a_\alpha=0$ for every $\alpha\in \N^n\setminus m\widehat S_\Gamma$.
Since $\alpha\not\in m \widehat S_\Gamma$, there exists $\tau \in \Gamma$
such that $|\tau|=1$ and $\scalar \alpha\tau >m\varphi_S(\tau)$.
By rotating $\tau$ we may assume that $\tau$ is an interior point
of $\Gamma$ which gives $-\scalar{\mathbf 1}\tau <d_m-\gamma$.
We choose $\varepsilon >0$ such that
$d_m-\gamma-\varepsilon>0$, and
$-\scalar{\mathbf 1}\tau <d_m-\gamma-\varepsilon$.
Recall that $\scalar\alpha\tau-m\varphi_S(\tau)$ is the euclidean
distance from $\alpha$ to the supporting hyperplane
$\{x\,;\, \scalar x\tau=m\varphi_S(\tau)\}$, so by assumption
$m\varphi_S(\tau)-\scalar\alpha\tau\leq -d_m$. Hence
\begin{equation*}
-\scalar{\mathbf 1}\tau + m\varphi_S(\tau)-\scalar\alpha\tau
<-\gamma-\varepsilon.
\end{equation*}
We choose $\sigma\in \R^n\setminus \{0\}$ such that $\sigma_j<\tau_j$ for
every $j=1,\dots,n$ and
\begin{equation*}
-\scalar{\mathbf 1}\tau + m\varphi_S(\xi) -\scalar\alpha\xi
<-(\gamma+\varepsilon)|\xi|, \qquad \xi\in K_{\sigma,\tau}.
\end{equation*}
By homogeneity we get
\begin{equation}
\label{eq:3.1}
-t\scalar{\mathbf 1}\tau + m\varphi_S(\xi)-\scalar\alpha\xi
<-(\gamma+\varepsilon)|\xi|, \qquad t>0, \ \xi\in tK_{\sigma,\tau}.
\end{equation}
Let $\xi_j=\log|\zeta_j|$ and observe that
$(1+|\zeta|^2)^\gamma \leq (n+1)^\gamma
\max\{1,\|\zeta\|_\infty^{2\gamma} \}$. From this inequality and
\eqref{eq:1.2} it follows that
$f\in L^2(\C^n,\psi)$, where
\begin{equation}
\label{eq:3.2}
\tfrac 12 \psi(\zeta)={\gamma}\log\|\zeta\|_\infty+mH_S(\zeta)
=\gamma \|\xi\|_\infty+m\varphi_S(\xi).
\end{equation}
We set $\chi(\xi)=\tfrac 12 \psi(e^{\xi_1},\dots,e^{\xi_n})$.
Then the estimate (\ref{eq:3.1}) gives
\begin{equation*}
-t\scalar{\mathbf 1}\tau + \chi(\xi)-\scalar\alpha\xi
<-\varepsilon|\xi|, \qquad t>0, \ \xi\in tK_{\sigma,\tau},
\end{equation*}
the estimate (\ref{eq:2.10}) with $tK_{\sigma,\tau}$ in the
role of $K_{\sigma,\tau}$ gives
\begin{align*}
|a_\alpha| &\leq
\dfrac {\|f\|_\psi}{\prod_{j=1}^n(1-e^{-2(\tau_j-\sigma_j)t})}
e^{-t\scalar{\mathbf 1}\tau}
\bigg(\int_{tK_{\sigma,\tau}}
e^{2(\chi(\xi)-\scalar\alpha\xi)} \, d\lambda(\xi)\bigg)^{1/2}\\
&\leq \dfrac {\|f\|_\psi}{\prod_{j=1}^n(1-e^{-2(\tau_j-\sigma_j)t})}
e^{-\varepsilon|\sigma|t}t^{n/2} v(K_{\sigma,\tau})^{1/2} \to 0,
\qquad t\to +\infty,
\nonumber
\end{align*}
and we conclude that $a_\alpha=0$.
\section{An example}
\label{sec:04}
Observe that (\ref{eq:1.2}) is equivalent to $\|f\|_\psi<+\infty$
in the notation presented in \eqref{eq:2.6} for
$\psi\in \mathcal{PSH}(\C^n)$ given by
$$\psi(z)= 2m H_S(z)+\gamma \log (1+|z|^2), \qquad z\in \C^n.$$
Note that $\|f\|_{2mH_S}\geq \|f\|_{\psi}$ for any $\gamma\geq 0$, so
if $\|f\|_{2mH_S}<+\infty$, then $\|f\|_{\psi}<+\infty$.
Now we are going to give an example of a compact convex neighbourhood
of 0 in $\R^n_+$ denoted by $S$, such that for every $m\geq 4$, there
exist polynomials $p$ such that
$\|p\|_{2mH_S}<+\infty$, but $p\notin \mathcal P^S_m(\C^n)$. This
shows that $\widehat S_\Gamma$ in Theorem \ref{thm:1.1} cannot be
replaced by $S$.
\begin{example}\label{ex:4.1}
Let $m\geq 4$ and $S\subseteq \R_+^2$ be the quadrilateral
\begin{equation*}
S=\ch\{(0,0), (a,0), (b,1-b),(0,1)\}.
\end{equation*}
where $0<a<1/m$, $0<a<b<1$, $m(1-b)<1$, and $(b-a)/(1-b)>m-2-am$.
Then $(1,0),(2,0), \dots , (m-3,0)\notin mS$, but the following
calculations show that $\|p_k\|_{2mH_S}<+\infty$ for
$$
p_k(z)=z^{(k,0)}=z_1^k, \qquad k=1,\dots, m-3.
$$
\noindent
Since the map $(\R\times ]-\pi,\pi[)^2\to
\C^2$, $(\xi_1,\theta_1,\xi_2,\theta_2)\mapsto
(e^{\xi_1+i\theta_1},e^{\xi_2+i\theta_2})$ has the Jacobi determinant
$e^{2\xi_1+2\xi_2}$, we have
\begin{align*}
\|p_k\|_{2mH_S}^2&=\int_{\C^2}|z_1|^{2k}e^{-2mH_S(z)}\, d\lambda(z)
=4\pi^2 \int_{\R^2}e^{2(k+1)\xi_1+2\xi_2-2m\varphi_S(\xi)}\,
d\xi_1d\xi_2.
\end{align*}
Observe that from \cite{MagSigSigSno:2023}, equations (3.5) and (3.6), we get
\begin{equation*}
\varphi_S(\xi)=
\max_{x\in \operatorname{ext}(S)} \langle x,\xi \rangle
=
\begin{cases}
0, &\xi \in N^S_{(0,0)},\\
a\xi_1, &\xi \in N^S_{(a,0)},\\
b\xi_1+(1-b)\xi_2, &\xi \in N^S_{(b,1-b)},\\
\xi_2, &\xi \in N^S_{(0,1)}.
\end{cases}
\end{equation*}
\noindent
We split the integral over $\R^2$ into the sum of the
integrals over the normal cones at the extreme points of $S$, which we
calculate as
\begin{align*}
\int_{N^S_{(0,0)}} e^{2(k+1)\xi_1+2\xi_2}\, d\xi
&=\int_{-\infty}^0 e^{2(k+1)\xi_1} \, d\xi_1 \, \int_{-\infty}^0
e^{2\xi_2} \, d\xi_2=\dfrac 1{4(k+1)},\\
\int_{N^S_{(a,0)}} e^{2(k+1)\xi_1+2\xi_2-2ma\xi_1}\, d\xi
&=\int_0^{\infty} e^{2(k+1-ma)\xi_1}\,
\int_{-\infty}^{-\xi_1(b-a)/(1-b)} e^{2\xi_2}\, d\xi_2\, d\xi_1\\
& =\dfrac {1}{4((b-a)/(1-b)+ma-1-k)},
\end{align*}
\begin{multline*}
\int_{N^S_{(b,1-b)}} e^{2(k+1)\xi_1+2\xi_2-2m(b\xi_1+(1-b)\xi_2)}\,
d\xi \\
=\int_0^{\infty} e^{2(k+1-mb)\xi_1}\,
\int_{-\xi_1(b-a)/(1-b)}^{\xi_1} e^{2(1-m(1-b))\xi_2}\, d\xi_2\,
d\xi_1 \\
=\dfrac {1}{4(1-m(1-b))}\bigg(
\dfrac 1{m-2-k}+\dfrac{1}{(b-a)/(1-b)+ma-1-k}
\bigg),
\end{multline*}
\begin{align*}
\int_{N^S_{(0,1)}} e^{2(k+1)\xi_1+2\xi_2-2m\xi_2}\, d\xi
&=\int_0^\infty e^{2(1-m)\xi_2}
\int_{-\infty}^{\xi_2}e^{2(k+1)\xi_1}\, d\xi_1 \, d\xi_2\\
&=\dfrac {1}{4(k+1)(m-2-k)}.
\end{align*}
\noindent
This shows that $\|p_k\|_{2mH_S}<+\infty$, and we have found polynomials
satisfying (\ref{eq:1.2}) which are not in $\mathcal P^S_m(\C^n)$.
\end{example}
{\small
\noindent
Science Institute,
University of Iceland,
IS-107 Reykjav\'ik,
ICELAND
\noindent
bsm@hi.is, alfheidur@hi.is, ragnar@hi.is, bergur@hi.is.
}
\end{document}
\begin{document}
\begin{frontmatter}
\title{Explicit Gaussian quadrature rules for cubic splines with non-uniform knot sequences}
\author[VCC]{Rachid Ait-Haddou\corref{cor1}}
\ead{rachid.aithaddou@kaust.edu.sa}
\author[NumPor]{Michael Barto\v{n}}
\ead{Michael.Barton@kaust.edu.sa}
\author[NumPor]{Victor Manuel Calo}
\ead{Victor.Calo@kaust.edu.sa}
\cortext[cor1]{Corresponding author}
\address[VCC]{Visual Computing Center, King Abdullah University of Science and Technology, Thuwal 23955-6900, Kingdom of Saudi Arabia}
\address[NumPor]{Numerical Porous Media Center, King Abdullah University of Science and Technology, Thuwal 23955-6900, Kingdom of Saudi Arabia}
\begin{abstract}
We provide explicit expressions for quadrature rules
on the space of $C^1$ cubic splines with non-uniform, symmetrically stretched knot sequences.
The quadrature nodes and weights are derived via an explicit recursion
that avoids an intervention of any numerical solver and the rule is optimal, that is,
it requires minimal number of nodes. Numerical experiments validating
the theoretical results and the error estimates of the quadrature rules are also presented.
\end{abstract}
\begin{keyword}
Gaussian quadrature, cubic splines, Peano kernel, B-splines
\end{keyword}
\end{frontmatter}
\section{Introduction}\label{intro}
The problem of numerical quadrature has been of interest for decades due to its
wide applicability in many fields spanning collocation methods \cite{Sloan-1988},
integral equations \cite{Atkinson-1976}, finite elements methods \cite{Solin-2003}
and most recently, isogeometric analysis \cite{Cottrell-2009}.
Computationally, the integration of a function
is an expensive procedure and quadrature turned out to be a cheap, robust and elegant alternative.
A quadrature rule, or shortly a quadrature, is said to be an \textit{$m$-point rule},
if $m$ evaluations of a function $f$ are needed to approximate its weighted integral
over an interval $[a,b]$
\begin{equation}\label{eq:GaussQuad}
\int_a^b \omega(x) f(x) \, \mathrm{d}x = \sum_{i=1}^{m} \omega_i f(\tau_i) + R_{m}(f),
\end{equation}
where $\omega$ is a fixed non-negative \emph{weight function} defined over $[a,b]$.
Typically, the rule is required to be \emph{exact}, that is, $R_m(f) \equiv 0$
for each element of a predefined linear function space $\mathcal{L}$. In the case
when $\mathcal{L}$ is the linear space of polynomials of degree at most $2m-1$,
then the $m$-point Gaussian quadrature rule \cite{Gautschi-1997} provides the \emph{optimal}
rule that is exact for each element of $\mathcal{L}$, i.e. $m$ is the minimal number
of \textit{nodes} at which $f$ has to be evaluated.
The Gaussian nodes are the roots of the orthogonal polynomial $\pi_{m}$ where
$(\pi_0,\pi_1,\ldots,\pi_m,\ldots )$ is the sequence of orthogonal polynomials with
respect to the measure $\mu(x) = \omega(x)dx$. Typically, the nodes of the
Gaussian quadrature rule
are computed numerically using for example, the Golub-Welsh algorithm \cite{Golub-1969},
in the case the three-term recurrence relations for the orthogonal polynomials can
be expressed.
In the case when $\mathcal{L}$ is a Chebyshev space of dimension $2m$, Studden and Karlin
proved the \emph{existence} and \emph{uniqueness} of optimal $m$-point generalized quadrature rules,
which due to optimality are also called Gaussian,
that are exact for each element of the space $\mathcal{L}$ \cite{Karlin-1966}.
The nodes and weights of the quadrature rule can be computed using numerical schemes
based on Newton methods \cite{Ma-1996}.
In the case when $\mathcal{L}$ is a linear
space of splines, a favourite alternative to polynomials
due to their approximation superiority and the inherent locality property
\cite{deBoor-1972,Elber-2001,Hoschek-2002-CAGD},
Micchelli and Pinkus \cite{Micchelli-1977} derived the optimal
number of quadrature nodes. Moreover, the range of intervals,
the knot sequence subintervals that contain at least one node, was specified.
Their formula preserves the ``double precision'' of Gaussian rules for polynomials, that is,
for a spline function with $r$ (simple) knots, asymptotically, the number
of nodes is $[\frac{r}{2}]$. Whereas the optimal quadrature rule is unique in
the polynomial case and the Chebyshev systems case, this is in general not true
for splines. The computation of the nodes and weights of the optimal spline
quadrature (Gaussian quadrature) is rather a challenging problem as
the non-linear systems the nodes and weights satisfy depend on truncated power
functions. The systems become algebraic only with the right guess of the
knot intervals where the nodes lie.
Regarding the optimal quadrature rules for splines, the quadrature schemes
differ depending on the mutual relation between the \emph{degree} and \emph{continuity} $(d,c)$.
For cases with lower continuity, a higher number of nodes is required for the optimal
quadrature rule.
Also, the choice of the domain can bring a significant simplification.
Whereas an exact quadrature rule -- when the weight function $\omega \equiv 1$ in Eq.~(\ref{eq:GaussQuad}) --
can be obtained by simply evaluating $f$ at every second knot (midpoint)
for uniform splines of even (odd) degree over \textit{a real line}
\cite{Hughes-2010}, a closed interval is an obstacle, even for uniform splines,
that can be resolved only by employing numerical solvers \cite{Hughes-2012}.
Thus, the insightful proposition of Nikolov \cite{Nikolov-1996}, which yields optimal
and explicit quadrature rules for $(3,1)$ \emph{uniform} splines (with $\omega \equiv 1$),
is surprising. In Nikolov's scheme, a recursive relation between
the neighboring nodes is derived and, since the resulting system is cubic,
a closed form formula is given to iteratively compute the nodes and weights.
In this paper, we generalize the quadrature rules of \cite{Nikolov-1996}
for splines with certain \textit{non-uniform} knot sequences,
keeping the desired properties of explicitness, exactness and optimality.
The rest of the paper is organized as follows.
In Section~\ref{sec:quad}, we recall some basic properties of $(3,1)$ splines
and derive their Gaussian quadrature rules. In Section~\ref{sec:err}, the error estimates are given
and Section~\ref{sec:ex} shows the numerical experiments. Finally,
possible extensions of our method are discussed in Section~\ref{sec:con}.
\section{Gaussian quadrature formulae for $C^1$ cubic splines}\label{sec:quad}
\begin{figure*}\label{fig:C1Splines}
\end{figure*}
In this section we recall a few basic properties of $(3,1)$ splines and derive
explicit formulae for computing quadrature nodes and weights for a particular family of knot sequences.
Throughout the paper, $\pi_{n}$ denotes the linear space of polynomials
of degree at most $n$ and $[a,b]$ is a non-trivial real compact interval.
\subsection{$C^1$ cubic splines with symmetrically stretched knot sequences}\label{ssec:spline}
We start with the definition of the particular knot sequences above which the spline spaces are built.
\begin{dfn}\label{def:stretch}
A finite sequence $\mathcal{X}_n = (a=x_0,x_1,...,x_{n-1},x_{n} = b)$ of pairwise
distinct real numbers in the interval $[a,b]$ is said to be a
{\bf{symmetrically stretched knot sequence}} if the sequence is symmetric
with respect to the midpoint of the interval $[a,b]$ and such that
\begin{equation}\label{strech}
x_k - 2 x_{k+1} + x_{k+2} \geq 0 \quad \textnormal{for}
\quad k=0,...,[\frac{n}{2}]-1.
\end{equation}
\end{dfn}
Denote by $S^{n}_{3,1}$ the linear space of $C^1$ cubic splines over a symmetrically stretched
knot sequence $\mathcal{X}_n =(a=x_0,x_1,...,x_n=b)$
\begin{equation}\label{eq:family}
S^{n}_{3,1} = \{ f\in C^{1}[a,b]: f|_{(x_k,x_{k+1})} \in \pi_3, k=0,...,n-1\}.
\end{equation}
The dimension of the space $S^{n}_{3,1}$ is $2n+2$.
\begin{rem}
In the B-spline literature \cite{deBoor-1972,Hoschek-2002-CAGD,Elber-2001}, the knot sequence is
usually written with knots' multiplicities. As in this paper the multiplicity is
always two at every knot, we omit the classical notation and, throughout the paper,
write $\mathcal{X}_n$ without multiplicity, i.e. $x_k<x_{k+1}$, $k=0,\dots, n-1$.
\end{rem}
Similarly to \cite{Nikolov-1996}, we find it convenient to work with the
non-normalized B-spline basis. To define the basis, we extend our knot sequence
$\mathcal{X}_n$ with two extra knots outside the interval $[a,b]$ that we set to be
\begin{equation}\label{boundary_condition}
x_{-1} = 2x_{0}-x_{1}
\quad \textnormal{and} \quad
x_{n+1} =2 x_{n} - x_{n-1}.
\end{equation}
Note that the choice of $x_{-1}$ and $x_{n+1}$ is to get particular integrals in (\ref{boundaryIntegral})
that simplify expressions in Section~\ref{ssec:quad}. We emphasize that this setting does not affect the quadrature
rule derived later in Theorem~\ref{theo:quad}.
Denote by $\mathbf D = \{D_i\}_{i=1}^{2n+2}$ the basis of $S^{n}_{3,1}$ where
\begin{equation*}
\begin{split}
&D_{2k-1}(t) = [x_{k-2},x_{k-2},x_{k-1},x_{k-1},x_k](. - t)_{+}^{3} \\
&D_{2k}(t) = [x_{k-2},x_{k-1},x_{k-1},x_{k},x_k](. - t)_{+}^{3}, \\
\end{split}
\end{equation*}
where $[.]f$ stands for the divided difference and $u_{+} = \max(u,0)$ is the truncated
power function, see Fig.~\ref{fig:C1Splines}. Among the basic properties of the
basis $\mathbf D$, we need to recall the fact that for any $k=1,2,\ldots,n+1$, $D_{2k-1}$
and $D_{2k}$ have the same support, that is,
$\textnormal{supp}(D_{2k-1}) = \textnormal{supp}(D_{2k}) = [x_{k-2}, x_{k}]$, and
$D_{2k-1} (t) > 0$, $D_{2k}(t) > 0$ for all $t \in (x_{k-2}, x_{k})$.
Moreover, for $k=3,...,2n$, we have
\begin{equation}\label{interiorIntegral}
I[D_k] = \frac{1}{4}\; \textnormal{for} \quad k = 3,4,\ldots,2n,
\end{equation}
where $I[f]$ stands for the integral of $f$ over the interval $[a,b]$.
With the choice made in (\ref{boundary_condition}), we have
\begin{equation}\label{boundaryIntegral}
I[D_1] = I[D_{2n+2}] = \frac{1}{16}
\quad \textnormal{and} \quad
I[D_2] = I[D_{2n+1}]= \frac{3}{16}.
\end{equation}
Using the standard definition of divided difference for multiple knots,
explicit expressions for $D_{2k-1}(t)$
and $D_{2k}(t)$ with $t \in [x_{k-2},x_{k}]$ are obtained as
\begin{equation*}
D_{2k-1}(t) = a_{k} (x_k - t)_{+}^{3} + b_{k} (x_{k-1} - t)_{+}^{3} + c_{k} (x_{k-1} - t)_{+}^{2},
\end{equation*}
where, setting $h_k = x_{k} - x_{k-1}$ for $ k=0,1,\ldots,n+1$,
\begin{equation*}
a_k = \frac{1}{h_k^{2} (h_k +h_{k-1})^{2}},\;
b_k = \frac{2 h_{k} - h_{k-1}}{h_{k-1}^3 h_k^2},\;
c_k = \frac{-3}{h_{k-1}^2 h_{k}}.
\end{equation*}
Similarly, we obtain
\begin{equation*}
D_{2k}(t) = \alpha_k (x_k - t)_{+}^{3} + \beta_k (x_k - t)_{+}^{2} +
\gamma_k (x_{k-1} - t)_{+}^{3} + \eta_k (x_{k-1} - t)_{+}^{2},
\end{equation*}
where
\begin{equation*}
\alpha_k = \frac{-3 h_{k} - 2 h_{k-1}}{(h_k + h_{k-1})^2 h_k ^3},\;
\beta_k = \frac{3}{(h_k + h_{k-1}) h_k^2},\;
\gamma_k = \frac{2 h_{k-1} - h_{k}}{h_{k-1}^2 h_{k}^3},\;
\eta_k = \frac{3}{h_{k-1} h_{k}^2}.
\end{equation*}
That is, $D_{2k-1}$ and $D_{2k}$, are expressed by three parameters $x_{k-2}$, $x_{k-1}$ and $x_k$,
due to the fact that $[x_{k-2},x_k]$ is the maximal interval where both have a non-zero support,
see Fig.~\ref{fig:C1Splines}. Moreover, we have the following:
\begin{figure*}\label{fig:Lemma}
\end{figure*}
\begin{lemma}\label{lemmaD}
Let $\mathcal{X}_{n} = (a=x_0,x_1,...,x_n=b)$ be a symmetrically stretched knot sequence.
Then for any $k = 2,...,[n/2]+1$
\begin{equation*}
D_{2k-1}(t) > D_{2k}(t)\quad \textnormal{for any} \quad t \in (x_{k-2},x_{k-1}).
\end{equation*}
\end{lemma}
\begin{proof}
Over the interval $(x_{k-2},x_{k-1})$, the function $Q = D_{2k-1} - D_{2k}$ is a single
cubic polynomial. Therefore, it can be expressed in terms of Bernstein basis and can be
viewed as a B\'ezier curve on $(x_{k-2},x_{k-1})$, see Fig.~\ref{fig:Lemma},
$$
Q(t) = \sum_{i=0}^{3} q_i B_i^3(t), \; \textnormal{where} \;
B_i^3(t) = \binom{3}{i} \left(\frac{t-x_{k-2}}{x_{k-1} - x_{k-2}}\right)^i
\left(\frac{x_{k-1} - t}{x_{k-1} - x_{k-2}}\right)^{3-i}.
$$
Straightforward computations of the control points $(q_0,q_1,q_2,q_3)$ of $Q$
over the interval $[x_{k-2},x_{k-1}]$ lead to
$$
(q_0,q_1,q_2,q_3) = \left(0,0, \frac{1}{x_{k} - x_{k-2}},\frac{x_{k} -
2 x_{k-1} + x_{k-2}}{(x_{k} - x_{k-2})^2}\right).
$$
Therefore, according to (\ref{strech}), the control points are nonnegative,
with the third control point $q_2$ strictly positive. Therefore, $Q$ can only vanish
at $x_{k-2}$ and $x_{k-1}$ and is strictly positive over $(x_{k-2},x_{k-1})$.
\end{proof}
\subsection{Gaussian quadrature formulae}\label{ssec:quad}
In this section, we derive a quadrature rule for the family $S^{n}_{3,1}$, see (\ref{eq:family}),
and show it meets the three desired criteria, that is, the rule is optimal, exact and explicit.
With respect to exactness, according to \cite{Micchelli-1977,Micchelli-1972} there exists
a quadrature rule
\begin{equation}\label{quadrature}
I(f) = \int_{a}^{b} f(t) dt \simeq I_{n+1}(f) = \sum_{i=1}^{n+1} \omega_i f(\tau_i)
\end{equation}
that is exact for every function $f$ from the space $S^{n}_{3,1}$.
The explicitness and optimality follow from the construction.
\begin{lemma}\label{lemmag1}
Let $\mathcal{X}_{n} = (a=x_0,x_1,...,x_n=b)$ be a symmetrically stretched knot sequence.
Each of the intervals $I_{k} = (x_{k-1},x_{k})\; (k=1,...,[n/2])$ contains at least one node of
the Gaussian quadrature rule (\ref{quadrature}).
\end{lemma}
\begin{proof}
We proceed by induction on the index of the segment $I_{k}$.
There must be a node of the Gaussian quadrature rule in the interval $I_{1}$,
otherwise, using the exactness of the quadrature rule for $D_{1}$, we obtain
$I(D_{1}) = 0$ which contradicts equalities (\ref{boundaryIntegral}).
Now, let us assume that every segment $I_{l}$ contains -- one or several -- Gaussian nodes
for $l=1,2,...,k-1$. If the interval $I_{k}$ has no Gaussian nodes,
then using Lemma \ref{lemmaD}, we arrive at the following contradiction
$$
\frac{1}{4} = I[D_{2k}] = \sum_{\tau_j \in I_{k-1}} \omega_j D_{2k}(\tau_{j}) <
\sum_{\tau_j \in I_{k-1}} \omega_j D_{2k-1}(\tau_{j}) = I[D_{2k-1}] = \frac{1}{4}.
$$
\end{proof}
\begin{cor}\label{corollary1}
If $n$ is an even integer, then each of the intervals $I_{k} = (x_{k-1},x_{k})$ $(k=1,2,\ldots,n)$
contains exactly one Gaussian node and the middle $x_{n/2} = (a+b)/2$ of the interval $[a,b]$
is also a Gaussian node. If $n$ is odd then each of the intervals $I_{k} = (x_{k-1},x_{k})$
$(k=1,2,\ldots,n; k\not= (n+1)/2)$ contains exactly one Gaussian node, while the interval
$I_{(n+1)/2}$ contains two Gaussian nodes, positioned symmetrically with respect to $(a+b)/2$.
\end{cor}
\begin{proof}
If $n$ is an even number then by symmetry, we obtain at least one Gaussian node
in each interval $I_{k}$ for $k=1,2,\ldots,n$. If one of the intervals $I_{k}$ has more than
one node then by symmetry, we get more than $n+2$ nodes for the quadrature, contradicting our
quadrature rule (\ref{quadrature}). Moreover, by virtue of symmetry, the last missing Gaussian node
is forced to be the middle of the interval. Now, if $n$ is an odd integer, then by symmetry, each of the intervals
$I_{k}$, $k=1,2,\ldots,n$ contains at least one Gaussian node. Let us assume that the middle
interval $I_{(n+1)/2}$ contains exactly one node, then at least one of the remaining intervals
contains two nodes. By symmetry, the number of nodes will be at least $(n+2)$, contradicting our quadrature
rule (\ref{quadrature}). Therefore, the middle interval $I_{(n+1)/2}$ contains exactly two nodes while each
of the remaining intervals contains exactly one Gaussian node of the quadrature rule (\ref{quadrature}).
\end{proof}
Throughout the rest of this work, we use the following notation: For $k=1,2,\ldots,[n/2]+1$, we set
\begin{equation}
\theta_k = x_k - \tau_k; \quad \rho_k = x_{k+1} - \tau_k \quad \textnormal{and}
\end{equation}
\begin{equation*}
\begin{split}
& A_k = \frac{1}{4} - \omega_k \left(a_{k+1} \rho_k^3 + b_{k+1} \theta_k^3 + c_{k+1} \theta_k^2 \right), \\
&B_k = \frac{1}{4} - \omega_k \left(\alpha_{k+1} \rho_k^3 + \beta_{k+1} \rho_k^2 +
\gamma_{k+1} \theta_k^3 + \eta_{k+1} \theta_k^2 \right).
\end{split}
\end{equation*}
The explicit representation of the B-spline basis $D_i$ gives
\begin{equation}\label{equations_tau}
\begin{split}
& D_{2k-1}(\tau_k) = a_k \theta_k^3, \\
& D_{2k}(\tau_k) = \alpha_k \theta_k^3 + \beta_k \theta_k^2, \\
& D_{2k+1}(\tau_k) = a_{k+1} \rho_k^3 + b_{k+1} \theta_k^3 + c_{k+1}\theta_k^2, \\
& D_{2k+2}(\tau_k) = \alpha_{k+1} \rho_k^3 + \beta_{k+1} \rho_k^2 + \gamma_{k+1}\theta_k^3 + \eta_{k+1}\theta_k^2.
\end{split}
\end{equation}
We are ready now to proceed with the recursive algorithm which
starts at the domain's first subinterval $[x_0,x_1]$ by computing the first
node and weight, and sequentially parses the subintervals towards the domain's
midpoint, giving explicit formulae for the remaining unknowns $\tau_i$, $\omega_i$, $i=2,\dots,[n/2]+1$.
There is, according to Corollary \ref{corollary1}, a unique Gaussian node in the
interval $(x_0,x_1)$. This node is obtained by solving the system
\begin{equation*}
\begin{split}
& I[D_1] = \omega_1 D_1(\tau_1) = \frac{1}{16} = \omega_1 a_1 \theta_1^3, \\
& I[D_2] = \omega_1 D_2(\tau_1) = \frac{3}{16} = \omega_1 (\alpha_1 \theta_1^3 + \beta_1 \theta_1^2), \\
\end{split}
\end{equation*}
leading to the unique solution for $\theta_1$ and $\omega_1$ to be expressed as
\begin{equation*}
\theta_1 = \frac{\beta_1}{3 a_1 - \alpha_1} = \frac{3}{4}h_1
\quad \text{and} \quad
\omega_1 = \frac{1}{16 a_1 \theta_1^3} = \frac{16}{27} h_1.
\end{equation*}
The remaining nodes and weights are computed in turn explicitly using the recipe
formalized as follows:
\begin{theorem}\label{theo:quad}
The sequence of nodes and weights of the Gaussian quadrature rule (\ref{quadrature})
are given explicitly as $\theta_1 = \frac{3}{4}h_1, \omega_1 = \frac{16}{27} h_1$
and for $i =1,2,...,[n/2]-1$ by the recurrence relations
\begin{equation}\label{mainquadrature}
\theta_{i+1} = \frac{A_i \beta_{i+1}}{a_{i+1} B_i - \alpha_{i+1} A_i}
\quad \text{and} \quad
\omega_{i+1} = \frac{A_i}{a_{i+1} \theta_{i+1}^3}.
\end{equation}
If $n$ is even ($n=2m$) then $\tau_{m+1} = x_{m} = (a+b)/2$ and
\begin{equation}\label{even}
\omega_{m+1} = \frac{A_m + B_m - \frac{1}{4}}{a_{m+1} \theta_{m+1}^3}.
\end{equation}
If $n$ is odd ($n = 2m-1$) then $\theta_m$ is the greater root in $(0,x_m - x_{m-1})$
of the cubic equation
\begin{equation*}
\begin{split}
& \left( A_{m-1}(\alpha_m+b_{m+1}) - B_{m-1}(a_{m}+ \gamma_{m+1})\right) \theta_m^3 + \\
& \left( A_{m-1}(\beta_m+c_{m+1}) - B_{m-1}\eta_{m+1} \right) \theta_m^2 + \\
& (A_{m-1} a_{m+1} - B_{m-1} \alpha_{m+1})\rho_m^3 - B_{m-1} \beta_{m+1} \rho_m^2 =0,
\end{split}
\end{equation*}
and
\begin{equation*}
\omega_{m} = \frac{A_{m-1}}{(\gamma_{m+1}+ a_m) \theta_m^3 +
\eta_{m+1} \theta_m^2 + \alpha_{m+1} \rho_{m}^3 + \beta_{m+1} \rho_m^2}.
\end{equation*}
\end{theorem}
\begin{proof}
The proof proceeds by induction. We assume $\theta_l, \omega_l$
known for $l=1,2,\ldots,k$ ( $k \leq [n/2]-2$). Using (\ref{equations_tau})
we compute $\theta_{k+1}$ and $\omega_{k+1}$ by solving the system
$ I[D_{2k+1}] = 1/4$ and $ I[D_{2k+2}] = 1/4$, that is
\begin{equation*}
\begin{split}
\frac{1}{4} &= \omega_k D_{2k+1}(\tau_k) + \omega_{k+1} D_{2k+1}(\tau_{k+1})
= (\frac{1}{4} - A_k) + \omega_{k+1} a_{k+1} \theta_{k+1}^3, \\
\frac{1}{4} & = \omega_k D_{2k+2}(\tau_k) + \omega_{k+1} D_{2k+2}(\tau_{k+1})
= (\frac{1}{4} - B_k) + \omega_{k+1} (\alpha_{k+1} \theta_{k+1}^3 + \beta_{k+1} \theta_{k+1}^2). \\
\end{split}
\end{equation*}
Eliminating $\omega_{k+1}$ leads to the recurrence relations (\ref{mainquadrature}).
If $n$ is even ($n = 2m$), then by Corollary \ref{corollary1} we have $\tau_{m+1} = (a+b)/2$.
To compute the associated weight $\omega_{m+1}$, we take into account the symmetry, which gives
$\omega_{m} = \omega_{m+2}$ and $D_{2m+1}(\tau_{m+2}) =D_{2m+2}(\tau_{m})$, and solve
\begin{equation*}
\frac{1}{4} = I[D_{2m+1}] = \omega_m [D_{2m+1}(\tau_m) + D_{2m+2}(\tau_m)] +
\omega_{m+1} D_{2m+1}(\tau_{m+1}).
\end{equation*}
Using (\ref{equations_tau}), we obtain (\ref{even}). If $n$ is odd ($n = 2m-1$), then according to
Corollary~\ref{corollary1} the two nodes $\tau_{m}$ and $\tau_{m+1}$ belong to the interval $(x_{m-1},x_{m})$.
Due to the symmetry, we have $\omega_{m} = \omega_{m+1}$ and $\tau_{m+1} = (a+b)-\tau_{m}$ and
\begin{equation*}
D_{2m-1}(\tau_{m+1}) = D_{2m+2}(\tau_m), \quad D_{2m}(\tau_{m+1}) = D_{2m+1}(\tau_m).
\end{equation*}
Using the exactness of the quadrature rule for $D_{2m-1}$ and $D_{2m}$, we obtain
\begin{equation*}
\begin{split}
& \omega_m a_m \theta_m^3 = A_{m-1} + B_{m} - \frac{1}{4} \\
& \omega_{m} (\alpha_m \theta_m^3 + \beta_m \theta_m^2) = A_{m} + B_{m-1} - \frac{1}{4}
\end{split}
\end{equation*}
Solving the above system for $\theta_m$ and $\omega_m$ proves the theorem.
\end{proof}
\section{Error estimation for the $C^1$ cubic splines quadrature rule}\label{sec:err}
In the previous section, we have derived a quadrature rule that exactly
integrates functions from $S^{n}_{3,1}$. If $f$ is not an element of $S^{n}_{3,1}$,
the rule produces a certain error, also called \emph{remainder}, and the analysis of this error
is the objective of this section.
Let $W_{1}^{r} = \{ f \in C^{r-1}[a,b]; \; f^{(r-1)} \ \textnormal{abs. cont.}, \; \|f^{(r)}\|_{L_1} < \infty \}$.
As the quadrature rule (\ref{quadrature}) is exact for polynomials of degree at most three,
for any element $f \in W_{1}^{d}$, $d \geq 4$, we have
\begin{equation*}
R_{n+1}[f] := I(f) - I_{n+1}(f) = \int_{a}^{b} K_4(R_{n+1};t) f^{(4)}(t) dt,
\end{equation*}
where the Peano kernel \cite{Gautschi-1997} is given by
\begin{equation*}
K_4(R_{n+1};t) = R_{n+1} \left[ \frac{(t-\cdot)_{+}^{3}}{3!} \right].
\end{equation*}
An explicit representation for the Peano kernel over the interval $[a,b]$ in terms of the
weights and nodes of the quadrature rule (\ref{quadrature}) is given by
\begin{equation}
K_4(R_{n+1};t) = \frac{(t-a)^4}{24} - \frac{1}{6} \sum_{k=1}^{n+1} \omega_k (t - \tau_k)_{+}^{3}.
\end{equation}
Moreover, according to a general result for monosplines and quadrature rules \cite{Micchelli-1977}, the only zeros
of the Peano kernel over $(a,b)$ are the double knots of the cubic spline, see Section~\ref{sec:ex} in particular
Fig.~\ref{fig:PK} for an illustration. Therefore, for any $t \in (a,b)$,
$K_4(R_{n+1};t) \geq 0$ and, by the mean value theorem, there exists a real number $\xi \in [a,b]$ such that
\begin{equation}\label{remainder}
R_{n+1}(f) = c_{n+1,4} f^{(4)}(\xi) \quad \textnormal{with } \quad c_{n+1,4} = \int_{a}^{b} K_{4}(R_{n+1}; t) dt.
\end{equation}
Hence, the constant $c_{n+1,4}$ of the remainder $R_{n+1}$ is always positive and our quadrature rule belongs to the
family of positive definite quadratures of order $4$, e.g., see \cite{Nikolov-1996, Nikolov-1995, Schmeisser-1972}.
To compute the constant $c_{n+1,4}$, we can follow the approach of \cite{Nikolov-1996} by expressing the exactness
of our quadrature rule for the truncated powers $(x_k-t)_{+}^{2}, (x_k-t)_{+}^{3}; k=0,1,...,n $.
As the symmetric stretched knot sequences satisfy the assumptions of Theorem 2.2 in \cite{Nikolov-1996}, the proof applies straightforwardly
to our non-uniform setting, and the constant of the remainder is expressed as
\begin{theorem}
The error constant $c_{n+1,4}$ of the quadrature rule (\ref{quadrature}) is given by
\begin{equation}\label{eq:c}
c_{n+1,4} = \frac{1}{720} \sum_{k=0}^{[(n+1)/2]} (x_{k+1}-x_{k})^5 -
\frac{1}{12} \sum_{k=1}^{[(n+1)/2]} \omega_k (x_{k-1} - \tau_{k})^2 (x_{k} - \tau_{k})^2.
\end{equation}
\end{theorem}
\section{Numerical Experiments}\label{sec:ex}
We applied the quadrature rule to various symmetrically stretched knot sequences;
the nodes and weights computed by our formulae are summarized in Table~\ref{tabW}.
Even though the space of admissible stretched knot sequences is infinite dimensional,
for the sake of simplicity, the proposed quadrature rule was tested on those
that are determined by the fewest possible number of parameters.
\begin{figure}\label{fig:BasisChebyN=5}
\end{figure}
\begin{table}[!tbh]
\begin{center}
\begin{minipage}{0.9\textwidth}
\caption{Nodes and weights for particular knot sequences. $N$ denotes the number of internal knots. All the knots
and weights are normalized on unit interval and, due to the symmetry, only first $[\frac{N}{2}]+2$ nodes and weights are shown.}\label{tabW}
\end{minipage}
\\
\footnotesize{
\renewcommand{\arraystretch}{1.2}
\begin{tabular}{| c || r| r| r| r| r| r|}\hline
\rotatebox{0}{$N=5$}
& \multicolumn{2}{c|}{Chebyshev}
& \multicolumn{2}{c|}{Legendre}
& \multicolumn{2}{c|}{Geometric $q=2$}\\
$i$ & $\tau_i$ & $\omega_i$ & $\tau_i$ & $\omega_i$ & $\tau_i$ & $\omega_i$
\\\hline\hline
1 & 0.006118 & 0.014502 & 0.011728 & 0.027799 & 0.017857 & 0.042328 \\\hline
2 & 0.062790 & 0.113850 & 0.079882 & 0.121347 & 0.088993 & 0.104896 \\\hline
3 & 0.233416 & 0.230297 & 0.251054 & 0.219793 & 0.244959 & 0.216881 \\\hline
4 & 0.500000 & 0.282701 & 0.500000 & 0.262122 & 0.500000 & 0.271790 \\\hline\hline
$N=6$ & \multicolumn{2}{c|}{} & \multicolumn{2}{c|}{} & \multicolumn{2}{c|}{}\\
1 & 0.004259 & 0.010096 & 0.008441 & 0.020009 & 0.008333 & 0.019753 \\\hline
2 & 0.044447 & 0.081009 & 0.058300 & 0.089278 & 0.041530 & 0.048952 \\\hline
3 & 0.169161 & 0.172365 & 0.187089 & 0.169114 & 0.114314 & 0.101211 \\\hline
4 & 0.378223 & 0.236530 & 0.386490 & 0.221598 & 0.312967 & 0.330084 \\\hline\hline
$N=7$ & \multicolumn{2}{c|}{} & \multicolumn{2}{c|}{} & \multicolumn{2}{c|}{}\\
1 & 0.003134 & 0.007429 & 0.006362 & 0.015079 & 0.008333 & 0.019753 \\\hline
2 & 0.033034 & 0.060392 & 0.044320 & 0.068207 & 0.041530 & 0.048952 \\\hline
3 & 0.127538 & 0.132404 & 0.144115 & 0.132816 & 0.114314 & 0.101211 \\\hline
4 & 0.292314 & 0.192325 & 0.304385 & 0.183131 & 0.261560 & 0.203096 \\\hline
5 & 0.500000 & 0.214901 & 0.500000 & 0.201532 & 0.500000 & 0.253977 \\\hline\hline
$N=8$ & \multicolumn{2}{c|}{} & \multicolumn{2}{c|}{} & \multicolumn{2}{c|}{}\\
1 & 0.002402 & 0.005693 & 0.004964 & 0.011766 & 0.004032 & 0.009558 \\\hline
2 & 0.025481 & 0.046676 & 0.034784 & 0.053707 & 0.020095 & 0.023686 \\\hline
3 & 0.099304 & 0.104319 & 0.114113 & 0.106506 & 0.055313 & 0.048973 \\\hline
4 & 0.231216 & 0.156780 & 0.244557 & 0.151589 & 0.126561 & 0.098272 \\\hline
5 & 0.405347 & 0.186531 & 0.410645 & 0.176432 & 0.318965 & 0.319511 \\\hline\hline
$N=9$ & \multicolumn{2}{c|}{} & \multicolumn{2}{c|}{} & \multicolumn{2}{c|}{}\\
1 & 0.001899 & 0.004501 & 0.003980 & 0.009434 & 0.004032 & 0.009558 \\\hline
2 & 0.020237 & 0.037119 & 0.028004 & 0.043337 & 0.020095 & 0.023686 \\\hline
3 & 0.079375 & 0.084052 & 0.092445 & 0.087039 & 0.055313 & 0.048973 \\\hline
4 & 0.186823 & 0.129241 & 0.200155 & 0.126607 & 0.126561 & 0.098272 \\\hline
5 & 0.332973 & 0.159838 & 0.341205 & 0.152710 & 0.269215 & 0.196605 \\\hline
6 & 0.500000 & 0.170498 & 0.500000 & 0.161745 & 0.500000 & 0.245812 \\\hline
\end{tabular}
}
\end{center}
\end{table}
\begin{figure}\label{fig:GeomN=5}
\end{figure}
One prominent symmetrically stretched knot sequence stems from Chebyshev polynomials \cite{Gautschi-1997},
where its degree $N$ determines the roots which can be written as
\begin{equation}
x_k = - \cos(\phi_k), \quad \phi_k = \frac{2k-1}{2N}\pi, \quad k =1,2,\dots,N
\end{equation}
and the roots, according to Def.~\ref{def:stretch}, obviously form a non-uniform stretched knot
sequence on $[-1,1]$. The corresponding nodes and weights for $n-1=N=5$ are shown in Fig.~\ref{fig:BasisChebyN=5}.
Similarly, Legendre polynomials \cite{Szego-1936} satisfy the requirement that their roots form a
symmetrically stretched sequence. In order to have a qualitative comparison of the weights for
Chebyshev and Legendre knot sequences, and also for the comparison of their Peano kernels,
see Fig.~\ref{fig:PK}, the roots of Chebyshev polynomial were mapped to the unit domain.
Another family of symmetrically stretched knot sequences are those where the lengths of two neighboring knots form
a geometric sequence, i.e. the stretching ratio $q$ is constant, see Fig.~\ref{fig:GeomN=5}.
Obviously, the quadrature rule of Nikolov \cite{Nikolov-1996} is a special case for $q=1$. In some applications such as
solving the $1D$ heat equation \cite{Veerapaneni-2007} or simulating turbulent flows in 3D \cite{Bazilevs-2007,Bazilevs-2010},
where finer and finer subdivisions closer to the domain boundary are needed, the uniform rule would eventually require a large number
of knots whilst setting a proper non-uniform knot sequence could reduce the number of evaluations significantly.
The Peano kernels of geometric knot sequences considered as a function of the stretching ratio $q$
are shown in Fig.~\ref{fig:PKSrf}. It is not surprising, but rather an expected result, that the error constant $c_{n+1,4}$
is most favorable for the uniform knot sequence, as the uniform layout is a certain equilibrium, that is,
a minimizer of the first term on the right-hand side of (\ref{eq:c}).
We emphasize that these three types of non-uniform knot sequences are particular examples,
one can use any knot sequence satisfying Def.~\ref{def:stretch} that is suitable for a concrete application.
In all the numerical examples shown in the paper, we observed a similar phenomenon as in \cite{Nikolov-1996}, namely
that the weights are monotonically increasing when coming from the side to the middle of the interval, see Table~\ref{tabW}.
However, the proof for non-uniform knot sequences turned out to be rather difficult and we content ourselves here to
formulate it as an open problem, namely {\it{the quadrature nodes and weights computed in Theorem~\ref{theo:quad},
satisfy the inequalities}}
\begin{equation*}
\omega_i < \theta_i < \omega_{i+1} \quad \textnormal{for} \quad i=1,\dots, [\frac{n}{2}].
\end{equation*}
\begin{figure}\label{fig:PK}
\end{figure}
\begin{figure}\label{fig:PKSrf}
\end{figure}
\section{Conclusion and future work}\label{sec:con}
We have derived a quadrature rule for spaces of $C^1$ cubic splines with symmetrically
stretched knot sequences.
The rule possesses three crucial properties: We can exactly integrate the
functions from the space of interest; the rule requires a minimal number of evaluations; and
the rule is defined in closed form, that is, we give explicit formulae without
need of any numerical algorithm.
To the best knowledge of the authors, the result is the first of the kind that handles
non-uniform knot sequences explicitly and, even though the symmetrical stretching seems
to be relatively restrictive, we believe that the infinite dimensional space of possible
knot sequences where the rule applies makes it a useful tool in many engineering applications.
Moreover, our quadrature rule is still exact, even though not optimal, for $C^2$ cubic splines.
Due to its explicitness,
it can also be freely used in various applications instead of $(3,2)$ splines quadrature rules,
for which the explicit formulae are not known.
In the future, we intend to derive quadrature rules for other spline spaces,
while aiming at particular engineering applications.
\section*{Acknowledgments}
The research of the first author was supported by the KAUST Visual Computing Center.
\end{document} |
\begin{document}
\title{Bridging thermodynamics and metrology in non-equilibrium Quantum Thermometry}
\author{Vasco Cavina}
\affiliation{NEST, Scuola Normale Superiore and Istituto Nanoscienze-CNR, Piazza dei Cavalieri 7, I-56126, Pisa, Italy}
\author{Luca Mancino}
\affiliation{Dipartimento di Scienze, Universit\`{a} degli Studi Roma Tre, Via della Vasca Navale 84, 00146, Rome, Italy}
\author{Antonella De Pasquale}
\affiliation{Dipartimento di Fisica, Universit\`{a} di Firenze, Via G. Sansone 1, I-50019, Sesto Fiorentino (FI), Italy}
\affiliation{INFN Sezione di Firenze, via G.Sansone 1, I-50019 Sesto Fiorentino (FI), Italy}
\affiliation{NEST, Scuola Normale Superiore and Istituto Nanoscienze-CNR, Piazza dei Cavalieri 7, I-56126, Pisa, Italy}
\author{Ilaria Gianani}
\affiliation{Dipartimento di Scienze, Universit\`{a} degli Studi Roma Tre, Via della Vasca Navale 84, 00146, Rome, Italy}
\author{Marco Sbroscia}
\affiliation{Dipartimento di Scienze, Universit\`{a} degli Studi Roma Tre, Via della Vasca Navale 84, 00146, Rome, Italy}
\author{Robert I. Booth}
\affiliation{Dipartimento di Scienze, Universit\`{a} degli Studi Roma Tre, Via della Vasca Navale 84, 00146, Rome, Italy}
\affiliation{Institut de Physique, Sorbonne Universit\'{e}, 4 Place Jussieu, 75005, Paris, France}
\author{Emanuele Roccia}
\affiliation{Dipartimento di Scienze, Universit\`{a} degli Studi Roma Tre, Via della Vasca Navale 84, 00146, Rome, Italy}
\author{Roberto Raimondi}
\affiliation{Dipartimento di Matematica e Fisica, Universit\`{a} degli Studi Roma Tre, Via della Vasca Navale 84, 00146, Rome, Italy}
\author{Vittorio Giovannetti}
\affiliation{NEST, Scuola Normale Superiore and Istituto Nanoscienze-CNR, Piazza dei Cavalieri 7, I-56126, Pisa, Italy}
\author{Marco Barbieri}
\affiliation{Dipartimento di Scienze, Universit\`{a} degli Studi Roma Tre, Via della Vasca Navale 84, 00146, Rome, Italy}
\affiliation{Istituto Nazionale di Ottica - CNR, Largo Enrico Fermi 6, 50125, Florence, Italy}
\begin{abstract}
Single-qubit thermometry presents the simplest tool to measure the temperature of thermal baths with reduced invasivity. At thermal equilibrium, the temperature uncertainty is linked to the heat capacity of the qubit; however, the best precision is achieved outside of equilibrium conditions. Here, we discuss a way to generalize this relation in a non-equilibrium regime, taking into account purely quantum effects such as coherence. We support our findings with an experimental photonic simulation.
\end{abstract}
\maketitle
\paragraph{Introduction:--}
Identifying strategies for improving
the measurement precision by means of quantum resources is the purpose of Quantum Metrology \cite{Giovannetti06,Giovannetti11,Paris09}. In particular, through the Quantum Cram\'{e}r-Rao Bound (QCRB), it sets ultimate limits on the best accuracy attainable in the estimation of unknown parameters even when the latter are not associated with observable quantities.
These considerations have attracted an increasing attention in the field of quantum thermodynamics, where an accurate control of the temperature is highly demanding \cite{Hilt09, Williams11, Kliesch14, Vinjanampathy16,Millen16}. Besides the emergence of primary and secondary thermometers based on precisely machined microwave resonators \cite{Mohr05, Weng14}, recent efforts have been made aiming at measuring temperature at even smaller scales, where nanosize thermal baths are
highly sensitive to disturbances induced by the probe
\cite{Stace10,Mann14,Mehboudi15,Depasquale16,
Depasquale17,DePalma17,Campbell117}.
Some paradigmatic examples of nanoscale thermometry involve nanomechanical resonators \cite{Brunelli11}, quantum harmonic oscillators \cite{Brunelli12} or atomic condensates \cite{Sabin14,Johnson16,Hohmann16} (also in conjunction with estimation of chemical potential \cite{Marzolino13}). In this context the analysis of quantum properties needs to be taken into account in order to establish, and eventually enhance, metrological precision \cite{Salvatori14,Jevtic15,Tham16,Mancino17,Correa17,Campbell18}.
In a conventional approach to thermometry,
an external bath ${\cal B}$ at thermal equilibrium
is typically indirectly probed
via an ancillary system, the thermometer ${\cal S}$, that
is placed into weak-interaction with the former.
Assuming hence that the thermometer reaches the thermal equilibrium configuration without perturbing ${\cal B}$ too much,
the Einstein Theory of Fluctuations (ETF) can be used to characterize the sensitivity of the procedure
in terms of the heat capacity of ${\cal S}$ which represents its thermal susceptibility
to the perturbation imposed by the bath~\cite{Landau80,Falcioni11,Dicastro15}.
Since this last is an equilibrium property, one should not expect it to hold in non-equilibrium regimes. However thermometry schemes that do not need a full thermalization of the probe have been recognized to offer higher sensitivities in temperature estimation \cite{Correa15}. Thus, if on the one hand the QCRB can still be used as the proper tool to
gauge the measurement uncertainty on the bath temperature,
on the other hand establishing a direct link between this approach and the thermodynamic properties of the probe is still an open question. Furthermore, the advantages pointed out in~\cite{Correa15}
are conditional on precisely addressing the probe during its evolution, a task which might be demanding in real experiments \cite{Mancino17}.
Here ${\cal S}$ is assumed to be a quantum system characterized by a local Hamiltonian $H$
that, after being initialized into some proper input state $\rho(0)$, weakly interacts for some time $\tau$ with the bath ${\cal B}$ of assigned, but unknown, temperature $T$, before being measured.
In this setting, we compare the performances of optimal estimation procedures with standard thermometry approaches: the temperature parameter $T$ is recovered by only monitoring the energy variation on ${\cal S}$ by its interaction with the bath. Then we derive a universal inequality that links metrological and thermodynamic quantities, ultimately discussing the optimal condition for its saturation.
In particular for the case where ${\cal S}$ is a two-level (qubit) system we show that optimality can be achieved for
a broad class of configurations which also include out-of-equilibrium scenarios for which the ETF does not hold.
These results are also confirmed by an experiment where the proposed scheme is simulated via quantum photonics.
\paragraph{QCRB vs ETF:--}
A direct application of the QCRB
~\cite{Paris09,Giovannetti11} to our setting
establishes that the Mean Square Error (MSE) $\Delta^2 T$
of any temperature estimation procedure, based on an arbitrary local measurement on ${\cal S}$, is limited by the inequality
$\Delta^2 T \geq 1/[M Q_{T}(\tau)]$. In this expression $M$ is the number of measurements one performs on the probe, while
$Q_{T}(\tau)$ is the Quantum Fisher Information (QFI): a complex functional which only depends on the reduced density matrix $\rho(\tau)$ describing the state of ${\cal S}$ after its interaction with ${\cal B}$ (see below for details).
Consider then the case where, as in the conventional thermometry approach, the bath temperature is recovered by just measuring the mean energy $E_T(\tau)=\mbox{tr}[ H\rho(\tau)]$ of ${\cal S}$ and
inverting its functional dependence upon $T$. A simple application
of the error propagation formula reveals that in this scenario the associated MSE can be expressed as $\Delta^2 T = \Delta^2 E_T(\tau)/[{M C_T^2(\tau)}]$,
where $\Delta^2 E_T(\tau)=\mbox{tr}[ (H-E_T(\tau))^2 \rho(\tau)]$ is the
variance of $H$ on $\rho(\tau)$ which we use to estimate the uncertainty of the mean energy $E_T(\tau)$, and $C_T(\tau) = \partial_T E_T(\tau)$ is the partial derivative of $E_T(\tau)$ with respect to $T$.
Since the latter quantity represents the energetic susceptibility of the system to the perturbation imposed by the bath, we can interpret it as a generalized Heat Capacity (HC) associated with the not-necessarily stationary state $\rho(\tau)$ of ${\cal S}$~\cite{Landau80,Falcioni11,Dicastro15}.
Irrespectively from the specific form of the probe/bath coupling,
we can hence invoke the QCRB to draw the following universal relation
\begin{eqnarray}
Q_T(\tau) \geq C_T^2(\tau)/\Delta^2 E_T(\tau)\;, \label{IMPO}
\end{eqnarray} that links together the generalized HC of ${\cal S}$, its energy spread $\Delta^2 E_T(\tau)$, and the associated QFI functional.
The inequality~(\ref{IMPO}) can be shown to saturate at least in those cases where the ETF holds, i.e. when
$\tau$ is sufficiently long to ensure that, via thermalization,
${\cal S}$ reaches the equilibrium state represented by the
thermal Gibbs state
$\rho_{T}^{(eq)}=e^{-{\cal H}_S / k_BT}/{\cal Z}$, with ${\cal Z}=\mbox{Tr} [e^{-{\cal H}_S / k_BT}]$ the partition function of the system.
In this scenario in fact one has~\cite{ZANARDI1,ZANARDI2}
\begin{eqnarray}
Q_{T}^{(eq)} = \frac{\Delta^2 E_T^{(eq)}}{k_B^2 T^4} \;,
\qquad C_{T}^{(eq)} = \frac{\Delta^2 E_T^{(eq)}}{k_B T^2} \;,
\label{QuantumFisher111}
\end{eqnarray}
which indeed implies $Q_{T}^{(eq)} = [C_{T}^{(eq)}]^2/\Delta^2 E_T^{(eq)}$.
Accordingly one can conclude that, when the thermometer and the bath reach thermal equilibrium, the standard
thermometry procedure which derives $T$ from the mean energy of ${\cal S}$ is optimal.
We point out that Eq.~(\ref{QuantumFisher111}) also establishes a direct linear dependence between QFI and the associated capacity, i.e.
\begin{eqnarray}
Q_{T}^{(eq)} ={C_{T}^{(eq)}}/({k_B T^2})\;,
\label{QuantumFisher1110}
\end{eqnarray}
which, as we shall clarify in the following, is a peculiar property of Gibbs states.
\begin{figure}
\caption{(Color online) Plots of the non-equilibrium QFI $Q_T(\tau)$ (purple curves) and of the quantity $C_T^2(\tau)/{\rm D}
\label{TheoreticalCurves}
\end{figure}
\paragraph{The Qubit model:--} Let us now focus on the
special case where the probe system ${\cal S}$
is a qubit with fixed Hamiltonian ${\cal H}=\hbar \omega \sigma_3 / 2$, and ${\cal B}$ is a Bosonic
thermal bath (hereafter $\sigma_3$ being the third Pauli operator). As in Refs.~\cite{Depasquale17,Campbell117,Brunelli11,Brunelli12,Correa15}
we describe the temporal evolution of ${\cal S}$ by assigning a Master Equation (ME)
which we write in the interaction picture representation as
$\dot{\rho}(t) =
\sum_{j=\pm} \gamma_j {\cal D}_j[\rho(t)]$.
In this expression ${\cal D}_-$ and ${\cal D}_+$ are Gorini-Kossakowski-Sudarshan-Lindblad (GKSL) generators having, respectively, the qubit ladder matrices
$\sigma_-= |0\rangle\langle 1|$ and $\sigma_+ = |1\rangle\langle 0|$
as corresponding Lindblad operators (hereafter $|0\rangle$ and $|1\rangle$ identify respectively the excited and the ground
state of the single-qubit thermometer).
The parameters $\gamma_-=\gamma(N+1)$ and $\gamma_+=\gamma N$ instead
set the temperature dependence of the system dynamics through the Planck number
$N= 1/(e^{\hbar \omega/ k_B T} -1)\in[0,\infty[$ that counts the average
number of resonant Bosonic excitations present in the bath, $\gamma$ being a positive rate that fixes the
time scale of the problem.
By direct integration of the ME one can easily verify that the state of ${\cal S}$ at time $\tau$ can be expressed as
$\rho(\tau)= \frac{1}{2} [ \openone + \vec{r}(\tau) \cdot
\vec{\sigma}]$ with a Bloch vector
$\vec{r}(\tau)$ having cartesian components equal to
${r}_{1,2}(\tau) = {r}_{1,2}(0) e^{-\gamma(2 N+1) \tau/2}$ and ${r}_3(\tau) = {r}_3(0) e^{-\gamma(2 N+1) \tau} -
(1- e^{-\gamma(2 N+1) \tau} )/(2N+1)$. This corresponds to an evolution induced by a Generalized Amplitude Damping (GAD)
channel $\Phi_\tau$~\cite{Nielsen00} which,
irrespective of the specific choice of $\rho(0)$, will let the system asymptotically
relax to a unique fixed point with Bloch vector $\vec{r}^{(eq)}= (0,0,-1/(2N+1))$ which represents
the system thermal Gibbs state
$\rho_{T}^{(eq)}$. In this long time limit, our model will behave as anticipated in the previous section,
saturating the inequality (\ref{IMPO}), i.e. allowing to recover the QCRB via ETF -- as well as fulfilling (\ref{QuantumFisher1110}).
What about the finite time $\tau$ regime?
For the present model the heat capacity $C_T(\tau)$ and the energy spread $\Delta^2 E_T(\tau)$ can be easily shown to be equal to
\begin{eqnarray}
C_T(\tau) = \tfrac{\hbar\omega}{2} \; \partial_T r_3(\tau)\;, \quad
\Delta^2 E_T(\tau)= \left(\tfrac{\hbar\omega}{2}\right)^2
[1 - r^2_3(\tau)].
\end{eqnarray}
Furthermore the QFI can be computed as
$Q_{T}(\tau)={\mbox{Tr}}[ L_T\; \partial_T \rho(\tau)]$ with $L_T$ being the (possibly time dependent)
Symmetric Logarithmic Derivative of the problem, i.e. the self-adjoint operator which satisfies the identity $\partial_T \rho(\tau)=1/2 \; \lbrace L_T, \rho(\tau) \rbrace$, with $\{ \cdots, \cdots\}$ being the anti-commutator~\cite{Paris09}.
Simple algebra allows us to express this as
\begin{equation}
Q_{T}(\tau) = \frac{[\partial_T \; {{r}(\tau)} ]^2}{1-{r^2(\tau)}} +{{r^2(\tau)} \; [\partial_T \theta({\tau})]^2}\;,
\label{QuantumFisher}
\end{equation}
where ${r(\tau)}$ and $\theta(\tau)$ are, respectively, the length and the polar angle of the Bloch vector $\vec{r}(\tau)$, the azimuthal angle being a constant of motion and playing no role in the derivation -- see Appendix for details.
The first term on the r.h.s. of Eq.~(\ref{QuantumFisher}) describes the rearrangement of the population of the probe during its interaction with the reservoir, while the other one accounts for quantum coherence contributions which
vanish in the asymptotic limit where $\gamma \tau \rightarrow \infty$ (the first term converging instead to $Q_{T}^{(eq)}$).
By direct substitution of these expressions into~(\ref{IMPO}) one can verify that for generic choices of $\tau$ and of the input state $\rho(0)$ the inequality will be strict -- see Fig.~\ref{TheoreticalCurves}. A notable exception however is obtained when the input state is diagonal into the
energy basis of $H$, i.e. when $r_{1,2}(0)$ both vanish (or equivalently when, independently from the choice of $\rho(0)$, the coherence terms of $\rho(\tau)$
are removed by a decoherence process that acts on $\cal S$ before the measurement stage).
In these special cases
the system remains diagonal along the full trajectory and Eq.~(\ref{QuantumFisher}) reduces to
$Q_{T}(\tau) = \frac{[\partial_T \; {{r}_3(\tau)} ]^2}{1-{r_3^2(\tau)}}$. Accordingly~(\ref{IMPO}) becomes an identity for all choices of
the interaction time $\tau$, implying that the standard thermometry scheme which recovers $T$ from energy measurements alone is
optimal. Notice that in this scenario, $\rho(\tau)$ has not reached the thermal equilibrium configuration so ETF arguments cannot be applied: this is made evident by the fact that even though (\ref{IMPO}) saturates, yet $Q_T(\tau)$ and $C_T(\tau)$ cannot be linearly connected as in (\ref{QuantumFisher1110}) unless one introduces an effective, yet fictitious,
rescaling of the proportionality coefficient appearing on the right-hand-side.
The numerical plots of Fig.~\ref{TheoreticalCurves} show the relations between the l.h.s. and r.h.s terms of (\ref{IMPO}).
In agreement with the finding of Ref.~\cite{Correa15}
we notice that in general the QFI reaches higher values (corresponding to better estimation accuracies) for finite (possibly dependent on $T$) values of $\tau$. Furthermore after having fixed the parameter $\tau$ at its best,
the absolute best performance is obtained when initializing the qubit into the ground state (see last panel of the figure) -- we have confirmed this result by numerical optimization of (\ref{QuantumFisher}),
as shown in details in the Appendix.
The first and last panel of Fig. \ref{TheoreticalCurves} explicitly show
the saturation of Eq. (\ref{IMPO}) for diagonal states at all times $\tau$, while for generic input this is only possible
when $\tau \rightarrow \infty$ since the system
asymptotically thermalizes.
\begin{figure}
\caption{Part (a): quantum simulation via quantum photonics. A photon pair is produced via a Spontaneous Parametric Down Conversion (SPDC) process through a Type-I 3 $mm$ BBO source. One photon is employed to simulate the single-qubit thermometer, while the other one is used as an ancilla
to simulate the {\it system-bath}
\label{ExperimentalSetup}
\end{figure}
\begin{figure*}
\caption{Comparison between the experimental errors ${\rm D}
\label{ExperimentalResults}
\end{figure*}
\paragraph{Quantum Photonic Simulation:--} We have simulated the evolution of the probing qubit ${\cal S}$ under the action of the thermal bath
via a photonic implementation of the associated GAD channel $\Phi_t$~\cite{Wang13,Lu17,Aspuruguzik12, Cialdi2017}, in order to extract the experimental uncertainties on temperature estimation.
For this purpose we have exploited the Kraus representation of the map
$\rho(\tau)=\Phi_{\tau} [\rho(0)]=\sum_{i=1}^4 K_{i} \rho(0) {K_{i}}^\dagger$, where
$K_i$'s are four Kraus operators: the first two, i.e.
$K_1=\sqrt{\tfrac{N+1}{2N+1}} (\vert 0 \rangle \langle 0 \vert + {e^{-\gamma(2N+1) \tau/2}} \vert 1 \rangle \langle 1 \vert)$, $K_2=
\sqrt{\tfrac{N+1}{2N+1}} \sqrt{1-e^{-\gamma(2N+1) \tau}} \vert 0 \rangle \langle 1 \vert$,
being responsible for decay from the excited to the ground state
represent the action of an amplitude damping (AD) map,
the second two,
i.e.
$K_3= \sqrt{\tfrac{1}{2N+1}}( e^{-\gamma(2N+1) \tau/2} \vert 0 \rangle \langle 0 \vert + \vert 1 \rangle \langle 1 \vert)$, and $K_4=
\sqrt{\tfrac{1}{2N+1}}\sqrt{1-e^{-\gamma(2N+1) \tau}} \vert 1 \rangle \langle 0 \vert$,
describing the
absorption events, represent instead an inverse amplitude damping (IAD) map.
The previous decomposition depicts the GAD as a weighted sum of two different processes, an AD and an IAD with
weights respectively equal to
$\tfrac{N+1}{2N+1}$ and $\tfrac{1}{2N+1}$.
This last property is crucial for implementing a quantum optical simulation of the process:
after reproducing the AD and the IAD channel through a succession of optical logic gates, it is possible to reconstruct the full density matrix simply by taking a proper weighted
sum of the outputs of the two channels \cite{Mancino17}.
Specifically, an AD acting on a qubit $\mathcal{S}$ can be formally simulated by coupling the system with an ancilla $\mathcal{A}$ and performing the following operations:
\begin{enumerate}
\item a controlled-$\sigma_z$ gate, with $\mathcal{S}$ as the control, embedded between two rotations $R(\phi)$
acting on $\mathcal{A}$.
The rotations are performed around the $y$ axis and the angle $\phi$
has to be chosen in order to mimic the damping factor of the Kraus decomposition of the map, and in our case is such that $e^{-\gamma(2N+1) \tau} = \cos^2(2 \phi)$
\cite{Kiesel05, Mancino17, Mancavina18};
\item a projective measurement on the computational basis of $\mathcal{A}$,
conditioning a $\sigma_x$ gate on $\mathcal{S}$ (see Fig. \ref{ExperimentalSetup} (b), top panel).
\end{enumerate}
The above mentioned procedure works also for the IAD, except for two additional $\sigma_x$ and $\sigma_z$ rotations in the preparation and post-processing of the state
(Fig. \ref{ExperimentalSetup} (b), bottom panel).
An experimental implementation is obtained by associating
each logical gate with its corresponding element on the optical table, as explained in
Fig. \ref{ExperimentalSetup} (a).
The mean value of the energy and the temperature uncertainty
are inferred by performing a measurement in the Hamiltonian eigenbasis of $\mathcal{S}$,
a purpose that in practice is realized through experimental counts of the populations \cite{Nota1}.
The expectation value of the energy is given by
$\langle E \rangle= (n_0-n_1)/2(n_0+n_1)$, where $n_i$ corresponds
to the measured count rate of the state $i$. Its uncertainty is evaluated as
$\Delta^2 E=n_0 n_1/(n_0+n_1)^3$; temperature uncertainties (at each estimation round) are then obtained
as $\Delta^2 T = \Delta^2 E / (\partial_T E)^2$. The results are summarized in
Fig.~\ref{ExperimentalResults}, in which we compare the experimental uncertainties
on the temperature with the related QCRB.
\paragraph{Conclusions:--}
Wherever thermal equilibrium is reached, the ETF establishes a neat link between the temperature fluctuations $\Delta T$ and the thermal susceptibility of the system corresponding to the heat capacity.
We have investigated whether analogous relations can be recovered in non-equilibrium regimes. Studying the case of a single-qubit thermometer, we have explicitly shown that this is not possible whenever coherence is present in the initial state of the probe, as the QFI functional which gauges the optimal accuracy threshold contains additional contributions. However, for diagonal
input states the optimality of standard measurement procedure is restored and allows to saturate the QCRB with conventional thermometry approaches based on
energy measurements.
This peculiar effect is probably related to the small number of degrees of freedom characterizing the thermometer we used.
As a matter of fact, we suspect that as the dimensionality of the probing system increases, optimal thermometry could only be
achieved by more complex measurement procedures which, even in the absence of off-diagonal terms, include the study of the full
statistics of the energy measurements.
{\it Acknowledgements.}
ADP acknowledges financial support from the University of
Florence in the framework of the University Strategic Project
Program 2015 (project BRS00215).
\widetext
\appendix
\section{Derivation of equation (\ref{QuantumFisher})}
\label{Appa}
A convenient way to compute the QFI is to express the symmetric logarithmic derivative $L_T$ operator of the problem in the Pauli basis, i.e.
$L_{T} = l_0(\tau) \identity + \vec{l}(\tau) \cdot \vec{\sigma}$, with $l_0(\tau)$ and $\vec{l}(\tau)=(l_1(\tau),l_2(\tau),l_3(\tau))$ being real quantities
which can in principle depend upon the evolution time $\tau$. With this we can now write the identity $\{L_{T}(\tau), \rho(\tau)\} = 2 \partial_T \rho(\tau)$ as
\begin{eqnarray} && l_j (\tau) + r_j(\tau) l_0 (\tau) = \partial_T r_j(\tau) \;, \qquad j=1,2,3\;, \label{2app}\\
&&l_0 (\tau) + \sum_{j=1}^3 r_j(\tau) l_j(\tau)=0\;, \label{3app}
\end{eqnarray}
with $r_j(\tau)$ being the Cartesian components of the Bloch vector $\vec{r}(\tau)$ of $\rho(\tau)$.
By substitution of Eq.~(\ref{2app}) into Eq.~(\ref{3app}) we obtain
\begin{eqnarray} \begin{gathered}
l_0 (\tau) = -\frac{1}{2} \frac{\partial_T {r}^2(\tau) }{1-{r}^2(\tau)}\;,
\end{gathered} \end{eqnarray}
with $r(\tau) = \sqrt{ \sum_{j=1}^3 r^2_j(\tau)}$ being the length of $\vec{r}(\tau)$. Replacing this into (\ref{2app}) we can then write
\begin{eqnarray} \label{4app} \begin{gathered} l_j(\tau) = \partial_T r_j(\tau) + \frac{r_j(\tau) }{2} \frac{\partial_T {r}^2(\tau) }{1-{r}^2(\tau)} \;,
\end{gathered} \end{eqnarray}
and hence
\begin{eqnarray} Q_T = \mbox{Tr}[\partial_T \rho L_{T}] = \sum_{j=1}^3 l_j(\tau) \partial_T r_j(\tau)
= \sum_{j=1}^3 \big[ \partial_T r_j(\tau)\big]^2 +
\frac{1}{4} \frac{\big[\partial_T r^2(\tau) \big]^2}{1 - r^2(\tau)} = \sum_{j=1}^3 \big[ \partial_T r_j(\tau)\big]^2 +
r^2(\tau) \frac{\big[\partial_T r(\tau) \big]^2}{1 - r^2(\tau)}\;. \label{FFDS}
\end{eqnarray}
Expressing then the Bloch vector in polar coordinates $\vec{r}= r (\cos\phi\sin\theta, \sin\phi \sin\theta, \cos\theta)$ we notice that
the system ME
admits the azimuthal angle as constant of motion, i.e.
$\phi(\tau) = \arctan[r_2(\tau)/r_1(\tau)] = \arctan[r_2(0)/r_1(0)] =\phi(0)$, which, by construction cannot depend upon $T$. Exploiting this fact it turns out that (\ref{FFDS}) only depends upon
the partial derivative in $T$ of the modulus $r(\tau)$ and of the polar angle $\theta(\tau)$ as shown in (\ref{QuantumFisher}).
\section{QFI for a qubit in a bosonic channel}
\label{Appb}
The value of the QFI for a two level system evolving through the
GAD considered in the main text is represented in Fig.~\ref{Total}
for different times and initial preparations.
The plot shows that initialising the probe in the ground state
is the optimal choice for temperature estimation, and in agreement with
\cite{Correa15} the best performance is attained after waiting a finite
amount of time.
This particular behaviour can be explained observing that the
decay rate of the populations depends explicitly
on the average number of resonant bosonic excitations, and consequently contains some information
about the temperature, that is eventually lost if the system
achieves complete thermalization.
In this last scenario the QFI becomes independent of the initial conditions, as
it is clearly shown in the upper right corner of Fig. (\ref{Total})
and its value asymptotically satisfies Eq. (\ref{QuantumFisher1110}) that
holds for thermalized probes.
Notice that the additional dependence on $T$ provided by the decay
rate is not always an advantage for temperature estimation, as it is
evident from the low-$\theta(0)$ region of the contour-plot and from the last panel
of Fig. (\ref{TheoreticalCurves}), that displays a null QFI
for a probe initialized in the excited
state for a properly chosen time of measurement.
Finally we remark that the ground state is no longer optimal if we fix different
values of the intermediate time $\tau$, as pointed out for instance in the
lower right corner of Fig. (\ref{Total}) in which the theoretical curve
for $\gamma \tau=0.6$ is represented.
\begin{figure}
\caption{Contour-plot (left panel) and 3D plot (upper right panel) of the QFI~(\ref{QuantumFisher}
\label{Total}
\end{figure}
\end{document} |
\begin{document}
\title{\textbf{Shared multi-processor scheduling}}
\author{
Dariusz Dereniowski\footnote{Corresponding author. Email: deren@eti.pg.gda.pl}\\
\small{\emph{Faculty of Electronics,}}\\
\small{\emph{Telecommunications and Informatics},}\\
\small{\emph{Gda{\'n}sk University of Technology},}\\
\small{\emph{Gda{\'n}sk, Poland}}
\and
Wies{\l}aw Kubiak\\
\small{\emph{Faculty of Business Administration},}\\
\small{\emph{Memorial University},}\\
\small{\emph{St. John's, Canada}}
}
\date{}
\maketitle
\begin{abstract}
We study shared multi-processor scheduling problem where each job can be executed on its private processor and simultaneously on one of many processors shared by all jobs in order to reduce the job's completion time due to processing time \emph{overlap}. The total weighted overlap of all jobs is to be maximized. The problem models subcontracting scheduling in supply chains and divisible load scheduling in computing. We show that synchronized schedules that complete each job at the same time on its private and shared processor, if any is actually used by the job, include optimal schedules. We prove that the problem is NP-hard in the strong sense for jobs with arbitrary weights, and we give an efficient, polynomial-time algorithm for the problem with equal weights.
\end{abstract}
\textbf{Keywords:} combinatorial optimization, divisible jobs, shared processors, subcontracting, supply chains
\section{Introduction}
The problem of scheduling divisible jobs on shared processors has attracted growing attention due to its importance in scheduling job-shops, parallel and distributed computer systems, and supply chains.
Anderson~\cite{A81} considers a job-shop scheduling model where each job is a batch of potentially infinitely small items that can be processed independently of
other items of the batch. A processor in the shop is being shared between the jobs processed by the processor at the rates proportional to the
processor capacity fraction allocated to them by the scheduler. The objective is to minimize the total weighted backlog in a given time horizon.
Bharadwaj et al.~\cite{HBGR03} survey divisible load scheduling where fractions of the total divisible load are distributed to subsets of nodes
of a shared network of processors for distributed processing. The processing by the nodes and possible communications
between the nodes overlap in time so that the completion time (makespan) for the whole load is shorter than the processing of the whole load by a single node. The goal is to choose the size of the load fractions for each node so that the makespan for the whole load is minimized. \cite{HBGR03} points out that many real-life applications satisfy the divisibility property, among them "...processing of massive experimental
data, image processing applications like feature extraction
and edge detection, and signal processing applications like
extraction of signals buried in noise from multidimensional
data collected over large spans of time, computation of Hough
transforms, and matrix computations." Drozdowski~\cite{D09} surveys optimal solutions for a \emph{single} divisible load
obtained for various network topologies.
Recently, Vairaktarakis and Aydinliyim~\cite{VairaktarakisAydinliyim07} consider scheduling divisible jobs on subcontractor's processor in supply chains to reduce the job's completion times.
Hezarkhani and Kubiak~\cite{HK15} refer to the problem as the subcontractor scheduling problem.
Vairaktarakis~\cite{V13} points out that the lack of due attention to the subcontractors'
operations can cause significant complications in supply chains. A well-documented real-life example
of this issue has been reported in Boeing's Dreamliner
supply chain where the overloaded schedules of subcontractors,
each working with multiple suppliers, resulted in long
delays in the overall production due dates (see Vairaktarakis~\cite{V13} for more details and references). The subcontractor scheduling problem
is common in quick-response
industries characterized by volatile demand and inflexible
capacities where subcontracting is often used --- those include
metal fabrication industry (Parmigiani~\cite{Par}), electronics assembly (Webster et al~\cite{Web}), high-tech
manufacturing (Aydinliyim and Vairaktarakis~\cite{Ay11}), textile
production, and engineering services (Taymaz and Kili\c{c}aslan~\cite{Tay}) where subcontracting enables a manufacturer to
speed up the completion times of his jobs.
In the subcontractor scheduling problem
each agent has its private processor and a \emph{single} subcontractor's processor shared by all jobs available for the
execution of its own job. The jobs can be divided between private and shared processor
so that job completion times are reduced by possibly overlapping executions on private and shared processor.
Vairaktarakis and Aydinliyim~\cite{VairaktarakisAydinliyim07} consider a non-preemptive case where at most one time interval
on subcontractor's shared processor is allowed for any job. They prove that under this assumption there
exist optimal schedules that complete job execution on private and shared processor at the same time, we refer to such schedules
as \emph {synchronized} schedules, and show that sequencing jobs in ascending order of their processing times on the shared
processor gives an optimal solution. Furthermore, this solution guarantees a non-empty interval on the shared processor for each
job. Hezarkhani and Kubiak \cite{HK15} observe that by allowing an agent to use a set of several
mutually disjoint intervals on the subcontractor processor one does not improve schedules by increasing total overlap. Therefore, \cite{HK15} actually observes that algorithm of
\cite{VairaktarakisAydinliyim07} solves the single processor preemptive problem to optimality as well. In this paper we generalize this preemptive model of \cite{HK15} by allowing many shared processors
and by allowing that the reduction in job completion time be rewarded at different rates for different jobs, i.e., we allow different weights for jobs.
It is worth pointing out that
Vairaktarakis and Aydinliyim \cite{VairaktarakisAydinliyim07} change focus from optimization typically sought after in the centralized setting to
coordinating mechanisms to ensure efficiency in the decentralized systems.
Vairaktarakis \cite{V13} analyzes the outcomes of a
decentralized subcontracting system under different protocols
announced by the subcontractor. Both papers
assume complete information yet neither provides coordinating
pricing schemes for the problem. To remedy this \cite{HK15} designs parametric pricing schemes that strongly
coordinate this decentralized system with complete information, that is, they ensure that the agents' choices
of subcontracting intervals always result in efficient (optimal) schedules. It also proves that the pivotal mechanism
is coordinating, i.e., agents are better off by reporting their true processing times, and by participating in the subcontracting.
The remainder of the paper is organized as follows. Section 2 introduces notation and formulates the shared multi-processor scheduling problem. Section 3 defines some desirable characteristics of schedules and proves that there always are optimal schedules with these characteristics. Section 4 proves that there always is an optimal schedule that is synchronized.
Section 5 considers special instances for which optimal schedules on shared processors are $V$-\emph{shaped} and \emph{reversible}. Section 6 proves that the problem is NP-hard in the strong sense
even when limited to the set of instances defined in Section 5. Section 7 gives an efficient, polynomial time algorithm for the problem with equal weights. Finally, Section 8 concludes the paper and lists open problems.
\section{Problem formulation} \label{sec:problem}
We are given a set $\mathcal{J}$ of $n$ preemptive jobs.
Each job $j\in\mathcal{J}$ has its processing time $p_j$ and weight $w_j$.
With each job $j\in\mathcal{J}$ we associate its \emph{private} processor denoted by $\cP_j$.
Moreover, $m\geq 1$ \emph{shared} processors $\Mshared_1,\ldots,\Mshared_m$ are available for all jobs.
A \emph{feasible} schedule $\mathcal{S}$ selects for each job $j\in\mathcal{J}$:
\begin{enumerate} [label={\normalfont{(\roman*)}}]
\item\label{it:f1} a shared processor $\Mshared[\mathcal{S},j]\in \{\mathcal{M}_1, \ldots, \mathcal{M}_m\}$,
\item\label{it:f2} a (possibly empty) \emph{set} of open, mutually disjoint time intervals in which $j$ executes on $\Mshared[\mathcal{S},j]$, and
\item\label{it:f3} a \emph{single} time interval $(0, \complTime{\mathcal{S}}{j}{\cP})$ where $j$ executes on its private processor $\cP_j$.
\end{enumerate}
The total length of all these intervals (the ones in~\ref{it:f2} and the one in~\ref{it:f3}) equals $p_j$. The simultaneous execution of $j$ on private $\cP_j$ and shared $\Mshared[\mathcal{S},j]$ is allowed and desirable, as follows from the optimization criterion given below. However, for any two jobs $j$ and $j'$ if they use the same shared processor, i.e., $\Mshared[\mathcal{S},j]=\Mshared[\mathcal{S},j']=\Mshared$, then any interval in which $j$ executes on $\Mshared$ is disjoint from any interval in which $j'$ executes on $\Mshared$.
In other words, each processor can execute at most one job at a time.
Given a feasible schedule $\mathcal{S}$, for each job $j\in\mathcal{J}$ we call any time interval of maximum length in which $j$ executes on both private $\cP_j$ and shared $\Mshared[\mathcal{S},j]$ simultaneously an \emph{overlap}.
The total overlap $t_j$ of job $j$ equals the sum of lengths of all overlaps for $j$.
The \emph{total weighted overlap} of $\mathcal{S}$ equals
\[\tct{\mathcal{S}}=\sum_{j\in\mathcal{J}}t_jw_j.\]
A feasible schedule that maximizes the total weighted overlap is called \emph{optimal}. For convenience we use the abbreviation WSMP to denote
the \emph{weighted shared multi-processor} scheduling problem: the instance of the problem consists of a set of jobs $\mathcal{J}$ and the number of shared processors $m$; the goal is to find an optimal schedule that maximizes total weighted overlap.
This objective function is closely related to the total completion time objective traditionally used in scheduling. The total completion time \emph{can} potentially be reduced by an increase of the total overlap resulting
from the simultaneous execution of jobs on private and shared processors. However, to take full advantage of this potential the schedules need to start jobs \emph{at} time $0$, otherwise the
overlap would not necessarily be advantageous in reducing the total completion time. At the same time we need to emphasize that the two objectives exist for different practical reasons. The minimization of total completion time
minimizes mean flow time and thus by Little's Law minimizes average inventory in the system. The maximization of the total overlap on the other hand maximizes the total net payoff resulting from completing job $j$ earlier at $p_j-t_j$ thanks to the use of shared processors (subcontractors) rather than at $p_j$ if those were not used. The $w_jt_j$ is a net payoff obtained from the completion of job (order) $t_j$ time units earlier due to the overlap $t_j$. This different focus sets the total weighted overlap objective apart from the total completion time objective as an objective important in practice in scheduling shared processors.
For illustration let us consider an example in Figure~\ref{fig:example} with two shared processors and $6$ jobs.
Note that in this example, each job completes at the same time on its private processor and on a shared one (Sections~\ref{sec:observations} and~\ref{sec:optimal} will conclude that for each problem instance there exists an optimal solution with this property).
\begin{figure}
\caption{A schedule for six-job instance and $m=2$ shared processors.}
\label{fig:example}
\end{figure}
\section{Simple observations about optimal schedules} \label{sec:observations}
We now make four simple observations that allow us to reduce a class of schedules to consider yet ensure at the same time that the reduced class always includes optimal schedules.
Let $\mathcal{S}$ be a feasible schedule. Let $\hspace{0.1cm}\bigl|\bigr.\hspace{0.1cm}artTime{\mathcal{S}}{j}{\Mshared}$ and $\complTime{\mathcal{S}}{j}{\Mshared}$ be the start time and the completion times of a job $j$ on the shared processor $\Mshared[\mathcal{S},j]$ respectively, both being $0$ if all of $j$ executes on its private processor only.
A schedule $\mathcal{S}$ is called \emph{normal} if $\complTime{\mathcal{S}}{j}{\Mshared}\leq\complTime{\mathcal{S}}{j}{\cP}$ for each $j\in\mathcal{J}$.
We observe the following.
\begin{observation} \label{obs:normal}
There exists an optimal schedule that is normal.
\end{observation}
\begin{proof}
Let $\mathcal{S}$ be an optimal schedule.
Suppose that some job $j$ completes on a shared processor later than on its private processor in $\mathcal{S}$, i.e., $\complTime{\mathcal{S}}{j}{\Mshared}>\complTime{\mathcal{S}}{j}{\cP}$ and thus $\mathcal{S}$ is not normal.
Hence, there exist intervals $I_1,\ldots,I_k$ such that for each $i\in\{1,\ldots,k\}$, the processor $\Mshared[\mathcal{S},j]$ executes $j$ in $I_i$, $I_i\subseteq(\complTime{\mathcal{S}}{j}{\cP},+\infty)$ and no part of $j$ executes in $(\complTime{\mathcal{S}}{j}{\cP},+\infty)\setminus\bigcup_{i=1}^kI_i$ on either $\Mshared[\mathcal{S},j]$ or $\cP_j$.
Then, modify $\mathcal{S}$ by removing the job $j$ from all intervals $I_1,\ldots,I_k$ on the shared processor $\Mshared[\mathcal{S},j]$ (so that $\Mshared[\mathcal{S},j]$ is idle in $\bigcup_{i=1}^k I_i$) and let $j$ execute in the interval $(0,\complTime{\mathcal{S}}{j}{\cP}+\sum_{i=1}^k|I_i|)$ on its private processor $\cP_j$.
Note that the total weighted overlap of $\mathcal{S}$ has not changed by this transformation.
After repeating this transformation for each job $j$ if need be we obtain an optimal schedule that is normal.
\end{proof}
\begin{observation} \label{obs:idle-time}
Let $\mathcal{S}$ be an optimal normal schedule and let $X_i=\{j\in\mathcal{J}\hspace{0.1cm}\bigl|\bigr.\hspace{0.1cm} \Mshared[\mathcal{S},j]=\Mshared_i \text{ and } \complTime{\mathcal{S}}{j}{\Mshared}>0\}$ for each $i\in\{1,\ldots,m\}$.
There is no idle time in time interval $(0,\max\{\complTime{\mathcal{S}}{j}{\Mshared}\hspace{0.1cm}\bigl|\bigr.\hspace{0.1cm} j\in X_i\})$ on each shared processor $\Mshared_i$.
\end{observation}
\begin{proof}
Note that by Observation~\ref{obs:normal}, there exists a normal optimal schedule $\mathcal{S}$.
Suppose for a contradiction that some shared processor $\Mshared_i$ is idle in a time interval $(l,r)\neq\emptyset$ and $r<\complTime{\mathcal{S}}{j}{\Mshared}\leq \complTime{\mathcal{S}}{j}{\cP}$ for some job $j\in X_i$.
Take maximum $\varepsilon\in(0,(r-l)/2]$ such that $j$ executes continuously in $I_1=(\complTime{\mathcal{S}}{j}{\cP}-\varepsilon,\complTime{\mathcal{S}}{j}{\cP})$ on $\cP_j$ and in $I_2=(\complTime{\mathcal{S}}{j}{\Mshared}-\varepsilon,\complTime{\mathcal{S}}{j}{\Mshared})$ on $\Mshared_i$.
Then, obtain a schedule $\mathcal{S}'$ by taking a piece of $j$ that executes in $I_1$ on $\cP_j$ and a piece of $j$ that executes in $I_2$ on $\Mshared_i$ and execute both pieces in $(l,r)$ on $\Mshared_i$.
Clearly, the new schedule $\mathcal{S}'$ is feasible and, since $\mathcal{S}$ is normal,
$\tct{\mathcal{S}'}=\tct{\mathcal{S}}+w_j\varepsilon$, which contradicts the optimality of $\mathcal{S}$.
\end{proof}
We say that a schedule $\mathcal{S}$ is \emph{non-preemptive} if each job $j$ executes
in time interval $(\startTime{\mathcal{S}}{j}{\Mshared},\complTime{\mathcal{S}}{j}{\Mshared})$ on
$\Mshared[\mathcal{S},j]$ in $\mathcal{S}$.
In other words, in a non-preemptive schedule there is at most one interval in~\ref{it:f2} in the definition of a feasible schedule.
\begin{observation} \label{obs:non-preemptive}
There exists an optimal schedule that is normal and non-preemptive.
\end{observation}
\begin{proof}
By Observation~\ref{obs:normal}, there exists a normal optimal schedule $\mathcal{S}$.
Suppose that $\mathcal{S}$ is preemptive.
We transform $\mathcal{S}$ into a non-preemptive one whose total weighted overlap is not less than that of $\mathcal{S}$.
The transformation is performed iteratively.
At the beginning of each iteration a job $j$ is selected such that $j$ executes on a shared processor $\Mshared[\mathcal{S},j]$ in at least two disjoint time intervals
$(l,r)$ and $(l',r')$, where $r<l'$. Without loss of generality we assume that the intervals are of maximal lengths.
Modify $\mathcal{S}$ by shifting each job start, completion and preemption that occurs in time interval $(r,l')$ on $\Mshared[\mathcal{S},j]$ by $r-l$ units to the left, i.e. towards the start of the schedule at 0.
Then, the part of $j$ executed in $(l,r)$ in $\mathcal{S}$ is executed in $(l'-r+l,l')$ after the transformation.
The transformation does not increase the completion time of any job $j'$ on $\Mshared[\mathcal{S},j]$ and keeps it the same on $\cP_{j'}$ for each job $j'$.
Thus, in particular, $\mathcal{S}$ remains normal.
However, the number of preemptions of the job $j$ decreases by $1$ and there is no job whose number of preemptions increases.
Also, the total weighted overlap of $\mathcal{S}$ does not change.
Hence, after a finite number of such iterations we arrive at the required normal non-preemptive optimal schedule.
\end{proof}
We say that a schedule $\mathcal{S}$ is \emph{ordered} if it is normal, non-preemptive and for any two jobs $j$ and $j'$ assigned to the same shared processor it holds $\complTime{\mathcal{S}}{j}{\Mshared}\leq\complTime{\mathcal{S}}{j'}{\Mshared}$ if and only if $\complTime{\mathcal{S}}{j}{\cP}\leq\complTime{\mathcal{S}}{j'}{\cP}$.
Informally speaking, the order of job completions on the shared processors is the same as the order of their completions on the private processors.
\begin{observation} \label{lem:ordered}
There exists an optimal schedule that is ordered.
\end{observation}
\begin{proof}
Let $\mathcal{S}$ be an optimal normal and non-preemptive schedule; such a schedule exists due to Observation~\ref{obs:non-preemptive}.
Let $X_i=\{j\in\mathcal{J}\hspace{0.1cm}\bigl|\bigr.\hspace{0.1cm} \Mshared[\mathcal{S},j]=\Mshared_i \text{ and } \complTime{\mathcal{S}}{j}{\Mshared}>0\}$ for each $i\in\{1,\ldots,m\}$.
Recall that each job $j\in X_i$ executes in a single interval $(\startTime{\mathcal{S}}{j}{\Mshared},\complTime{\mathcal{S}}{j}{\Mshared})$ on $\mathcal{M}_i$ in a non-preemptive $\mathcal{S}$.
By Observation~\ref{obs:idle-time}, there is no idle time in time interval $(0,\complTime{\mathcal{S}}{j}{\Mshared})$ for each job $j\in X_i$ on $\Mshared_i$.
Thus, we may represent $\mathcal{S}$ on a processor $\Mshared_i$ as a sequence of pairs $\Mshared_i=((j_1,l_1),\ldots,(j_k,l_k))$, where $X_i=\{j_1,\ldots,j_k\}$ and the job $j_t$ executes in time interval
\[\left( \sum_{j'=0}^{t-1}l_{j'}, \sum_{j'=0}^{t}l_{j'} \right) \]
on $\Mshared_i$, where $l_0=0$, and
in time interval $(0,p_{j_t}-l_t)$ on $\cP_{j_t}$ for each $t\in\{1,\ldots,k\}$.
If $\mathcal{S}$ is ordered, then the proof is completed.
Hence, suppose that $\mathcal{S}$ is not ordered.
There exists a shared processor $\Mshared_i$ and an index $t\in\{1,\ldots,k-1\}$ such that
\begin{equation} \label{eq:ordered:normal}
\complTime{\mathcal{S}}{j_t}{\Mshared}<\complTime{\mathcal{S}}{j_{t+1}}{\Mshared} \quad\textup{and}\quad \complTime{\mathcal{S}}{j_t}{\cP}>\complTime{\mathcal{S}}{j_{t+1}}{\cP}.
\end{equation}
Consider a new non-preemptive schedule $\mathcal{S}'$ in which:
\[\Mshared_i=((j_1,l_1),\ldots,(j_{t-1},l_{t-1}),(j_{t+1},l_{t+1}),(j_t,l_t),(j_{t+2},l_{t+2}),\ldots,(j_k,l_k)),\]
i.e., the order of jobs $j_t$ and $j_{t+1}$ has been reversed on $\Mshared_i$ while the schedules on all other processors remain unchanged.
Note that this exchange does not affect start times and completion times of any job on $\Mshared_i$ except for $j_t$ and $j_{t+1}$.
Since $\mathcal{S}$ is normal, we obtain
\[\complTime{\mathcal{S}'}{j_{t+1}}{\Mshared}\leq\complTime{\mathcal{S}}{j_{t+1}}{\Mshared}\leq\complTime{\mathcal{S}}{j_{t+1}}{\cP}=\complTime{\mathcal{S}'}{j_{t+1}}{\cP}\]
and, also by \eqref{eq:ordered:normal},
\[\complTime{\mathcal{S}'}{j_{t}}{\Mshared}=\complTime{\mathcal{S}}{j_{t+1}}{\Mshared}\leq\complTime{\mathcal{S}}{j_{t+1}}{\cP}<\complTime{\mathcal{S}}{j_t}{\cP},\]
which proves that $\mathcal{S}'$ is normal.
Clearly, $\tct{\mathcal{S}'}=\tct{\mathcal{S}}$.
Set $\mathcal{S}:=\mathcal{S}'$ and repeat the exchange if need be.
After a finite number of such exchanges we arrive at a schedule that is ordered.
\end{proof}
\section{Optimal schedules are synchronized} \label{sec:optimal}
We say that a schedule is \emph{synchronized} if it is normal, non-preemptive and for each job $j$ whose part executes on some shared processor it holds $\complTime{\mathcal{S}}{j}{\Mshared}=\complTime{\mathcal{S}}{j}{\cP}$.
Note that a synchronized schedule is also ordered but the reverse implication does not hold in general.
In order to prove that there are optimal schedules that are synchronized we introduce \emph{pulling} and \emph{pushing} schedule transformations.
Let $\mathcal{S}$ be an optimal ordered (possibly synchronized) schedule.
Consider a shared processor $\Mshared_r$.
Let $X_r=\{j_1,\ldots,j_k\}$, $k>1$, be jobs executed on $\Mshared_r$ in $\mathcal{S}$
and ordered according to increasing order of their completion times on $\Mshared_r$.
Let $i\in\{2,\ldots,k\}$ be an index such that $\complTime{\mathcal{S}}{j_{\ell}}{\Mshared}=\complTime{\mathcal{S}}{j_{\ell}}{\cP}$ for each $\ell\in\{i,\ldots,k\}$.
(Recall that $\complTime{\mathcal{S}}{j_{i}}{\Mshared}\leq\complTime{\mathcal{S}}{j_{i}}{\cP}$ since $\mathcal{S}$ is normal.)
Observe that $j_k$ completes at the same time on its private processor (since $\mathcal{S}$ is optimal) and on $\Mshared_r$ and hence the index $i$ is well defined.
Finally, let
\[0<\varepsilon\leq\startTime{\mathcal{S}}{j_i}{\Mshared}-\startTime{\mathcal{S}}{j_{i-1}}{\Mshared} = \complTime{\mathcal{S}}{j_{i-1}}{\Mshared}-\startTime{\mathcal{S}}{j_{i-1}}{\Mshared}.\]
We define an operation of \emph{pulling of}
$j_{i}$ by $\varepsilon$ in $\mathcal{S}$ as a transformation of $\mathcal{S}$ that results in a schedule $\mathcal{S}'$ defined as follows.
First, $\mathcal{S}$ and $\mathcal{S}'$ are identical on $\Mshared_r$ in time interval $(0,\startTime{\mathcal{S}}{j_i}{\Mshared}-\varepsilon)$.
Then, for the job $j_{i-1}$ we set:
\[\complTime{\mathcal{S}'}{j_{i-1}}{\Mshared}=\complTime{\mathcal{S}}{j_{i-1}}{\Mshared}-\varepsilon, \quad\textup{and}\quad\complTime{\mathcal{S}'}{j_{i-1}}{\cP}=\complTime{\mathcal{S}}{j_{i-1}}{\cP}+\varepsilon.\]
Next, for each $\ell\in\{i,\ldots,k\}$ (by proceeding with subsequent increasing values of $\ell$) we define how $j_{\ell}$ is executed in $\mathcal{S}'$:
\[\startTime{\mathcal{S}'}{j_{\ell}}{\Mshared}=\complTime{\mathcal{S}'}{j_{\ell-1}}{\Mshared}, \quad \complTime{\mathcal{S}'}{j_{\ell}}{\Mshared}=\complTime{\mathcal{S}}{j_{\ell}}{\Mshared}-\varepsilon/2^{\ell-i+1}, \quad \complTime{\mathcal{S}'}{j_{\ell}}{\cP}=\complTime{\mathcal{S}}{j_{\ell}}{\cP}-\varepsilon/2^{\ell-i+1}.\]
Finally, $\mathcal{S}'$ and $\mathcal{S}$ are identical on all other processors, i.e., on all processors different from $\Mshared_r$ and $\cP_{j_{i-1}},\cP_{j_i},\ldots,\cP_{j_k}$. The operation of pulling $j_3$ by $\varepsilon$ is illustrated in Figure~\ref{fig:pulling}.
Note that if we take $\varepsilon=\startTime{\mathcal{S}}{j_i}{\Mshared}-\startTime{\mathcal{S}}{j_{i-1}}{\Mshared}$, i.e., $\varepsilon$ equals the length of the entire execution interval of $j_{i-1}$ on $\Mshared_r$, then pulling of $j_i$ by $\varepsilon$ produces $\mathcal{S}'$ in which $j_{i-1}$ executes only on its private processor. From this definition we have the following.
\begin{figure}
\caption{An example of pulling of $j_3$ by $\varepsilon$ in some schedule $\mathcal{S}$.}
\label{fig:pulling}
\end{figure}
\begin{lemma} \label{lem:pulling}
The pulling of $j_i$ by $\varepsilon$ in $\mathcal{S}$ produces a feasible schedule $\mathcal{S}'$ and
\[\tct{\mathcal{S}'}=\tct{\mathcal{S}}-\varepsilon w_{j_{i-1}}+\varepsilon\sum_{\ell=i}^k \frac{w_{j_{\ell}}}{2^{\ell-i+1}}.\]
\hspace*{\fill}\nolinebreak\ensuremath{\Box}
\end{lemma}
We also define a transformation that is a `reverse' of pulling.
Recall that $j_1,\ldots,j_k$ are the jobs executing on the processor $\Mshared_r$.
For this transformation we assume that the schedule $\mathcal{S}$ is ordered but not synchronized and some job completes on $\Mshared_r$ earlier than on its private processor.
Select $i\in\{2,\ldots,k\}$ to be the index such that $\complTime{\mathcal{S}}{j_{\ell}}{\Mshared}=\complTime{\mathcal{S}}{j_{\ell}}{\cP}$ for each $\ell\in\{i,\ldots,k\}$
and $\complTime{\mathcal{S}}{j_{i-1}}{\cP}>\complTime{\mathcal{S}}{j_{i-1}}{\Mshared}$.
The index $i$ is well defined because $\complTime{\mathcal{S}}{j_k}{\Mshared}=\complTime{\mathcal{S}}{j_k}{\cP}$ in an optimal schedule $\mathcal{S}$.
Let
\begin{equation} \label{eq:epsp1}
0 < \varepsilon \leq \frac{1}{2}\left(\complTime{\mathcal{S}}{j_{i-1}}{\cP}-\complTime{\mathcal{S}}{j_{i-1}}{\Mshared}\right)
\end{equation}
and for each $\ell\in\{i,\ldots,k\}$,
\begin{equation} \label{eq:epsp2}
\frac{\varepsilon}{2^{\ell-i+1}} \leq \complTime{\mathcal{S}}{j_{\ell}}{\Mshared}-\startTime{\mathcal{S}}{j_{\ell}}{\Mshared}.
\end{equation}
The transformation of \emph{pushing of} $j_i$ by $\varepsilon$ in $\mathcal{S}$ produces schedule $\mathcal{S}'$ defined as follows.
Both $\mathcal{S}'$ and $\mathcal{S}$ are identical on $\Mshared_r$ in the time interval $(0,\startTime{\mathcal{S}}{j_{i-1}}{\Mshared})$,
\[\complTime{\mathcal{S}'}{j_{i-1}}{\Mshared}=\complTime{\mathcal{S}}{j_{i-1}}{\Mshared}+\varepsilon\textup{ and }\complTime{\mathcal{S}'}{j_{i-1}}{\cP}=\complTime{\mathcal{S}}{j_{i-1}}{\cP}-\varepsilon.\]
Then, for each $\ell\in\{i,\ldots,k\}$ (with increasing values of $\ell$) we have
\[\startTime{\mathcal{S}'}{j_{\ell}}{\Mshared}=\complTime{\mathcal{S}'}{j_{\ell-1}}{\Mshared}, \quad \complTime{\mathcal{S}'}{j_{\ell}}{\Mshared}=\complTime{\mathcal{S}}{j_{\ell}}{\Mshared}+\frac{\varepsilon}{2^{\ell-i+1}}\]
and
\[\complTime{\mathcal{S}'}{j_{\ell}}{\cP}=\complTime{\mathcal{S}}{j_{\ell}}{\cP}+\frac{\varepsilon}{2^{\ell-i+1}}.\]
On each shared processor different than $\Mshared_r$ and on private processors different than $\cP_{j_{i-1}},\ldots,\cP_{j_k}$ the schedules $\mathcal{S}$ and $\mathcal{S}'$ are the same.
Note that if $\varepsilon/2^{\ell-i+1}=\complTime{\mathcal{S}}{j_{\ell}}{\Mshared}-\startTime{\mathcal{S}}{j_{\ell}}{\Mshared}$ for some $\ell\in\{i,\ldots,k\}$, then the pushing operation eliminates $j_{\ell}$ from the shared processor, i.e., $j_{\ell}$ executes only on its private processor in $\mathcal{S}'$. From this definition we have the following.
\begin{lemma} \label{lem:pushing}
The pushing of $j_i$ by $\varepsilon$ in $\mathcal{S}$ produces a feasible schedule $\mathcal{S}'$ and
\[\tct{\mathcal{S}'}=\tct{\mathcal{S}}+\varepsilon w_{j_{i-1}}-\varepsilon\sum_{\ell=i}^k \frac{w_{j_{\ell}}}{2^{\ell-i+1}}.\]
\hspace*{\fill}\nolinebreak\ensuremath{\Box}
\end{lemma}
We also note that if one first makes pulling of some job $j'$ by $\varepsilon$ in a schedule $\mathcal{S}$ which results in a schedule $\mathcal{S}'$ in which the same job precedes $j'$ on the shared processor both in $\mathcal{S}$ and $\mathcal{S}'$, then pushing of $j'$ by $\varepsilon$ in $\mathcal{S}'$ results in returning back to $\mathcal{S}$.
We are now ready to prove our main result of this section, which will allow us to work only with synchronized schedules in the sections that follow.
\begin{lemma} \label{lem:synchronized}
There exists an optimal synchronized schedule.
\end{lemma}
\begin{proof}
Let $\mathcal{S}$ be an optimal schedule. By Observation~\ref{lem:ordered}, we may assume without loss of generality that $\mathcal{S}$ is ordered.
Suppose that $\mathcal{S}$ is not synchronized. We will convert $\mathcal{S}$ into a synchronized schedule by iteratively performing transformations described below.
Let $\Mshared_r$ be a shared processor such that there exists a job assigned to $\Mshared_r$ that completes earlier on $\Mshared_r$ than on its private processor.
Let $X_r=\{j_1,\ldots,j_k\}$, $k>1$, be jobs executed on $\Mshared_r$ in $\mathcal{S}$
and ordered according to increasing order of their completion times on $\Mshared_r$.
Let $i\in\{2,\ldots,k\}$ be the minimum index such that $\complTime{\mathcal{S}}{j_{\ell}}{\Mshared}=\complTime{\mathcal{S}}{j_{\ell}}{\cP}$ for each $\ell\in\{i,\ldots,k\}$.
Since $\mathcal{S}$ is not synchronized and $\complTime{\mathcal{S}}{j_k}{\Mshared}=\complTime{\mathcal{S}}{j_k}{\cP}$ in an optimal schedule, the index $i$ is well defined and $\complTime{\mathcal{S}}{j_{i-1}}{\Mshared}<\complTime{\mathcal{S}}{j_{i-1}}{\cP}$ by the minimality of $i$.
We first argue that
\begin{equation} \label{eq:synchronized:ineq}
w_{j_{i-1}}\geq\sum_{\ell=i}^k \frac{w_{j_{\ell}}}{2^{\ell-i+1}}.
\end{equation}
Consider pulling of $j_i$ by $\varepsilon=\complTime{\mathcal{S}}{j_{i-1}}{\Mshared}-\startTime{\mathcal{S}}{j_{i-1}}{\Mshared}$ in $\mathcal{S}$ that produces a schedule $\mathcal{S}'$.
Then, by Lemma~\ref{lem:pulling} and the optimality of $\mathcal{S}$,
\[\tct{\mathcal{S}'}=\tct{\mathcal{S}}-\varepsilon w_{j_{i-1}}+\varepsilon\sum_{\ell=i}^k \frac{w_{j_{\ell}}}{2^{\ell-i+1}}
=\tct{\mathcal{S}}-\varepsilon\left( w_{j_{i-1}}-\sum_{\ell=i}^k \frac{w_{j_{\ell}}}{2^{\ell-i+1}}\right)\leq\tct{\mathcal{S}},\]
which proves~\eqref{eq:synchronized:ineq}.
Following~\eqref{eq:epsp1} and~\eqref{eq:epsp2}, define
\[\varepsilon'=\min\left\{\left(\complTime{\mathcal{S}}{j_{i-1}}{\cP}-\complTime{\mathcal{S}}{j_{i-1}}{\Mshared}\right)/2,
\min\left\{2^{\ell-i+1}\left(\complTime{\mathcal{S}}{j_{\ell}}{\Mshared}-\startTime{\mathcal{S}}{j_{\ell}}{\Mshared}\right)\hspace{0.1cm}\bigl|\bigr.\hspace{0.1cm} \ell=i,\ldots,k\right\}\right\}.\]
Since $\mathcal{S}$ is normal, by the choice of $i$ we have $\varepsilon'>0$.
Obtain a schedule $\mathcal{S}'$ by performing pushing of $j_i$ by $\varepsilon'$ in $\mathcal{S}$.
Note that if $\varepsilon'=(\complTime{\mathcal{S}}{j_{i-1}}{\cP}-\complTime{\mathcal{S}}{j_{i-1}}{\Mshared})/2$, then $j_{i-1}$ completes at the same time on the shared and private processors in $\mathcal{S}'$.
If, on the other hand, $\varepsilon'=2^{\ell-i+1}(\complTime{\mathcal{S}}{j_{\ell}}{\Mshared}-\startTime{\mathcal{S}}{j_{\ell}}{\Mshared})$ for some $\ell\in\{i,\ldots,k\}$, then $j_{\ell}$ is eliminated from the shared processor, i.e., $j_{\ell}$ executes only on its private processor in $\mathcal{S}'$.
By Lemma~\ref{lem:pushing} and \eqref{eq:synchronized:ineq},
\[\tct{\mathcal{S}'}=\tct{\mathcal{S}}+\varepsilon' w_{j_{i-1}}- \varepsilon'\sum_{\ell=i}^k \frac{w_{j_{\ell}}}{2^{\ell-i+1}}
=\tct{\mathcal{S}}+\varepsilon'\left( w_{j_{i-1}}-\sum_{\ell=i}^k \frac{w_{j_{\ell}}}{2^{\ell-i+1}}\right)\geq\tct{\mathcal{S}}.\]
Moreover, $\mathcal{S}'$ satisfies the following two conditions:
\begin{enumerate} [label={\normalfont{(\alph*)}}]
\item\label{eq:item:syn1} for each $\ell\in\{i,\ldots,k\}$, if $j_{\ell}$ is assigned to $\Mshared_r$ in $\mathcal{S}'$, then $\complTime{\mathcal{S}'}{j_{\ell}}{\Mshared}=\complTime{\mathcal{S}'}{j_{\ell}}{\cP}$,
\item\label{eq:item:syn2} if all jobs $j_{\ell},\ldots,j_k$ are assigned to $\Mshared_r$ in $\mathcal{S}'$, then $\complTime{\mathcal{S}'}{j_{i-1}}{\Mshared}=\complTime{\mathcal{S}'}{j_{i-1}}{\cP}$.
\end{enumerate}
Set $\mathcal{S}:=\mathcal{S}'$ and repeat the transformation.
Condition~\ref{eq:item:syn1} ensures that if a job completes at the same time on private and shared processors in $\mathcal{S}$, then this property is either preserved by the transformation or the job is executed only on its private processor in the new schedule $\mathcal{S}'$.
Note that in the latter case, such a job will remain on its private processor during future transformations, never `returning' back to any shared processor; this follows directly from the pushing transformation.
Thus, in each transformation either the number of jobs executing on shared processors decreases or, due to \ref{eq:item:syn2}, if this number does not decrease, then the number of jobs that complete at the same time on private and shared processors increases by one.
Hence it follows that after at most $2|\mathcal{J}|$ transformations we obtain an optimal schedule that is synchronized.
\end{proof}
In a synchronized schedule, the order $j_1,\ldots,j_k$ of job executions on a shared processor $\Mshared_r$, $r\in\{1,\ldots,m\}$, uniquely determines the schedule on $\Mshared_r$.
\section{V-shapeness and duality of some instances} \label{sec:Vshape}
Our main goal in the section is to introduce special classes of instances that will provide a key to the complexity analysis of the problem in the next section. The following observation was made by \cite{VairaktarakisAydinliyim07} for a single shared processor non-preemptive problem and extended to preemptive one in \cite{HK15}. It will be used often in the remainder of the paper.
\begin{observation}
\label{obs:T_i}
If jobs $j_1,\ldots,j_k$ with processing times $p_1,\ldots,p_k$, respectively, are executed on a shared processor in some synchronized schedule in that order,
then the job $j_i$ executes in time interval $(T_i,T_{i+1})$ of length $\bar{t_i}$, where $T_1=0$ and
\[T_i=\sum_{\ell=1}^{i-1} \frac{p_{\ell}}{2^{i-\ell}}\textup{ for each }i\in\{2,\ldots,k+1\}, \quad \bar{t_i}=\frac{p_i}{2}-\sum_{\ell=1}^{i-1}\frac{p_{\ell}}{2^{i-\ell+1}}=\frac{p_i}{2}-\frac{T_i}{2}\textup{ for each }i\in\{1,\ldots,k\}.\]
\hspace*{\fill}\nolinebreak\ensuremath{\Box}
\end{observation}
\subsection{Consecutive job exchange for processing-time-inclusive instances} \label{lem:V:general}
We begin with a lemma which allows us to calculate the difference in total weighted overlaps of two schedules, one of which is obtained from the other by exchanging two consecutive jobs on a shared processor.
This exchange is complicated by the fact that the job that comes later in the permutation after the exchange may no longer fit on the shared processor, since this job may prove too short for that. Generally, the test whether this actually happens depends not only on the processing times of the jobs that precede the later job but also on their \emph{order}. Instead, we
would like to be able to select an arbitrary subset $A$ of $\mathcal{J}$, take any permutation of the jobs in $A$, and always guarantee that there exists a synchronized schedule that has exactly the jobs in $A$ that appear in the order determined by the permutation on the shared processor. Clearly, this freedom cannot be ensured for arbitrary instances. Therefore we introduce
an easy to test sufficient condition that would always guarantee the validity of the job exchange.
Consider a set of jobs $A=\{j_1,\ldots,j_k\}$, where we assume $p_{1}\leq p_{2}\leq\cdots\leq p_{k}$; here we take $p_i$ to be the processing time of the job $j_i$, $i\in\{1,\ldots,k\}$.
We say that the set of jobs $A$ is \emph{processing-time-inclusive} if
\[x=T_{|A|}=\sum_{\ell=1}^{|A|-1} \frac{p_{\ell+1}}{2^{|A|-\ell}} < p_1.\]
Note that $x$ is the makespan of a schedule on a shared processor for jobs in $A\setminus\{j_1\}$ when the jobs are scheduled in ascending order of their processing times, i.e., the order $j_2,\ldots,j_k$.
By~\cite{VairaktarakisAydinliyim07}, the ascending order of processing times of jobs in $A\setminus\{j_1\}$ provides the longest schedule on the shared processor.
Thus, in other words, for processing-time-inclusive jobs $A$, the makespan $x$ is shorter than the shortest job in $A$.
The condition can be checked in time $O(|\mathcal{J}| \log |\mathcal{J}|)$.
Finally, for a permutation $j_1,\ldots,j_k$ of jobs with weights $w_1,\ldots,w_k$, respectively,
define
\begin{equation} \label{W}
W_i=\sum_{\ell=i+2}^{k} \frac{w_{\ell}}{2^{\ell-i-1}}
\end{equation}
for each $i\in\{-1,0,1,\ldots,k-2\}$.
\begin{lemma} \label{lem:switching}
Let $\mathcal{S}$ be a synchronized schedule that executes processing-time-inclusive jobs $j_1,\ldots,j_k$ with processing times $p_1,\ldots,p_k$ and weights $w_1,\ldots,w_k$, respectively, in the order $j_1,\ldots,j_k$ on a shared processor $\mathcal{M}$, and let $i\in\{1,\ldots,k-1\}$.
Let $\mathcal{S}'$ be a synchronized schedule obtained by exchanging jobs $j_i$ and $j_{i+1}$ in $\mathcal{S}$ so that all jobs are executed in the order $j_1,\ldots,j_{i-1},j_{i+1},j_i,j_{i+2},\ldots,j_k$ on $\mathcal{M}$.
Then,
\[\tct{\mathcal{S}}-\tct{\mathcal{S}'}= \frac{(w_{i+1}-w_i)T_i}{4} + \frac{(p_i-p_{i+1})W_{i}}{4} + \frac{w_ip_{i+1}}{4} - \frac{w_{i+1}p_i}{4}.\]
\end{lemma}
\begin{proof}
Note that the construction of $\mathcal{S}'$ is valid since the jobs are processing-time-inclusive.
We calculate the values of $\mathcal{S}$ and $\mathcal{S}'$ on $\mathcal{M}$ only since the schedules on other shared processors remain unchanged and thus contribute the same amount $\sigma$ to the total weighted overlap of both schedules.
By Observation~\ref{obs:T_i} we have
\begin{equation} \label{eq:S:sum}
\tct{\mathcal{S}} = \sigma + \sum_{\ell=1}^k \bar{t_{\ell}}w_{\ell},
\end{equation}
for $\mathcal{S}$ and
\begin{equation} \label{eq:S':sum}
\tct{\mathcal{S}'} = \sigma + \sum_{\ell=1}^{i-1} \bar{t_{\ell}}w_{\ell}+ \frac{(p_{i+1}-T_i)w_{i+1}}{2} + \frac{(p_{i}-p_{i+1}/2-T_i/2)w_{i}}{2} + \sum_{\ell=i+2}^{k} (T_{\ell+1}'-T_{\ell}')w_{\ell},
\end{equation}
for $\mathcal{S}'$, where
\[T_{\ell}'=\frac{T_i}{2^{\ell-i}}+\frac{p_{i+1}}{2^{\ell-i}}+\frac{p_i}{2^{\ell-i-1}}+\sum_{\ell'=i+2}^{\ell-1}\frac{p_{\ell'}}{2^{\ell-\ell'}}\]
for each $\ell\in\{i+2,\ldots,k+1\}$.
We obtain
\[T_{\ell}-T_{\ell}'=\frac{p_i}{2^{\ell-i}} + \frac{p_{i+1}}{2^{\ell-i-1}} - \frac{p_{i+1}}{2^{\ell-i}} - \frac{p_i}{2^{\ell-i-1}} = \frac{p_{i+1}-p_i}{2^{\ell-i}},\]
for each $\ell\in\{i+2,\ldots,k+1\}$.
Thus,
\begin{equation} \label{eq:W_i}
\left(\sum_{\ell=i+2}^k (T_{\ell+1}-T_{\ell})w_{\ell}-\sum_{\ell=i+2}^{k} (T_{\ell+1}'-T_{\ell}')w_{\ell}\right) = (p_i-p_{i+1})\sum_{\ell=i+2}^{k}\frac{w_{\ell}}{2^{\ell+1-i}}.
\end{equation}
For notational brevity set
\begin{equation} \label{eq:fw_i}
f(w_i) := w_i\left( T_{i+1}-T_i+\frac{T_i}{4}-\frac{p_i}{2}+\frac{p_{i+1}}{4}\right) = w_i\left(\frac{T_i}{2}+\frac{p_i}{2}-\frac{3T_i}{4}-\frac{p_i}{2}+\frac{p_{i+1}}{4} \right) = w_i\left(\frac{p_{i+1}}{4}-\frac{T_i}{4}\right),
\end{equation}
and
\begin{equation} \label{eq:fw_i+1}
f(w_{i+1}) := w_{i+1}\left( T_{i+2}-T_{i+1}-\frac{p_{i+1}}{2}+\frac{T_i}{2} \right) = w_{i+1}\left( \frac{T_i}{4}+\frac{p_{i+1}}{2}+\frac{p_i}{4} -\frac{p_i}{2}-\frac{p_{i+1}}{2} \right) = w_{i+1}\left(\frac{T_i}{4}-\frac{p_i}{4}\right).
\end{equation}
By \eqref{eq:S:sum}, \eqref{eq:S':sum}, \eqref{eq:W_i}, \eqref{eq:fw_i} and \eqref{eq:fw_i+1} we obtain
\begin{eqnarray*}
\tct{\mathcal{S}}-\tct{\mathcal{S}'} & = & \left((T_{i+1}-T_{i})w_{i}-\frac{(p_{i+1}-T_i)w_{i+1}}{2}\right) + \left((T_{i+2}-T_{i+1})w_{i+1}-\frac{(p_{i}-p_{i+1}/2-T_i/2)w_{i}}{2}\right) \\
& + & \left(\sum_{\ell=i+2}^k (T_{\ell+1}-T_{\ell})w_{\ell}-\sum_{\ell=i+2}^{k} (T_{\ell+1}'-T_{\ell}')w_{\ell}\right) \\
& = & f(w_i)+f(w_{i+1}) + (p_i-p_{i+1})\sum_{\ell=i+2}^{k} \frac{w_{\ell}}{2^{\ell+1-i}} \\
& = & \frac{(w_{i+1}-w_i)T_i}{4} + \frac{(p_i-p_{i+1})W_{i}}{4} + \frac{w_ip_{i+1}}{4} - \frac{w_{i+1}p_i}{4}
\end{eqnarray*}
as required.
\end{proof}
The next observation follows directly from Lemma~\ref{lem:pulling} (see also~\eqref{eq:synchronized:ineq}).
\begin{observation} \label{obs:wW}
Let $\mathcal{S}$ be an optimal synchronized schedule that executes jobs $j_1,\ldots,j_k$ with
weights $w_1,\ldots,w_k$, respectively,
on a shared processor in the order $j_1,\ldots,j_k$.
Then, $w_i\geq W_{i-1}$ for each $i\in\{1,\ldots,k-1\}$.
\hspace*{\fill}\nolinebreak\ensuremath{\Box}
\end{observation}
We finish this section with the following feature of optimal synchronized schedules.
\begin{observation} \label{obs:TW}
Let $\mathcal{S}$ be an optimal synchronized schedule that executes
jobs $j_1,\ldots,j_k$ with processing times $p_1,\ldots,p_k$ and weights $w_1,\ldots,w_k$, respectively, on a shared processor in the order $j_1,\ldots,j_k$.
Then,
\[0=T_1< T_2 < \cdots < T_{k+1},\]
and
\[W_{-1}\geq W_0\geq W_1\geq\cdots\geq W_{k-2}.\]
\end{observation}
\begin{proof}
By Observation \ref {obs:T_i} we have $T_{i+1}=\frac{p_i}{2}+\frac{T_i}{2}$ for each $i\in\{1,\ldots,k\}$. Since $j_i$ is executed on the shared processor, we have $p_i>T_i$.
Thus, $T_{i+1}>\frac{T_i}{2}+\frac{T_i}{2}=T_i$ for each $i\in\{1,\ldots,k\}$.
By~\eqref{W} we have $W_i=\frac{w_{i+2}}{2}+\frac{W_{i+1}}{2}$ for each $i\in\{-1,0,1,\ldots,k-3\}$.
By Observation \ref{obs:wW}, $w_{i+2}\geq W_{i+1}$ for each $i\in\{-1,0,1,\ldots,k-3\}$.
Thus, $W_i\geq \frac{W_{i+1}}{2}+\frac{W_{i+1}}{2}=W_{i+1}$ for each $i\in\{-1,0,1,\ldots,k-3\}$.
\end{proof}
\subsubsection{Instances with $p_i=w_i$} \label{lem:V:restricted}
Let $\mathcal{S}$ be a synchronized schedule that executes jobs $j_1,\ldots,j_k$ with processing times $p_1,\ldots,p_k$ on a shared processor in the order $j_1,\ldots,j_k$. The schedule $\mathcal{S}$ is called \emph{V-shaped} if, for each shared processor, there exists an index $\ell\in\{1,\ldots,k\}$ such that $p_1\geq p_2\geq\cdots\geq p_{\ell}$ and $p_{\ell}\leq p_{\ell+1}\leq\cdots\leq p_{k}$.
\begin{lemma} \label{lem:Vshape}
Optimal synchronized schedules for instances with processing-time-inclusive jobs $\mathcal{J}$ and with $p_i=w_i$ for $i\in \mathcal{J}$,
are V-shaped.
\end{lemma}
\begin{proof}
Let $\mathcal{S}$ be an optimal synchronized schedule for $\mathcal{J}$.
Take an arbitrary shared processor, and let $j_1,\ldots,j_k$ be the order of jobs on this processor.
Since $\mathcal{S}$ is optimal, by Lemma~\ref{lem:switching} for each $i\in\{1,\ldots,k-1\}$ (note that the jobs in $\mathcal{J}$ are processing-time-inclusive by assumption as required in the lemma),
\begin{equation} \label{eq:non-positive}
\frac{(p_{i+1}-p_i)(T_i-W_{i})}{4} \geq 0
\end{equation}
because $w_i=p_i$ and $w_{i+1}=p_{i+1}$.
For each $i\in\{1,\ldots,k-1\}$, denote $b_i=(p_{i+1}-p_i)/|p_{i+1}-p_i|$ if $p_{i+1}\neq p_i$, and $b_i=0$ if $p_{i+1}=p_i$.
Note that if all $b_i$'s are non-negative or all of them are non-positive, then $p_1\leq\cdots\leq p_k$ and $p_1\geq\cdots\geq p_k$, respectively, and hence $\mathcal{S}$ is V-shaped.
Define $l=\max\{i\hspace{0.1cm}\bigl|\bigr.\hspace{0.1cm} b_i=-1\}$ and $r=\min\{i\hspace{0.1cm}\bigl|\bigr.\hspace{0.1cm} b_i=1\}$.
Note that $l<r$ implies
\[ p_1\geq p_2\geq\cdots\geq p_{l+1} \quad\textup{and}\quad p_{l+1}\leq p_{l+2}\leq\cdots\leq p_k\]
and thus $\mathcal{S}$ is V-shaped.
Hence, it remains to argue that $l<r$.
Suppose for a contradiction that $l>r$ (note that $l\neq r$ by definition).
By \eqref{eq:non-positive},
\[\frac{b_r(T_r-W_{r})}{4} \geq 0\]
which implies that $T_r\geq W_{r}$.
By Observation~\ref{obs:TW}, $W_{r}\geq W_{l}$ and $T_r<T_l$, which implies $T_l> W_{l}$.
Since $b_l=-1$, this gives $b_l(T_l-W_{l})<0$.
Hence,
\[\frac{(p_{l+1}-p_l)(T_l-W_{l})}{4} < 0\]
which contradicts \eqref{eq:non-positive} and completes the proof.
\end{proof}
\subsection{Duality and Reversibility}
This section introduces duality of processing times and weights in the WSMP problem. This duality will be used in the next section to prove the problem strong NP-hardness.
The duality is particularly easy to observe from a matrix representation of the total weighted overlap of a synchronized schedule.
Let us define two $k\times k$ matrices
\[
\mathbf{L}_k = \begin{bmatrix}
0 & 0 & 0 & \cdots & 0 & 0 \\
2^{-1} & 0 & 0 & \cdots & 0 & 0 \\
2^{-2} & 2^{-1} & 0 & \cdots & 0 & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\
2^{-(k-2)} & 2^{-(k-3)} & 2^{-(k-4)} & \cdots & 0 & 0 \\
2^{-(k-1)} & 2^{-(k-2)} & 2^{-(k-3)} & \cdots & 2^{-1} & 0 \\
\end{bmatrix},
\quad \textup{and}\quad
\mathbf{U}_k= \begin{bmatrix}
0 & 2^{-1} & \cdots & 2^{-(k-3)} & 2^{-(k-2)} & 2^{-(k-1)} \\
0 & 0 & \cdots & 2^{-(k-4)} & 2^{-(k-3)} & 2^{-(k-2)} \\
0 & 0 & \cdots & 2^{-(k-5)} & 2^{-(k-4)} & 2^{-(k-3)}\\
\vdots & \vdots & \ddots & \vdots & \vdots & \vdots \\
0 & 0 & \cdots & 0 & 0 & 2^{-1} \\
0 & 0 & \cdots & 0 & 0 & 0 \\
\end{bmatrix}.
\]
Let $\mathbf{W}$ be the vector of weights, $\mathbf{W}=[w_1\,\ldots\,w_k]$, and $\mathbf{P}$ be the vector of processing times, $\mathbf{P}=[p_1\,\ldots\,p_k]$.
Since $\mathbf{U}_k=\mathbf{L}_k^{\textup{T}}$ and $(\mathbf{A}\cdot\mathbf{B})^{\textup{T}}=\mathbf{B}^{\textup{T}}\cdot\mathbf{A}^{\textup{T}}$, we observe the following.
\begin{observation} \label{obs:matrices}
It holds $\mathbf{W}\cdot\mathbf{L}_k\cdot\mathbf{P}^{\textup{T}}=\mathbf{P}\cdot\mathbf{U}_k\cdot\mathbf{W}^{\textup{T}}$.
\hspace*{\fill}\nolinebreak\ensuremath{\Box}
\end{observation}
The above matrix notation can be conveniently used to express the total weighted overlap of a given schedule as stated in the next lemma.
\begin{lemma} \label{lem:SviaMatrices}
Let $\mathcal{S}$ be a synchronized schedule that executes jobs $j_1,\ldots,j_k$ with processing times $p_1,\ldots,p_k$ and weights $w_1,\ldots,w_k$, respectively, on a single shared processor in the order $j_1,\ldots,j_k$.
Then,
\[\tct{\mathcal{S}}=\frac{1}{2}\mathbf{P}\cdot\mathbf{I}_k\cdot\mathbf{W}^{\textup{T}}-\frac{1}{2}\mathbf{W}\cdot\mathbf{L}_k\cdot\mathbf{P}^{\textup{T}}=\frac{1}{2}\mathbf{W}\cdot\mathbf{I}_k\cdot\mathbf{P}^{\textup{T}}-\frac{1}{2}\mathbf{P}\cdot\mathbf{U}_k\cdot\mathbf{W}^{\textup{T}},\]
where $\mathbf{I}_k$ is the $k\times k$ identity matrix.
\end{lemma}
\begin{proof}
By Observation \ref{obs:T_i} we have
\[
\tct{\mathcal{S}} = \sum_{\ell=1}^k (T_{\ell+1}-T_{\ell})w_{\ell}
= \sum_{\ell=1}^k \left(\sum_{t=1}^{\ell}\frac{p_t}{2^{\ell+1-t}} - \sum_{t=1}^{\ell-1}\frac{p_t}{2^{\ell-t}}\right) w_{\ell}
= \sum_{\ell=1}^k \frac{p_{\ell}w_{\ell}}{2} - \sum_{\ell=1}^k \sum_{t=1}^{\ell-1} \frac{p_t w_{\ell}}{2^{\ell+1-t}}.
\]
Note that
\[\sum_{\ell=1}^k \frac{p_{\ell}w_{\ell}}{2} = \frac{1}{2}\mathbf{P}\cdot\mathbf{I}_k\cdot\mathbf{W}^{\textup{T}}=\frac{1}{2}\mathbf{W}\cdot\mathbf{I}_k\cdot\mathbf{P}^{\textup{T}}\]
and, by Observation~\ref{obs:matrices},
\[\sum_{\ell=1}^k \sum_{t=1}^{\ell-1} \frac{p_t w_{\ell}}{2^{\ell+1-t}} = \frac{1}{2}\mathbf{W}\cdot\mathbf{L}_k\cdot\mathbf{P}^{\textup{T}}= \frac{1}{2}\mathbf{P}\cdot\mathbf{U}_k\cdot\mathbf{W}^{\textup{T}},\]
which completes the proof.
\end{proof}
This lemma points to a duality of processing times and weights in the WSMP problem, namely, $\frac{1}{2}\mathbf{W}\cdot\mathbf{I}_k\cdot\mathbf{P}^{\textup{T}}-\frac{1}{2}\mathbf{P}\cdot\mathbf{U}_k\cdot\mathbf{W}^{\textup{T}}$ is a transposition
of $\frac{1}{2}\mathbf{P}\cdot\mathbf{I}_k\cdot\mathbf{W}^{\textup{T}}-\frac{1}{2}\mathbf{W}\cdot\mathbf{L}_k\cdot\mathbf{P}^{\textup{T}}$. The former takes the weights for processing times and the processing times for the weights from the latter, and the $\mathbf{U}_k=\mathbf{L}_k^{\textup{T}}$ reverses the order of jobs from $1,\ldots,k$ to $k,\ldots,1$. Unfortunately, the reversed order may not result in a feasible schedule on the shared processor in general since it may no longer be possible to execute some jobs on the shared processor according to that order because the jobs may prove too short for that. Recall that we exchanged processing times for weights and vice versa besides reversing the order.
Again, the test whether this actually happens depends not only on the weights of jobs but also on their \emph{order}. Therefore we introduce
an easy to test sufficient condition that would always guarantee the validity of the reversed order.
We now introduce a concept analogous to the one of processing-time-inclusive jobs but for the weights.
Consider a set of jobs $A=\{j_1,\ldots,j_k\}$ such that $w_{1}\leq w_{2}\leq\cdots\leq w_{k}$, where $w_i$ is the weight of $j_i$, $i\in\{1,\ldots,k\}$.
We say that the set of jobs $A$ is \emph{weight-inclusive} if
\[x=\sum_{\ell=1}^{|A|-1} \frac{w_{\ell+1}}{2^{|A|-\ell}} < w_1.\]
Note that $x$ is the makespan of a schedule on a shared processor for jobs in $A\setminus\{j_1\}$ when the jobs are scheduled in ascending order of their weights, i.e., the order $j_2,\ldots,j_k$, and the processing time of $j_i$ equals its weight $w_i$ for each $i\in\{2,\ldots,k\}$.
Again, by~\cite{VairaktarakisAydinliyim07}, this order of jobs in $A\setminus\{j_1\}$ provides the longest schedule on the shared processor.
The condition can be checked in time $O(|\mathcal{J}| \log |\mathcal{J}|)$.
We remark that if $A'$ is a set of $k$ jobs such that the processing time of the $i$-th job in $A'$ equals $w_i$, then $A$ is weight-inclusive if and only if $A'$ is processing-time-inclusive.
We have the following duality lemma.
\begin{lemma} \label{lem:reversability}
Let $\mathcal{M}$ be a shared processor with jobs $\mathcal{J}$. Suppose that $\mathcal{J}$ is both processing-time-inclusive and weight-inclusive.
Let $\mathcal{S}$ be any synchronized schedule and let $\mathcal{S}'$ be a synchronized schedule obtained from $\mathcal{S}$ by reversing the order of jobs on $\mathcal{M}$, and by exchanging the processing times for weights and the weights for processing times.
Then, $\tct{\mathcal{S}}=\tct{\mathcal{S}'}$.
\hspace*{\fill}\nolinebreak\ensuremath{\Box}
\end{lemma}
Observe that for the case $p_i=w_i$, the processing-time-inclusion for $\mathcal{J}$ on $\mathcal{M}_{\ell}$ implies the weight-inclusion for $\mathcal{J}$ on $\mathcal{M}_{\ell}$, and the duality reduces to a schedule reversibility.
\section{WSMP is NP-hard in the strong sense} \label{sec:NPC}
In this section we prove, by a transformation from the Numerical 3-Dimensional Matching (N3DM) problem \cite{GareyJohnson79}, that the decision version of the weighted shared multi-processor (WSMP) problem is strongly NP-hard even if for each job its processing time and weight are equal.
The N3DM problem input consists of three multisets of integers $X =\{x_{1} ,\ldots ,x_{n}\} ,Y =\{y_{1} ,\ldots ,y_{n}\}$ and $Z =\{z_{1} ,\ldots ,z_{n}\}$, and an integer $b$.
The decision question is: do there exist multisets $S_1,\ldots,S_n$, each of size $3$, such that $\bigcup_{i=1}^n S_i=X\cup Y\cup Z$ and for each $i\in\{1,\ldots,n\}$ it holds that $\sum_{a\in S_i}a=b$, $|X\cap S_i|=1$, $|Y\cap S_i|=1$ and $|Z\cap S_i|=1$?
In this section we use $\xi(\mathbf{A})$ to denote the sum of all entries of a matrix $\mathbf{A}$.
We construct an instance of the WSMP problem as follows.
The weights are equal to processing times for all jobs.
There are $3 n$ jobs and $n$ shared processors.
The jobs are split into three sets $A ,B$ and $C$ of equal size $n$.
The jobs in $A$ have processing times
\begin{equation*}
s_{i} =2 (M +m +x_{i}) =2 a_{i},
\end{equation*}
the jobs in $B$ have processing times
\begin{equation*}
b_{i} =2 M +y_{i},
\end{equation*}
and the jobs in $C$ have processing times
\begin{equation*}
r_{i} =2 (M +m^{2} +z_{i}) =2 c_{i}.
\end{equation*}
We take the integers $M$ and $m$ as follows:
\begin{equation} \label{eq:Mm}
M >7(m^2+b) \quad \textup{ and }\quad m >\max \{b ,6\}.
\end{equation}
Informally speaking, the $M$ is to guarantee that each shared processor has exactly three jobs in an optimal schedule, the $m$ is to guarantee that each shared processor does exactly one job from each of the sets $A$, $B$ and $C$.
A synchronized schedule $\mathcal{S}$ for the above instance is called \emph{equitable} if each shared processor executes exactly three jobs $i\in A$, $j\in B$ and $k\in C$ with the ordering $(i, j, k)$.
For brevity we define:
\[h(\Delta_1,\ldots,\Delta_n) := \sum _{l =1}^{n}\left(\frac{9}{4}a_{l}^2+\frac{3}{4}b_{l}^2 + \frac{9}{4}c_{l}^2\right) -\frac{1}{4} \sum_{l=1}^n\left(4 M +m +m^{2} +b-\Delta_l\right)^{2}\]
for any integers $\Delta_1,\ldots,\Delta_n$.
The lower bound in the decision counterpart of the WSMP problem is set to $h(0,\ldots,0)$.
The outline of the proof is as follows.
In Lemma~\ref{lem:total} we provide a formula for the total weighted overlap of a given equitable schedule.
Informally speaking, this lemma provides in particular a one-to-one correspondence between the total weighted overlaps of equitable schedules and the values of $h(\Delta_1,\ldots,\Delta_n)$.
This reduces the task of finding equitable schedules that maximize the total weighted overlap to finding values of $\Delta_1,\ldots,\Delta_n$ that maximize the function $h$.
These values are $\Delta_1=\cdots=\Delta_n=0$ as indicated above. We use this observation in Lemma~\ref{lem:NPCiff}; this key lemma proves the correspondence between N3DM and WMSM but it works with equitable schedules only.
More precisely, we argue in Lemma~\ref{lem:NPCiff} that there exists a solution to the N3DM problem if and only if there exists an equitable schedule $\mathcal{S}$ for the WSMP problem for which it holds $\tct{\mathcal{S}}\geq h(0,\ldots,0)$.
Finally, Lemma~\ref{lem:equitable} justifies restricting attention to equitable schedules only: each optimal schedule for an instance of WSMP constructed from an input to the N3DM problem is equitable.
Thus, these three lemmas prove our NP-hardness result stated in Theorem~\ref{thm:NPC}.
\begin{lemma} \label{lem:total}
For an equitable schedule $\mathcal{S}$ it holds
\[\tct{\mathcal{S}}= h(\Delta_1,\ldots,\Delta_n), \]
where $\Delta_{l} =b -(x_{i} +y_{j} +z_{k})$ and $i, j, k$ are jobs from $A ,B$ and $C$, respectively, done on shared processor $\Mshared_l$.
\end{lemma}
\begin{proof}
Consider three jobs $i \in A ,j \in B ,k \in C$ scheduled with the ordering $(i, j, k)$ on some shared processor $\Mshared_l$. Denote this schedule by $\mathcal{S}_l$. Let $\mathbf{P}_l=[s_i, b_j, r_k]$ be the vector of processing times of jobs $i, j, k$.
Since processing time equals the weight for each job, by Lemma~\ref{lem:SviaMatrices}
we have $\tct{\mathcal{S}_l}=\frac{1}{2}\mathbf{P}_l\cdot\mathbf{I}_3\cdot\mathbf{P}_l^{\textup{T}}-\frac{1}{2}\mathbf{P}_l\cdot\mathbf{L}_3\cdot\mathbf{P}_l^{\textup{T}}=\frac{3}{4}\mathbf{P}_l\cdot\mathbf{I}_3\cdot\mathbf{P}_l^{\textup{T}}-\frac{1}{2}\xi(\mathbf{A}_l)$ where
\[
\mathbf{A}_l=\begin{bmatrix}
\frac{1}{2} s_{i} s_{i} & \frac{1}{4} s_{i} b_{j} & \frac{1}{8} s_{i} r_{k} \\
\frac{1}{4} b_{j} s_{i} & \frac{1}{2} b_{j} b_{j} & \frac{1}{4} b_{j} r_{k} \\
\frac{1}{8} r_{k} s_{i} & \frac{1}{4} r_{k} b_{j} & \frac{1}{2} r_{k} r_{k}
\end{bmatrix}
=
\begin{bmatrix}
2 a_{i} a_{i} & \frac{1}{2} a_{i} b_{j} & \frac{1}{2} a_{i} c_{k} \\
\frac{1}{2} b_{j} a_{i} & \frac{1}{2} b_{j} b_{j} & \frac{1}{2} b_{j} c_{k} \\
\frac{1}{2} c_{k} a_{i} & \frac{1}{2} c_{k} b_{j} & 2 c_{k} c_{k}
\end{bmatrix}
= \mathbf{B}_l + \mathbf{C}_l,\]
and where
\[
\mathbf{B}_l=\begin{bmatrix}
\frac{1}{2} a_{i} a_{i} & \frac{1}{2} a_{i} b_{j} & \frac{1}{2} a_{i} c_{k} \\
\frac{1}{2} b_{j} a_{i} & \frac{1}{2} b_{j} b_{j} & \frac{1}{2} b_{j} c_{k} \\
\frac{1}{2} c_{k} a_{i} & \frac{1}{2} c_{k} b_{j} & \frac{1}{2} c_{k} c_{k}
\end{bmatrix}
\textup{ and }
\mathbf{C}_l=\begin{bmatrix}
\frac{3}{2} a_{i} a_{i} & 0 & 0 \\
0 & 0 & 0 \\
0 & 0 & \frac{3}{2} c_{k} c_{k}
\end{bmatrix}.
\]
We have
\begin{equation*}
\xi(\mathbf{B}_l)=\frac{1}{2} (a_{i} +b_{j} +c_{k})^{2} =\frac{1}{2} (M +m +x_{i} +2 M +y_{j} +M +m^{2} +z_{k})^{2} =\frac{1}{2} (4 M +m +m^{2} +x_{i} +y_{j} +z_{k})^{2}
\end{equation*}
and
\begin{equation*}
\xi(\mathbf{C}_l) = \frac{3}{2} a_{i}^{2} +\frac{3}{2} c_{k}^{2}.
\end{equation*}
Therefore,
\begin{equation} \label{eq:total}
\sum_{l=1}^n\xi(\mathbf{A}_l) = \frac{1}{2}\sum_{l=1}^n (4 M +m +m^{2} +b - \Delta_l)^{2} +\frac{3}{2}\sum _{l =1}^{n}(a_{l}^{2} +c_{l}^{2}).
\end{equation}
We finally obtain:
\begin{eqnarray*}
\tct{\mathcal{S}} & = & \sum_{l=1}^n \tct{\mathcal{S}_l} = \sum_{l=1}^n \frac{3}{4}\mathbf{P}_l\cdot\mathbf{I}_3\cdot\mathbf{P}_l^{\textup{T}} - \frac{1}{2}\sum_{l=1}^n \xi(\mathbf{A}_l) \\
& = & \frac{3}{4}\sum_{l=1}^n \left( \frac{1}{2}s_{l}^2+\frac{1}{2}b_{l}^2 + \frac{1}{2}r_{l}^2\right) - \frac{1}{2}\sum_{l=1}^n \xi(\mathbf{A}_l) \\
& = & \sum_{l=1}^n \left( \frac{15}{8}a_{l}^2+\frac{3}{8}b_{l}^2 + \frac{15}{8}c_{l}^2\right) - \frac{1}{4}\sum_{l=1}^n (4 M +m +m^{2} +b - \Delta_l)^{2} \\
& = & h(\Delta_1,\ldots,\Delta_n).
\end{eqnarray*}
\end{proof}
\begin{lemma} \label{lem:NPCiff}
There exists a solution to the \textup{N3DM} problem with the input $X,Y,Z$ and $b$ if and only if for the set of jobs $A\cup B\cup C$ and $n$ shared processors there exists an equitable schedule $\mathcal{S}$ such that $\tct{\mathcal{S}} \geq h(0,\ldots,0)$.
\end{lemma}
\begin{proof}
($\Longrightarrow$)
Suppose that there exists a solution $S_1,\ldots,S_n$ to the \textup{N3DM} problem, where for convenience we take $S_i=\{x_i,y_i,z_i\}$ for each $i\in\{1,\ldots,n\}$.
Construct a schedule $\mathcal{S}$ such that the $i$-th shared processor executes the jobs with processing times $s_i,b_i,r_i$ in this order.
Since $\mathcal{S}$ is equitable, Lemma~\ref{lem:total} implies that $\tct{\mathcal{S}}=h(\Delta_1,\ldots,\Delta_n)$, where $\Delta_i=b-(x_i+y_i+z_i)$ for each $i\in\{1,\ldots,n\}$.
Since $S_1,\ldots,S_n$ is a solution to the \textup{N3DM} problem, $x_i+y_i+z_i=b$ for each $i\in\{1,\ldots,n\}$ and therefore $\tct{\mathcal{S}}=h(0,\ldots,0)$ as required.
\noindent
($\Longleftarrow$)
Suppose there is an equitable schedule $\mathcal{S}$ on $n$ shared processors with $\tct{\mathcal{S}}\geq h(0,\ldots,0)$.
Recall that by definition of equitable schedule each shared processor does exactly three jobs, the first one from $A$, the second from $B$ and the third from $C$.
By Lemma~\ref{lem:total}, $\tct{\mathcal{S}}= h(\Delta_1,\ldots,\Delta_n)$, where $\Delta_{l} =b -(x_{l} +y_{l} +z_{l})$ and $s_{l} ,b_{l} ,r_{l}$ are the processing times of jobs from $A ,B$ and $C$, respectively, done on the $l$-th shared processor for each $l\in\{1,\ldots,n\}$.
Denote
\[g(\Delta_1,\ldots,\Delta_n) := \sum_{l=1}^n\left(4 M +m +m^{2} +b-\Delta_l\right)^{2}.\]
Since\begin{equation*}
\sum _{l =1}^{n} \Delta _{l} = 0
\end{equation*}
we have
\begin{equation*}
g(\Delta_1,\ldots,\Delta_n) = n (4 M +m +m^{2} +b)^{2} +\sum _{l=1}^{n} \Delta_{l}^{2}.
\end{equation*}
By definition,
\[h(\Delta_1,\ldots,\Delta_n)\geq h(0,\ldots,0) \quad\Leftrightarrow\quad g(\Delta_1,\ldots,\Delta_n)\leq g(0,\ldots,0).\]
Moreover,
\[g(\Delta_1,\ldots,\Delta_n)\leq g(0,\ldots,0) \quad\Leftrightarrow\quad \sum_{l=1}^{n}\Delta_{l}^2\leq 0.\]
Thus, $\Delta_{l} =0$ for each $l\in\{1,\ldots,n\}$ and hence $x_l+y_l+z_l=b$ for each $l\in\{1,\ldots,n\}$ which implies that $X,Y,Z$ and $b$ is a solution to N3DM.
\end{proof}
It remains to justify our earlier assumption that it is sufficient to limit ourselves to equitable schedules only.
\begin{lemma} \label{lem:equitable}
For the instance $\mathcal{J}=A\cup B\cup C$ on $n$ shared processors constructed from the input to the \textup{N3DM} problem, each optimal schedule is equitable.
\end{lemma}
\begin{proof}
We first prove that each shared processor does exactly three jobs in any optimal schedule.
Suppose for a contradiction that this is not the case in some optimal schedule $\mathcal{S}$.
Then there exist shared processors $\Mshared_{l'}$ and $\Mshared_{l}$ that execute $x'<3$ and $x>3$ jobs, respectively.
We obtain a new schedule $\mathcal{S}'$ from $\mathcal{S}$ by moving a job $j$ from the last position $x$ on $\Mshared_{l}$ to the last position $x'+1$ on $\Mshared_{l'}$.
Observe that the processing time of each job is at most $2(M+m^2+b)$, and thus the last job on $\Mshared_{l'}$ completes in $\mathcal{S}$ by $\frac{3}{2}(M+m^2+b)$; since the shortest job in the instance is not shorter than $2M$ and $\frac{3}{2}(M+m^2+b)<2M$ for $M>7(m^2+b)$ (as guaranteed by \eqref{eq:Mm}), the job $j$ is long enough to be executed in position $x'+1$ on $\Mshared_{l'}$.
Since the transition from $\mathcal{S}$ to $\mathcal{S}'$ does not affect the execution intervals of any job except for $j$, we obtain by Observation~\ref{obs:T_i}
\begin{equation} \label{eq:all3-1}
\tct{\mathcal{S}}-\tct{\mathcal{S}'} = w_j\left(\frac{T_{x'+1}'}{2}-\frac{T_x}{2}\right),
\end{equation}
where $T_{x'+1}'$ is the completion time of the last job on $\Mshared_{l'}$ in $\mathcal{S}'$ and $T_x$ is the completion time of the last job on $\Mshared_l$ in $\mathcal{S}$.
The maximum job processing time in $A\cup B\cup C$ does not exceed $2(M+m^2+b)$ and hence by Observation~\ref{obs:T_i}
\[
T_{x'+1}'\leq \sum_{\ell=1}^{x'}\frac{2(M+m^2+b)}{2^{x'+1-\ell}} = 2(M+m^2+b)\left(1-\frac{1}{2^{x'}}\right) \]
and the minimum job processing time of a job in $A\cup B\cup C$ is not less than $2M$ which gives
\[
T_x \geq \sum_{\ell=1}^{x-1}\frac{2M}{2^{x-\ell}} = 2M\left(1-\frac{1}{2^{x-1}}\right).\]
Thus, since $x'<3$ and $x'< x-1$
\[T_{x'+1}'-T_x \leq 2M\left(\frac{1}{2^{x-1}}-\frac{1}{2^{x'}}\right)+ 2(m^2+b)\left(1-\frac{1}{2^{x'}}\right)<
-2M\frac{1}{2^{x'+1}}+
2(m^2+b)\left(1-\frac{1}{2^3}\right).
\]
However,
\[-M \frac{1}{2^{x'+1}}+(m^2+b)
\left(1-\frac{1}{2^3}\right)<0
\]
for $M>7(m^2+b)$ (as guaranteed by \eqref{eq:Mm})
which gives $\tct{\mathcal{S}}-\tct{\mathcal{S}'}<0$ and contradicts the optimality of $\mathcal{S}$.
Thus, we have proved that each shared processor executes exactly three jobs in each optimal schedule.
In the following we will often compare lengths of jobs from the sets $A$, $B$ and $C$.
In particular, by~\eqref{eq:Mm}, we have that for each $i,j,k\in\{1,\ldots,n\}$,
\begin{equation} \label{eq:ABC}
b_i \leq 2M+b \leq 2(M+m) < s_j < 2(M+m+b) \leq 2(M+m^2) < r_k.
\end{equation}
Informally, each job in $C$ is longer than any job in $A$, and each job in $A$ is longer than any job in $B$.
We now prove that each shared processor does exactly one job from $C$.
Consider an optimal schedule $\mathcal{S}$ in which some shared processor $\Mshared_l$ executes at least two jobs $i$ and $k$ from $C$.
Then, since $|C|=n$ and there are $n$ shared processors, some other shared processor $\Mshared_{l'}$ executes no job from $C$.
Without loss of generality we may assume due to~\eqref{eq:ABC} that $i$ and $k$ are the longest and the second longest jobs respectively on $\Mshared_l$.
Denote by $j$ the third job on $\Mshared_l$.
By Lemma~\ref {lem:Vshape}, an optimal schedule on $\Mshared_l$ is V-shaped.
Thus, the order of jobs on $\Mshared_l$ is either $(i,k,j)$, $(j,k,i)$, $(i,j,k)$ or $(k,j,i)$.
By Lemma~\ref{lem:reversability}, we can further reduce the number of cases to $(i, k, j)$ and $(i, j, k)$.
It can be easily checked (we omit the details here) that the former order is not optimal on $\Mshared_l$ since $r_k\geq q_j$, where $q_j$ is the processing time of the job $j$.
Thus, it suffices to consider the order $(i, j, k)$ on $\Mshared_l$.
Let $q_{i'},q_{j'},q_{k'}$ be the processing times of jobs scheduled with the ordering $(i',j',k')$ on $\Mshared_{l'}$.
By Lemma~\ref{lem:SviaMatrices}, we have $\tct{\mathcal{S}}=\sigma-\xi(\mathbf{A})/2-\xi(\mathbf{A}')/2$, where
\[\mathbf{A}=\begin{bmatrix}
0 & \frac{1}{4} r_{i} q_{j} & \frac{1}{8} r_{i} r_{k} \\
\frac{1}{4} q_{j} r_{i} & 0 & \frac{1}{4} q_{j} r_{k} \\
\frac{1}{8} r_{k} r_{i} & \frac{1}{4} r_{k} q_{j} & 0
\end{bmatrix},
\quad
\mathbf{A}'=\begin{bmatrix}
0 & \frac{1}{4} q_{i'} q_{j'} & \frac{1}{8} q_{i'} q_{k'} \\
\frac{1}{4} q_{j'} q_{i'} & 0 & \frac{1}{4} q_{j'} q_{k'} \\
\frac{1}{8} q_{k'} q_{i'} & \frac{1}{4} q_{k'} q_{j'} & 0
\end{bmatrix}.
\]
and $\sigma=\sum_{i\neq l, l'} \tct{\mathcal{S}_i}+\frac{1}{2}\mathbf{P}_l\cdot\mathbf{I}_3\cdot\mathbf{P}_l^{\textup{T}}+\frac{1}{2}\mathbf{P}_{l'}\cdot\mathbf{I}_3\cdot\mathbf{P}_{l'}^{\textup{T}}$
in which we take $\mathcal{S}_i$ to be the schedule on $\Mshared_i$ and the corresponding private processors assigned to jobs executed on $\Mshared_i$.
Consider the matrices
\[\mathbf{B}=\begin{bmatrix}
0 & \frac{1}{4} q_{i'} q_{j} & \frac{1}{8} q_{i'} r_{k} \\
\frac{1}{4} q_{j} q_{i'} & 0 & \frac{1}{4} q_{j} r_{k} \\
\frac{1}{8} r_{k} q_{i'} & \frac{1}{4} r_{k} q_{j} & 0
\end{bmatrix},
\quad
\mathbf{B}'=\begin{bmatrix}
0 & \frac{1}{4} r_{i} q_{j'} & \frac{1}{8} r_{i} q_{k'} \\
\frac{1}{4} q_{j'} r_{i} & 0 & \frac{1}{4} q_{j'} q_{k'} \\
\frac{1}{8} q_{k'} r_{i} & \frac{1}{4} q_{k'} q_{j'} & 0
\end{bmatrix}
\]
obtained from $\mathbf{A}$ and $\mathbf{A}'$, respectively, by exchanging $r_i$ and $q_{i'}$.
Thus, there exists a schedule $\mathcal{S}'$ obtained from $\mathcal{S}$ by exchanging job $i$ on $\Mshared_l$ with job $i'$ on $\Mshared_{l'}$, and $\tct{\mathcal{S}'}=\sigma-\xi(\mathbf{B})/2-\xi(\mathbf{B}')/2$.
Observe that by~\eqref{eq:ABC}, in $\mathcal{S}'$, the first jobs on $\Mshared_{l}$ and $\Mshared_{l'}$ complete by $(M+m^2+b)$, the second jobs on those processors complete by $\frac{3}{2}(M+m^2+b)$, and moreover the shortest job in the instance is not shorter than $2M$; thus $\frac{3}{2}(M+m^2+b)<2M$ for $M>7(m^2+b)$ as guaranteed by~\eqref{eq:Mm}, and consequently all jobs on $\Mshared_{l}$ and $\Mshared_{l'}$ are long enough to be executed on $\Mshared_{l}$ and $\Mshared_{l'}$ after the exchange.
Therefore, $\mathcal{S}'$ is feasible.
We have
\begin{equation*}
\tct{\mathcal{S}'}-\tct{\mathcal{S}}=\frac{1}{8} (r_{i} -q_{i'}) (r_{k} -q_{k'} +2 (q_{j} -q_{j'})).
\end{equation*}
Note that, by \eqref{eq:Mm} and~\eqref{eq:ABC}
\[r_{i} -q_{i'}\geq 2(M+m^2) - 2(M+m+b)=2(m^2-m-b)>0\]
and
\[r_{k} -q_{k'} +2 (q_{j} -q_{j'})\geq 2(M+m^2)-2(M+m+b) + 2(2M-2(M+m+b))=2(m^2-3m-3b)>0.\]
Thus, $\tct{\mathcal{S}'}>\tct{\mathcal{S}}$ which contradicts the optimality of $\mathcal{S}$.
This proves that each shared processor executes exactly one job from the set $C$.
Third, we prove that each shared processor does exactly one job from $A$.
Analogously as before, consider an optimal schedule $\mathcal{S}$ in which some shared processor $\Mshared_l$ executes a job $k\in C$
and jobs
$i,j\in A$ and some other shared processor $\Mshared_{l'}$ executes a job
$k'\in C$ and no job from $A$ (thus, the two remaining jobs on that processor
$i',j'\in B$).
By~\eqref{eq:ABC}, the job $k$ is longer than the jobs $i$ and $j$, and similarly, the job $k'$ is longer than $i'$ and $j'$.
By Lemma~\ref{lem:Vshape}, the schedule $\mathcal{S}$ is V-shaped and thus $k$ is the first or the last job on $\Mshared_l$ and $k'$ is the first or the last job on $\Mshared_{l'}$.
Furthermore, Lemma~\ref{lem:reversability} implies that we may without loss of generality assume that $k$ and $k'$ are the last jobs on $\Mshared_l$ and $\Mshared_{l'}$, respectively.
Let $i\in A$ be the first job on $\Mshared_l$, and $i'\in B$ be the first job on $\Mshared_{l'}$.
We have $\tct{\mathcal{S}}=\sigma-\xi(\mathbf{A})/2-\xi(\mathbf{A}')/2$, where
\[\mathbf{A}=\begin{bmatrix}
0 & \frac{1}{4} s_{i} s_{j} & \frac{1}{8} s_{i} r_{k} \\
\frac{1}{4} s_{j} s_{i} & 0 & \frac{1}{4} s_{j} r_{k} \\
\frac{1}{8} r_{k} s_{i} & \frac{1}{4} r_{k} s_{j} & 0
\end{bmatrix},
\quad
\mathbf{A}'=\begin{bmatrix}
0 & \frac{1}{4} b_{i'} b_{j'} & \frac{1}{8} b_{i'} r_{k'} \\
\frac{1}{4} b_{j'} b_{i'} & 0 & \frac{1}{4} b_{j'} r_{k'} \\
\frac{1}{8} r_{k'} b_{i'} & \frac{1}{4} r_{k'} b_{j'} & 0
\end{bmatrix}
\]
and $\sigma=\sum_{i\neq l, l'} \tct{\mathcal{S}_i}+\frac{1}{2}\mathbf{P}_l\cdot\mathbf{I}_3\cdot\mathbf{P}_l^{\textup{T}}+\frac{1}{2}\mathbf{P}_{l'}\cdot\mathbf{I}_3\cdot\mathbf{P}_{l'}^{\textup{T}}$.
Obtain a schedule $\mathcal{S}'$ by exchanging in $\mathcal{S}$ the job $i\in A$ from $\Mshared_l$ with the job $i'\in B$ from $\Mshared_{l'}$.
Observe that the first jobs on $\Mshared_{l}$ and $\Mshared_{l'}$ complete by $(M+m^2+b)$ in $\mathcal{S}'$, the second jobs on those processors complete by $\frac{3}{2}(M+m^2+b)$, and moreover the shortest job in the instance is not shorter than $2M$, thus $\frac{3}{2}(M+m^2+b)<2M$ for $M>7(m^2+b)$, according to~\eqref{eq:Mm}, and consequently all jobs on $\Mshared_{l}$ and $\Mshared_{l'}$ are long enough to be executed on $\Mshared_{l}$ and $\Mshared_{l'}$ after the exchange.
This implies that $\mathcal{S}'$ is feasible.
For the new schedule we have $\tct{\mathcal{S}'}=\sigma-\xi(\mathbf{B})/2-\xi(\mathbf{B}')/2$, where
\[\mathbf{B}=\begin{bmatrix}
0 & \frac{1}{4} b_{i'} s_{j} & \frac{1}{8} b_{i'} r_{k} \\
\frac{1}{4} b_{i'} s_{j} & 0 & \frac{1}{4} s_{j} r_{k} \\
\frac{1}{8} b_{i'} r_{k} & \frac{1}{4} r_{k} s_{j} & 0
\end{bmatrix},
\quad
\mathbf{B}'=\begin{bmatrix}
0 & \frac{1}{4} s_{i} b_{j'} & \frac{1}{8} s_{i} r_{k'} \\
\frac{1}{4} s_{i} b_{j'} & 0 & \frac{1}{4} b_{j'} r_{k'} \\
\frac{1}{8} s_{i} r_{k'} & \frac{1}{4} r_{k'} b_{j'} & 0
\end{bmatrix}.
\]
Therefore,\begin{equation*}
\tct{\mathcal{S}'}-\tct{\mathcal{S}}=\frac{1}{8} (s_{i} -b_{i'}) (r_{k} -r_{k'} +2 (s_{j} -b_{j'})).
\end{equation*}
We have $s_i-b_{i'}\geq 2m-b>0$ and $r_{k} -r_{k'} +2 (s_{j} -b_{j'})\geq 4m-4b>0$, where both inequalities follow from~\eqref{eq:Mm} and~\eqref{eq:ABC}.
Therefore, we obtain $\tct{\mathcal{S}'}>\tct{\mathcal{S}}$ --- a contradiction.
This proves that each shared processor does exactly one job from each set $A ,B$ and $C$.
It remains to argue that for three jobs $i\in A$, $j\in B$ and $k\in C$ scheduled on some shared processor $\Mshared_l$, their order on $\Mshared_l$ is $(i,j,k)$.
By Lemmas~\ref{lem:Vshape} and~\ref{lem:reversability} and~\eqref{eq:ABC}, the possible orders in an optimal synchronized schedule are $(i,j,k)$ and $(j,i,k)$; take two schedules $\mathcal{S}$ and $\mathcal{S}'$ that execute the jobs in these orders, respectively.
We have by \eqref{eq:Mm}
\[\tct{\mathcal{S}}-\tct{\mathcal{S}'} = \frac{r_k}{8}(s_i-b_j)>0,\]
which completes the proof of the lemma.
\end{proof}
We conclude this section with its main result.
\begin{theorem} \label{thm:NPC}
The weighted multiple shared-processors problem \textup{WSMP} is strongly \textup{NP}-hard.
\end{theorem}
\begin{proof}
Note that the weights and the processing times of jobs in our reduction are bounded by $O(M+m^2+b)$.
By \eqref{eq:Mm}, $M$ and $m$ are polynomially bounded by $b$ and the value of $b$ is bounded by a polynomial in $n$ since the \textup{N3DM} problem is strongly NP-hard.
Thus, the theorem follows from Lemmas~\ref{lem:NPCiff} and~\ref{lem:equitable}.
\end{proof}
\section{An $O(n\log n)$ algorithm for equal weights}
This section gives an $O(n\log n)$ optimization algorithm for the \textup{WSMP} problem with equal weights, i.e. $w_{i}=w$ for $i\in \mathcal{J}$.
Without loss of generality we assume $w=1$ for convenience. We begin with the following result of \cite{VairaktarakisAydinliyim07} for a single shared processor non-preemptive problem and extended to preemptive one in \cite{HK15}.
\begin{lemma}[\cite{HK15,VairaktarakisAydinliyim07}]
\label{lem:HK15}
If jobs $1,\ldots, n$ with unit weights and processing times $p_1,\ldots,p_n$, respectively, are executed on a shared processor in an optimal synchronized schedule in the order $1,\ldots,n$,
then $p_1\leq p_2\leq \cdots \leq p_n$ and the total weighted overlap equals
\[ \sum_{i=1}^n \bar{t_i}=\frac{p_n}{2} + \frac{p_{n-1}}{4} + \cdots + \frac{p_1}{2^n}.\]
\hspace*{\fill}\nolinebreak\ensuremath{\Box}
\end{lemma}
This hints at the following
algorithm for the \textup{WSMP} problem with unit weights. Take the following sequence of positional weights:
\begin{equation} \label{position}
\underbrace{\frac{1}{2},\ldots,\frac{1}{2}}_{m\text{ times}},\underbrace{\frac{1}{4}
,\ldots,\frac{1}{4}}_{m\text{ times}},\ldots,\underbrace{\frac{1}{2^{k}},\ldots,\frac{1}{
2^{k}}}_{r\text{ times}}
\end{equation}
where
\begin{equation}\label{a}
k=\left\lceil \frac{n}{m}\right\rceil \text{ and } r=n-\left\lfloor \frac{n}{m} \right\rfloor m,
\end{equation}
and order the jobs in descending order of their processing times so that
\begin{equation} \label{proctime}
p_{n}\geq \cdots\geq p_{1}.
\end{equation}
Match the $i$-th positional weight from the left in the sequence \eqref{position} with the $i$-th job from the left in the sequence \eqref{proctime} for $i\in\{1,\ldots,n\}$. Partition the set of jobs
$\mathcal{J}$ into $m$ disjoint subsets
\[
\mathcal{J}_{1},\ldots,\mathcal{J}_{m}
\]
so that any two jobs in a subset are matched with different positional weights. Thus, in each subset we have exactly one job matched with $\frac{1}{2}$, exactly one with $\frac{1}{4}$, \ldots, and exactly one matched with $\frac{1}{2^{k-1}}$. Moreover, there are exactly $0\leq r<m$ subsets with exactly one job matched with $\frac{1}{2^{k}}$ in each. Without loss of generality we may assume that these $r$ sets are $\mathcal{J}_{1},\ldots,\mathcal{J}_{r}$. Finally, schedule the jobs from $\mathcal{J}_{\ell}$ on shared processor $\mathcal{M}_{\ell}$ in ascending order of their processing times for each $\ell\in\{1,\ldots,m\}$. Let the resulting synchronized schedule be $\bar{\mathcal{S}}$, and let $\bar{\mathcal{S}}_{\ell}$ be the synchronized schedule for $\Mshared_{\ell}$ and the private processors of jobs executed on $\Mshared_{\ell}$ for each $\ell\in\{1,\ldots,m\}$. From Lemma~\ref{lem:HK15} and this algorithm we immediately obtain the following.
\begin{lemma}\label{S}
It holds that
\[\tct{\bar{\mathcal{S}}}=\sum_{\ell=1}^m \tct{\bar{\mathcal{S}}_{\ell}}=\sum_{\ell=1}^r \sum _{i=1}^{\lceil \frac{n}{m} \rceil} \frac{p^{\ell}_{i}}{2^{\lceil \frac{n}{m} \rceil+1-i}}+ \sum_{\ell=r+1}^{m} \sum _{i=1}^{\lfloor \frac{n}{m} \rfloor} \frac{p^{\ell}_{i}}{2^{\lfloor \frac{n}{m} \rfloor+1-i}},\]
where $\mathcal{J}_{\ell}=\{p^{\ell}_{i}\hspace{0.1cm}\bigl|\bigr.\hspace{0.1cm} i\in\{1,\ldots,|\mathcal{J}_{\ell}|\}\}$ and $p^{\ell}_1\leq\cdots\leq p^{\ell}_{|\mathcal{J}_{\ell}|}$ for each $\ell\in\{1,\ldots,m\}$.
\hspace*{\fill}\nolinebreak\ensuremath{\Box}
\end{lemma}
It remains to show that $\bar{\mathcal{S}}$ is optimal.
We start by noting that in optimal schedules for the \textup{WSMP} with unit weights each job executes on some shared processor.
\begin{lemma} \label{lem:unit-all}
Let $\mathcal{S}$ be an optimal schedule for a set of jobs $\mathcal{J}$ with unit weights and $m$ shared processors.
Then, each job executes on some shared processor.
\end{lemma}
\begin{proof}
Suppose that some schedule $\mathcal{S}$ does not satisfy the lemma.
Take an arbitrary shared processor $\Mshared_{\ell}$ and any job $j\in\mathcal{J}$ with processing time $p$ that executes entirely on its private processor $\cP_j$.
Suppose that jobs $j_1,\ldots,j_k$ with processing times $p_1,\ldots,p_k$, respectively, execute on $\Mshared_{\ell}$ in $\mathcal{S}$ in this order.
By Lemma~\ref{lem:HK15}, $p_1\leq\cdots\leq p_k$.
Take the maximum $i\in\{1,\ldots,k\}$ such that $p_i\leq p$. If $p<p_1$, then take $i=0$.
Consider a synchronized schedule $\mathcal{S}'$ that is identical to $\mathcal{S}$ on all shared processors different than $\Mshared_{\ell}$ and executes jobs $j_1,\ldots,j_i,j,j_{i+1},\ldots,j_k$ (with processing times $p_1,\ldots,p_i$, $p,p_{i+1},\ldots,p_k$ respectively), in this order, on $\Mshared_{\ell}$.
Due to the choice of $i$, $\mathcal{S}'$ is feasible and by Lemma~\ref{S} for $i\geq1$
\begin{eqnarray*}
\tct{\mathcal{S}'}-\tct{\mathcal{S}} & = & \sum_{\ell=1}^{i}\frac{p_{\ell}}{2^{k+2-\ell }}+\frac{p}{2^{k+1-i}} - \sum_{\ell=1}^{i}\frac{p_{\ell}}{2^{k+1-\ell}} \\
& = & \frac{p_1}{2^{k+1}} + \sum_{\ell=2}^{i}\frac{p_{\ell}-p_{\ell-1}}{2^{k+2-\ell}} + \left(\frac{p-p_i}{2^{k+1-i}}\right).
\end{eqnarray*}
Since $p_1\leq\cdots\leq p_i\leq p$, we obtain that $\tct{\mathcal{S}'}-\tct{\mathcal{S}}\geq p_1/2^{k+1}>0$, which implies that $\mathcal{S}$ is not optimal and completes the proof for $i\geq 1$. Finally,
\begin{eqnarray*}
\tct{\mathcal{S}'}-\tct{\mathcal{S}} & = & \frac{p}{2^{k+1}},
\end{eqnarray*}
for $i=0$ which implies that $\mathcal{S}$ is not optimal and completes the proof.
\end{proof}
\begin{lemma} \label{opt}
$\bar{\mathcal{S}}$ is optimal.
\end{lemma}
\begin{proof}
Let $\mathcal{S'}$ be a synchronized schedule with jobs $\mathcal{J}'_1, \ldots, \mathcal{J}'_m$ on shared processors $\mathcal{M}_{1}, \ldots, \mathcal{M}_{m}$, respectively.
Denote $n_{\ell}=|\mathcal{J}'_{\ell}|$ and denote the processing times of jobs in $\mathcal{J}'_{\ell}$ by $q_1^{\ell} \leq\cdots\leq q_{n_{\ell}}^{\ell}$, $\ell\in\{1,\ldots,m\}$.
We have by Lemma~\ref{lem:HK15},
\[\tct{\mathcal{S}'}\leq \sum_{\ell=1}^{m}\sum_{i=1}^{n_{\ell}}\frac{q_{i}^{\ell}}{2^{n_{\ell}+1-i}}.\]
We will argue that
\begin{equation} \label{eq}
\sum_{\ell=1}^{m}\sum_{i=1}^{n_{\ell}}\frac{q_{i}^{\ell}}{2^{n_{\ell}+1-i}} \leq \sum_{\ell=1}^r \sum _{i=1}^{\lceil \frac{n}{m} \rceil} \frac{p^{\ell}_{i}}{2^{\lceil \frac{n}{m} \rceil+1-i}}+ \sum_{\ell=r+1}^{m} \sum _{i=1}^{\lfloor \frac{n}{m} \rfloor} \frac{p^{\ell}_{i}}{2^{\lfloor \frac{n}{m} \rfloor+1-i}}
\end{equation}
which by Lemma \ref{S} proves the lemma. To prove inequality \eqref{eq}, take the positional weights of the left-hand side of \eqref{eq} in non-ascending order. Let them form a vector $\mathbf{\alpha}$. By Lemma~\ref{lem:unit-all}, the length of $\mathbf{\alpha}$ is $n$.
We obtain a vector $\mathbf{\alpha}'$ as follows.
Initially set $\mathbf{\alpha}':=\mathbf{\alpha}$ and perform the following action as long as possible.
Find the minimum $i\in\{1,\ldots,k-1\}$ (recall (\ref{a}) for definition of $k$ and $r$) such that the value $1/2^i$ appears less than $m$ times in $\mathbf{\alpha}'$, and replace any entry of $\mathbf{\alpha}'$ with value less than $1/2^i$ with the value $1/2^i$.
Finally, any value less than $1/2^k$ replace in $\mathbf{\alpha}'$ with $1/2^k$.
Clearly, $1/2^i$ appears exactly $m$ times in $\mathbf{\alpha}'$ for each $i\in\{1,\ldots,k-1\}$, and $1/2^k$ appears exactly $r$ times in $\mathbf{\alpha}'$.
Take the positional weights of the right-hand side of (\ref{eq}) in non-ascending order. Let them form a vector $\mathbf{\beta}$.
We observe that $\mathbf{\alpha}'$ is a permutation of $\mathbf{\beta}$ and thus we
can readily show that
\[\sum_{i=1}^{\ell} \mathbf{\alpha}_i\leq\sum_{i=1}^{\ell} \mathbf{\alpha}_i'\leq \sum_{i=1}^{\ell} \mathbf{\beta}_i\]
for each $\ell\in\{1,\ldots,n\}$. Hence the inequality (\ref{eq}) holds by the rearrangement inequality of Hardy--Littlewood--P\'olya \cite{HLP}.
\end{proof}
We conclude this section with the following result.
\begin{theorem}
For any set of jobs $\mathcal{J}$ with equal weights and arbitrary processing times and $m\geq 1$ shared processors, the schedule $\bar{\mathcal{S}}$ is an optimal solution to the \textup{WSMP} problem and can be computed in $O(n\log n)$-time.
\hspace*{\fill}\nolinebreak\ensuremath{\Box}
\end{theorem}
\section{Conclusions and open problems}
We studied the shared multi-processor scheduling problem. We proved that the maximization of total weighted overlap is NP-hard in the strong sense
though the case with equal weights is solvable in $O(n\log n)$ time. We also proved that synchronized schedules include optimal schedules. This characterization
as well as other characteristics of special subclasses of the problem may prove instrumental in settling the complexity of the single processor case, which remains
open, and in developing efficient branch-and-bound algorithms, heuristics, and approximation algorithms with guaranteed worst case approximation for the problem.
We conjecture that the single processor case is NP-hard even for instances with processing times equal to weights for all jobs.
The design of coordinating mechanisms to ensure efficiency of decentralized shared multi-processor scheduling remains an interesting line of research in supply chains
with subcontracting. In particular, coordinating pricing schemes for multi-processor problem with equal weights (such schemes do not exist for the weighted case in general, see \cite{HK15}) seem to readily extend those developed in \cite{HK15} for a single processor.
In this paper we assumed that a job can use only a single shared processor, if any. However, relaxations of this assumption that allow for using an arbitrary or a fixed number of shared processors by a job
could possibly lead to interesting scheduling problems. We leave investigation of these relaxations for further research.
\end{document} |
\begin{document}
\title{DeepWaste: Applying Deep Learning to Waste Classification for a Sustainable Planet}
\begin{abstract}
Accurate waste disposal, at the point of disposal, is crucial to fighting climate change. When materials that could be recycled or composted get diverted into landfills, they cause the emission of potent greenhouse gases such as methane. Current attempts to reduce erroneous waste disposal are expensive, inaccurate, and confusing. In this work, we propose \textbf{\emph{DeepWaste}}, an easy-to-use mobile app, that utilizes highly optimized deep learning techniques to provide users instantaneous waste classification into trash, recycling, and compost. We experiment with several convolution neural network architectures to detect and classify waste items. Our best model, a deep learning residual neural network with 50 layers, achieves an average precision of 0.881 on the test set. We demonstrate the performance and efficiency of our app on a set of real-world images.
\end{abstract}
\section{Introduction}
Every year, the world generates over 2 billion tons of solid waste [1]. In the U.S., even though 75\% of this waste is capable of being recycled, only 34\% is actually recycled [2].
Further, 91\% of plastic isn't recycled [3] and only about 5\% of food and other organic waste is composted [4]. This waste generates over 1.5 billion metric tons of CO$_2$ equivalent greenhouse gases [1], contributing nearly as much to climate change as all the cars on the U.S. roads.
Despite massive investment to educate the public about accurate waste disposal, efforts so far have been only moderately successful. People are often confused by what they can recycle, or compost. Signs and boards found ubiquitously near waste bins are difficult to understand and are often incomplete. Furthermore, disposal of waste varies based on the local recycling facilities' capabilities, and therefore rules for disposal are subject to change on a county-by-county basis.
Errors in waste disposal constitute not only missed opportunities to recycle or compost, but also lead to the contamination of recycling and compost bins. Often, an entire bin can end up at a landfill due to a single error leading to contamination. Data from the National Waste and Recycling Association shows that human confusion in the identification and correct disposal of waste into our waste bins results in nearly 25\% of recyclables getting contaminated [5], diverting materials that could be recycled into our landfill. When a recyclable or compostable material ends up in the landfill, it releases methane which is several times more potent than CO$_2$ in contributing to global warming. Clearly, current ways to inform the public have not been working very well.
In this work, we leverage the recent improvements of convolution neural networks (CNNs) for image-recognition tasks [6] and the availability of increased computational power on modern-day cell phones, to provide a novel approach for waste identification that is fast, low-cost, and accurate for anyone, anywhere. We present DeepWaste, the first mobile app targeted at the problem of erroneous waste disposal, at the point of disposal, through deep learning. We construct from scratch a fully-annotated dataset of more than 1200 waste items that is trained to achieve our best performing model with an average precision of 0.881 on the test set.
\section{Previous Work}
\label{gen_inst}
The topic of waste classification has begun garnering some interest recently in research, but the attempts reported across literature to solve this problem have suffered from low accuracy (ranging from 22\% to low 70\%) [7], [8], or a network size that is too big for real-time application [9].
Further, most of the previous attempts to mitigate the aforementioned problem of erroneous disposal envisaged deployment within a “smart bin” or the use of a commercial and industrial grade binning system within a recycling plant [10], [11], [12], requiring expensive hardware that costs thousands of dollars. The high cost of these solutions has so far been a deterrent to their large-scale adoption. Our approach is novel, as it allows the use of widely available mobile phones, and therefore has the potential of large-scale adoption at little or no cost.
Finally, none of the previous approaches have targeted compost classification. This is a significant problem because when compostable material such as food scraps and green waste gets into a landfill, it is generally compacted down and covered. This removes the oxygen and causes it to break down in an anaerobic process. Eventually, this releases methane, a greenhouse gas that is 25 times more potent than carbon dioxide in warming the earth over a 100-year timescale (and more than 80 times on a 20-year timescale) [13]. Our work is the first work in literature, to the best of our knowledge, that considers not only recycling but also compost as a new category for classification.
\section{Methods}
\label{gen_inst}
Classifying waste using machine learning is challenging for three reasons. First, whether a waste is recyclable or compostable or not depends on the properties of the material which can be hard to detect simply from the image. Second, the material can come in any shape such as a broken bottle, or a crumpled can, or deformed plastic; any machine learning technique needs to deal with this variation. Third, material that is recyclable depends on the capabilities of the local recycling center, so the app needs to take this geographical variable into account.
Since there was no public dataset available, to accomplish this task, a dataset was constructed from scratch by contacting various recycling centers and collecting images from the local neighborhood. Towards this goal, we developed the ability for user generated content so that users can easily take a picture, annotate it, and upload it to the cloud for further training. In total, we manually collect 1218 images of waste items at various lightings and angles, with 396 images containing compostable item(s), 427 images containing recyclable item(s), and 395 images containing landfill item(s). Utilizing this data, we experiment with several state-of-the-art convolution neural network methods, including InceptionV3 [14], Inception ResnetV2 [15], Resnet 50 [16], Mobile Net [17], and PNAS Net [18]. All CNNs used were initialized with weights pre-trained on ImageNet.
During training, each input image was rotated with an angle randomly selected among 0$^{\circ}$, 90$^{\circ}$, 180$^{\circ}$, and 270$^{\circ}$ and also randomly flipped, cropped, and blurred for data augmentation. Each method outputs a confidence for an inputted image. Hyperparameters specific to each method are set to the best values described in the original work.
\section{Results}
\label{headings}
Out of the various CNNs benchmarked on the dataset, Resnet50 showed the best accuracy and convergence on the test set in terms of average precision and thus was optimized and subsequently deployed inside of a mobile app using Apple CoreML. Core ML optimizes on-device performance by leveraging the CPU, GPU, and Neural Engine while minimizing its memory footprint and power consumption. The DeepWaste model is running strictly on the user’s mobile device, therefore removing the need for internet connection and sharing data.
The benchmarked models and their respective average precision scores on the test set can be found in Table 1.
\begin{table}[h!]
\begin{center}
\begin{tabular}{ |c|c|c|c|c|c| }
\hline
\textbf{Accuracy} & \textbf{Inception\_V3} & \textbf{Inception\_ResnetV2} & \textbf{Resnet\_50} & \textbf{MobileNet} & \textbf{PNAS\_net} \\
\hline
Trash & 0.771 & 0.773 & 0.761 & 0.751 & 0.722 \\
\hline
Recycle & 0.891 & 0.783 & 0.924 & 0.949 & 0.864 \\
\hline
Compost & 0.806 & 0.806 & 0.882 & 0.873 & 0.841 \\
\hline
\hline
\textbf{Overall} & \textbf{0.84} & \textbf{0.82} & \textbf{0.881} & \textbf{0.842} & \textbf{0.852} \\
\hline
\end{tabular}
\end{center}
\caption{CNN performance on test set}
\label{table:1}
\end{table}
Figure 1 shows the DeepWaste app classifying commonly confused items in real-life. A user can simply point their phone camera to any piece of waste and get instantaneous feedback, with an average prediction time of around 100ms. DeepWaste is able to correctly identify items with high accuracy, even when the shape has been deformed such as a crushed soda can, orange peels, an apple core, crumpled paper, and a plastic bag. Note that the plastic bag in Figure 1 is classified as trash because plastic bags, films, and wraps cannot be recycled in curbside recycling bins; they must be dropped off at special retail stores that collect plastic grocery bags for recycling. Throwing this plastic bag into the recycling bin has the potential of contaminating the entire bin.
\begin{figure}
\caption{DeepWaste app classification output}
\end{figure}
\section{Conclusion and Future Work}
\label{others}
In this work, we propose a mobile application that uses highly-optimized deep learning techniques to provide users instantaneous waste classification, enabling them to accurately dispose of waste into recycling, compost, or trash. Currently, the mobile app is available to beta users for testing. In the near future, we aim to construct a larger data set by releasing the app to the general public so that more users can add to the growing DeepWaste database. We have started a conversation with local recycling companies to explore if they would be interested in promoting the adoption of DeepWaste to their customers. The initial feedback is encouraging and we hope to continue this conversation and start a trial soon. We hope that our work can reduce the amount of incorrect waste disposal, and over time raise more awareness around the environmental impacts of waste on our climate. If DeepWaste can even reduce erroneous waste disposal by 1\%, it will be equivalent to removing over 6.5 million gasoline-burning passenger vehicles from the road, demonstrating the potential for machine learning techniques to tackle challenging problems related to climate change.
\section*{References}
\small
[1] S. Kazam, L. Yao, P. Bhada-Tata, F. Van Woerden, “What a Waste 2.0: A Global Snapshot of Solid Waste Management to 2050,” World Bank, pp. 3-5, 2018.
[2] United States Environmental Protection Agency (EPA), “National Overview: Facts and Figures on Materials, Wastes and Recycling,” 2016. URL https://www.epa.gov/facts-and-figures-about-materials-waste-and-recycling/national-overview-facts-and-figures-materials
[3] L. Parker, “A Whopping 91 Percent of Plastic Isn’t Recycled,” National Geographic, 2018. URL https://www.nationalgeographic.org/article/whopping-91-percent-plastic-isnt-recycled/
[4] United States Environmental Protection Agency (EPA), “Food: Material-Specific Data,” 2016. URL https://www.epa.gov/facts-and-figures-about-materials-waste-and-recycling/food-material-specific-data
[5] M. Koerth, “The Era Of Easy Recycling May Be Coming To An End,” FiveThirtyEight, 2019. URL https://fivethirtyeight.com/features/the-era-of-easy-recycling-may-be-coming-to-an-end/
[6] Y. LeCun, Y. Bengio, and G. Hinton, “Deep learning,” Nature, Vol. 521, pp. 436, 2015.
[7] G. Thung, M. Yang, "Classification of Trash for Recyclability Status," \emph{Stanford CS229 Project Report}, 2016.
[8] O. Awe, R. Mengitsu, V. Sreedhar, "Final Report: Smart Trash Net: Waste Localization and Classification," \emph{Stanford CS229 Project Report}, 2016.
[9] C. Bircanoğlu, M. Atay, F. Beşer, Ö. Genç and M. A. Kızrak, "RecycleNet: Intelligent Waste Sorting Using Deep Neural Networks," \emph{Innovations in Intelligent Systems and Applications (INISTA)}, Thessaloniki, pp. 1-7, doi: 10.1109/INISTA.2018.8466276, 2018.
[10] I. Salimi, B. S. Bayu Dewantara, I. K. Wibowo, "Visual-based trash detection and classification system for smart trash bin robot," \emph{International Electronics Symposium on Knowledge Creation and Intelligent Computing (IES-KCIC)}, Bali, Indonesia, pp. 378-383, doi: 10.1109/KCIC.2018.8628499 2018.
[11] D. Vinodha, J. Sangeetha, B. Cynthia Sherin, M. Renukadevi, "Smart Garbage System with Garbage Separation Using Object Detection," International Journal of Research in Engineering, Science and Management 2020.
[12] D. Ziouzios, M. Dasygenis, "A Smart Recycling Bin for Waste Classification," \emph{Panhellenic Conference on Electronics \& Telecommunications (PACET)}, pp. 1-4, doi: 10.1109/PACET48583.2019.8956270, 2019.
[13] United States Environmental Protection Agency (EPA), "Overview of Greenhouse Gases," 2018. URL https://www.epa.gov/ghgemissions/overview-greenhouse-gases
[14] C. Szegedy, V. Vanhoucke, S. Ioffe, J. Shlens, Z. Wojna, "Rethinking the inception architecture for computer vision," in \emph{Proceedings of the IEEE conference on computer vision and pattern recognition}, pp. 2818-2826, 2016.
[15] C. Szegedy, S. Ioffe, V. Vanhoucke, A.Alemi, "Inception-v4, inception-resnet and the impact of residual connections on learning," arXiv preprint arXiv:1602.07261, 2016.
[16] K. He, X. Zhang, S. Ren, J. Sun, "Deep residual learning for image recognition," \emph{Proceedings of the IEEE conference on computer vision and pattern recognition}, pp. 770-778, 2016.
[17] A. Howard, M. Zhu, B. Chen, D. Kalenichenko, W. Wang, T. Weyand, M. Andreetto, Adam, H, "Mobilenets: Efficient convolutional neural networks for mobile vision applications," arXiv preprint arXiv:1704.04861, 2017.
[18] C. Liu, B. Zoph, J. Shlens, W. Hua, L. Li, L. Fei-Fei, A. Yuille, J. Huang, K. Murphy, "Progressive neural architecture search," arXiv preprint arXiv:1712.00559, 2017.
\end{document} |
\begin{document}
\thispagestyle{empty}
\setcounter{page}{1}
\begin{center}
{\large\bf An extension of Mizoguchi--Takahashi's fixed point
theorem
\vskip.15in
{\bf M. Eshaghi Gordji, H. Baghani, M. Ramezani and H. Khodaei }
\\[2mm]
{\footnotesize Department of Mathematics, Semnan University,\\ P. O.
Box 35195-363, Semnan, Iran}}
\end{center}
\vskip 5mm
\noindent{\footnotesize{\bf Abstract.}
Our main theorem
is an extension of the well--known Mizoguchi--Takahashi's fixed
point theorem [N. Mizoguchi and W. Takahashi, Fixed point theorems
for multi--valued mappings on complete metric space,
{\it J. Math. Anal. Appl.} 141 (1989) 177--188].
\vskip.15in
\footnotetext { \textbf{ 2000 Mathematics Subject Classification}: 54H25.}
\footnotetext { \textbf{Keywords}: Hausdorff metric; Set--valued contraction;
Nadler's fixed point theorem; Mizoguchi--Takahashi's fixed
point theorem}
\footnotetext{\textbf{E-mail}{\tt : madjid.eshaghi@gmail.com,
h.baghani@gmail.com, ramezanimaryam873@gmail.com,
khodaei.hamid.math@gmail.com}}
}
\newtheorem{df}{Definition}[section]
\newtheorem{rk}[df]{Remark}
\newtheorem{lem}[df]{Lemma}
\newtheorem{thm}[df]{Theorem}
\newtheorem{pro}[df]{Proposition}
\newtheorem{cor}[df]{Corollary}
\newtheorem{ex}[df]{Example}
\setcounter{section}{0}
\numberwithin{equation}{section}
\vskip .2in
\begin{center}
\section{INTRODUCTION AND STATEMENT OF RESULTS}
\end{center}
Let $(X,d)$ be a metric space. $CB(X)$ denotes the collection of
all nonempty closed bounded subsets of $X$. For $A,B \in CB(X)$,
and $x\in X$, define $D(x,A):=\inf\{d(x,a);a\in A\}$, and
$$H(A,B):=\max\{\sup_{a\in A}D(a,B),\sup_{b\in B}D(b,A)\}.$$ It is easy to see that
$H$ is a metric on $CB(X)$. $H$ is called the Hausdorff metric
induced by $d$.
\begin{df} An element $x\in X$ is said to be a fixed point of a multi--valued mapping
$T:X\rightarrow CB(X)$, if $x\in T(x)$.
\end{df}
One can show that $(CB(X),H)$ is a complete metric space,
whenever $(X,d)$ is a complete metric space (see for example
Lemma $8.1.4$, of \cite{Rus}).
In 1969, Nadler \cite {N} extended the Banach contraction principle
\cite {Ba} to set--valued mappings as follows.
\begin{thm} Let $(X, d)$
be a complete metric space and let $T$ be a mapping from $X$ into
$CB(X)$. Assume that there exists $r\in [0, 1)$ such that
$H(Tx,Ty)\leq rd(x,y)$ for all $x,y\in X$. Then there exists $z\in
X$ such that $z\in T(z).$
\end{thm}
Nadler's theorem was generalized by Mizoguchi and Takahashi
\cite{A} in the following way.
\begin{thm}
Let $(X,d)$ be a complete metric space and let $T$ be a mapping
from $(X,d)$ into $(CB(X), H)$ satisfies
$$H(Tx,Ty)\leq\alpha(d(x,y))d(x,y)$$ for all $x,y \in X$, where
$\alpha$ be a function from $[0,\infty)$ into $[0,1)$ such that
$\limsup_{s\to t^+}\alpha(s)<1$ for all $t\in [0,\infty)$. Then $T$
has a fixed point.
\end{thm}
Recently Suzuki \cite{Su} proved the Mizoguchi--Takahashi's fixed
point theorem by an interesting and short proof.\\
On the other hand, Banach contraction principle was generalized by
Reich \cite{R1,R2} as follows.
\begin{thm}
Let $(X,d)$ be a complete metric space and let $T$ be a mapping
from $(X,d)$ into $(CB(X), H)$ satisfies $$H(Tx,Ty)\leq
\beta[D(x,Tx)+D(y,Ty)]$$ for all $x,y \in X$, where
$\beta\in[0,\frac{1}{2})$. Then $T$ has a fixed point.
\end{thm}
In 1973, Hardy and Rogers \cite {HR} extended the Reich's theorem
by the following way.
\begin{thm}
Let $(X,d)$ be a complete metric space and let $T$ be a mapping from
$X$ into $X$ such that $$d(Tx,Ty)\leq \alpha d(x,y)+\beta
[d(x,Tx)+d(y,Ty)]+\gamma [d(x,Ty)+d(y,Tx)]$$ for all $x,y \in X$,
where $\alpha,\beta,\gamma \geq 0$ and $\alpha+2\beta+2\gamma <1$.
Then $T$ has a fixed point.
\end{thm}
Recently, the authors of the present paper \cite {EBKR} extended the
theorems 1.5 and 1.2 as follows.
\begin{thm}
Let $(X,d)$ be a complete metric space and let $T$ be a mapping
from $X$ into $CB(X)$ such that $$H(Tx,Ty)\leq \alpha d(x,y)+\beta
[D(x,Tx)+D(y,Ty)]+\gamma [D(x,Ty)+D(y,Tx)]$$ for all $x,y \in X$,
where $\alpha,\beta,\gamma \geq 0$ and $\alpha+2\beta+2\gamma <1$.
Then $T$ has a fixed point.
\end{thm}
In this paper, we shall generalize the above results. More precisely,
we prove the following theorem, which can be regarded as an
extension of all of Theorems 1.2, 1.3, 1.4, 1.5 and 1.6.
\begin{thm}
Let $(X,d)$ be a complete metric space and let $T$ be a mapping
from $X$ into $CB(X)$ such that
\begin{eqnarray*}
H(Tx,Ty)&\leq& \alpha (d(x,y))d(x,y)+\beta(d(x,y))
[D(x,Tx)+D(y,Ty)]\\&&+\gamma(d(x,y)) [D(x,Ty)+D(y,Tx)]
\end{eqnarray*}
for all $x,y \in X$, where $\alpha,\beta,\gamma$ are mappings from
$[0,\infty)$ into $[0,1)$ such that
$\alpha(t)+2\beta(t)+2\gamma(t)<1$ and $\limsup_{s\to
t^+}\frac{\alpha(s)+\beta(s)+\gamma(s)}{1-(\beta(s)+\gamma(s))}<1$
for all $t\in[0,\infty)$. Then $T$ has a fixed point.
\end{thm}
Moreover, we conclude the following results by using theorem 1.7.
\begin{cor}
Let $(X,d)$ be a complete metric space and let $T$ be a mapping
from $(X,d)$ into $(CB(X), H)$ satisfies $$H(Tx,Ty)\leq
\beta(d(x,y))[D(x,Tx)+D(y,Ty)]$$ for all $x,y \in X$, where $\beta$
be a function from $[0,\infty)$ into $[0,\frac{1}{2})$ and
$\limsup_{s\to t}\beta(s)<\frac{1}{2}$ for all $t\in [0,\infty)$.
Then $T$ has a fixed point.
\end{cor}
\begin{cor}
Let $(X,d)$ be a complete metric space and let $T$ be a mapping
from $(X,d)$ into $(CB(X), H)$ satisfies
$$H(Tx,Ty)\leq\alpha(d(x,y))d(x,y)+ \beta(d(x,y))[D(x,Tx)+D(y,Ty)]$$ for
all $x,y \in X$, where $\alpha,\beta$ are functions from $[0,\infty)$
into $[0,1)$ such that $\alpha(t)+2\beta(t)<1$ and $\limsup_{s\to
t^+}(\frac{\alpha(s)+\beta(s)}{1-\beta(s)})<1$ for all $t\in
[0,\infty)$. Then $T$ has a fixed point.
\end{cor}
\vskip .2in
\begin{center}
\section{Proof of the main theorem}
\end{center}
\begin{proof}
Define function $\alpha^{'}$ from $[0,\infty)$ into $[0,1)$ by
$\alpha^{'}(t)=\frac{\alpha(t)+1-2\beta(t)-2\gamma(t)}{2}$ for
$t\in [0,\infty)$. Then we have the following assertions:\\\\
$1)$ $\alpha(t)<\alpha^{'}(t)$ for all $t\in[0,\infty)$.\\\\
$2)$ $\limsup_{s\to
t^+}\frac{\alpha^{'}(s)+\beta(s)+\gamma(s)}{1-(\beta(s)+\gamma(s))}<1$ for all $t\in[0,\infty)$.\\\\
$3)$ For $x,y\in X$ and $u\in Tx$, there exists $\nu\in Ty $ such
that
\begin{eqnarray*}
d(\nu,u)&\leq&\alpha^{'} (d(x,y))d(x,y)+\beta(d(x,y))
[D(x,Tx)+D(y,Ty)]\\&&+\gamma(d(x,y)) [D(x,Ty)+D(y,Tx)].
\end{eqnarray*}
Putting $u=y$ in 3), we obtain that:\\
$4)$ For $x\in X $ and $y\in Tx$ there exists $\nu\in Ty$ such that
\begin{eqnarray*}
d(\nu,y)&\leq& \alpha^{'} (d(x,y))d(x,y)+\beta(d(x,y))
[D(x,Tx)+D(y,Ty)]\\&&+\gamma(d(x,y)) [D(x,Ty)+D(y,Tx)].
\end{eqnarray*}
Hence, we can define sequence $\{x_n\}_{n\in\mbox{$\mathbb{N}$}}$ such that
$x_{n+1}\in Tx_n, x_{n+1}\neq x_n$ and
\begin{eqnarray*}
d(x_{n+2},x_{n+1})&\leq& \alpha^{'}
(d(x_{n+1},x_{n}))d(x_{n+1},x_{n})+\beta(d(x_{n+1},x_{n}))
[D(x_n,Tx_n)\\&&+D(x_{n+1},Tx_{n+1})]+\gamma(d(x_{n+1},x_{n}))
[D(x_n,Tx_{n+1})\\&&+D(x_{n+1},Tx_n)]
\end{eqnarray*}for all $n\in\mbox{$\mathbb{N}$}$. It follows that
$$
d(x_{n+2},x_{n+1})\leq\frac{\alpha^{'}(d(x_{n+1},x_{n}))+\beta(d(x_{n+1},x_{n}))+\gamma(d(x_{n+1},x_{n}))}
{1-(\beta(d(x_{n+1},x_{n}))+\gamma(d(x_{n+1},x_{n})))}d(x_{n+1},x_{n})$$for
all $n\in\mbox{$\mathbb{N}$}$. On the other hand, we have
$$\frac{\alpha^{'}(t)+\beta(t)+\gamma(t)}{1-(\beta(t)+\gamma(t))}<1$$
for all $t\in [0,\infty)$, then $\{d(x_{n+1},x_n)\}$ is a
non-increasing sequence in $\mbox{$\mathbb{R}$}$. Hence, $\{d(x_{n+1},x_n)\}$
converges to some nonnegative real number $\tau$. By assumption,
$$\limsup_{s\to \tau^+
}\frac{\alpha^{'}(s)+\beta(s)+\gamma(s)}{1-(\beta(s)+\gamma(s))}<1$$
so, we have
$$\frac{\alpha^{'}(\tau)+\beta(\tau)+\gamma(\tau)}{1-(\beta(\tau)+\gamma(\tau))}<1$$
then, there exist $r\in [0,1)$ and $\varepsilon>0$ such that
$$\frac{\alpha^{'}(s)+\beta(s)+\gamma(s)}{1-(\beta(s)+\gamma(s))}<r$$
for all $s\in[\tau,\tau+\varepsilon]$. We can take $\nu\in\mbox{$\mathbb{N}$}$ such that
$$\tau\leq d(x_{n+1},x_n)\leq\tau+\varepsilon$$ for all $n\in\mbox{$\mathbb{N}$}$ with
$n\geq\nu$. It follows that
\begin{eqnarray*}
d(x_{n+2},x_{n+1})&\leq&\frac{\alpha^{'}(d(x_{n+1},x_{n}))+\beta(d(x_{n+1},x_{n}))+\gamma(d(x_{n+1},x_{n}))}{1-(\beta(d(x_{n+1},x_{n}))+\gamma(d(x_{n+1},x_{n})))}d(x_{n+1},x_{n})\\&\leq&
r d(x_{n+1},x_{n})
\end{eqnarray*}
for all $n\in\mbox{$\mathbb{N}$}$ with $n\geq\nu$. This implies that \\
\begin{eqnarray*}
\sum_{n=1}^\infty d(x_{n+2},x_{n+1})\leq\sum_{n=1}^\nu
d(x_{n+1},x_n)+\sum_{n=1}^\infty r^n d(x_{\nu+1},x_{\nu})<\infty.
\end{eqnarray*}
Hence, $\{x_n\}$ is a Cauchy sequence. Since $X$ is a complete
metric space, then $\{x_n\}$ converges to some point $x^*\in X$.
Now, we have
\begin{eqnarray*}
D(x^*,Tx^*)&\leq& d(x^*,x_{n+1})+D(x_{n+1},Tx^*)\\&\leq&
d(x^*,x_{n+1})+H(Tx_{n},Tx^*)\\&\leq&
d(x^*,x_{n+1})+\alpha(d(x_{n},x^*))
d(x_{n},x^*)\\&+&\beta(d(x_{n},x^*))
[D(x_{n},Tx_{n})+D(x^*,Tx^*)]\\&+&\gamma(d(x_{n},x^*))
[D(x_{n},Tx^*)+D(x^*,Tx_{n})]
\end{eqnarray*}
for all $n\in\mbox{$\mathbb{N}$}$. Therefore,
\begin{eqnarray*}
D(x^*,Tx^*)&\leq& d(x^*,x_{n+1})+\alpha(d(x_{n},x^*))
d(x_{n},x^*)\\&+&\beta(d(x_{n},x^*))
[d(x_{n+1},x_{n})+D(x^*,Tx^*)]\\&+&\gamma(d(x_{n},x^*))
[D(x_{n},Tx^*)+d(x_{n},x^*)]
\end{eqnarray*}for all $n\in\mbox{$\mathbb{N}$}$.
It follows that
\begin{eqnarray*}
D(x^*,Tx^*)&\leq&\liminf_{n\to\infty}(\beta(d(x_n,x^*))+\gamma(d(x_n,x^*)))
D(x^*,Tx^*)\\&=&\liminf_{s\to 0^+}(\beta(s)+\gamma(s))
D(x^*,Tx^*)\\&\leq&\limsup_{s\to 0^+
}(\frac{\alpha(s)+\beta(s)+\gamma(s)}{1-(\beta(s)+\gamma(s))})
D(x^*,Tx^*).
\end{eqnarray*}
On the other hand, we have $$\limsup_{s\to
0^+}(\frac{\alpha(s)+\beta(s)+\gamma(s)}{1-(\beta(s)+\gamma(s))})<1$$
then $D(x^*,Tx^*)=0$. Since $Tx^*$ is closed, then $x^*\in Tx^*$.
\end{proof}
\end{document} |
\begin{document}
\title{The Alexander and Jones Polynomials Through Representations of Rook Algebras}
\author{Stephen Bigelow\footnote{University of California Santa Barbara, bigelow@math.ucsb.edu}, Eric Ramos\footnote{Funded by NSF grant DMS - 0852065. Carnegie Mellon University, eramos@cmu.edu}, Ren Yi\footnote{Funded by NSF grant DMS - 0852065. Stony Brook University, reyi@ic.sunysb.edu}}
\maketitle
\begin{abstract}
In the 1920's Artin defined the braid group, $B_n$, in an attempt to understand knots in a more algebraic setting. A braid is a certain arrangement of strings in three-dimensional space. It is a celebrated theorem of Alexander that every knot is obtainable from a braid by identifying the endpoints of each string. Because of this correspondence, the Jones and Alexander polynomials, two of the most important knot invariants, can be described completely using the braid group. There has been a recent growth of interest in other diagrammatic algebras, whose elements have a similar topological flavor to the braid group. These have wide ranging applications in areas including representation theory and quantum computation. We consider representations of the braid group when passed through another diagrammatic algebra, the planar rook algebra. By studying traces of these matrices, we recover both the Jones and Alexander polynomials.
\end{abstract}
\section{Introduction}
The Artin braid groups, $B_n$, are the groups generated by $\sigma_1,\sigma_2,\ldots, \sigma_{n-1}$ satisfying the following relations:
\begin{enumerate}
\item $\sigma_i\sigma_j = \sigma_j \sigma_i$ if $|i-j| > 1$
\item $\sigma_i\sigma_j\sigma_i = \sigma_j\sigma_i\sigma_j$ if $|i-j| = 1$
\end{enumerate}
For our purposes, one will find the abstract presentation insufficient. Instead we will work with the following definition found, for example, in \cite{KT}.
\begin{definition}
A geometric braid on $n \geq 1$ strings is a set $b \subset \mathbb{R}^2 \times I$ formed by $n$ disjoint topological intervals called the strings of $b$ such that the projection $\mathbb{R}^2 \times I \rightarrow I$ is increasing and maps each string homeomorphically onto $I$ with
\begin{eqnarray*}
b\cap(\mathbb{R}^2 \times \{0\}) = \{ (1,0,0),(2,0,0),\ldots, (n,0,0)\} \\
b\cap(\mathbb{R}^2 \times \{1\}) = \{ (1,0,1),(2,0,1),\ldots, (n,0,1)\}
\end{eqnarray*}
\end{definition}
The operation in this new interpretation is defined by stacking the two braids. An example of this is seen below:\\
These groups are the focus of much study due to their wide ranging applications. One particular area of importance is the representation theory of $B_n$. For a comprehensive survey of what is known about these representations, see \cite{BUR}, \cite{FOR} and \cite{BIG}. In this paper, we will study braid group representations when factored through the so called planar rook algebra, $\mathbb{C}P$.\\
The rook monoid, $R_n$, as defined in \cite{FHH}, consists of all bipartite graphs on two rows of $n$ vertices, one on top of the other forming the boundary of a rectangle, such that each vertex has degree either zero or one. Furthermore, we label the vertices in our diagram with the numbers 1 through $n$ from left to right. For example we have the following diagram:
{\beginpicture
\setcoordinatesystem units <0.5cm,0.3cm>
\setplotarea x from 0 to 6, y from -7 to -.5
\put{$\bullet$} at 1 -4 \put{$\bullet$} at 1 -1
\put{$\bullet$} at 2 -4 \put{$\bullet$} at 2 -1
\put{$\bullet$} at 3 -4 \put{$\bullet$} at 3 -1
\put{$1$} at 1 -.2
\put{$2$} at 2 -.2
\put{$3$} at 3 -.2
\put{$1$} at 1 -4.8
\put{$2$} at 2 -4.8
\put{$3$} at 3 -4.8
\plot 1 -4 3 -1 /
\plot 2 -4 1 -1 /
\put {$\in R_3$} at 5 -2.5
\endpicture}
We also have the following definitions which will simplify discussions of rook diagrams:
\begin{definition}
Given any rook diagram, $d \in R_n$ we say:
\begin{enumerate}
\item $d$ has a \textit{vertical line} at $i$ if it contains an edge from the vertex $i$ on top to the same vertex $i$ on bottom.
\item $d$ has a \textit{dead end} at $i$ if vertex $i$ on top (or bottom) is not incident to an edge.
\item $d$ has a \textit{slash} from $i$ to $j$ if vertex $i$ on bottom is adjacent to vertex $j$ on top.
\end{enumerate}
\end{definition}
The product, $d_1d_2$, of two rook diagrams $d_1$ and $d_2$ is obtained by stacking $d_1$ on top of $d_2$ and deleting any edge from one that connects to a dead end from the other. For example,
{\beginpicture
\setcoordinatesystem units <0.5cm,0.3cm>
\setplotarea x from 0 to 20, y from -10 to 3
\put{$\bullet$} at 1 -4 \put{$\bullet$} at 1 -1
\put{$\bullet$} at 2 -4 \put{$\bullet$} at 2 -1
\put{$\bullet$} at 1 -7 \put{$\bullet$} at 2 -7
\put{$\bullet$} at 3 -4 \put{$\bullet$} at 3 -1 \put{$\bullet$} at 3 -7
\plot 1 -4 2 -1 /
\plot 1 -4 3 -7 /
\plot 3 -1 3 -4 /
\plot 8 -3 9 -5 /
\put{$\bullet$} at 7 -3 \put{$\bullet$} at 7 -5
\put{$\bullet$} at 8 -5 \put{$\bullet$} at 8 -3
\put{$\bullet$} at 9 -5 \put{$\bullet$} at 9 -3
\put{$=$} at 5 -4
\endpicture}
With the rook monoid defined, one considers the submonoid of planar rook diagrams, $P_n$. These are those diagrams that can be drawn in the plane without crossings when their edges are not allowed to leave the rectangle formed by their vertices. Notice that the diagrams being multiplied above are in $P_3$. Finally, one simply says that the planar rook algebra, $\mathbb{C}P$, is the $\mathbb{C}$-algebra generated by $P_n$ with multiplication defined using the distributive law.\\
What makes $\mathbb{C}P$ such an ideal object of study, aside from its relatively simple presentation, is that its representations have already been completely classified. One will find this classification in \cite{FHH}. This is invaluable for studying representations of $B_n$ through $\mathbb{C}P$. We discover through this paper that many of these representations are those of the Hecke Algebra. We define:
\begin{definition}
The Hecke Algebra, $\mathcal{H}_n(q)$, is the quotient algebra of $\mathbb{C} B_n$ by the subalgebra generated by the following relations:
\[
(\sigma_i - 1)(\sigma_i + q) = 0, \quad i\in \{1,\ldots,n\}.
\]
\end{definition}
This algebra has shown itself to be spectacularly useful in various mathematical fields. In \cite{JON2}, Jones illustrated how this algebra and its representations could be used to fully recover the Jones polynomial invariant. To understand this, one must first understand the connection between links and braids.\\
Looking back to the geometric definition of a braid, it may become clear that there exists some relationship between the braid groups and link theory. Indeed it is a celebrated theorem of Alexander that these two concepts are essentially the same. To make this precise, we first define, for any braid $b\in B_n$, the closure of $b$, denoted $\hat{b}$, to be the link in $\mathbb{R}^3$ obtained by identifying the top and bottom of $b$. From this definition one can prove the following theorem from \cite{ALE1} and \cite{ALE2}.
\begin{theorem}[Alexander]
Every oriented link is isotopic to a closed braid.
\end{theorem}
Unfortunately, the above correspondence is not one to one. Though this may seem like a major setback, there is a way to exactly characterize the correspondence's failure of bijectivity using a method described first by Markov. We define an equivalence relation, $\sim$, on braids as being generated by the following three ``Markov moves":
\begin{enumerate}
\item $ab \sim ba$
\item $\iota(a)\sigma_{n} \sim a$
\item $\iota(a)\sigma_{n}^{-1} \sim a$
\end{enumerate}
where $a,b\in B_n$ and $\iota: B_n\hookrightarrow B_{n+1}$ is the map that adds a vertical string to the end of a braid. Using this one then proves the following theorem.
\begin{theorem} [Markov]
Given two braids, $b_1$ and $b_2$, $\hat{b}_1 = \hat{b}_2$ if and only if $b_1 \sim b_2$.
\end{theorem}
In this paper we will be expanding on the ideas found in \cite{BIG3} by creating and using Markov traces to emulate the results of Jones in \cite{JON1}. We find that both the Jones and Alexander Polynomials arise through traces of the Planar Rook Algebra.
\section{Finding the homomorphism}
Before we begin, it is important that we impose an ordering on $P_2$ as this will allow for much simpler notation later on.
\begin{definition}
We enumerate the elements of $P_2$ in the following fashion:
\end{definition}
{\beginpicture
\setcoordinatesystem units <0.5cm,0.3cm>
\setplotarea x from 0 to 30, y from -8 to 2
\put{$d_1 = $} at 0 -2.5
\put{$\bullet$} at 1.5 -4 \put{$\bullet$} at 1.5 -1
\put{$\bullet$} at 2.5 -4 \put{$\bullet$} at 2.5 -1
\put{$d_2 = $} at 5 -2.5
\put{$\bullet$} at 6.5 -4 \put{$\bullet$} at 6.5 -1
\put{$\bullet$} at 7.5 -4 \put{$\bullet$} at 7.5 -1
\plot 6.5 -1 6.5 -4 /
\put{$d_3 = $} at 10 -2.5
\put{$\bullet$} at 11.5 -4 \put{$\bullet$} at 11.5 -1
\put{$\bullet$} at 12.5 -4 \put{$\bullet$} at 12.5 -1
\plot 11.5 -4 12.5 -1 /
\put{$d_4 = $} at 15 -2.5
\put{$\bullet$} at 16.5 -4 \put{$\bullet$} at 16.5 -1
\put{$\bullet$} at 17.5 -4 \put{$\bullet$} at 17.5 -1
\plot 16.5 -1 17.5 -4 /
\put{$d_5 = $} at 20 -2.5
\put{$\bullet$} at 21.5 -4 \put{$\bullet$} at 21.5 -1
\put{$\bullet$} at 22.5 -4 \put{$\bullet$} at 22.5 -1
\plot 22.5 -4 22.5 -1 /
\put{$d_6 = $} at 25 -2.5
\put{$\bullet$} at 26.5 -4 \put{$\bullet$} at 26.5 -1
\put{$\bullet$} at 27.5 -4 \put{$\bullet$} at 27.5 -1
\plot 26.5 -4 26.5 -1 /
\plot 27.5 -4 27.5 -1 /
\endpicture}
Next, we present the homomorphism which we will be working with for the remainder of the paper, $\varphi:B_n\rightarrow (\mathbb{C}P)^*$ where $(\mathbb{C}P)^*$ is the group of units of $\mathbb{C}P$. To do this, we introduce the following definition:
\begin{definition}
Given two diagrams, $a,b\in P_n$, we define the tensor product, denoted $a\otimes b$, to be the result of appending of $b$ to the right of $a$.
\end{definition}
With these in mind we begin looking for a homomorphism of the following form:
\[
\varphi(\sigma_i) = a\cdot d_{1i} + b\cdot d_{2i} + c\cdot d_{3i} + d\cdot d_{4i} + e\cdot d_{5i} + f \cdot d_{6i}
\]
where
\[d_{ji} = I^{\otimes i-1} \otimes d_j \otimes I^{\otimes n-i-1}
\]
and $I$ is the identity diagram in $P_1$. This is shown explicitly below:
It therefore remains to find the conditions on our coefficients which make $\varphi$ a group homomorphism. In particular we need to make sure the braid relations are satisfied. Furthermore, we need to make sure every element mapped to in $\mathbb{C}P$ is invertible. For this reason we claim $f$ cannot be zero. If it was, our homomorphism would not have an identity diagram as one of its terms. One then notices that it is impossible for the identity to be created by multiplication of other diagrams. Because we require $\varphi(\sigma_i^{-1}) = \varphi(\sigma_i)^{-1}$, it follows that $f$ must be non-zero. Therefore, for the purpose of simplicity we will scale $f$ to be 1. After doing this we find the following:
\begin{theorem}\label{homom}
Assuming $a+c+d \neq 1, f=1$ and $cd\neq 0$, any mapping of the above form is a homomorphism if and only if its coefficients are in one of the following five families:
\begin{enumerate}
\item $ b = e = -1$
\item $ a = -c-d$, $b = -1$, $e = -cd$
\item $ a = -c-d$, $b = -cd$, $e = -1$
\item $ a = 1-c-d+ cd$, $b = -cd$, $e =-1 $
\item $ a = 1-c-d+cd$, $b = -1$, $e = -cd$
\end{enumerate}
Furthermore, for each of the above five families one finds the following
\begin{enumerate}
\item
$\varphi(\sigma_i^{-1}) = \left(1 - \frac{1}{d} - \frac{1}{c} + \frac{1}{-1+a+c+d}\right)d_{1i} - d_{2i} + \frac{1}{d}d_{3i} + \frac{1}{c}d_{4i}- d_{5i}+ d_{6i}$
\item
$\varphi(\sigma_i^{-1}) = (-\frac{1}{c} - \frac{1}{d})d_{1i} - \frac{1}{cd}d_{2i} + \frac{1}{d}d_{3i} +\frac{1}{c}d_{4i} - d_{5i} + d_{6i}$
\item
$\varphi(\sigma_i^{-1}) = (-\frac{1}{c} - \frac{1}{d})d_{1i} - d_{2i} + \frac{1}{d}d_{3i} +\frac{1}{c}d_{4i} -\frac{1}{cd} d_{5i}+ d_{6i}$
\item
$\varphi(\sigma_i^{-1}) = (1 -\frac{1}{c} - \frac{1}{d} + \frac{1}{cd})d_{1i} - d_{2i} + \frac{1}{d}d_{3i} + \frac{1}{c}d_{4i}- \frac{1}{cd}d_{5i} + d_{6i}$
\item
$\varphi(\sigma_i^{-1}) = (1 -\frac{1}{c} - \frac{1}{d} + \frac{1}{cd})d_{1i} -\frac{1}{cd} d_{2i} + \frac{1}{d}d_{3i} + \frac{1}{c}d_{4i}- d_{5i} + d_{6i}$
\end{enumerate}
\end{theorem}
\begin{proof}
We begin by considering the first braid relation. If two generators, $\sigma_i$ and $\sigma_j$, are such that $|i-j| > 1$, then the diagrams in their image will not overlap in their copies of $d_i$. In particular, in any product of diagrams the $d_i$ will always end up on top, or bottom, of vertical lines. It then follows that this will commute. We find, therefore, the first braid relation will hold regardless of what coefficients we choose.\\
Next we claim it suffices to consider the second braid relation for $\varphi: B_3 \rightarrow (\mathbb{C} P_3)^*$. One will notice that in the product, $\varphi(\sigma_i)\cdot\varphi(\sigma_{i+1})\cdot \varphi(\sigma_i)$, the only parts of the diagrams which actually change are the copies of the $d_j$. Everything else in these diagrams consists of vertical lines at the ends, which act like identities. From this we see that for higher values of $n$, one will only be adding more vertical lines which do not influence anything.\\
Once these facts have been established, one may proceed by exhaustive case study in proving the first half of the theorem. This can be done by a computer by considering the regular representation of $\mathbb{C}P$ and identifying the matrix $\varphi(\sigma_1\sigma_{2}\sigma_{1}) - \varphi(\sigma_2\sigma_{1}\sigma_{2})$ with the zero matrix. This results in 400 polynomials that are required to equal zero. We factored these polynomials, and observed that the expression $c(b+1)(e+1)$ appeared frequently. We therefore solved the system of equations separately in the cases where $c$, $b+1$ or $e+1$ is zero. After finishing this, any redundant or non-invertible solutions were removed and our result follows. \\
Once this has been complete, one simply inverts the matrices found for $\varphi(\sigma_1)$ to finish the proof.
\end{proof}
For convenience, we identify the above homomorphisms by $\varphi_i$, where $i=1,...,5$. One may notice the symmetry in the variables $b$ and $e$ as well as $c$ and $d$. Intuitively one may justify this by considering what happens to a diagram when rotated or reflected. To be more precise, we first notice that in each pair of ``dual" families, the only free variables are $c$ and $d$. If we broadcast our choice by saying for $i\in \{2,\ldots,5\}, \varphi_i = \varphi_i^{c,d}$ then the following corollary becomes apparent.
\begin{corollary}
Let $\alpha: B_n \rightarrow B_n$ be the automorphism which sends $\sigma_i$ to $\sigma_i^{-1}$
\begin{enumerate}
\item $\varphi_2^{c,d}(\sigma_i) = \varphi_3^{\frac{1}{d}, \frac{1}{c}}(\alpha(\sigma_i))$
\item $\varphi_5^{c,d}(\sigma_i) = \varphi_4^{\frac{1}{d},\frac{1}{c}}(\alpha(\sigma_i))$
\end{enumerate}
\end{corollary}
Because of this corollary, we are free to only consider $\varphi_1$, $\varphi_2$ and $\varphi_5$. For the remainder of this paper we will be looking at each one of these three morphisms and discovering representations which factor through them as well as knot invariants. We find that $\varphi_2$ and $\varphi_5$ have many very similar, and sometimes exactly the same, properties. We will also find that $\varphi_1$ is the most different and will therefore be treated last. With all this said we begin with $\varphi_5$.
\section{The Hecke algebra and Jones Polynomial}
We begin this section by quickly considering the representations that factor through $\varphi_5$. Once this is completed we will discuss any knot invariants that can be found using this homomorphism.
\subsection{Representations Through $\varphi_5$}
We begin our study by showing $\varphi_5$ satisfies a HOMFLYPT polynomial. Before we begin however, it is important one take note of the form it is presented in. In particular, we show a skein relation in terms of the braids $\sigma_i, \sigma_i^{-1}$ and $1$. It is non-obvious that any pair of links that are related by a crossing change in an arbitrary diagram can be obtained from braid closures in this way. For a proof of this fact one must simply look to the proof of Alexander's theorem itself. This can be found, for example, in \cite{KT}. After one understands this algorithm for converting a braid to a link it becomes clear that what we are working with is indeed equivalent. \\
With all of this said we proceed with the following theorem:
\begin{lemma}\label{skein}
The homomorphism, $\varphi_5: B_n \rightarrow (\mathbb{C}P)^*$, satisfies the following skein relation for all $\sigma_j$, $j\in \{1,\ldots,n-1\}$ and $x\in{B_n}$
\[
\varphi_5 (x\sigma_j) - cd \cdot \varphi_5 (x\sigma^{-1}_j) = (1-cd) \cdot \varphi_5(x).
\]
In particular these homomorphisms satisfy the following quadratic equation
\[
(\varphi_5(\sigma_j) - \varphi_5(1))\cdot(\varphi_5(\sigma_j) + cd\cdot \varphi_5(1)) = 0.
\]
\end{lemma}
\begin{proof}
We begin by noticing it suffices to prove for both assertions that,
\[
\varphi_5 (\sigma_j) - cd \cdot \varphi_5 (\sigma^{-1}_j) = (1-cd) \cdot \varphi_5(1).
\]
We proceed by gathering like terms to find
\[
(1-c-d+cd - cd(\frac{1-c-d+cd}{cd}))d_{1j} + (-1 + \frac{cd}{cd})d_{2j} + (c - \frac{cd}{d})d_{3j} + (d - \frac{cd}{c})d_{4j} + (-cd + cd)d_{5j} + (1-cd)d_{6j}.
\]
Simplifying the above gives our result.
\end{proof}
From this we may immediately categorize the representations of $B_n$ through $\varphi_5$. To do this one simply combines the quadratic equation mentioned in the statement of Lemma \ref{skein} in conjunction with the definition of the Hecke algebra. To be precise,
\begin{theorem}
All representations of $B_n$ through $\varphi_5$ are representations of $\mathcal{H}_n(cd)$.
\end{theorem}
\subsection{Rediscovering the Jones polynomial}
Now that those representations which factor through $\varphi_5$ have been shown to fall into a well studied class of representations, we turn our attention to the question of knot invariants. All of our results in this topic require the use of trace functions.
\begin{definition}
A \textit{trace function}, $tr:A\rightarrow F$, is any linear function from an algebra, $A$, to a field, $F$, which satisfies $tr(ab) = tr(ba)$.
\end{definition}
It is a simple exercise to show that if $A$ is the algebra of all $n\times n$ matrices then the only trace functions are scalar multiples of the classical matrix trace. Furthermore, we say a trace is a \textit{Markov trace} if it is invariant under the Markov moves. Using Markov trace functions, we will discover knot invariants associated with each of our homomorphism families.\\
We begin by defining what we call the bubble trace, $tr^{\beta}_n$.
\begin{definition}
For any $\beta \in \mathbb{C}$, the bubble trace function $tr^{\beta}_n:\mathbb{C}P\rightarrow \mathbb{C}$ is the linear function which acts on diagrams, $d\in P_n$, by $tr^{\beta}_n(d) = \beta^{k(d)}$ where $k(d)$ is the number of vertical lines in $d$.
\end{definition}
Of course it requires some justification to show that this is a trace. We have the following lemma:
\begin{lemma}\label{vli}
For all diagrams $a,b \in P_n$, $k(ab) = k(ba)$, where $k(d)$ is the number of vertical lines in $d$.
\end{lemma}
\begin{proof}
One considers how a vertical line can be formed in a product. First, one may have two vertical lines stacked on top of one another. This configuration is obviously preserved if the product was done in the opposite order. Secondly, one may have a slash from $i$ to $j$ in $a$ and a slash from $j$ to $i$ in $b$. While the location of the vertical line is not preserved by changing the order of $a$ and $b$, its existence is.
\end{proof}
Now that we see $tr_n^{\beta}$ is a trace, we will attempt to make it a Markov trace. This is achieved below:
\begin{proposition}\label{traces}
Let $tr^{\beta}_n$ be a bubble trace, then the following is a Markov trace:
\[
Tr^5_n(x) = (\sqrt{cd})^{w(x)+n} \cdot tr^{\frac{1+cd}{cd}}_n(\varphi_5(x))
\]
where $w(x)$ is the exponent sum, or writhe, of $x$.
\end{proposition}
\begin{proof}
We begin by noting the first Markov move is preserved by Lemma \ref{vli}. We then consider $tr^{\beta}_{n+1}(\varphi(x\sigma_n))$ where $x\in B_{n+1}$ does not contain $\sigma_n$. We may visualize this using Figure 1 below.\\
From this picture we see that we may gather terms in the following way:
\[
(a+c+d+e\beta)tr^{\beta}_n(\varphi_5(x)') + (\beta + b)tr^{\beta}_n(\varphi_5(x))
\]
where $\varphi(x)'$ is a linear combination of all the diagrams of $\varphi_5(x)$ but with any vertical lines at $n$, broken. We see that by construction $a+c+d + e\beta = 0$ and so we are left with:
\begin{eqnarray}
tr^{\frac{1+cd}{cd}}_{n+1}(\varphi_5(x\sigma_n)) = \frac{1}{cd}tr^{\frac{1+cd}{cd}}_n(\varphi_5(x)) \label{tra}.
\end{eqnarray}
Multiplying \eqref{tra} by $(\sqrt{cd})^{w(x\sigma_n)+n+1}$ and using the definition of $Tr_n^5$ we find
\[
Tr^5_{n+1}(x \sigma_n) = (\sqrt{cd})^{w(x)+1+n+1-2}tr^{\frac{1+cd}{cd}}_n(\varphi_5(x)) = (\sqrt{cd})^{w(x)+n}tr^{\frac{1+cd}{cd}}_n(\varphi_5(x)) = Tr^5_n(x).
\]
The proof for the last Markov move follows similarly.
\end{proof}
Using this new Markov trace function we will discover that one can indeed recover the Jones polynomial from $\varphi_5$.
\begin{theorem}\label{jones}
If $cd \neq 0, -1$ then $Tr^5_n(x) = \frac{1+cd}{\sqrt{cd}} V(\hat{x})$ where $V$ is the Jones polynomial.
\end{theorem}
\begin{proof}
Since $Tr_n^5(x)$ is a Markov trace, we know it is an invariant of the oriented knot, or link, $\hat{x}$. Furthermore, we note the only invariants on knots that satisfy the Jones skein relation are scalar multiples of the Jones polynomial. Therefore we begin by showing that $Tr^5_n$ satisfies the proper skein relation. Once this is finished it will only remain to show that our trace also sends the unknot to $\frac{1+cd}{\sqrt{cd}}$ . One recalls Lemma \ref{skein} and applies $tr^{\frac{1+cd}{cd}}_n$ to both sides to obtain
\[
tr^{\frac{1+cd}{cd}}_n(\varphi_5(x\sigma_i)) - cd\cdot tr^{\frac{1+cd}{cd}}_n(\varphi_5(x\sigma_i^{-1})) = (1-cd)tr^{\frac{1+cd}{cd}}_n(\varphi_5(x)).
\]
Substituting the definition of $Tr^5_n$ given in Proposition \ref{traces} we find,
\begin{description}
\item $(\sqrt{cd})^{-(n+w(x)+1)}Tr^5_{n}(x\sigma_i) - cd \cdot (\sqrt{cd})^{-(n+w(x)-1)}Tr^5_n(x\sigma^{-1}_i) =(1-cd) \cdot(\sqrt{cd})^{-(n+w(x))}Tr_n^5(x)$
\item $\implies \frac{1}{cd}Tr^5_n(x\sigma_i) - cd\cdot Tr^5_n(x\sigma_i^{-1}) = (\frac{1}{\sqrt{cd}}-\sqrt{cd})\cdot Tr^5_n(x)$
\end{description}
as needed. Furthermore we see in $B_1$,
\[
Tr^5_1(1) = (\sqrt{cd})^{0 + 1}\frac{1+cd}{cd} = \frac{1+cd}{\sqrt{cd}}.
\]
One notices that by our assumptions $cd \neq 0, -1$ and this concludes the proof.
\end{proof}
We may now discuss the second homomorphism family.
\section{The Hecke algebra and the Alexander polynomial}
As stated previously, one will find the results in this section to be very similar to those found above. Before we prove various properties of this homomorphism, we will rescale $\varphi_2$ for convenience. In particular we scale $f$ to be $\frac{1}{\sqrt{cd}}$ and find:
\begin{eqnarray}
\varphi_2(\sigma_j) &=&(\frac{-\sqrt{c}}{\sqrt{d}} - \frac{\sqrt{d}}{\sqrt{c}})d_{1j} - \frac{1}{\sqrt{cd}}d_{2j} + \frac{\sqrt{c}}{\sqrt{d}}d_{3j}+\frac{\sqrt{d}}{\sqrt{c}}d_{4j}-\sqrt{cd}\cdot d_{5j} + \frac{1}{\sqrt{cd}}d_{6j} \label{eqn1}.\\
\varphi_2(\sigma_j^{-1}) &=& (\frac{-\sqrt{c}}{\sqrt{d}} - \frac{\sqrt{d}}{\sqrt{c}})d_{1j} - \frac{1}{\sqrt{cd}}d_{2j} + \frac{\sqrt{c}}{\sqrt{d}}d_{3j}+\frac{\sqrt{d}}{\sqrt{c}}d_{4j}-\sqrt{cd}\cdot d_{5j} + \sqrt{cd}\cdot d_{6j}\label{eqn2}.
\end{eqnarray}
One will notice that all of the coefficients between the two linear combinations above are the same aside from the last one. For the rest of this section we shall refer to our newly scaled homomorphism by $\varphi_2$.
\subsection{Representations through $\varphi_2$}
We begin similarly to the $\varphi_5$ case by showing $\varphi_2$ satisfies a HOMFLYPT polynomial. In particular we find,
\begin{lemma} \label{skein2}
For each $x\in B_n$
\[
\varphi_2(x\sigma_i) - \varphi_2(x\sigma_i^{-1}) = (\frac{1}{\sqrt{cd}} - \sqrt{cd})\cdot\varphi_2(x).
\]
\end{lemma}
\begin{proof}
This follows from equations \ref{eqn1} and \ref{eqn2}.
\end{proof}
Once again the above skein relationship tells us exactly what representations will look like through $\varphi_2$. We have the following theorem:
\begin{theorem}
All representations of $B_n$ through $\varphi_2$ are representations of $\mathcal{H}_n(1+cd)$.
\end{theorem}
\subsection{Rediscovering the Alexander polynomial}
Now that the representations are classified we may begin considering knot invariants. One may expect this to be done using the methods discussed for the previous homomorphism. Unfortunately, this cannot be the case. Going through the same steps above one finds a Markov bubble trace with $\beta = 0$ for $\varphi_2$. It then can be shown that this trace satisfies the Alexander skein relation. A quick computation reveals that the unknot is sent to zero and thus the work is wasted. We do not give up on our goal and thus define the following trace:
\[
tr_n(x) =
\begin{cases}
1 &\text{ if } x \text{ has exactly 1 vertical line }\\
0 &\text{ otherwise}
\end{cases}.
\]
One may quickly note that once again Lemma \ref{vli} tells us this is indeed a trace. Furthermore, Lemma \ref{skein2} shows that it will satisfy the Alexander skein relation. Because we do not know whether $tr_n$ is a Markov trace, we may not yet conclude it is the Alexander polynomial. The remainder of this subsection will be dedicated to normalizing $tr_n$ to make it a Markov trace. We begin first with the following critical proposition.
\begin{proposition}\label{vip}
Let $x_n = \sigma_1\cdots\sigma_{n-1} \in B_n$. Then the following three statements hold
\begin{enumerate}
\item The sum of all coefficients of diagrams in $\varphi_2(x_n)$ with no vertical lines is 0.
\item The sum of all coefficients of diagrams in $\varphi_2(x_n)$ with a vertical line at $n$ and nowhere else is $(-\sqrt{cd})^{n-1}$.
\item $tr_n(\varphi_2(x_n)) = \left(\frac{-1}{\sqrt{cd}}\right)^{n-1}[1 + cd + (cd)^2 + \ldots + (cd)^{n-1}]$.
\end{enumerate}
\end{proposition}
\begin{proof}
We prove the lemma by induction. \\
If $n=2$ then
\[
\varphi_2(\sigma_1) = (\frac{-\sqrt{c}}{\sqrt{d}} - \frac{\sqrt{d}}{\sqrt{c}})d_1 - \frac{1}{\sqrt{cd}}d_2 + \frac{\sqrt{c}}{\sqrt{d}}d_3+\frac{\sqrt{d}}{\sqrt{c}}d_4-\sqrt{cd}\cdot d_5 + \frac{1}{\sqrt{cd}}d_6.
\]
One begins by quickly verifying that the coefficients on $d_1$, $d_3$ and $d_4$, the only terms with no vertical lines, sum to zero. Next, one sees that the only diagram with a single vertical line at $n$ is $d_5$ and its coefficient is $-\sqrt{cd}$. For the final claim we see only the fifth and second terms have exactly one vertical line. Summing their two coefficients gives $-\frac{1}{\sqrt{cd}} - \sqrt{cd}$, as needed.\\
Assume that the statement is true for some $n \geq 2$, and consider $x_{n+1}$. We write this as $x_n\sigma_{n}$, where $x_n \in B_{n+1}$, and consider the diagrams found in Figure 1. We see in this case that the first, third and fourth terms $-$ those associated with the empty diagram and the two slashes, respectively $-$ do not gain a vertical line on their far right. We may therefore conclude that these will have no effect in proving claim two. The inductive hypothesis also tells us that any items in these terms with no straight lines will sum to zero, so we need not worry about them for the remainder of claim 1. Finally, when one takes the trace, one finds, similarly to that in the proof of Proposition \ref{traces}, terms will gather in the following way:
\[
((\frac{-\sqrt{c}}{\sqrt{d}} - \frac{\sqrt{d}}{\sqrt{c}}) + \frac{\sqrt{c}}{\sqrt{d}}+ \frac{\sqrt{d}}{\sqrt{c}})tr_n(\varphi_2(x)').
\]
This sum is zero and thus we are free to ignore these terms for the remainder of the proof.\\
We next turn our attention to the second, fifth, and sixth terms $-$ those associated with the left vertical line, right vertical line, and the identity diagram, respectively. We notice the trace of the sixth term will cause any diagram in $\varphi_2(x_n)$ with a vertical line to vanish. Furthermore, the terms without a vertical line will sum to zero by the hypothesis. We therefore only need consider the second and fifth terms.\\
Looking then at the fifth term, we see that any terms in $\varphi_2(x_n)$ with no vertical lines will once again sum to zero. The only terms we consider are those in $\varphi_2(x_n)$ with exactly one vertical line in the far right, as the break caused by $d_5$ will cause this line to be lost in $\varphi_2(x_n\sigma_n)$. By our induction hypothesis we know these terms' coefficients sum to $(-\sqrt{cd})^{n-1}$. This completes the proof of claim 2 as the coefficient of the fifth term is $-\sqrt{cd}$. To finish the entire induction one simply notices the second term is exactly $tr(\varphi(x_n))$ with a coefficient of $\frac{-1}{cd}$. Our inductive hypothesis gives us both our results. \\
\end{proof}
With this proven we define a new trace in the following way, for any $x\in B_n$:
\[
Tr_n^2(x) = \frac{tr_n(\varphi_2(x))}{tr_n(\varphi_2(x_n))}.
\]
One will notice that this trace will continue to satisfy the Alexander skein relation.\\
We claim that the above trace is indeed a Markov trace. It is non-obvious how one can prove this directly, however. Instead we will provide a work around using ideas introduced in \cite{BIG2}. For our next statements, we must introduce some notation used in the aforementioned paper.
\begin{definition}
For any partition, $\lambda = (n_1,\ldots,n_k)$, of $n$ we say $\tau_{\lambda}$ is the braid $(\sigma_{1}\cdots\sigma_{n_1-1})\otimes \ldots \otimes (\sigma_{1}\cdots\sigma_{n_k-1})$ where the $i$th term in the product is a braid in $B_{n_i}$.
\end{definition}
To be clear one will find $\tau_{(4,1)}$ below:\\
\begin{center}
\includegraphics{41.jpg}
\end{center}
One may notice that if $\lambda = (n)$ then $\tau_{(n)}$ is exactly the $x_n$ in Proposition \ref{vip}. With this made clear we have another lemma as well as a critical proposition.
\begin{lemma}\label{unl}
If $\lambda \neq (n)$ then $Tr_n^2(\tau_{\lambda})=0$.
\end{lemma}
\begin{proof}
We will prove the proposition for the case where $\lambda$ has two parts. One will see that the method used generalizes quite easily. We see that $\tau_{\lambda}=(\sigma_1 \cdots \sigma_i)(\sigma_{i+2} \cdots\sigma_{n})$ for some $i > 0$. Let $\beta_1=\sigma_1\cdots \sigma_i$, $\beta_2=\sigma_{i+2} \cdots\sigma_{n}$. Since $\beta_1$ and $\beta_2$ are disjoint, all of the non vertical portions of diagrams which arise from $\varphi_2(\beta_1)$ will end up on top of vertical lines in the diagrams of $\varphi_2(\beta_2)$. From this it follows that when applying $Tr_n^2$ to the product, the surviving diagrams will be a product of a diagram with a single vertical line from one of $\varphi_2(\beta_1)$ or $\varphi_2(\beta_2)$, with a diagram with no vertical lines from the other. Once again by Proposition \ref{vip} we know these coefficients sum to zero and we are done.
\end{proof}
\begin{proposition}\label{big}
For all braid words, $w \in B_n$, there exists a sequence $w = w_0, w_1,\ldots, w_k$ such that the following hold:
\begin{enumerate}
\item $w_j = x\sigma_i^{\pm1}y$, $w_{j+1} = x\sigma_i^{\mp1}y$ for some $x,y\in B_n$ and some $i$.
\item $w_k$ is conjugate to $\tau_{\lambda}$ for some partition $\lambda$
\end{enumerate}
\end{proposition}
\begin{proof}
The proof can be found in the form of an algorithm discussed in \cite{BIG2}.
\end{proof}
With all of these tools we are ready to prove that $Tr_n^2$ is indeed the Alexander polynomial. We have:
\begin{theorem}
If $w\in B_n$ then $Tr_n^2(w) = \Delta(\hat{w})$, where $\Delta$ is the Alexander polynomial.
\end{theorem}
\begin{proof}
We prove the theorem by induction on the length of the word $w$. If $|w| = 0$ then $Tr_n^2(\varphi_2(w)) = 0$, unless $n = 1$ or $2$. This is because in the case of $n = 3$, the extra vertical line makes it so that only terms with no vertical lines will survive the trace. We know these terms sum to zero. For all greater $n$, every term has at least two vertical lines. In the cases of $n = 1$ and $2$, the trace will be 1 as desired by simple calculation.\\
Next assume the statement holds for all lengths up to and including $l$ and let $w$ be an arbitrary word of length $l+1$. To proceed we prove the following claim:
\begin{claim}
All braids in the sequence $w = w_0,\ldots,w_k$ granted by Proposition \ref{big} have $Tr_n^2(w_i) = \Delta(\hat{w}_i)$.
\end{claim}
\begin{proof}
We prove that $Tr_n^2(w_{k-r}) = \Delta(\hat{w}_{k-r})$ by induction on $r$.\\
If $r=0$ then,
\[
Tr_n^2(w_{k-r})=Tr_n^2(w_k)=Tr_n^2(\tau_{\lambda})=\Delta(\hat{\tau}_{\lambda})=\Delta(\hat{w}_{k-r})
\]
by Proposition \ref{vip} and Lemma \ref{unl}, along with the fact that knots are invariant under conjugation of their underlying braid.\\
Next we assume the statement is true for some $r$ and consider $w_{k-r-1}$. Without loss of generality assume $w_{k-r}=a\sigma_i^{-1} b$, and $w_{k-(r+1)}=a \sigma_i b$. We notice that our skein relation gives us a relationship between successive terms in this sequence, along with a term of lesser length. Because we know the theorem is true for shorter words as well as previous terms in the sequence, we may conclude the theorem is true always. To be precise one computes,
\begin{description}
\item $Tr_n^2(w_{k-(r+1)})=Tr_n^2(a \sigma_i b)=Tr_n^2(ba \sigma_i)$
\item $=z \cdot Tr_n^2(ba)+Tr_n^2(ba\sigma_i^{-1}) $
\item $=z \cdot \Delta(\hat{ba})+\Delta(\hat{ba \sigma_i^{-1}})$
\item $=\Delta(\hat{ba \sigma_i})=\Delta(\hat{a\sigma_ib})$
\item $=\Delta(\hat{w}_{k-(r+1)})$
\end{description}
\end{proof}
Thus, $Tr_n^2(w)=\Delta(\hat{w})$ and our proof is completed.
\end{proof}
\section{Colored braids and linking numbers}
We conclude our paper in this section by discussing the first homomorphism family. As was alluded to previously, this family has very different properties than the other two. One will find, for example, that $\varphi_1$ does not satisfy a HOMFLYPT polynomial. Because of this, we will need to describe representations through $\varphi_1$ using alternative methods. In particular we will use coloring methods along with a representation of $\mathbb{C}P$ first described in \cite{FHH}.\\
\subsection{Representations through $\varphi_1$}
In order to discuss these representations, we must first introduce some notation.\\
We begin by creating a vector space, $V^n$. We say,
\[
V^n = \mathbb{C} \text{-}span\{\mathbf{v}_S |\quad S\subseteq\{1,\ldots,n\}\}.
\]
Furthermore, we consider the following subspaces of this vector space,
\[ V^n_k = \mathbb{C} \text{-}span\{\mathbf{v}_S|\quad S\subseteq \{1,\ldots,n\} \text{ and } |S| = k\}. \]
With these defined, we introduce the following notational tools:
\begin{definition}
For a planar rook diagram $d$, let $\beta(d)$ and $\tau(d)$ denote the vertices in the top and bottom rows of $d$, respectively, which are incident to edges. We further let $f: \beta(d) \rightarrow \tau(d)$ be the function that sends a bottom vertex to its neighbor in $d$.
\end{definition}
Using these tools we may introduce a family of representations, $\rho_k: \mathbb{C} P_n \rightarrow End(V^n_k)$ as follows:
\[
\rho_k(d)(\mathbf{v}_{S}) = \left\{ \begin{array}{rl}
\mathbf{v}_{f(S)} &\mbox{ if $S\subseteq \beta(d)$} \\
0 &\mbox{ otherwise}
\end{array} \right.
\]
It turns out that these representations are the fundamental irreducible representations of $\mathbb{C}P$. To be precise one has the following theorem found in \cite{FHH}:
\begin{theorem}\label{mat}
$\displaystyle\bigoplus^n_{k=0}\binom{n}{k} \rho_k$ is an isomorphism from $\mathbb{C} P_n$ to a direct sum of matrix algebras.
\end{theorem}
Using this theorem we will classify all representations of $B_n$ through $\mathbb{C}P$ by using colored braids. To be precise, for any $x \in B_n$ and $S \subseteq \{1,\ldots,n\}$, color each strand of $x$ green if its starting vertex is in $S$ and red otherwise. Let $r(x)$ be the number of crossings between red strands counted with sign and $r'(x)$ be the number of crossings between a red and a green strand counted with sign. We also say that for any $S \subseteq \{1,\ldots,n\}, \sum S = \displaystyle\sum_{i \in S}i$. \\
With all this defined we may proceed with the following theorem:
\begin{theorem} \label{rep}
Let $x \in B_n$ and $\pi(x) \in S_n$ be the underlying permutation of $x$, then
\[
\rho_k \varphi_1(\beta)(\mathbf{v}_S)=\lambda_S^T(x)\mathbf{v}_T
\]
where $T=\pi(x)(S)$ and
\[
\lambda_S^T(x)=(a+c+d-1)^{w(x)}(\sqrt{cd})^{w'(x)}(\sqrt{\frac{c}{d}})^{\sum T - \sum S}
\]
\end{theorem}
\begin{proof}
One notices that by the nature of group actions it suffices to show that the statement is true for $\sigma_i^{\pm1}$ where $i\in \{1,\ldots,n-1\}$.\\
Fix $S \subseteq \{1,\ldots,n\}$ and say $T = (i,i+1)S$. One verifies through quick computation that
\begin{enumerate}
\item
\[
\rho_k \varphi_1(\sigma_i)(\mathbf{v}_S)=\lambda_S^T \mathbf{v}_T
\]
where
\[
\lambda_S^T(x) = \left\{ \begin{array}{rl}
1 = (a+c+d-1)^0\sqrt{cd}^0\sqrt{\frac{c}{d}}^0 &\mbox{ if $i, i+1 \in S$}\\
a+c+d-1 = (a+c+d-1)^1\sqrt{cd}^0\sqrt{\frac{c}{d}}^0 &\mbox{ if $i, i+1 \notin S$} \\
c = (a+c+d-1)^0\sqrt{cd}^1\sqrt{\frac{c}{d}}^1 &\mbox{ if $i\in S, i+1 \notin S$}\\
d = (a+c+d-1)^0\sqrt{cd}^1\sqrt{\frac{c}{d}}^{-1} &\mbox{ if $i\notin S, i+1 \in S$}
\end{array} \right.
\]
\item
\[
\rho_k \varphi_1(\sigma_i^{-1})(\mathbf{v}_S)=\lambda_S^T \mathbf{v}_T
\]
where
\[
\lambda_S^T(x) = \left\{ \begin{array}{rl}
1 = (a+c+d-1)^0\sqrt{cd}^0\sqrt{\frac{c}{d}}^0 &\mbox{ if $i, i+1 \in S$}\\
\frac{1}{a+c+d-1} = (a+c+d-1)^{-1}\sqrt{cd}^0\sqrt{\frac{c}{d}}^0 &\mbox{ if $i, i+1 \notin S$} \\
\frac{1}{d} = (a+c+d-1)^0\sqrt{cd}^{-1}\sqrt{\frac{c}{d}}^1 &\mbox{ if $i\in S, i+1 \notin S$}\\
\frac{1}{c} = (a+c+d-1)^0\sqrt{cd}^{-1}\sqrt{\frac{c}{d}}^{-1} &\mbox{ if $i\notin S, i+1 \in S$}
\end{array} \right.
\]
\end{enumerate}
As required.
\end{proof}
For the purpose of completion, one may ask whether this representation of $B_n$ is related to any well known ones. Computation suggests that $\rho_k\varphi_1$ is some generalization of the representation discussed in \cite{FOR}; however we do not have a precise description of the connection between them.\\
\subsection{A linking number invariant}
Now that we have established the representations through $\varphi_1$ we turn our attention to knot invariants through $\varphi_1$. We find in this case that given any braid $x \in B_n$, any Markov trace will only be dependent on the linking numbers of $\hat{x}$. Despite the anti-climax of the result, it is non-trivial to show as we will see.\\
We begin our proof with the following lemma:
\begin{lemma}\label{tdf}
The only trace functions $tr:\mathbb{C}P \rightarrow \mathbb{C}$ are linear combinations of matrix traces of the $V_k^n$ representations.
\end{lemma}
\begin{proof}
By Theorem \ref{mat}, we know that the $\rho_k$ representations give us an isomorphism between $\mathbb{C}P$ and an algebra of block diagonal matrices. Because any trace function in such an algebra would be a linear combination of traces from each block, we have our result.
\end{proof}
This very useful fact will allow us to exhaustively consider all possible trace functions. For example, using Lemma \ref{tdf} along with Theorem \ref{rep}, we know that for any braid $x \in B_n$, we only need to consider those $S \subseteq \{1,\ldots,n\}$ with $\pi(x)(S) = S$. Consider then the coloring of a braid, $x$, whose underlying permutation fixes a set $S$. We know that if a vertex was associated with a green (or red) vertex at the bottom then it will be so at the top. Furthermore, one may realize that the collection of green (or red) strands will form one or more complete components in the closure, $\hat{x}$. Finally, the scalar $\lambda^S_S(x)$ associated with the action $\rho_k\varphi_1(x)\mathbf{v}_S$ is determined by the total writhe of the red components of $\hat{x}$ and the total linking number between red and green components. This of course follows from Theorem \ref{rep} along with the remarks just made. Using this fact in conjunction with Lemma \ref{tdf} we conclude the following lemma:
\begin{lemma}\label{ttra}
Let $tr: \mathbb{C}P \rightarrow \mathbb{C}$ be a trace function, then for any braid $x\in B_n$, $tr(\varphi_1(x))$ is determined by the scalars $\lambda^S_S(x)$ in
\[
\rho_k\varphi_1(x)\mathbf{v}_S = \lambda^S_S(x)\mathbf{v}_S.
\]
In particular, $tr$ is at most dependent on
\begin{enumerate}
\item Which sets of strands give rise to components of $\hat{x}$,
\item the writhes of these components and
\item the linking numbers between components of $\hat{x}$.
\end{enumerate}
\end{lemma}
Using this lemma we are ready to prove the main theorem of this section. We have,
\begin{theorem}
Let $Tr:\mathbb{C}P \rightarrow \mathbb{C}$ be a Markov trace function, then for any braid $x \in B_n$, $Tr(\varphi_1(x))$ is only dependent on the linking numbers between components of $\hat{x}$.
\end{theorem}
\begin{proof}
Let $L$ be an arbitrary link of $c$ components and take $x \in B_n$ such that $L = \hat{x}$. We recall that this relationship is unchanged by applying Markov moves to $x$, and thus without loss of generality we may assume that each component of $L$ uses $N$ strands of $x$ where $N$ is some sufficiently large number. This is done by conjugating $x$ to move the strands in any component of $\hat{x}$ to the right of the braid, and then appending $\sigma_n$ or $\sigma_n^{-1}$. Furthermore, each one of these moves which adds a strand to $x$ can happen using either a negative or positive crossing. This then allows us to control the writhe of each component in $\hat{x}$. One further notes that the writhe of each component will necessarily be of the opposite integer parity to the number of strands in this component. One then makes sure that through use of Markov moves, each component in $\hat{x}$ has a writhe of zero and $2N+1$ strands for sufficiently large $N$. From Lemma \ref{ttra} we know that $Tr$ can depend on at most the number of strands in each component, the writhes of these components and the linking number. Because $Tr$ is a Markov trace, we are free to apply the aforementioned Markov moves to any braid without changing its value. This concludes the proof.
\end{proof}
\end{document} |
\begin{document}
\title[Subgeometric Adaptive MCMC]{Limit theorems for some adaptive MCMC
algorithms with subgeometric kernels
}
\author[Y. Atchadé]{Yves Atchadé}
\thanks{ Y. Atchadé: University of Michigan, 1085 South University, Ann Arbor,
48109, MI, United States. {\em E-mail address} yvesa@umich.edu}
\author[G. Fort]{ Gersende Fort} \thanks{G. Fort: LTCI, CNRS-TELECOM ParisTech,
46 rue Barrault, 75634 Paris Cedex 13, France. {\em E-mail address}
gfort@tsi.enst.fr}
\thanks{This work is partly supported by the french National Research Agency
(ANR) under the program ANR-05-BLAN-0299.}
\subjclass[2000]{60J10, 65C05}
\keywords{Adaptive Markov chain Monte Carlo, Markov chain, Subgeometric
ergodicity.}
\maketitle
\begin{abstract}
This paper deals with the ergodicity (convergence of the marginals) and the law of large numbers for adaptive MCMC algorithms built from transition kernels that are not necessarily geometrically ergodic.
We develop a number of results that broaden significantly the class of adaptive
MCMC algorithms for which rigorous analysis is now possible. As an example, we
give a detailed analysis of the Adaptive Metropolis Algorithm of
\cite{haarioetal00} when the target distribution is sub-exponential in the
tails.
\end{abstract}
\setcounter{secnumdepth}{3}
\section{Introduction}
This paper deals with the convergence of Adaptive Markov Chain Monte Carlo (AMCMC).
Markov Chain Monte Carlo (MCMC) is a well known, widely used
method to sample from arbitrary probability distributions. One of the major
limitation of the method is the difficulty in finding sensible values for the
parameters of the Markov kernels. Adaptive MCMC provides a general
framework to tackle this problem where the parameters are adaptively tuned,
often using previously generated samples. This approach generates a class of
stochastic processes that is the object of this paper.
Denote $\pi$ the probability measure of interest on some measure space
$(\mathsf{X},\mathcal{X})$. Let $\{P_\theta,\theta\in\Theta\}$ be a family of
$\phi$-irreducible and aperiodic Markov kernels each with invariant
distribution $\pi$. We are interested in the class of stochastic processes
based on non-homogeneous Markov chains $\{(X_n,\theta_n),\;n\geq 0\}$ with
transition kernels $\{\bar P\left(n; (x,\theta); (dx',d\theta')\right), n\geq 0
\}$ satisfying $\int_{\Theta} \bar P\left(n; (x,\theta); (\cdot,d\theta')
\right) = P_\theta(x,\cdot)$. Often, these transition kernels are of the form
$\{P_\theta(x,dy)\delta_{H_{n}(\theta,y)}(d\theta'), n\geq 0\}$ where
$\{H_l,\;l\geq 0\}$ is a family measurable functions, $H_l:\; \Theta\times
\mathsf{X}\to \Theta$. The stochastic approximation dynamic corresponds to the case
$H_l(\theta,x)=\theta+\gamma_l \; H(\theta,x)$. In this latter case, it is
assumed that the best values for $\theta$ are the solutions of the equation
$\int H(\theta,x)\pi(dx)=0$. Since the pioneer work of \cite{gilksetal98,
holden98, haarioetal00, andrieuetrobert02}, the number of AMCMC algorithms in
the literature has significantly increased in recent years. But despite many
recent works on the topic, the asymptotic behavior of these algorithms is still
not completely understood. Almost all previous works on the convergence of
AMCMC are limited to the case when each kernel $P_\theta$ is geometrically
ergodic (see e.g.\ \cite{rosenthaletroberts05,andrieuetal06}). In this paper,
we weaken this condition and consider the case when each transition kernel is
sub-geometrically ergodic.
More specifically, we study the ergodicity of the marginal $\{X_n, n\geq 0 \}$
i.e. the convergence to $\pi$ of the distribution of $X_n$ irrespective of the
initial distribution, and the existence of a strong law of large numbers
for AMCMC.
We first show that a diminishing adaptation assumption of the form
$|\theta_n-\theta_{n-1}|\to 0$ in a sense to be made precise (assumption
B\ref{B1}) together with a uniform-in-$\theta$ positive recurrence towards a
small set $C$ (assumptions A\ref{A-VCset}(\ref{Anew}) and
A\ref{A-VCset}(\ref{A3rev})) and a uniform-in-$\theta$ ergodicity condition of
the kernels $\{P_\theta, \theta \in \Theta\}$ (assumption
A\ref{A-VCset}(\ref{A4rev})) are enough to imply the ergodicity of AMCMC.
We believe that this result is close to be optimal. Indeed, it is well documented in the literature that
AMCMC can fail to be ergodic if the diminishing assumption does not hold (see
e.g. \cite{rosenthaletroberts05} for examples). Furthermore, the additional
assumptions are also fairly weak since in the case where $\Theta$ is reduced to
the single point $\{\theta_\star\}$ so that $\{X_n, n\geq 0\}$ is a Markov
chain with transition kernel $P_{\theta_\star}$, these conditions hold if
$P_{\theta_\star}$ is an aperiodic positive kernel that is polynomially ergodic.
We then prove a strong law of large numbers for AMCMC. We show that the
diminishing adaptation assumption and a uniform-in-$\theta$ polynomial drift
condition towards a small set $\mathcal{C}$ of the form $P_\theta V\leq V-c
V^{1-\alpha}+b\ensuremath{\mathbbm{1}}_{\mathcal{C}}(x)$, $\alpha\in (0,1)$, implies a strong law of large
number for all real-valued measurable functions $f$ for which
$\sup_{\mathsf{X}}(|f|/V^{\beta})<\infty$, $\beta\in[0,1-\alpha)$. This result is
close to what can be achieved with Markov chains (with fixed transition kernel)
under similar conditions (\cite{meynettweedie93}).
On a more technical note, this paper makes two key contributions to the
analysis of AMCMC. Firstly, to study the ergodicity, we use a more careful
coupling technique which extends the coupling approach of
\cite{rosenthaletroberts05}. Secondly, we tackle the law of large numbers using
a resolvent kernel approach together with martingales theory. This approach has
a decisive advantage over the more classical Poisson equation approach
(\cite{andrieuetal06}) in that no continuity property of the resolvent kernels
is required. It is also worth noting that the results developed in this paper
can be applied to adaptive Markov chains beyond Markov Chain Monte Carlo
simulation provided all the transition kernels have the same invariant
distribution.
The remainder of the paper is organized as follows. In Section
\ref{sec:ResultsUnif} we state our assumptions followed by a statement of our
main results. Detailed discussion of the assumptions and some comparison with
the literature are provided in Section \ref{sec:discussionUnif}. We apply our
results to the analysis of the Adaptive Random Walk Metropolis algorithm of
\cite{haarioetal00} when the target distribution is sub-exponential in the
tails. This is covered in Section \ref{sec:Example} together with a toy
example taken from \cite{atchadeetrosenthal03}. All the proofs are postponed to
Section~\ref{sec:Proofs}.
\section{Statement of the results and discussion}\label{sec:ResultsUnif}
\subsection{Notations}\label{sec:notations}
For a transition kernel $P$ on a measurable general state space
$(\mathbb{T},\mathcal{B}(\mathbb{T}))$, denote by $P^n$, $n\geq 0$, its $n$-th
iterate defined as
\[
P^0(x,A) \eqdef \delta_x(A) \;, \qquad \qquad P^{n+1}(x,A) \eqdef \int
P(x,dy ) P^n(y,A) \;, \quad n \geq 0 \;;
\]
$\delta_x(dt)$ stands for the Dirac mass at $\{x\}$. $P^n$ is a transition kernel
on $(\mathbb{T},\mathcal{B}(\mathbb{T}))$ that acts both on bounded measurable
functions $f$ on $\mathbb{T}$ and on $\sigma$-finite measures $\mu$ on
$(\mathbb{T},\mathcal{B}(\mathbb{T}))$ via $P^nf(\cdot) \eqdef \int
P^n(\cdot,dy) f(y)$ and $\mu P^n(\cdot) \eqdef \int \mu(dx) P^n(x, \cdot)$.
If $V: \mathbb{T}\to [1, +\infty)$ is a function, the $V$-norm of a function
$f: \mathbb{T}\to \mathbb R$ is defined as $|f|_V \eqdef \sup_{\mathbb{T}} |f|
/V$. When $V=1$, this is the supremum norm. The set of functions with finite
$V$-norm is denoted by $\mathcal{L}_V$.
If $\mu$ is a signed measure on a measurable space
$(\mathbb{T},\mathcal{B}(\mathbb{T}))$, the total variation norm $\| \mu
\|_{\mathrm{TV}}$ is defined as
\[
\| \mu \|_{\mathrm{TV}} \eqdef \sup_{\{f, |f|_1 \leq 1 \}} | \mu(f)| = 2 \; \sup_{A \in
\mathcal{B}(\mathbb{T})}|\mu(A)|= \sup_{A \in \mathcal{B}(\mathbb{T})} \mu(A)
- \inf_{A \in \mathcal{B}(\mathbb{T})} \mu(A) \;;
\]
and the $V$-norm, for some function $V : \mathbb{T} \to [1, +\infty)$, is
defined as $\| \mu \|_{V} \eqdef \sup_{\{g, |g|_V \leq 1 \}} |\mu(g)|$.
Let $\mathsf{X}, \Theta$ be two general state spaces resp. endowed with a countably
generated $\sigma$-field $\mathcal{X}$ and $\mathcal{B}(\Theta)$. Let $\{P_\theta, \theta \in
\Theta \}$ be a family of Markov transition kernels on $(\mathsf{X},\mathcal{X})$ such
that for any $(x,A) \in \mathsf{X} \times \mathcal{X}$, $\theta \mapsto P_\theta(x,A)$
is measurable. Let $\{\bar P(n;\cdot,\cdot), n \geq 0 \}$ be a family of transition kernels on $(\mathsf{X} \times \Theta, \mathcal{X} \otimes \mathcal{B}(\Theta))$,
satisfying for any $A \in \mathcal{X}$,
\begin{equation}\label{eq:tk1}
\int_{A \times \Theta} \bar P\left(n; (x,\theta); (dx',d\theta')\right) = P_{\theta}(x, A) \;.
\end{equation}
An adaptive Markov chain is a non-homogeneous Markov chain $\{ Z_n =
(X_n,\theta_n), n\geq 0 \}$ on $\mathsf{X}\times\Theta$ with transition kernels
$\{\bar P(n; \cdot; \cdot), n \geq 0\}$.
Among examples of such transition kernels, consider the case when
$\{(X_n,\theta_n), n\geq 0\}$ is obtained through the algorithm: given
$(X_n,\theta_n)$, sample $X_{n+1} \sim P_{\theta_n}(X_n, \cdot)$ and set
$\theta_{n+1} = \theta_n$ with probability $1-p_{n+1}$ or set $\theta_{n+1}=
\tilde \Xi_{n+1}(X_n,\theta_n,X_{n+1})$ with probability $p_{n+1}$. Then
\begin{multline*}
\bar P\left(n; (x,\theta); (dx',d\theta')\right) = P_\theta(x,dx') \ \left\{
\left(1-p_{n+1} \right) \ \delta_\theta(d\theta') + p_{n+1} \ \delta_{\tilde
\Xi_{n+1}(x,\theta,x')}(d\theta') \right\} \;.
\end{multline*}
A special case is the case when $p_{n+1}=1$ and $\theta_{n+1} =
H_{n+1}(\theta_n,X_{n+1})$, where $\{H_l, l\geq 0 \}$ is a family of measurable
functions $H_l: \Theta \times \mathsf{X} \to \Theta$. Then,
\[
\bar P\left(n; (x,\theta); (dx',d\theta')\right) \eqdef P_{\theta}(x, dx') \
\ \delta_{H_{n+1}(\theta,x')}(d \theta') \;.
\]
Such a situation occurs for example if $\theta_{n+1}$ is updated following a
stochastic approximation dynamic: $\theta_{n+1} = \theta_n + \gamma_{n+1}
H(\theta_n,X_{n+1})$.
From $\{\bar P\left(n;\cdot,\cdot\right),\;n\geq 0\}$ and for any integer
$l\geq 0$, we introduce a family - indexed by $l$ - of sequence of transition
kernels $\{\bar P_l(n;\cdot,\cdot), n \geq 0 \}$, where $\bar
P_l\left(n;\cdot,\cdot\right) \eqdef \bar P\left(l+n;\cdot,\cdot\right)$ and we
denote by $\mathbb{P}_{x,\theta}^{(l)}$ and $\mathbb{E}_{x,\theta}^{(l)}$ the probability and
expectation on the canonical space $(\Omega, \mathcal{F})$ of the canonical
non-homogeneous Markov chain $\{ Z_n = (X_n,\theta_n), n\geq 0 \}$ with
transition kernels $\{\bar P_l(n; \cdot; \cdot), n \geq 0\}$ and initial
distribution $\delta_{(x,\theta)}$. We denote by $\underline{\theta}$ the
shift operator on $\Omega$ and by $\{\mathcal{F}_k, k \geq 0 \}$ the natural filtration
of the process $\{Z_k, k\geq 0\}$. We use the notations $\mathbb{P}_{x,\theta}$ and
$\mathbb{E}_{x,\theta}$ as shorthand notations for $\mathbb{P}_{x,\theta}^{(0)}$ and
$\mathbb{E}_{x,\theta}^{(0)}$.
Set
\[
D(\theta,\theta') \eqdef \sup_{x \in \mathsf{X}} \| P_{\theta}(x,\cdot) -
P_{\theta'}(x,\cdot) \|_{\mathrm{TV}} \;.
\]
\subsection{Convergence of the marginals}
We assume that minorization, drift conditions and ergodicity are available for
$P_\theta$ uniformly in $\theta$. For a set $\mathcal{C}$, denote by $\tau_\mathcal{C}$
the return-time to $\mathcal{C} \times \Theta$ : $\tau_\mathcal{C} \eqdef \inf\{n \geq 1,
X_n \in \mathcal{C} \}$.
\debutA
\item \label{A-VCset} There exist a measurable function $V: \mathsf{X} \to
[1,+\infty)$ and a measurable set $\mathcal{C}$ such that
\begin{enumerate}[(i)]
\item \label{Anew} $\sup_l \sup_{\mathcal{C} \times \Theta}
\mathbb{E}_{x,\theta}^{(l)}\left[\mathbf{r}(\tau_\mathcal{C}) \right] < +\infty$ for some
non-decreasing function $\mathbf{r} : {\mathbb{N}} \to (0, +\infty)$ such that $\sum_n
1/\mathbf{r}(n) < +\infty$.
\item \label{A4rev} there exists a probability measure $\pi$ such that
\[
\lim_{n \to +\infty} \ \sup_{x \in \mathsf{X}} V^{-1}(x) \ \sup_{\theta \in \Theta}
\| P^n_\theta(x, \cdot) -\pi \|_{\mathrm{TV}} = 0 \;.
\]
\item \label{A3rev} $ \sup_\theta P_\theta V \leq V$ on $\mathcal{C}^c$ and $
\sup_{\mathcal{C} \times \Theta} \{P_\theta V(x) + V(x) \} < +\infty$.
\end{enumerate}
\finA
\debutB
\item \label{B1} There exist probability distributions $\xi_1, \xi_2$ resp. on
$\mathsf{X}, \Theta$ such that for any $\epsilon>0$, $ \lim_n
\mathbb{P}_{\xi_1,\xi_2}\left( D(\theta_n, \theta_{n-1}) \geq \epsilon \right)=0$.
\finB
\begin{theo}
\label{theo:MarginalUnifCase}
Assume A\ref{A-VCset} and B\ref{B1}. Then
\[
\lim_{n \to +\infty} \sup_{\{f, |f|_1 \leq 1 \}} \left|
\mathbb{E}_{\xi_1, \xi_2}\left[f(X_n) - \pi (f)\right] \right| = 0 \;.
\]
\end{theo}
Sufficient conditions for A\ref{A-VCset} to hold are the following
uniform-in-$\theta$ conditions \debutA
\item \label{Adrift}
\begin{enumerate}[(i)]
\item The transition kernels $P_\theta$ are $\phi$-irreducible, aperiodic.
\item There exist a function $V : \mathsf{X} \to [1, +\infty)$, $\alpha \in
(0,1)$ and constants $b,c$ such that for any $\theta \in \Theta$
\[
P_\theta V(x) \leq V(x) - c\ V^{1-\alpha}(x) + b\ensuremath{\mathbbm{1}}_\mathcal{C}(x) \;.
\]
\item For any level set $\mathcal{D}$ of $V$, there exist $\epsilon_\mathcal{D}>0$ and a
probability $\nu_\mathcal{D}$ such that for any $\theta$, $P_\theta(x, \cdot) \geq
\epsilon_\mathcal{D} \ensuremath{\mathbbm{1}}_\mathcal{D}(x) \ \nu_\mathcal{D}(\cdot)$.
\end{enumerate}
\finA
We thus have the corollary
\begin{coro}{(of Theorem~\ref{theo:MarginalUnifCase})}
\label{coro:MarginalUnifCase}
Assume A\ref{Adrift} and B\ref{B1}. Then
\[
\lim_{n \to +\infty} \sup_{\{f, |f|_1 \leq 1 \}} \left|
\mathbb{E}_{\xi_1, \xi_2}\left[f(X_n) - \pi (f)\right] \right| = 0 \;.
\] \end{coro}
Assumption A\ref{A-VCset}(\ref{Anew}) and A\ref{A-VCset}(\ref{A3rev}) are
designed to control the behavior of the chain ``far from the center''. When
the state space $\mathsf{X}$ is ``bounded'' so that for example, $V=1$ in
A\ref{A-VCset}(\ref{A4rev}), then we have the following result
\begin{lemma}
\label{lemma:MarginalUnifCaseBounded}
If there exists a probability measure $\pi$ such that $\lim_{n \to +\infty} \
\sup_{ \mathsf{X} \times \Theta} \| P^n_\theta(x, \cdot) -\pi(\cdot) \|_{\mathrm{TV}} = 0 $, then
A\ref{A-VCset}(\ref{Anew}) and A\ref{A-VCset}(\ref{A3rev}) hold with a bounded
function $V$ and $\mathcal{C} = \mathsf{X}$.
\end{lemma}
Combining the assumptions of Lemma~\ref{lemma:MarginalUnifCaseBounded} and
B\ref{B1}, we deduce from Theorem~\ref{theo:MarginalUnifCase} the convergence
of the marginals. This result coincides with \cite[Theorem
5]{rosenthaletroberts05}. As observed by \cite{Bai:2008} (personal
communication), assumption A\ref{Adrift} also implies the ``containment
condition'' as defined in \cite{rosenthaletroberts05}. Consequently,
Corollary~\ref{coro:MarginalUnifCase} could also be established by applying
\cite[Theorem 13]{rosenthaletroberts05}: this would yield the following
statement, which is adapted from \cite{Bai:2008}. Define $M_\epsilon(x,\theta)
\eqdef \inf \{n \geq 1, \|P_\theta^n(x,\cdot) - \pi(\cdot) \|_\mathrm{TV} \leq \epsilon
\}$.
\begin{prop}
\label{prop:YanBai}
Assume A\ref{Adrift} and B\ref{B1}. Then for any
$\epsilon>0$, the sequence $\{M_\epsilon(X_n,\theta_n), n\geq 0 \}$ is bounded
in probability for the probability $\mathbb{P}_{\xi_1,\xi_2}$ and
\[
\lim_{n \to +\infty} \sup_{\{f, |f|_1 \leq 1 \}} \left| \mathbb{E}_{\xi_1,
\xi_2}\left[f(X_n) - \pi (f)\right] \right| = 0 \;.
\]
\end{prop}
\subsection{Strong law of large numbers}
Assumptions A\ref{A-VCset} and B\ref{B1} are strengthened as follows \debutA
\item \label{A2} There exist a probability measure $\nu$ on $\mathsf{X}$, a positive
constant $\varepsilon$ and a set $\mathcal{C} \in \mathcal{X}$ such that for any
$\theta \in \Theta$, $P_\theta(x,\cdot) \geq \ensuremath{\mathbbm{1}}_\mathcal{C}(x) \ \varepsilon
\nu(\cdot)$.
\item \label{A5} There exist a measurable function $V: \mathsf{X} \to [1,+\infty)$,
$0 < \alpha < 1$ and positive constants $b,c$ such that for any $\theta \in
\Theta$, $P_\theta V \leq V - c \ V^{1-\alpha} + b \ensuremath{\mathbbm{1}}_\mathcal{C}$.
\item \label{A6} There exist a probability measure $\pi$ and some $0 \leq
\beta < 1-\alpha$ such that for any level set $\mathcal{D} \eqdef \{x \in \mathsf{X}, V(x)
\leq d \}$ of $V$,
\[
\lim_{n \to +\infty} \ \sup_{\mathcal{D} \times \Theta} \| P^n_\theta(x, \cdot) -\pi
\|_{V^\beta} = 0 \;.
\]
\finA
\debutB
\item \label{B2} For any level set $\mathcal{D}$ of $V$ and any $\epsilon>0$,
\[
\lim_n \sup_{l \geq 0} \sup_{ \mathcal{D} \times \Theta} \mathbb{P}_{x,\theta}^{(l)}\left(
D(\theta_n, \theta_{n-1}) \geq \epsilon \right)=0 \;.
\]
\finB
\begin{theo}
\label{theo:SLLNUnboundedUnifCase}
Assume A\ref{A2}-\ref{A6} and B\ref{B2}. Then for any
measurable function $f: \mathsf{X} \to \mathbb R$ in $\mathcal{L}_{V^\beta}$ and any initial
distribution $\xi_1,\xi_2$ resp. on $\mathsf{X}, \Theta$ such that $\xi_1(V) <
+\infty$,
\[
\lim_{n \to +\infty} n^{-1} \sum_{k=1}^n f(X_k) = \pi(f) \;, \qquad \qquad
\mathbb{P}_{\xi_1,\xi_2}-\text{a.s.}
\]
\end{theo}
As in the case of the convergence of the marginals, when A\ref{A6} and
B\ref{B2} hold with $\mathcal{D} = \mathsf{X}$ and $\beta = 0$, A\ref{A2} and A\ref{A5}
can be omitted. We thus have
\begin{prop}
\label{prop:SLLNUnboundedUnifCaseBounded}
Assume that A\ref{A6} and B\ref{B2} hold with $\mathcal{D} = \mathsf{X}$ and
$\beta=0$. Then for any measurable bounded function $f: \mathsf{X} \to \mathbb R$
and any initial distribution $\xi_1,\xi_2$ resp. on $\mathsf{X}, \Theta$
\[
\lim_{n \to +\infty} n^{-1} \sum_{k=1}^n f(X_k) = \pi(f) \;, \qquad \qquad
\mathbb{P}_{\xi_1,\xi_2}-\text{a.s.}
\]
\end{prop}
\subsection{Discussion}\label{sec:discussionUnif}
\subsubsection{Non-adaptive case}
We start by comparing our assumptions to assumptions in Markov chain theory
under which the law of large numbers holds. In the setup above, taking
$\Theta=\{\theta_\star\}$ and $H(\theta_\star,x) = \theta_\star$ reduces
$\{X_n, n\geq 0\}$ to a Markov chain with transition kernel $P_{\theta_\star}$.
Assume that $P_{\theta_\star}$ is Harris-recurrent.
In that case, a condition which is known to be minimal and to imply ergodicity
in total variation norm is that $P_{\theta_\star}$ is an aperiodic positive
Harris recurrent transition kernel \cite[Theorems 11.0.1 and
13.0.1]{meynettweedie93}. Condition A\ref{A-VCset}(\ref{Anew}) is stronger
than positive Harris recurrence since it requires $\sup_\mathcal{C} \mathbb{E}_x
[\mathbf{r}(\tau_\mathcal{C})]<+\infty$ for some rate $\mathbf{r}$, $\mathbf{r}(n)\gg n$.
Nevertheless, as discussed in the proof (see Remark~\ref{rem:YanBai},
Section~\ref{sec:Proofs}), the condition $\sum_n \{1/\mathbf{r}(n) \} <+\infty$ is
really designed for the adaptive case. A\ref{A-VCset}(\ref{A4rev}) is stronger
than what we want to prove (since A\ref{A-VCset}(\ref{A4rev}) implies the
conclusion of Theorem~\ref{theo:MarginalUnifCase} in the non-adaptive case);
this is indeed due to our technique of proof which is based on the comparison
of the adaptive process to a process - namely, a Markov chain with transition
kernel $P_\theta$ - whose stationary distribution is $\pi$. Our proof is thus
designed to address the adaptive case. Finally, B\ref{B1} is trivially true.
For the strong law of large numbers (Theorem \ref{theo:SLLNUnboundedUnifCase}),
B\ref{B2} is still trivially true in the Markovian case and A\ref{A6} is
implied by A\ref{A2} and A\ref{A5} combined with the assumption that
$P_{\theta_\star}$ is $\phi$-irreducible and aperiodic (see
Appendix~\ref{app:UniformControl} and references therein). In the Markovian
case, whenever $P_{\theta_\star}$ is $\phi$-irreducible and aperiodic,
A\ref{A2} and A\ref{A5} are known sufficient conditions for a strong law of
large numbers for $f \in \mathcal{L}_{V^{1-\alpha}}$, which is a bit stronger than the
conclusions of Theorem~\ref{theo:SLLNUnboundedUnifCase}. This slight loss of
efficiency is due to the technique of proof based on martingale theory (see
comments Section~\ref{subsec:MethodsProof}). Observe that in the geometric
case, there is the same loss of generality in \cite[Theorem 8]{andrieuetal06}.
More generally, any proof of the law of large numbers based on the martingale
theory (through for example the use of the Poisson's equation or of the
resolvent kernel) will incur the same loss of efficiency since limit theorems
exist only for $L^p$-martingale with $p>1$.
\subsubsection{Checking assumptions A\ref{A-VCset}(\ref{A4rev}) and A\ref{A6}}
\label{subsec:CheckCond}
A\ref{A-VCset}(\ref{A4rev}) and A\ref{A6} are the most technical of our
assumptions. Contrary to the case of a single kernel, the relations between
A\ref{A-VCset}(\ref{A4rev}) (resp. A\ref{A6}) and
A\ref{A-VCset}(\ref{Anew})-A\ref{A2} (resp. A\ref{A2}, A\ref{A5}) are not
completely well understood. Nevertheless these assumptions can be checked under
conditions which are essentially of the form A\ref{A2}, A\ref{A5} plus the
assumptions that each transition kernel $P_\theta$ is $\phi$-irreducible and
aperiodic, as discussed in Appendix~\ref{app:UniformControl}.
\subsubsection{On the uniformity in $\theta$ in assumptions A\ref{A-VCset}(\ref{Anew}), A\ref{A-VCset}(\ref{A4rev}), A\ref{A2} and A\ref{A5}}
We have formulated A\ref{A-VCset}(\ref{Anew}), A\ref{A-VCset}(\ref{A4rev}),
A\ref{A2} and A\ref{A5} such that all the constants involved are independent of
$\theta$, for $\theta \in\Theta$. Intuitively, this corresponds to AMCMC
algorithms based on kernels with overall similar ergodicity properties. This
uniformity assumption might seem unrealistically strong at first. But the next
example shows that when these conditions do not hold
uniformly in $\theta$ for $\theta \in\Theta$, pathologies can occur if the
adaptation parameter can wander to the boundary of $\Theta$.
\begin{example}
The example is adapted from \cite{winkler03}. Let $\mathsf{X}=\{0,1\}$ and
$\{P_\theta,\;\theta\in(0,1)\}$ be a family of transition matrices with
$P_\theta(0,0)=P_\theta(1,1)=1-\theta$. Let $\{\theta_n, n\geq 0\}$,
$\theta_n \in (0,1)$, be a deterministic sequence of real numbers decreasing
to $0$ and $\{X_n, n\geq 0\}$ be a non-homogeneous Markov chain on $\{0,1\}$
with transition matrices $\{P_{\theta_n}, n\geq 0\}$. One can check that
$D(\theta_n,\theta_{n-1})\leq \theta_{n-1}-\theta_n$ for all $n\geq 1$ so
that B\ref{B1} and B\ref{B2} hold.
For any compact subset $\mathsf{K}$ of $(0,1)$, it can be checked that
A\ref{A-VCset}(\ref{Anew}), A\ref{A-VCset}(\ref{A4rev}), A\ref{A2} and
A\ref{A5} hold uniformly for all $\theta\in\mathsf{K}$. But these assumptions
do not hold uniformly for all $\theta\in (0,1)$. Therefore Theorems
\ref{theo:MarginalUnifCase} and \ref{theo:SLLNUnboundedUnifCase} do not
apply. Actually one can easily check that $\mathbb{P}_{x,\theta_0}\left(X_n\in
\cdot\right) \to \pi(\cdot)$ as $n\to\infty$, but that
$\mathbb{E}_{x,\theta_0}\left[\left(n^{-1}\sum_{k=1}^n
f(X_k)-\pi(f)\right)^2\right]$ does not converge to $0$ for bounded
functions $f$. That is, the marginal distribution of $X_n$ converges to $\pi$
but a weak law of large numbers fails to hold.
\end{example}
This raises the question of how to construct AMCMC when
A\ref{A-VCset}(\ref{Anew}), A\ref{A-VCset}(\ref{A4rev}), A\ref{A2} and
A\ref{A5} do not hold uniformly for all $\theta\in\Theta$. When these
assumptions hold uniformly on any compact subsets of $\Theta$ and the
adaptation is based on stochastic approximation, one approach is to stop the
adaptation or to reproject $\theta_n$ back on $\mathcal{K}$ whenever
$\theta_n\notin\mathcal{K}$ for some fixed compact $\mathcal{K}$ of $\Theta$. A
more elaborate strategy is Chen's truncation method which - roughly speaking -
reinitializes the algorithm with a larger compact, whenever
$\theta_n\notin\mathcal{K}$ (\cite{chen:zhu:1986,chen:gua:gao:1988}). A third
strategy consists in proving a drift condition on the bivariate process
$\{(X_n,\theta_n), n \geq 0\}$ in order to ensure the stability of the process
(\cite{andrieu:vlad:2008}, see also \cite{benveniste:metivier:priouret:1987}).
This question is however out of the scope of this paper; the use of the Chen's
truncation method to weaken our assumption is addressed in
\cite{atchade:fort:2008b}.
\subsubsection{Comparison with the literature}
\label{subsec:CompLite}
The convergence of AMCMC has been considered in a number of early works, most
under a geometric ergodicity assumption. \cite{haarioetal00} proved the
convergence of the adaptive Random Walk Metropolis (ARWM) when the state space
is bounded. Their results were generalized to unbounded spaces in
\cite{atchadeetrosenthal03} assuming the diminishing adaptation assumption and
a geometric drift condition of the form
\begin{equation}\label{GeoDrift}P_\theta V(x)\leq \lambda V(x)+b\textbf{1}_C(x),\end{equation}
for $\lambda\in (0,1)$, $b<\infty$ and $\theta\in\Theta$.
\cite{andrieuetal06} undertook a thorough analysis of adaptive chains under the
geometric drift condition (\ref{GeoDrift}) and proved a strong law of large
numbers and a central limit theorem. \cite{andrieuetatchade05} gives a theoretical discussion on the efficiency of
AMCMC under (\ref{GeoDrift}).
\cite{rosenthaletroberts05} improves on the literature by relaxing the
convergence rate assumption on the kernels. They prove the convergence of the
marginal and a weak law of large numbers for bounded functions. But their
analysis requires a uniform control on certain moments of the drift function, a
condition which is easily checked in the geometric case (i.e. when
A\ref{Adrift} or A\ref{A5} is replaced with (\ref{GeoDrift})). Till recently,
it was an open question in the polynomial case but this has been recently
solved by \cite{Bai:2008} - contemporaneously with our work - who proves that
such a control holds under conditions which are essentially of the form
A\ref{Adrift}.
\cite{yang:2007} tackles some open questions mentioned in
\cite{rosenthaletroberts05}, by providing sufficient conditions - close to the
conditions we give in Theorems~\ref{theo:MarginalUnifCase} and
\ref{theo:SLLNUnboundedUnifCase} - to ensure convergence of the marginals and a
weak law of large numbers for bounded functions. The conditions in
\cite[Theorems 3.1 and 3.2]{yang:2007} are stronger than our conditions. But
we have noted some skips and mistakes in the proofs of these theorems.
\subsubsection{Comments on the methods of proof}
\label{subsec:MethodsProof}
The proof of Theorem \ref{theo:MarginalUnifCase} is based on an argument
extended from \cite{rosenthaletroberts05} which can be sketched heuristically
as follows. For $N$ large enough, we can expect $P^N_{\theta_n}(X_n,\cdot)$ to
be within $\epsilon$ of $\pi$ (by ergodicity). On the other hand, since the
adaptation is diminishing, by waiting long enough, we can find $n$ such that
the distribution of $X_{n+N}$ given $(X_n,\theta_n)$ is within $\epsilon$ of
$P^N_{\theta_n}(X_n,\cdot)$. Combining these two arguments, we can then
conclude that the distribution of $X_{n+N}$ is within $2\epsilon$ of $\pi$.
This is essentially the argument of \cite{rosenthaletroberts05}. The difficulty
with this argument is that the distance between $P_{\theta_n}^N(x,\cdot)$ and
$\pi$ depends in general on $x$ and can rarely be bounded uniformly in $x$. We
solve this problem here by introducing some level set $\mathcal{C}$ of $V$ and by
using two basic facts: \textit{(i)} under A\ref{A-VCset}(\ref{Anew}), the
process cannot wait too long before coming back in $\mathcal{C}$; \textit{(ii)} under
A\ref{A-VCset}(\ref{A4rev}-\ref{A3rev}), a bound on the distance between
$P_{\theta_n}^N(x,\cdot)$ and $\pi$ uniformly in $x$, for $x \in \mathcal{C}$, is
possible.
The proof of Theorem \ref{theo:SLLNUnboundedUnifCase} is based on a resolvent
kernel approach that we adapted from \cite{merlevedeetal06} (see also
\cite{mw00}), combined with martingale theory. Another possible route to the
SLLN is the Poisson's equation technique which has been used to study adaptive
MCMC in \cite{andrieuetal06}. Under A\ref{A2} and A\ref{A5}, a solution
$g_\theta$ to the Poisson's equation with transition kernel $P_\theta$ exists
for any $f\in\mathcal{L}_{V^\beta}$, $0\leq \beta\leq 1-\alpha$ and
$g_\theta\in\mathcal{L}_{V^{\beta+\alpha}}$. But in order to use
$\{g_\theta,\;\theta\in\Theta\}$ to obtain a SLLN for $f$, we typically need to
control $|g_\theta-g_{\theta'}|$ which overall can be expensive. Here we avoid
these pitfalls by introducing the resolvent $\hat g_a(x,\theta)$ of the process
$\{X_n\}$, defined by
\[\hat g_a^{(l)}(x,\theta) \eqdef \sum_{j\geq 0}(1-a)^{j+1}\mathbb{E}_{x,\theta}^{(l)}\left[f(X_j)\right] \;, \;\;x\in\mathsf{X},\theta\in\Theta,a\in(0,1), l \geq 0 \;.
\]
\section{Examples}
\label{sec:Example}
\subsection{A toy example} We first consider an example discussed in
\cite{atchadeetrosenthal03} (see also \cite{rosenthaletroberts05}). Let $\pi$
be a target density on the integers $\{1, \cdots, K \}$, $K \geq 4$. Let
$\{P_\theta, \theta \in \{1, \cdots, M\} \}$ be a family of Random Walk
Metropolis algorithm with proposal distribution $q_\theta$, the uniform
distribution on $\{x-\theta, \cdots, x-1, x+1, \cdots, x+\theta \}$.
Consider the sequence $\{(X_n,\theta_{n}), n\geq 0 \}$ defined as follows:
given $X_n,\theta_n$,
\begin{itemize}
\item the conditional distribution of $X_{n+1}$ is $P_{\theta_n}(X_n, \cdot)$.
\item if $X_{n+1} = X_n$, set $\theta_{n+1} = \max(1, \theta_n -1)$ with
probability $p_{n+1}$ and $\theta_{n+1} = \theta_n$ otherwise; if $X_{n+1}
\neq X_n$, set $\theta_{n+1} = \min(M, \theta_n +1)$ with probability
$p_{n+1}$ and $\theta_{n+1} = \theta_n$ otherwise.
\end{itemize}
This algorithm defines a non-homogeneous Markov chain - still denoted
$\{(X_n,\theta_{n}), n\geq 0 \}$ - on a canonical probability space endowed
with a probability $\mathbb{P}$. The transitions of this Markov process are given by
the family of transition kernels $\{\bar P(n; (x,\theta), (dx', d\theta')),
n\geq 0 \}$ where
\begin{multline*}
\bar P(n; (x,\theta), (dx', d\theta')) = P_\theta(x,dx') \; \left( \ensuremath{\mathbbm{1}}_{x=x'} \left\{ p_{n+1} \ \delta_{1 \vee (\theta-1)}(d\theta') + (1-p_{n+1}) \ \delta_{\theta}(d\theta') \right\} \right. \\
\left. + \ensuremath{\mathbbm{1}}_{x\neq x'} \left\{ p_{n+1} \ \delta_{M \wedge
(\theta+1)}(d\theta') + (1-p_{n+1}) \ \delta_{\theta}(d\theta')
\right\} \right) \;.
\end{multline*}
In this example, each kernel $P_\theta$ is uniformly ergodic~: $P_\theta$ is
$\phi$-irreducible, aperiodic, possesses an invariant probability measure $\pi$
and
\[
\lim_n \sup_{x \in \mathsf{X}} \|P_\theta^n(x,\cdot) - \pi(\cdot) \|_{\mathrm{TV}} = 0 \;.
\]
Since $\Theta$ is finite, this implies that A\ref{A-VCset}(\ref{A4rev}) (resp.
A\ref{A6}) hold with $V=1$ (resp. $\mathcal{D} = \mathsf{X}$ and $\beta =0$). Furthermore,
$\mathbb{E}_{x,\theta}^{(l)}\left[D(\theta_n, \theta_{n+1})\right] \leq 2 p_{n+1}$ so
that B\ref{B1} (resp. B\ref{B2}) hold with any probability measures $\xi_1,
\xi_2$ (resp. with $\mathcal{D} = \mathsf{X}$) provided $p_n \to 0$. By
Lemma~\ref{lemma:MarginalUnifCaseBounded} combined with
Theorem~\ref{theo:MarginalUnifCase}, and by
Proposition~\ref{prop:SLLNUnboundedUnifCaseBounded}, we have
\begin{prop}
Assume $\lim_n p_n =0$. For any probability distributions $\xi_1, \xi_2$ on
$\mathsf{X}, \Theta$,
\begin{enumerate}[(i)]
\item $\sup_{\{f, |f|_1 \leq 1 \}} |\mathbb{E}_{\xi_1,\xi_2}[f(X_n)] - \pi(f)| \to
0$
\item For any bounded function $f$
\[
n^{-1} \sum_{k=1}^n f(X_k) \to \pi(f) \;, \qquad \qquad \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.}
\]
\end{enumerate}
\end{prop}
\subsection{The adaptive Random Walk Metropolis of \cite{haarioetal00}}
\label{sec:ex2}
We illustrate our results with the adaptive Random Walk Metropolis of
\cite{haarioetal00}. The Random Walk Metropolis (RWM) algorithm is a popular
MCMC algorithm~\cite{hastings:1970,metropolis:1953}. Consider a target
distribution $\pi$, absolutely continuous w.r.t. the Lebesgue measure
$\mu_{Leb}$, with density still denoted by $\pi$. Choose a proposal distribution with density
w.r.t. $\mu_{Leb}$ denoted $q$, and assume that $q$ is a positive symmetric
density on $\mathbb R^p$. The algorithm generates a Markov chain $\{X_n, n\geq 0\}$
with invariant distribution $\pi$ as follows. Given $X_n=x$, a new value
$Y=x+Z$ is proposed where $Z$ is generated from $q(\cdot)$. Then we either
'accept' $Y$ and set $X_{n+1}=Y$ with probability
$\alpha(x,Y)\eqdef\min\left(1,\pi(Y)/\pi(x)\right)$ or we 'reject' $Y$ and set
$X_{n+1}=x$.
For definiteness, we will assume that $q$ is a zero-mean multivariate Gaussian
distribution (this assumption can be replaced by regularity conditions and
moment conditions on the proposal distribution). Given a proposal distribution
with finite second moments, the convergence rate of the RWM kernel depends
mainly on the tail behavior of the target distribution $\pi$. If $\pi$ is
super-exponential in the tails with regular contours, then the RWM kernel is
typically geometrically ergodic (\cite{jarnerethansen98}). Otherwise, it is
typically sub-geometric
(\cite{gersendeetmoulines00,gersendeetmoulines03,doucetal04}).
Define
\[
\mu_\star\eqdef\int_\mathsf{X} x \; \pi(x) \; \mu_{Leb}(dx) \;, \qquad
\Sigma_\star\eqdef \int_\mathsf{X} xx^T \; \pi(x)\mu_{Leb}(dx) -\mu_\star \;
\mu_\star^{T} \;,
\]
resp. the expectation and the covariance matrix of $\pi$ ($\cdot^T$ denotes the
transpose operation). Theoretical results suggest setting the
variance-covariance matrix $\Sigma$ of the proposal distribution
$\Sigma=c_\star\Sigma_\star$ where $c_\star$ is set so as to reach the optimal
acceptance rate $\bar\alpha$ in stationarity (typically $\bar\alpha$ is set to
values around $0.3$--$0.4$). See e.g. \cite{robertsetrosenthal01} for more
details. \cite{haarioetal00} have proposed an adaptive algorithm to learn
$\Sigma_\star$ adaptively during the simulation. This algorithm has been studied in
detail in \cite{andrieuetal06} under the assumption that $\pi$ is
super-exponential in the tails. An adaptive algorithm to find the optimal value
$c_\star$ has been proposed in \cite{atchadeetrosenthal03} (see also
\cite{atchade05}) and studied under the assumption that $\pi$ is
super-exponential in the tails. We extend these results to cases where $\pi$ is
sub-exponential in the tails.
Let $\Theta_+$ be a convex compact subset of the cone of $p\times p$ symmetric
positive definite matrices endowed with the Schur norm $|\cdot|_\mathrm{s}$,
$|A|_\mathrm{s}\eqdef \sqrt{\mathrm{Tr}(A^T \, A)}$. For example, for $\mathsf{a}, M >
0$, $\Theta_+ = \{ \text{$A+\mathsf{a} \, \mathrm{Id}$: $A$ is symmetric
positive semidefinite and } |A|_\mathrm{s} \leq M \}$.
$-\infty<\kappa_l<\kappa_u<\infty$ and $\Theta_\mu$ a compact subset of
$\mathsf{X}$, we introduce the space $\Theta \eqdef \Theta_\mu \times \Theta_+\times
[\kappa_l,\kappa_u]$. For $\theta =(\mu,\Sigma,c)\in \Theta$, denote by
$P_\theta$ the transition kernel of the RWM algorithm with proposal
$q_{\theta}$ where $q_\theta$ stands for the multivariate Gaussian distribution
with variance-covariance matrix $e^c \Sigma$.
Consider the adaptive RWM defined as follows
\begin{algo}\label{arwm1}
\begin{description}
\item [Initialization] Let $\bar\alpha$ be the target acceptance probability.
Choose $X_0\in\mathsf{X}$, $(\mu_0,\Sigma_0,c_0)\in\Theta$.
\item [Iteration]
Given $(X_n,\mu_n,\Sigma_n,c_n)$:
\begin{description}
\item [1] Generate $Z_{n+1}\sim q_{\theta_n} d\mu_{Leb}$ and set $Y_{n+1} = X_n
+Z_{n+1}$. With probability $\alpha(X_n,Y_{n+1})$ set $X_{n+1}=Y_{n+1}$ and
with probability $1-\alpha(X_n,Y_{n+1})$, set $X_{n+1}=X_n$.
\item [2] Set
\begin{align}
\mu & = \mu_n+(n+1)^{-1}\left(X_{n+1}-\mu_n\right) \;, \label{ex2:defiMu} \\
\Sigma & = \Sigma_n+(n+1)^{-1}\left[\left(X_{n+1}-\mu_n\right)\left(X_{n+1}-\mu_n\right)^T-\Sigma_n\right] \;, \label{ex2:defiSigma} \\
c & = c_n+\frac{1}{n+1}\left(\alpha(X_n,Y_{n+1})-\bar\alpha\right) \;.
\label{ex2:defic}
\end{align}
\item [3] If $(\mu, \Sigma,c)\in\Theta$, set $\mu_{n+1} = \mu$,
$\Sigma_{n+1}=\Sigma$ and $c_{n+1}=c$. Otherwise, set $\mu_{n+1} = \mu_n$,
$\Sigma_{n+1}=\Sigma_n$ and $c_{n+1}=c_n$.
\end{description}
\end{description}
\end{algo}
This is an algorithmic description of a random process $\{(X_n, \theta_n),
n\geq 0\}$ which is a non-homogeneous Markov chain with successive transitions
kernels $\{\bar P(n; (x,\theta), (dx',d \theta')), n\geq 0 \}$ given by
\begin{multline*}
\bar P(n; (x,\theta), (dx',d \theta')) = \int q_\theta(z) \ \left\{ \alpha(x,x+z) \delta_{x+z}(dx') + (1-\alpha(x,x+z)) \delta_x(dx') \right\} \cdots \\
\left(\ensuremath{\mathbbm{1}}_{\{\phi(\theta,x+z,x') \in
\Theta\}}\delta_{\phi(\theta,x+z,x')}(d\theta') + \ensuremath{\mathbbm{1}}_{\{\phi(\theta,x+z,x')
\notin \Theta\}}\delta_{\theta}(d\theta') \right) \ d\mu_{Leb}(dz)
\end{multline*}
where $\phi$ is the function defined from the rhs expressions of
(\ref{ex2:defiMu}) to (\ref{ex2:defic}). Integrating over $\theta'$, we see
that for any $A \in \mathcal{X}$,
\[
\int_{A \times \Theta} \bar P(n;(x,\theta),(dx',d\theta')) = P_\theta(x,A)
\;.
\]
\begin{lemma}
\label{lem:example:smallset}
Assume that $\pi$ is bounded from below and from above on compact sets. Then
any compact subset $\mathcal{C}$ of $\mathsf{X}$ with $\mu_{Leb}(\mathcal{C})>0$ satisfies
A\ref{A2}.
\end{lemma}
\begin{proof}
See \cite[Theorem 2.2]{robertsettweedie96}.
\end{proof}
Following \cite{gersendeetmoulines00}, we assume that $\pi$ is
sub-exponential in the tails: \debutD
\item \label{D1} $\pi$ is positive and continuous on $\mathbb R^p$, and twice
continuously differentiable in the tails.
\item \label{D2} there exist $m\in (0,1)$, positive constants $d_i<D_i$, $i=0,1,2$ and
$r,R>0$ such that for $|x|\geq R$:
\begin{enumerate}[(i)]
\item \label{D2z} $\pscal{\frac{\nabla \pi(x)}{|\nabla \pi(x)|}}{\frac{x}{|x|}}
\leq -r $.
\item \label{D2a} $d_0|x|^m\leq -\log\pi(x)\leq D_0|x|^m$,
\item \label{D2b} $d_1|x|^{m-1}\leq |\nabla\log\pi(x)|\leq D_1|x|^{m-1}$,
\item \label{D2c} $d_2|x|^{m-2}\leq |\nabla^2\log\pi(x)|\leq D_2|x|^{m-2}$.
\end{enumerate}
\finD
Examples of target density that satisfies D\ref{D1}-D\ref{D2} are the Weibull
distributions on $\mathbb R$ with density $\pi(x) \propto |x|^{m-1} \exp(-\beta
|x|^m)$ (for large $|x|$), $\beta>0$, $m \in (0,1)$. Multidimensional examples
are provided in \cite{gersendeetmoulines00}.
\subsubsection{Law of large numbers for exponential functions}
In this subsection, we assume that \debutD
\item \label{D3} there exist $s_\star>0$, $ 0<\upsilon<1-m$ and $0<\eta<1$ such
that as $|x| \to+\infty$,
\[
\sup_{\theta \in \Theta} \ \int_{\{z, |z| \geq \eta |x|^\upsilon \}} \left(1
\vee \frac{\pi(x)}{\pi(x+z)} \right)^{s_\star} \; \; q_\theta(z) \
\mu_{Leb}(dz) =o\left( |x|^{2(m-1)} \right) \;.
\]
\finD A sufficient condition for D\ref{D3} is that $\pi(x+z) \geq \pi(x)
\pi(z)$ for any $x$ large enough and $|z| \geq \eta |x|^\upsilon$ (which holds
true for Weibull distributions with $0<m<1$). Indeed, we then have
\begin{multline*}
\int_{\{z, |z| \geq \eta |x|^\upsilon \}} \left(1 \vee
\frac{\pi(x)}{\pi(x+z)} \right)^{s_\star} \; q_\theta(z) \mu_{Leb}(dz) \\
\leq C\; \exp(-\lambda_\star \eta^2 |x|^{2 \upsilon}) \sup_{\theta \in
\Theta} \ \int \exp(s_\star D_0 |z|^m) \; \exp(\lambda_\star |z|^2) \
q_\theta(z) \mu_{Leb}(dz)
\end{multline*}
for some constant $C< +\infty$, and $\lambda_\star >0$ such that the rhs is
finite.
\begin{lemma}\label{driftRWM}
Assume D\ref{D1}-\ref{D3}. For $0<s \leq s_\star$, define $V_s(x)\eqdef 1 +
\pi^{-s}(x)$. There exists $0< s \leq s_\star$ such that for any $\alpha \in (0,1)$,
there exist positive constants $b,c$ and a compact set $\mathcal{C}$ such that
\begin{equation*}
\sup_{\theta \in \Theta} P_\theta V_s(x)\leq
V_s(x)-c V^{1-\alpha}_s(x)+b\ensuremath{\mathbbm{1}}_\mathcal{C}(x).
\end{equation*}
Hence A\ref{Adrift}-\ref{A6} hold.
\end{lemma}
\begin{lemma} \label{ex:lem:HypB}
Assume D\ref{D1}-\ref{D3}. B\ref{B2} holds and B\ref{B1} holds for any
probability measures $\xi_1$,$\xi_2$ such that $\int |\ln \pi|^{2/m} d \xi_1
< +\infty$.
\end{lemma}
The proofs of Lemmas~\ref{driftRWM} and \ref{ex:lem:HypB} are given in Appendix C.
\begin{prop}
\label{prop:ex2:CasRapide}
Assume D\ref{D1}-\ref{D3}. Consider the sequence $\{X_n, n \geq 0 \}$ given by the
algorithm \ref{arwm1}.
\begin{enumerate}[(i)]
\item For any probability measures $\xi_1,\xi_2$ such that $\int |\ln
\pi|^{2/m} d \xi_1 < +\infty$,
\[
\sup_{\{f, |f|_1 \leq 1 \}} |\mathbb{E}_{\xi_1,\xi_2}[f(X_n)] - \pi(f)| \to 0 \;.
\]
\item \label{item2} There exists $0 < s \leq s_\star$ such that for any
probability measures $\xi_1,\xi_2$ such that $\int |\pi|^{-s} d \xi_1 <
+\infty$, and any function $ f \in \mathcal{L}_{1+\pi^{-r}}$, $0 \leq r<s$,
\[
n^{-1} \sum_{k=1}^n f(X_k ) \to \pi(f) \;, \qquad \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.}
\]
\end{enumerate}
\end{prop}
The drift function $V_s$ exhibited in Lemma~\ref{driftRWM} is designed for limit theorems
relative to functions $f$ increasing as $\exp(\beta |x|^m)$. This implies a
condition on the initial distribution $\xi_1$ which has to possess
sub-exponential moments (see
Proposition~\ref{prop:ex2:CasRapide}(\ref{item2})), which always holds with
$\xi_1 = \delta_x$, $ x \in \mathsf{X}$.
\subsubsection{Law of large numbers for polynomially increasing functions}
Proposition~\ref{prop:ex2:CasRapide} also addresses the case when $f$ is of the
form $1+|x|^r$, $r>0$. Nevertheless, the conditions on $\xi_1$ and the
assumptions D\ref{D3} can be weakened in that case.
We have to find a drift function $V$ such that $V^{1-\alpha}(x) \sim
1+|x|^{r+\iota}$ for some $\alpha \in (0,1)$, $\iota>0$. Under D\ref{D3}, this
can be obtained from the proof of Lemma~\ref{driftRWM} and this yields $V(x) \sim 1 +
|x|^{r+\iota+2-m}$ (apply Jensen's inequality to the drift inequality
(\ref{eq:drift:sous-geom}) with the concave function $\phi(t) \sim [\ln
t]^{(r+\iota+2)/m-1}$; see \cite[Lemma 3.5]{jarneretroberts02} for similar
calculations). Hence, the condition on $\xi_1$ becomes
$\xi_1(|x|^{r+\iota+2-m})< +\infty$ for some $\iota>0$.
Drift inequalities with $V \sim (-\ln \pi)^{s}$ for some $s>2/m-1$, can also be
derived by direct computations: in that case, D\ref{D3} can be removed.
Details are omitted and left to the interested reader.
To conclude, observe that these discussions relative to polynomially increasing
functions can be extended to any function $f$ which is a concave transformation
of $\pi^{-s}$.
\section{Proofs of the results of Section~\ref{sec:ResultsUnif}}
\label{sec:Proofs}
For a set $\mathcal{C} \in \mathcal{X}$, define the hitting-time on $\mathcal{C} \times \Theta$
of $\{Z_n, n\geq 0 \}$ by $\sigma_\mathcal{C} \eqdef \inf\{n \geq 0, Z_n \in \mathcal{C}
\times \Theta \}$. If $\pi(|f|) < +\infty$, we set $\bar f \eqdef f - \pi(f)$.
\subsection{Preliminary results}
We gather some useful preliminary results in this section. Section
\ref{sec:OptCouplingUnif} gives an approximation of the marginal distribution
of the adaptive chain by the distribution of a related Markov chain. In Section
\ref{sec:ModMomentsUnif}, we develop various bounds for modulated moments of
the adaptive chain as consequences of the drift conditions. In Section
\ref{sec:ReturnTimesUnif} we bound the expected return times of the adaptive
chain to level sets of the drift function $V$. The culminating result of this
subsection is Theorem~\ref{theo:controleG} which gives an explicit bound on the
resolvent function $g^{(l)}_a(x,\theta)$.
\subsubsection{Optimal coupling}\label{sec:OptCouplingUnif}
\begin{lemma}
\label{lem:coupling}
For any integers $l \geq 0, N \geq 2$, any measurable bounded function $f$ on
$\mathsf{X}^N$ and any $(x,\theta) \in \mathsf{X} \times \Theta$,
\begin{multline*}
\Delta \eqdef \left| \mathbb{E}_{x, \theta}^{(l)}\left[ f(X_1, \cdots, X_N)
\right] - \int_{\mathsf{X}^N} P_{\theta}(x, dx_1) \; \prod_{k=2}^N
P_{\theta}(x_{k-1}, dx_k) f(x_1, \cdots, x_N)\right| \\ \leq |f|_1 \;
\sum_{j=1}^{N-1} \sum_{i=1}^j \mathbb{E}_{x,\theta}^{(l)} \left[D(\theta_i,
\theta_{i-1}) \right] \;.
\end{multline*}
\end{lemma}
\begin{proof}
We can assume w.l.o.g.\ that $|f|_{1} \leq 1$. Set $z_k = (x_k,t_k)$. With the
convention that $\prod_{k=a}^b a_k=1$ for $a>b$ and upon noting that $
\int_\mathsf{X} P_{\theta}(x, dx') h(x') = \int_{\mathsf{X} \times \Theta} \bar
P_{l}(0; (x,\theta), (dx', d \theta'))h(x')$ for any bounded measurable function $h:\;\mathsf{X}\to\mathbb R$,
\begin{multline*}
\Delta = \left| \int_{(\mathsf{X} \times \Theta)^N} \sum_{j=1}^{N-1} \bar P_{l}(0;
(x,\theta), dz_1) \;
\prod_{k=2}^{j} \bar P_{l}(k-1; z_{k-1}, d z_{k}) \cdots \right. \\
\left. \left\{ \bar P_{l}(j; z_j, dz_{j+1}) - \bar P_{l}(0; (x_j,\theta),
dz_{j+1}) \right\}
\prod_{k=j+2}^N \bar P_{l}(0; (x_{k-1},\theta), dz_{k}) f(x_1, \cdots, x_N) \right| \\
\leq \sum_{j=1}^{N-1} \int_{\mathsf{X}^j} \bar P_{l}(0; (x,\theta), dz_1) \;
\prod_{k=2}^{j} \bar P_{l}(k-1; z_{k-1}, d z_{k}) \sup_{x\in \mathsf{X}} \|
P_{t_j}( x, \cdot) - P_{\theta}(x, \cdot) \|_{\mathrm{TV}}
\end{multline*}
where we used that
\[
\int_{(\mathsf{X} \times \Theta)^{N-j-1}} \prod_{k=j+2}^N \bar P_{l}(0;
(x_{k-1},\theta), dz_{k}) f(x_1, \cdots, x_N)
\]
is bounded by a function $\Xi(x_{1}, \cdots, x_{j+1})$ that does not depend
upon $t_k, k\leq N$ and for any bounded function $\Xi$ on $\mathsf{X}^{j+1}$
\begin{multline*}
\int_{\mathsf{X} \times \Theta} \left\{ \bar P_{l}(j; z_j, dz_{j+1}) - \bar
P_{l}(0;
(x_j,\theta), dz_{j+1}) \right\} \Xi(x_1, \cdots, x_{j+1}) \\
= \int_{\mathsf{X}} \left\{ P_{t_j}( x_j, dx_{j+1}) - P_{\theta}( x_j, dx_{j+1})
\right\} \Xi(x_1, \cdots, x_{j+1}) \leq \sup_{x\in \mathsf{X}} \| P_{t_j}( x,
\cdot) - P_{\theta}(x, \cdot) \|_{\mathrm{TV}} \ |\Xi|_1\;.
\end{multline*}
Hence
\begin{multline*}
\Delta \leq \sum_{j=1}^{N-1} \mathbb{E}_{x,\theta}^{(l)} \left[ \sup_{x\in \mathsf{X}} \|
P_{\theta_j}(x, \cdot) - P_{\theta_0}(x, \cdot)
\|_{\mathrm{TV}} \right] \\
\leq \sum_{j=1}^{N-1} \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{i=1}^j \sup_{x\in
\mathsf{X}} \| P_{\theta_i}(x, \cdot) - P_{\theta_{i-1}}(x, \cdot) \|_{\mathrm{TV}}
\right] = \sum_{j=1}^{N-1} \sum_{i=1}^j \mathbb{E}_{x,\theta}^{(l)}
\left[D(\theta_i, \theta_{i-1}) \right] \;.
\end{multline*}
\end{proof}
\begin{lemma}
\label{lem:couplingoptimal}
Let $\mu, \nu$ be two probability distributions. There exist a probability
space $(\Omega, \mathcal{F}, \mathbb{P})$ and random variables $X,Y$ on $(\Omega, \mathcal{F})$ such
that $X \sim \mu$, $Y \sim \nu$ and $\mathbb{P}(X = Y) = 1 - \| \mu - \nu \|_\mathrm{TV}$.
\end{lemma}
The proof can be found e.g. in \cite[Proposition 3]{roberts:rosenthal:2004}.
As a consequence of Lemmas~\ref{lem:coupling} and \ref{lem:couplingoptimal}, we
have
\begin{prop}
\label{prop:ContructionCouplingOpt}
Let $l \geq 0, N \geq 2$ and set $z = (x,\theta)$. There exists a process
$\{(X_k, \tilde X_k), 0 \leq k \leq N\}$ defined on a probability space
endowed with the probability $\overline{\mathbb{P}}_{z, z}^{(l)}$ such that
\[
\overline{\mathbb{P}}_{z, z}^{(l)} \left( X_k = \tilde X_k, 0 \leq k \leq N \right) \geq 1 -
\sum_{j=1}^{N-1} \sum_{i=1}^j\mathbb{E}_{z}^{(l)} \left[ D(\theta_i,\theta_{i-1})
\right] \;,
\]
$(X_0, \cdots, X_{N})$ has the $X$-marginal distribution of $\mathbb{P}^{(l)}_{z}$
restricted to the time-interval $\{0, \cdots, N\}$, and $(\tilde X_0, \cdots,
\tilde X_{N})$ has the same distribution as a homogeneous Markov chain with
transition kernel $P_{\theta}$ and initial distribution $\delta_x$.
\end{prop}
\subsubsection{Modulated moments for the adaptive chain}\label{sec:ModMomentsUnif}
Let $V: \mathsf{X} \to [1, +\infty)$ be a measurable function and assume that there
exist $\mathcal{C} \in \mathcal{X}$, positive constants $b,c$ and $0 < \alpha \leq 1$
such that for any $\theta \in \Theta$,
\begin{equation}
\label{eq:A2-A5}
P_\theta V \leq V - c V^{1-\alpha} +b \ensuremath{\mathbbm{1}}_\mathcal{C} \;.
\end{equation}
\begin{lemma}
\label{lem:JarnerRoberts}
Assume (\ref{eq:A2-A5}). There exists $\bar b$ such that for any $0 \leq \beta
\leq 1$, $\theta \in \Theta$: $P_\theta V^\beta \leq V^\beta - \beta c
V^{\beta-\alpha} + \bar b \ensuremath{\mathbbm{1}}_\mathcal{C}$.
\end{lemma}
\begin{proof}
See \cite[Lemma 3.5]{jarneretroberts02}.
\end{proof}
\begin{prop}
Assume (\ref{eq:A2-A5}). For any $l\geq 0$, $(x,\theta) \in \mathsf{X} \times
\Theta$, and any stopping-time $\tau$,
\[
c \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k=0}^{\tau-1} \left(k \alpha c + 1
\right)^{\alpha^{-1}-1} \right] \leq V(x) + b \ \mathbb{E}_{x,\theta}^{(l)} \left[
\sum_{k=0}^{\tau-1} \left((k+1) \alpha c + 1 \right)^{\alpha^{-1}-1}
\ensuremath{\mathbbm{1}}_\mathcal{C}(X_k)\right] \;.
\]
\end{prop}
\begin{proof}
The proof can be adapted from \cite[Proposition 2.1]{doucetal04} and
\cite[Proposition 11.3.2]{meynettweedie93} and is omitted.
\end{proof}
\begin{prop}
\label{prop:ComparaisonGal} Assume (\ref{eq:A2-A5}).
\begin{enumerate}[(i)]
\item \label{prop:CpG1} There exists $\bar b$ such that for any $j \geq 0$,
$0 \leq \beta \leq 1$, $l\geq 0$ and $(x,\theta) \in \mathsf{X} \times \Theta$
\[
\mathbb{E}_{x,\theta}^{(l)} \left[ V^\beta(X_j)\right] \leq V^\beta(x) + \bar b
j^\beta \;.
\]
\item \label{prop:CpG2} Let $0 \leq \beta \leq 1$ and $0 \leq a \leq 1$. For
any stopping-time $\tau$,
\begin{multline*}
\mathbb{E}_{x,\theta}^{(l)} \left[(1-a)^\tau V^\beta(X_{\tau}) \ensuremath{\mathbbm{1}}_{\tau <
+\infty}\right] + \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{j=0}^{\tau-1}
(1-a)^{j} \; \{ a \; V^\beta(X_{j}) + \beta c (1-a)
V^{\beta-\alpha}(X_{j}) \} \right] \\
\leq V^\beta(x) + \bar b (1-a) \mathbb{E}_{x,\theta}^{(l)} \left[
\sum_{j=0}^{\tau-1} (1-a)^{j} \; \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j}) \right] \;.
\end{multline*}
\item \label{prop:CpG3} Let $0 \leq \beta \leq 1-\alpha$ and $0<a<1$. For any
stopping-time $\tau$ and any $q \in [1, +\infty]$,
\begin{multline*}
\mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{j=0}^{\tau-1} (1-a)^{j} V^\beta(X_{j}) \right] \\
\leq a^{1/q-1} (1-a)^{-1/q} \; V^{\beta+\alpha/q}(x) \; \left(1 + \bar b
\; \mathbb{E}_{x,\theta}^{(l)}\left[\sum_{j=0}^{\tau-1} (1-a)^j \ensuremath{\mathbbm{1}}_\mathcal{C}(X_j)
\right] \right) \left( \alpha c \right)^{-1/q} \;,
\end{multline*}
(with the convention that $1/q = 0$ when $q = +\infty$).
\end{enumerate}
\end{prop}
\begin{proof} The proof is done in the case $l=0$. The general case is similar and omitted.
(\ref{prop:CpG1}) is a trivial consequence of Lemma~\ref{lem:JarnerRoberts}.
(\ref{prop:CpG2}) Let $\beta \leq 1$. Set $\tau_N = \tau \wedge N$ and $Y_n
= (1-a)^n V^\beta(X_n)$. Then
\begin{multline*}
Y_{\tau_N} = Y_0 + \sum_{j=1}^{\tau_N} \left(Y_j - Y_{j-1} \right) = Y_0 +
\sum_{j=1}^{\tau_N} (1-a)^{j-1} \; \left((1-a) V^\beta(X_j) -
V^\beta(X_{j-1}) \right) \\
= Y_0 + \sum_{j=1}^{\tau_N} (1-a)^{j} \; \left(V^\beta(X_j) -
V^\beta(X_{j-1}) \right) - a \sum_{j=1}^{\tau_N} (1-a)^{j-1} \;
V^\beta(X_{j-1}) \;.
\end{multline*}
Hence,
\begin{multline*}
\mathbb{E}_{x,\theta} \left[Y_{\tau_N} \right] + a \; \mathbb{E}_{x,\theta} \left[
\sum_{j=0}^{\tau_N-1} (1-a)^{j} \; V^\beta(X_{j}) \right] \\
= V^\beta(x) + \sum_{j\geq 1} (1-a)^{j} \; \mathbb{E}_{x,\theta} \left[
\left(V^\beta(X_j) - V^\beta(X_{j-1})
\right) \ensuremath{\mathbbm{1}}_{j \leq \tau_N} \right] \\
\leq V^\beta(x) + \sum_{j\geq 1} (1-a)^{j} \; \mathbb{E}_{x,\theta} \left[\left( -
\beta c \; V^{\beta-\alpha}(X_{j-1}) + \bar b \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j-1}) \right)
\ensuremath{\mathbbm{1}}_{j \leq \tau_N} \right],
\end{multline*}
where we used Lemma~\ref{lem:JarnerRoberts} in the last inequality. This
implies
\begin{multline*}
\mathbb{E}_{x,\theta} \left[Y_{\tau_N} \right] + a \; \mathbb{E}_{x,\theta} \left[
\sum_{j=0}^{\tau_N-1} (1-a)^{j} \; V^\beta(X_{j}) \right] + (1-a) \beta c
\; \mathbb{E}_{x,\theta} \left[ \sum_{j=0}^{\tau_N-1} (1-a)^{j} \;
V^{\beta-\alpha}(X_{j}) \right] \\
\leq V^\beta(x) + \bar b (1-a) \mathbb{E}_{x,\theta} \left[ \sum_{j=0}^{\tau_N-1}
(1-a)^{j} \; \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j}) \right].
\end{multline*}
The result follows when $N \to +\infty$. \\
(\ref{prop:CpG3}) The previous case provides two upper bounds, namely for $0 <
\beta \leq 1-\alpha$,
\[
a \; \mathbb{E}_{x,\theta} \left[ \sum_{j=0}^{\tau-1} (1-a)^{j} V^\beta(X_{j}) \right]
\leq V^\beta(x) + \bar b \; (1-a) \mathbb{E}_{x,\theta} \left[ \sum_{j=0}^{\tau-1}
(1-a)^{j} \; \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j}) \right],
\]
and
\[
(1-a) \; \left( (\beta +\alpha) c \right) \ \mathbb{E}_{x,\theta} \left[
\sum_{j=0}^{\tau-1} (1-a)^{j} V^{\beta}(X_{j}) \right] \leq
V^{\beta+\alpha}(x) + \bar b \mathbb{E}_{x,\theta} \left[ \sum_{j=0}^{\tau-1}
(1-a)^{j} \; \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j}) \right].
\]
We then use the property $\left[c \leq c_1 \wedge c_2 \right]\Longrightarrow c
\leq c_1^{1/q} c_2^{1-1/q}$ for any $ q \in [1, +\infty]$.
\end{proof}
\begin{prop}
\label{prop:ComparaisonGal2} Assume (\ref{eq:A2-A5}). Let $\{r_n, n\geq 0\}$ be a non-increasing positive sequence. There exists $\bar b$ such that for any $l\geq0$, $(x,\theta) \in \mathsf{X} \times \Theta$, $ 0 \leq \beta \leq 1$ and $n \geq 0$,
\[
\beta c \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k \geq n} r_{k+1}
V^{\beta-\alpha}(X_k) \right] \leq r_n \mathbb{E}_{x,\theta}^{(l)} \left[
V^\beta(X_n)\right] + \bar b \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k \geq n}
r_{k+1} \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k) \right] \;.
\]
\end{prop}
The proof is on the same lines as the proof of
Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG2}) and is omitted.
\subsubsection{Delayed successive visits to an accessible level set of $V$}\label{sec:ReturnTimesUnif}
\label{sec:DelayedSuccVisit}
Let $\mathcal{D} \in \mathcal{X}$ and let $n_\star, N$ be two positive integers. Define on
$(\Omega, \mathcal{F}, \mathbb{P}_{x,\theta}^{(l)})$ the sequence of ${\mathbb{N}}$-valued random
variables $\{\tau^n, n\geq 1 \}$ as
\[
\tau^0 \eqdef \tau_\mathcal{D} \;, \qquad \tau^1 \eqdef \tau^0 + n_\star +
\tau_\mathcal{D} \circ \underline{\theta}^{\tau^0 + n_\star} \;, \qquad \tau^{k+1}
\eqdef \tau^k + N + \tau_\mathcal{D} \circ \underline{\theta}^{\tau^k+N} \;, \ \
k\geq 1 \;.
\]
\begin{prop}
\label{prop:TimeFiniteAS}
Assume A\ref{A2} and there exist $V : \mathsf{X} \to [1,+\infty)$ and a constant $b
< +\infty$ such that for any $\theta \in \Theta$, $P_\theta V \leq V - 1 + b
\ensuremath{\mathbbm{1}}_\mathcal{C}$. Let $\mathcal{D} \in \mathcal{X}$. Let $n_\star, N$ be two non-negative
integers. Then
\[
\varepsilon \ \nu(\mathcal{D}) \ \mathbb{E}_{x,\theta}^{(l)} \left[
\sum_{k=0}^{\tau_\mathcal{D}-1} \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k)\right] \leq 1 \;,
\]
and if $\sup_\mathcal{D} V < +\infty$ and $\nu(\mathcal{D})>0$, there exists a (finite)
constant $C$ depending upon $\varepsilon, \nu(\mathcal{D}), \sup_\mathcal{D} V, b, n_\star,
N$ such that for any $l \geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$ and
$k\geq 0$,
\[
\mathbb{E}_{x,\theta}^{(l)} \left[ \tau^k \right] \leq k \ C + V(x)\;.
\]
\end{prop}
\begin{proof}
Since $V \geq 1$, Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG2})
applied with $a=0$, $\beta=\alpha=1$, $c=1$ and $\tau = \tau_\mathcal{D}$ implies
\[
\mathbb{E}_{x,\theta}^{(l)}\left[ \tau_\mathcal{D} \right] \leq V(x) + \bar b \
\mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k=0}^{\tau_\mathcal{D}-1} \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k)\right]
\;.
\] By A\ref{A2}, we have $P_\theta(x,\mathcal{D}) \geq [\varepsilon \nu(\mathcal{D})] \ \ensuremath{\mathbbm{1}}_\mathcal{C}(x)$ for any $(x,\theta)$ so that
\[
\varepsilon \nu(\mathcal{D}) \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k=0}^{\tau_\mathcal{D}-1}
\ensuremath{\mathbbm{1}}_\mathcal{C}(X_k)\right] \leq \mathbb{E}_{x,\theta}^{(l)} \left[
\sum_{k=0}^{\tau_\mathcal{D}-1} P_{\theta_k}(X_k,\mathcal{D})\right] =
\mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k=0}^{\tau_\mathcal{D}-1} \ensuremath{\mathbbm{1}}_{\mathcal{D}}(X_{k+1})
\right] \leq 1 \;.
\]
Hence $\mathbb{E}_{x,\theta}^{(l)}\left[ \tau_\mathcal{D} \right] \leq V(x) + \bar b[\varepsilon
\nu(\mathcal{D})]^{-1}$. By the Markov property and
Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1})
\begin{multline*}
\mathbb{E}_{x,\theta}^{(l)}\left[ \tau^1 \right] \leq n_\star + V(x) + \bar b
[\varepsilon \nu(\mathcal{D})]^{-1} + \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbb{E}_{Z_{n_\star +
\tau_\mathcal{D}}}^{(n_\star+l+\tau_\mathcal{D})}\left[ \sigma_\mathcal{D}
\right] \right] \\
\leq n_\star + 2 \; \bar b[\varepsilon \nu(\mathcal{D})]^{-1} + V(x) + \sup_\mathcal{D} V
+ n_\star \bar b\;.
\end{multline*}
The proof is by induction on $k$. Assume that $\mathbb{E}_{x,\theta}^{(l)}\left[
\tau^k \right] \leq k C + V(x)$ with $C \geq 2 \bar b[\varepsilon
\nu(\mathcal{D})]^{-1}+ \sup_\mathcal{D} V + (N \vee n_\star)(1+\bar b)$. Then using again the Markov
property and Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}), and upon
noting that $\mathbb{P}_{x,\theta}^{(l)}(Z_{\tau^k} \in \mathcal{D}) =1$,
\begin{multline*}
\mathbb{E}_{x,\theta}^{(l)}\left[ \tau^{k+1} \right] \leq N +
\mathbb{E}_{x,\theta}^{(l)}\left[ \tau^{k} \right]+ \mathbb{E}_{x,\theta}^{(l)}\left[
\mathbb{E}_{Z_{\tau^k+N}}^{(\tau^k+N+l)}\left[ \tau_\mathcal{D} \right] \right] \\
\leq N + \bar b[\varepsilon \nu(\mathcal{D})]^{-1} + \mathbb{E}_{x,\theta}^{(l)}\left[
\tau^{k}
\right]+\mathbb{E}_{x,\theta}^{(l)}\left[ V(X_{\tau^k+N}) \right] \\
\leq N + \bar b[\varepsilon \nu(\mathcal{D})]^{-1} + \mathbb{E}_{x,\theta}^{(l)}\left[
\tau^{k} \right]+\mathbb{E}_{x,\theta}^{(l)}\left[
\mathbb{E}_{Z_{\tau^k}}^{(\tau^k+l)}\left[
V(X_{N}) \right]\right] \\
\leq N + \bar b [\varepsilon \nu(\mathcal{D})]^{-1} + \mathbb{E}_{x,\theta}^{(l)}\left[
\tau^{k} \right]+ \left( \sup_\mathcal{D} V + N \bar b \right) \;.
\end{multline*}
\end{proof}
\subsubsection{Generalized Poisson equation}
\label{sec:GeneralPoisson}
Assume (\ref{eq:A2-A5}). Let $0 < a <1$, $l\geq 0$ and $0\leq \beta \leq
1-\alpha$. For $f \in \mathcal{L}_{V^\beta}$ such that $\pi(|f|) < +\infty$, let us
define the function
\[
\hat g_{a}^{(l)}(x,\theta) \eqdef \sum_{j \geq 0} (1-a)^{j+1} \;
\mathbb{E}_{x,\theta}^{(l)}[\bar f(X_j)] \;.
\]
\begin{prop}
\label{prop:QuasiPoissonEq}
Assume (\ref{eq:A2-A5}). Let $0 \leq \beta \leq 1-\alpha$ and $f \in
\mathcal{L}_{V^\beta}$. For any $(x,\theta) \in \mathsf{X} \times \Theta$, $l \geq
0$ and $0<a<1$, $\hat g_a^{(l)}$ exists, and
\[
\bar f(x) = \frac{1}{1-a} \hat g_a^{(l)}(x,\theta) -\mathbb{E}_{x,\theta}^{(l)} \left[
\hat g_a^{(l+1)}\left(X_1, \theta_1 \right) \right] \;.
\]
\end{prop}
\begin{proof}
By Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}), $\left|
\mathbb{E}_{x,\theta}^{(l)} \left[ \bar f(X_j) \right]\right| \leq |\bar
f|_{V^\beta} \; \left( V^\beta(x) + \bar b j^\beta \right) $. Hence, $ \hat
g_a^{(l)}(x,\theta)$ exists for any $x,\theta,l$. Furthermore, $\hat
g_a^{(l+1)}\left(X_1, \theta_1 \right)$ is $\mathbb{P}_{x,\theta}^{(l)}$-integrable.
By definition of $\hat g_a^{(l)}$ and by the Markov property,
\begin{multline*}
\mathbb{E}_{x,\theta}^{(l)} \left[ \hat g_a^{(l+1)}\left(X_1, \theta_1 \right)
\right] = \sum_{j \geq 0} (1-a)^{j+1} \mathbb{E}_{x,\theta}^{(l)} \left[ \bar
f(X_{j+1}) \right] = (1-a)^{-1}\; \sum_{j
\geq 1} (1-a)^{j+1} \mathbb{E}_{x,\theta}^{(l)} \left[ \bar f(X_{j}) \right] \\
= (1-a)^{-1}\; \left( \hat g_a^{(l)}(x,\theta) - (1-a) \bar f(x) \right).
\end{multline*}
\end{proof}
\begin{theo}
\label{theo:controleG}
Assume A\ref{A2}-\ref{A6} and B\ref{B2}. Let $0 \leq \beta <1-\alpha$. For any
$\epsilon>0$, there exists an integer $n \geq 2$ such that for any $0<a<1$, $f
\in \mathcal{L}_{V^\beta}$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$ and $q
\in[1, +\infty]$,
\begin{multline*}
\left( |\bar f|_{V^\beta} \right)^{-1} \; \left| \hat g_{a}^{(l)}(x,\theta)
\right| \leq 4 \; \epsilon \; \left(1-(1-a)^n \right)^{-1} \; n \\
+ \frac{V^{\beta+\alpha/q}(x)}{a^{1-1/q}(1-a)^{1/q}} (\alpha c)^{-1/q}\;
\left( 1+ \bar b [\varepsilon \nu(\mathcal{D})]^{-1} + 2 \; (1+\bar b n_\star)
(1+\bar b) \ \sup_\mathcal{D} V^{\beta +\alpha/q} \right) \;.
\end{multline*}
By convention, $1/q =0$ when $q = +\infty$. In particular, $\lim_{a \to 0}
\left( |\bar f|_{V^\beta} \right)^{-1} \; \left| a\hat g_{a}^{(l)}(x,\theta)
\right| = 0$.
\end{theo}
\begin{rem}\label{remtheoG}
Before dwelling into the proof of the theorem, we first make two important remarks. Firstly, a simplified restatement of Theorem \ref{theo:controleG} is the following. There exists a finite constant $c_0$ such that
for any $0<a\leq 1/2$, $f \in \mathcal{L}_{V^\beta}$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times
\Theta$ and $q \in[1, +\infty]$,
\begin{equation}
\label{eq:MajoRem}
\left| \hat g_{a}^{(l)}(x,\theta)\right|\leq c_0 |\bar f|_{V^\beta} \ a^{-1} \left(1+a^{1/q} V^{\beta+\alpha/q}(x)\right).\end{equation}
This follows by taking $\epsilon=1$, say, and upon noting that
$n\left(1-(1-a)^n\right)^{-1}\leq 2^{n-1}/a$.
The second point is that if we take $a_1,a_2\in (0,1)$ we can write
\[
\hat g_{a_1}^{(l)}(x,\theta)-\hat g_{a_2}^{(l)}(x,\theta)=\frac{a_2-a_1}{(1-a_1)(1-a_2)}\times
\sum_{k\geq 0}(1-a_1)^{k+1}\mathbb{E}_{x,\theta}^{(l)}\left[ \hat
g_{a_2}^{(l+k)}(X_k, \theta_k)\right] \;.
\]
By (\ref{eq:MajoRem}) and Proposition \ref{prop:ComparaisonGal}
(\ref{prop:CpG3}), it holds
\begin{equation}\label{bounddiffGa}
\left|\hat g_{a_1}^{(l)}(x,\theta)-\hat g_{a_2}^{(l)}(x,\theta)\right|\leq c_1 \ |\bar f|_{V^\beta} \ |a_2-a_1|a_2^{-1}a_1^{-2+1/q}V^{\beta+\alpha/q}(x),\end{equation}
for some finite constant $c_1$, for all $0<a_1,a_2\leq 1/2$, $f \in \mathcal{L}_{V^\beta}$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times
\Theta$ and $q \in[1, +\infty]$.
\end{rem}
\begin{proof}
Let $\epsilon>0$. Let us consider the sequence of stopping times $\{\tau^k,
k \geq 0\}$ defined in Section~\ref{sec:DelayedSuccVisit} where $(\mathcal{D}, N,
n_\star)$ are defined below.
\paragraph{\tt Choice of $\mathcal{D}, N, n_\star$.}
Choose a level set $\mathcal{D}$ of $V$ large enough so that $\nu(\mathcal{D})>0$. Choose
$N$ such that
\begin{equation}
\label{eq:Controle3}
\frac{1}{N} \; \sum_{j=0}^{N-1} \sup_{\mathcal{D} \times \Theta} \; \| P_\theta^j(x,\cdot) -
\pi(\cdot) \|_{V^\beta} \leq \epsilon \;,
\end{equation}
the existence of which is given by A\ref{A6}; and such that - since $\alpha +
\beta <1$, -
\begin{equation}
\label{eq:Controle4}
(\alpha c)^{-1} \ N^{-1} \left( \sup_\mathcal{D} V^{\beta +\alpha} + \bar b N^{\beta +\alpha} + \bar b [\varepsilon \nu(\mathcal{D})]^{-1} \right) \leq \epsilon \;.
\end{equation}
Set $\epsilon_N \eqdef N^{-2} \{ \epsilon \; \left( \sup_\mathcal{D} V^\beta + \bar
b N^{-1} \sum_{j=1}^{N-1} j^\beta \right)^{-1} \}^{1/(1-\beta)}$ (which can
be assumed to be strictly lower than $N^{-2}$ since $\beta>0$). By
B\ref{B2}, choose $n_\star$ such that for any $q \geq n_\star$, $l\geq 0$, $
\sup_{\mathcal{D} \times \Theta} \mathbb{P}_{x,\theta}^{(l)}(D(\theta_q,\theta_{q-1}) \geq
\epsilon_N/2) \leq \epsilon_N/4$.
By Proposition~\ref{prop:TimeFiniteAS}, $\mathbb{P}_{x,\theta}^{(l)}(\tau^k <
+\infty) =1$ for any $(x,\theta) \in \mathsf{X} \times \Theta$, $l \geq 0$, $k\geq
0$.
\paragraph{\tt Optimal coupling.}
With these definitions, $ \sup_{i \geq 1} \sup_{k \geq 1} \mathbb{E}_{x,\theta}^{(l)}
\left[ \mathbb{E}_{Z_{\tau^k}}^{(\tau^k+l)} \left[ D(\theta_i,\theta_{i-1}) \right]
\right]\leq \epsilon_N$, upon noting that $\mathbb{P}_{x,\theta}^{(l)}( n_\star \leq
\tau^k) =1$ and $D(\theta,\theta') \leq 2$. We apply
Proposition~\ref{prop:ContructionCouplingOpt} and set $\mathcal{E}_N \eqdef
\{X_k = \tilde X_k, 0 \leq k<N \}$. We have for any $l \geq 0$, $k \geq 1$,
$(x,\theta) \in \mathsf{X} \times \Theta$,
\begin{equation}
\label{eq:CouplingProbability2}
\mathbb{E}_{x,\theta}^{(l)}\left[ \overline{\mathbb{P}}_{Z_{\tau^k}, Z_{\tau^k}}^{(\tau^k+l)} \left( \mathcal{E}^c_N \right) \right] \leq
\sum_{j=1}^{N-1} \sum_{i=1}^j \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbb{E}_{Z_{\tau^k}}^{(\tau^k+l)} \left[ D(\theta_i,\theta_{i-1}) \right] \right] \leq N^2 \epsilon_N < 1 \;.
\end{equation}
Observe that $\mathcal{D},N$ and $n_\star$ do not depend upon $a,l,x,\theta$ and
$f$.
\paragraph{\tt Proof of Theorem~\ref{theo:controleG}.}
Assume that for any $0<a<1$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$
and $k \geq 2$,
\begin{equation}
\label{eq:ResultPropAdaptive2}
\left| \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{j=0}^{N-1} (1-a)^{\tau^k+ j+1} \; \bar f
\left( X_{\tau^{k}+j} \right)\right] \right|\leq |\bar f|_{V^\beta} \; 3 N
\epsilon \; (1-a)^{n_\star+(k-1)N} \;.
\end{equation}
We have
\[
\hat g_{a}^{(l)}(x,\theta) = \sum_{j \geq 0} (1-a)^{j+1} \left\{
\mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j) \ensuremath{\mathbbm{1}}_{j < \tau^1}\right] + \sum_{k \geq
1} \mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j) \ensuremath{\mathbbm{1}}_{\tau^k \leq j <
\tau^{k+1}}\right] \right\} \;.
\]
On one hand, by Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG3}) applied
with $\tau = \tau_\mathcal{D}$ and Proposition~\ref{prop:TimeFiniteAS},
\begin{multline*}
\left| \sum_{j \geq 0} (1-a)^{j+1} \mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j)
\ensuremath{\mathbbm{1}}_{j < \tau^0}\right] \right| = \left|
\mathbb{E}_{x,\theta}^{(l)}\left[\sum_{j = 0}^{\tau_\mathcal{D}-1} (1-a)^{j+1}
\bar f(X_j) \right] \right| \\
\leq |\bar f|_{V^\beta} \; \mathbb{E}_{x,\theta}^{(l)}\left[\sum_{j =
0}^{\tau_\mathcal{D}-1} (1-a)^{j+1} V^\beta(X_j) \right] \leq |\bar
f|_{V^\beta} \; \frac{V^{\beta+\alpha/q}(x)}{a^{1-1/q}} \frac{\left( 1 + \bar
b [\varepsilon \nu(\mathcal{D})]^{-1}\right)}{(1-a)^{1/q}} (\alpha
c)^{-1/q}\;.
\end{multline*}
Applied with $\tau = \tau_\mathcal{D}$,
Propositions~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) and (\ref{prop:CpG3})
and \ref{prop:TimeFiniteAS} yield
\begin{multline*}
|\bar f|_{V^\beta}^{-1} \; \left| \sum_{j \geq 0} (1-a)^{j+1}
\mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j) \ensuremath{\mathbbm{1}}_{ \tau^0 \leq j < \tau^1}\right]
\right| = |\bar f|_{V^\beta}^{-1} \; \left| \mathbb{E}_{x,\theta}^{(l)}\left[
\sum_{j = \tau_\mathcal{D} }^{\tau_\mathcal{D} +n_\star +\tau_\mathcal{D} \circ
\underline{\theta}^{n_\star + \tau_\mathcal{D}} -1} (1-a)^{j+1} \bar f(X_j)
\right]\right|
\\
\leq \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbb{E}_{Z_{\tau_\mathcal{D}}}^{(\tau_\mathcal{D}+l)}\left[
\sum_{j
=0}^{n_\star+\tau_\mathcal{D} \circ \underline{\theta}^{n_\star}-1} (1-a)^{j+1} V^\beta(X_j) \right]\right] \\
\leq \mathbb{E}_{x,\theta}^{(l)}\left[
\mathbb{E}_{Z_{\tau_\mathcal{D}}}^{(\tau_\mathcal{D}+l)}\left[\sum_{j =0}^{n_\star-1}
(1-a)^{j+1} V^\beta(X_j) \right]\right] + \mathbb{E}_{x,\theta}^{(l)}\left[
\mathbb{E}_{Z_{\tau_\mathcal{D}+n_\star}}^{(\tau_\mathcal{D}+n_\star+l)}\left[\sum_{j
=0}^{\tau_\mathcal{D}-1} (1-a)^{j+1} V^\beta(X_j) \right]\right] \\
\leq 2 \; \frac{(1+\bar b n_\star) (1+\bar b)}{a^{1-1/q} (1-a)^{1/q}} (\alpha
c)^{-1/q} \ \sup_\mathcal{D} V^{\beta +\alpha/q} \;.
\end{multline*}
For $k \geq 1$,
\begin{multline*}
\left|\sum_{j \geq 0} (1-a)^{j+1} \; \mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j)
\ensuremath{\mathbbm{1}}_{\tau^k \leq j < \tau^{k+1}}\right] \right| \leq \left|
\mathbb{E}_{x,\theta}^{(l)}\left[\sum_{j = \tau^k}^{\tau^k + N-1} (1-a)^{j+1} \;
\bar
f(X_j)\right] \right| \\
+ \mathbb{E}_{x,\theta}^{(l)}\left[ (1-a)^{\tau^k+N} \;
\mathbb{E}_{Z_{\tau^k+N}}^{(\tau^k+N+l)}\left[ \sum_{j=0}^{\tau_\mathcal{D} -1}
(1-a)^{j+1} \; \left|\bar f\right|(X_j) \right]\right]\;.
\end{multline*}
By Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) and (\ref{prop:CpG2})
applied with $\tau = \tau_\mathcal{D}$, Proposition~\ref{prop:TimeFiniteAS} and Eq.
(\ref{eq:ResultPropAdaptive2}), and upon noting that $\tau^k \geq n_\star +
(k-1) N$ $\mathbb{P}_{(x,\theta)}^{(l)}$-\text{a.s.},
\begin{multline*}
\left| \sum_{j \geq 0} (1-a)^{j+1} \; \mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j)
\ensuremath{\mathbbm{1}}_{\tau^k
\leq j < \tau^{k+1}}\right]\right| \\
\leq |\bar f|_{V^\beta} \; \mathbb{E}_{x,\theta}^{(l)}\left[ (1-a)^{n_\star+(k-1)N}
\; \left(
3 N \epsilon + (1-a)^{N} \{ V^{\beta +\alpha}(X_{\tau^k+N}) + \bar b [\varepsilon \nu(\mathcal{D})]^{-1} \} (\alpha c)^{-1}\right) \right] \\
\leq |\bar f|_{V^\beta} \; (1-a)^{n_\star+(k-1)N} \; \left( 3 N \epsilon +
(\alpha c)^{-1} \ \sup_{r,\mathcal{D} \times \Theta} \mathbb{E}_{x,\theta}^{(r)}\left[
V^{\beta
+\alpha}(X_{N}) + \bar b [\varepsilon \nu(\mathcal{D})]^{-1} \right]\right) \\
\leq |\bar f|_{V^\beta} \; (1-a)^{n_\star+(k-1)N} \; \left( 3 N \epsilon +
(\alpha c)^{-1} \left( \sup_{\mathcal{D}} \ V^{\beta +\alpha} + \bar b N^{\beta +\alpha}+ \bar b [\varepsilon \nu(\mathcal{D})]^{-1} \right)\right) \\
\leq 4 \; \epsilon \; |\bar f|_{V^\beta} \; (1-a)^{(k-1)N} \; N \;,
\end{multline*}
where we used the definition of $N$ (see Eq.~(\ref{eq:Controle4})) and
Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}). This yields the desired
result.
\paragraph{\tt Proof of Eq.(\ref{eq:ResultPropAdaptive2})}
By the strong Markov property and since $\tau^k \geq n_\star + N(k-1)$
$\mathbb{P}_{x,\theta}^{(l)}$-\text{a.s.}
\begin{multline*}
\left| \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{j=0}^{N-1} (1-a)^{\tau^k+ j+1} \;
\bar f \left( X_{\tau^{k}+j} \right)\right] \right| \leq (1-a)^{n_\star+
N(k-1)} \mathbb{E}_{x,\theta}^{(l)} \left[ \left|\mathbb{E}_{Z_{\tau^{k}}}^{(\tau^{k}+l)}
\left[ \sum_{j=0}^{N-1} (1-a)^{j+1} \; \bar f(X_j) \right]\right|\right].
\end{multline*}
Furthermore, by Proposition~\ref{prop:ContructionCouplingOpt},
\begin{multline*}
\mathbb{E}_{Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ \sum_{j=0}^{N-1} (1-a)^{j+1} \; \bar
f(X_j) \right] = \overline{\mathbb{E}}_{Z_{\tau^k}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[
\sum_{j=0}^{N-1} (1-a)^{j+1} \; \bar f(X_j) \right]
\\
= \overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ \sum_{j=0}^{N-1}
(1-a)^{j+1} \; \bar f(\tilde X_j) \right] \ + \overline{\mathbb{E}}_{Z_{\tau^{k}},
Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ \sum_{j=0}^{N-1} (1-a)^{j+1} \; \{ \bar
f( X_j) - \bar f(\tilde X_j) \} \ensuremath{\mathbbm{1}}_{\mathcal{E}^c_N}\right].
\end{multline*}
On one hand, we have $ \mathbb{P}_{x,\theta}^{(l)}-\text{a.s.} $,
\begin{multline*}
\left|\overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[
\sum_{j=0}^{N-1} (1-a)^{j+1} \; \bar f(\tilde X_j) \right] \right| \leq
|\bar f|_{V^\beta} \; \sum_{j=0}^{N-1} (1-a)^{j+1} \; \sup_{\mathcal{D} \times \Theta}
\; \| P_{\theta}^j(x,\cdot) -\pi(\cdot) \|_{V^\beta} \leq |\bar f|_{V^\beta}
\; N \epsilon
\end{multline*}
by (\ref{eq:Controle3}). On the other hand, $ \mathbb{P}_{x,\theta}^{(l)}-\text{a.s.} $,
\begin{multline*}
\left| \overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[
\sum_{j=0}^{N-1} (1-a)^{j+1} \; \{ \bar f( X_j) - \bar f(\tilde X_j) \}
\ensuremath{\mathbbm{1}}_{\mathcal{E}^c_N}\right] \right| \\
\leq |\bar f|_{V^\beta} \; \overline{\mathbb{E}}_{Z_{\tau^{k}},Z_{\tau^{k}}}^{(\tau^{k}+l)}
\left[ \sum_{j=0}^{N-1} (1-a)^{j+1} \; \{ V^\beta( X_j) + V^\beta(\tilde X_j)
\}
\ensuremath{\mathbbm{1}}_{\mathcal{E}^c_N}\right] \\
\leq |\bar f|_{V^\beta} \; \overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)}
\left[ \left( \sum_{j=0}^{N-1} (1-a)^{j+1} \; \left\{ V^\beta ( X_j) +
V^\beta(\tilde X_j) \right\} \right)^{\beta^{-1}}\right]^{\beta} \left(
\overline{\mathbb{P}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left(\mathcal{E}^c_N \right)
\right)^{1-\beta}
\end{multline*}
by Jensen's inequality ($\beta <1$). By the Minkowski inequality, by
Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}), and by iterating the
drift inequality A\ref{A5}
\begin{multline*}
\overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ \left(
\sum_{j=0}^{N-1} (1-a)^{j+1} \; \left\{ V^\beta ( X_j) + V^\beta(\tilde
X_j) \right\}
\right)^{\beta^{-1}}\right]^{\beta} \\
\leq \sum_{j=0}^{N-1} (1-a)^{j+1} \; \left\{ \overline{\mathbb{E}}_{Z_{\tau^{k}},
Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ V ( X_j) \right]^\beta +
\overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ V ( \tilde X_j)
\right]^\beta \right\}
\\
\leq \sum_{j=0}^{N-1} (1-a)^{j+1} \; \left\{ \sup_{l, \mathcal{D} \times \Theta}
\left(\mathbb{E}_{x,\theta}^{(l)} \left[ V(X_j)\right]\right)^\beta + \left(
\sup_{\mathcal{D} \times \Theta} P^j_{\theta} V(x)\right)^\beta \right\}
\\
\leq 2 \; \sum_{j=0}^{N-1} (1-a)^{j+1} \left( \sup_\mathcal{D} V +j \bar b
\right)^\beta \leq 2 N \left( \sup_\mathcal{D} V^\beta + \bar b N^{-1}
\sum_{j=1}^{N-1} j^\beta\right) \;.
\end{multline*}
Finally,
\[
\mathbb{E}_{x,\theta}^{(l)} \left[ \left( \overline{\mathbb{P}}_{Z_{\tau^k},
Z_{\tau^k}}^{(\tau^k+l)}(\mathcal{E}^c_N) \right)^{1-\beta} \right] \leq
\left(\mathbb{E}_{x,\theta}^{(l)} \left[ \overline{\mathbb{P}}_{Z_{\tau^k},
Z_{\tau^k}}^{({\tau^k}+l)}(\mathcal{E}^c_N) \right]\right)^{1-\beta} \leq
\left( N^2 \epsilon_N \right)^{1-\beta}
\]
where we used (\ref{eq:CouplingProbability2}) in the last inequality. To
conclude the proof, use the definition of $\epsilon_N$.
\end{proof}
\subsection{Proof of Theorem~\ref{theo:MarginalUnifCase}}
Let $\epsilon >0$. We prove that there exists $n_\epsilon$ such that for any
$n\geq n_\epsilon$, $\sup_{\{f, |f|_1 \leq 1\}} \left|\mathbb{E}_{\xi_1,\xi_2}\left[
\bar f(X_n) \right] \right| \leq \epsilon$.
\subsubsection{Definition of $\mathcal{D}$, $N$, $Q$ and $n_\star$}
By A\ref{A-VCset}(\ref{Anew}), choose $Q$ such that
\begin{equation}
\label{eq:DefinitionL}
\sup_l \sup_{(x,\theta) \in \mathcal{C} \times \Theta} \mathbb{E}_{x,\theta}^{(l)} \left[ \mathbf{r}(\tau_\mathcal{C}) \right] \ \sum_{k \geq Q} \frac{1}{\mathbf{r}(k)} \leq \epsilon \;.
\end{equation}
By A\ref{A-VCset}(\ref{A4rev}), choose $N$ such that
\begin{equation}
\label{eq:DefinitionN}
\sup_{(x, \theta) \in \mathcal{C} \times \Theta} V^{-1}(x) \ \| P_\theta^N(x,\cdot) - \pi(\cdot) \|_{\mathrm{TV}} \leq \frac{\epsilon}{Q} \;.
\end{equation}
By B\ref{B1}, choose $n_\star$ such that for any $n \geq n_\star$,
\begin{equation}
\label{eq:DefiNstar}
\mathbb{P}_{\xi_1,\xi_2} \left( D(\theta_n, \theta_{n-1}) \geq \epsilon /(2 (N+Q-1)^2 Q)\right) \leq \frac{\epsilon}{4(N+Q-1)^2 Q } \;.
\end{equation}
\subsubsection{Optimal coupling}
We apply Proposition~\ref{prop:ContructionCouplingOpt} with $l =0$ and $N
\leftarrow N+Q$. Set $\mathcal{E}_{N +Q} \eqdef \{X_k = \tilde X_k, 0 \leq k
\leq N +Q \}$. It holds for any $ r \geq n_\star$,
\begin{multline}
\label{eq:CouplingProb}
\mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_r \in \mathcal{C}} \; \overline{\mathbb{P}}_{Z_{r}, Z_r}^{(r)} \left(
\mathcal{E}_{N+Q}^c \right) \right] \leq \sum_{j=1}^{N+Q-1} \sum_{i=1}^j
\mathbb{E}_{\xi_1,\xi_2}\left[\ensuremath{\mathbbm{1}}_{X_r \in \mathcal{C}} \; \mathbb{E}_{Z_r}^{(r)}\left[D(\theta_i,
\theta_{i-1}) \right] \right] \\
\leq \sum_{j=1}^{N+Q-1} \sum_{i=1}^j \mathbb{E}_{\xi_1,\xi_2}\left[ D(\theta_{i+r},
\theta_{i+r-1}) \right] \leq \epsilon Q^{-1} \;,
\end{multline}
where in the last inequality, we use that $D(\theta,\theta') \leq 2$ and the
definition of $n_\star$ (see Eq.~(\ref{eq:DefiNstar})).
\subsubsection{Proof}
Let $n \geq N+Q+n_\star$. We consider the partition given by the last exit from
the set $\mathcal{C}$ before time $n-N$. We use the notation $\{X_{n:m} \notin \mathcal{C}
\}$ as a shorthand notation for $\bigcap_{k=n}^m \{X_k \notin \mathcal{C} \}$; with
the convention that $\{X_{m+1:m} \notin \mathcal{C} \} = \Omega$. We write
\begin{multline*}
\mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \right] = \mathbb{E}_{\xi_1,\xi_2} \left[ \bar
f(X_n) \ensuremath{\mathbbm{1}}_{X_{0:n-N} \notin \mathcal{C}} \right] + \sum_{k=0}^{n-N}
\mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C} } \ \ensuremath{\mathbbm{1}}_{X_{k+1:n-N}
\notin \mathcal{C}} \right] \;.
\end{multline*}
Since $\bar f$ is bounded on $\mathsf{X}$ by $|\bar f|_1$, we have
\[
\mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_{0:n-N} \notin \mathcal{C}} \right] \leq
|\bar f|_1 \ \mathbb{P}_{\xi_1, \xi_2} \left( \tau_\mathcal{C} \geq n-N\right) \leq |\bar
f|_1 \ \mathbb{E}_{\xi_1, \xi_2} \left[ \frac{\tau_\mathcal{C} }{n-N} \wedge 1 \right] \;.
\]
The rhs is upper bounded by $|\bar f|_1 \ \epsilon$ for $n$ large enough. By
definition of $Q$ in (\ref{eq:DefinitionL}),
\begin{multline}
\label{eq:weaken}
\sum_{k=0}^{n-(N+Q)} \mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}
} \ \ensuremath{\mathbbm{1}}_{X_{k+1:n-N} \notin \mathcal{C}} \right] \leq |\bar f|_1 \
\sum_{k=0}^{n-(N+Q)} \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C} }
\mathbb{P}_{X_k,\theta_k}^{(k)} \left( \tau_\mathcal{C} \geq
n-N-k\right) \ \right] \\
\leq |\bar f|_1 \ \sup_l \sup_{\mathcal{C} \times \Theta} \mathbb{E}_{x,\theta}^{(l)} \left[
\mathbf{r}(\tau_\mathcal{C}) \right] \sum_{k \geq Q} \frac{1}{\mathbf{r}(k)} \leq |\bar
f|_1 \ \epsilon \;.
\end{multline}
Let $k \in \{n-(N+Q)+1, \cdots, n-N \}$. By definition of $N$ and $n_\star$
(see Eqs.~(\ref{eq:DefinitionN}) and (\ref{eq:DefiNstar})), upon noting that $k
\geq n-(N+Q) \geq n_\star$,
\begin{multline*}
\mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C} } \ \ensuremath{\mathbbm{1}}_{X_{k+1:n-N}
\notin \mathcal{C}} \right] - |\bar f|_1 \ \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k
\in \mathcal{C}} \ \overline{\mathbb{P}}_{Z_k,Z_k}^{(k)} \left(
\mathcal{E}_{N+Q}^c\right) \right] \\
\leq \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}} \ \overline{\mathbb{E}}_{Z_k,Z_k}^{(k)}
\left[ \bar f(X_{n-k}) \ensuremath{\mathbbm{1}}_{X_{1:n-N-k} \notin \mathcal{C}}
\ensuremath{\mathbbm{1}}_{\mathcal{E}_{N+Q}} \right] \right] \\
\leq \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}} \ \overline{\mathbb{E}}_{Z_k,Z_k}^{(k)}
\left[ \bar f(\tilde X_{n-k}) \ensuremath{\mathbbm{1}}_{\tilde X_{1:n-N-k} \notin \mathcal{C}}
\ensuremath{\mathbbm{1}}_{\mathcal{E}_{N+Q}} \right] \right] \\
\leq \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}} \ \overline{\mathbb{E}}_{Z_k,Z_k}^{(k)}
\left[ \bar f(\tilde X_{n-k}) \ensuremath{\mathbbm{1}}_{\tilde X_{1:n-N-k} \notin \mathcal{C}} \right]
\right] + |\bar f|_1 \ \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}} \
\overline{\mathbb{P}}_{Z_k,Z_k}^{(k)} \left(\mathcal{E}_{N+Q}^c \right) \right] \\
\leq \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}} \ \overline{\mathbb{E}}_{Z_k,Z_k}^{(k)}
\left[ \ensuremath{\mathbbm{1}}_{\tilde X_{1:n-N-k} \notin \mathcal{C}} P_{\theta_k}^{N}\bar f(\tilde
X_{n-N-k}) \right] \right] + |\bar f|_1 \ \epsilon Q^{-1} \\
\leq |\bar f|_1 \ \epsilon Q^{-1} \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in
\mathcal{C}} \ \overline{\mathbb{E}}_{Z_k,Z_k}^{(k)}\left[ \ensuremath{\mathbbm{1}}_{\tilde X_{1:n-N-k} \notin \mathcal{C}}
V(\tilde X_{n-N-k}) \right] \right] + |\bar f|_1 \ \epsilon Q^{-1}
\\
\leq |\bar f|_1 \ \epsilon Q^{-1} \left\{ \sup_{(x,\theta) \in \mathcal{C} \times
\Theta} P_\theta V(x) + \sup_\mathcal{C} V \right\} + |\bar f|_1 \ \epsilon
Q^{-1} \;,
\end{multline*}
where we used A\ref{A-VCset}(\ref{A3rev}) in the last inequality. Hence,
\[
\sum_{k=n-(N+Q)+1}^{n-N} \mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_k \in
\mathcal{C} } \ \ensuremath{\mathbbm{1}}_{X_{k+1:n-N} \notin \mathcal{C}} \right] \leq \left(1 +
\sup_{(x,\theta) \in \mathcal{C} \times \Theta} P_\theta V(x) + \sup_\mathcal{C} V \right) \epsilon \
|\bar f|_1 \;.
\]
This concludes the proof.
\begin{rem}
\label{rem:YanBai}
In the case the process is non-adaptive, we can assume w.l.o.g. that it
possesses an atom $\alpha$; in that case, the lines (\ref{eq:weaken}) can be
modified so that the assumption $\sum_n \{1/\mathbf{r}(n) \}<+\infty$ can be
removed. In the case of an atomic chain, we can indeed apply the above
computations with $\mathcal{C}$ replaced by $\alpha$ and write:
\begin{multline*}
\sum_{k=0}^{n-(N+Q)} \mathbb{E}_{\xi_1} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_k \in \alpha } \
\ensuremath{\mathbbm{1}}_{X_{k+1:n-N} \notin \alpha} \right] \leq |\bar f|_1 \
\sum_{k=0}^{n-(N+Q)} \mathbb{P}_{\alpha} \left( \tau_\alpha \geq n-N-k\right) \\
\leq |\bar f|_1 \ \sum_{k \geq Q} \mathbb{P}_{\alpha} \left( \tau_\alpha \geq
k\right) \;.
\end{multline*}
The rhs is small for convenient $Q$, provided
$\mathbb{E}_\alpha[\mathbf{r}(\tau_\alpha)]<+\infty$ with $\mathbf{r}(n) =n$. Unfortunately,
the adaptive chain $\{(X_n, \theta_n), n\geq 0\}$ does not possess an atom thus
explaining the condition on $\mathbf{r}$.
\end{rem}
\subsection{Proof of Corollary~\ref{coro:MarginalUnifCase}}
The condition A\ref{A-VCset}(\ref{A4rev}) is established in
Appendix~\ref{app:UniformControl}. Let a level set $\mathcal{D}$ large enough such
that $\nu(\mathcal{D}) >0$; then Proposition~\ref{prop:TimeFiniteAS} implies that
there exists a constant $c < \infty$ such that for any $l \geq 0$,
$\mathbb{E}_{x,\theta}^{(l)}\left[ \tau_\mathcal{D} \right] \leq c V(x)$. This implies that
for $0< \eta \leq 1- \alpha$,
\begin{multline*}
\mathbb{E}_{x,\theta}^{(l)}\left[ \sum_{k=0}^{\tau_\mathcal{D}} (k+1)^{\eta} \right] \leq
\mathbb{E}_{x,\theta}^{(l)}\left[ \sum_{k=0}^{\tau_\mathcal{D}} \left(\mathbb{E}_{X_k, \theta_k}^{(k+l)}
\left[ \tau_\mathcal{D} \right] \right)^{\eta} \right] \leq c^\eta \
\mathbb{E}_{x,\theta}^{(l)}\left[ \sum_{k=0}^{\tau_\mathcal{D}} V^{1-\alpha}(X_k) \right] \\
\leq C \ \left( V(x) + b \ \mathbb{E}_{x,\theta}^{(l)}\left[ \tau_\mathcal{D} \right] \right)
\leq C' \ V(x) \;,
\end{multline*}
for some finite constants $C,C'$ independent of $\theta$. Hence
A\ref{A-VCset}(\ref{Anew}) holds with $\mathbf{r}(n) \sim n^{1+\eta}$. Finally,
$P_\theta V \leq V - c V^{1-\alpha} +b \ensuremath{\mathbbm{1}}_\mathcal{C}$ implies $P_\theta V \leq V - c
\gamma V^{1-\alpha} + b \ensuremath{\mathbbm{1}}_\mathcal{D}$ for any $\gamma \in (0,1)$ and the level set
$\mathcal{D} \eqdef \{x, V^{1-\alpha} \leq b [c(1-\gamma)]^{-1} \}$. This yields A\ref{A-VCset}(\ref{A3rev}).
\subsection{Proof of Proposition~\ref{prop:YanBai}}
Under A\ref{Adrift}, there exists a constant $C$ - that does not depend upon
$\theta$ - such that for any $(x,\theta) \in \mathsf{X} \times \Theta$, $n\geq 0$
and $\kappa \in [1, \alpha^{-1}]$,
\[
\ \| P^n_\theta(x,\cdot) - \pi(\theta) \|_\mathrm{TV} \leq C \frac{
V^{\kappa \alpha}(x)}{(n+1)^{\kappa-1}} \;;
\]
(see Appendix~\ref{app:UniformControl}). To apply \cite[Theorem
13]{rosenthaletroberts05}, we only have to prove that there exists $\kappa \in
[1, \alpha^{-1}]$ such that the sequence $\{V^{\kappa \alpha}(X_n); n\geq 0\}$
is bounded in probability, which is equivalent to proving that $\{V^\beta(X_n);
n\geq 0\}$ is bounded in probability for some (and thus any) $\beta \in (0,1]$
. This is a consequence of Lemma~\ref{lem:YanBai} applied with $W = V^\beta$
for some $\beta \in (0,1]$ and $\mathbf{r}(n) = (n+1)^{1+\eta}$ for some $\eta>0$
(see the proof of Corollary~\ref{coro:MarginalUnifCase} for similar computations).
\begin{lemma}
\label{lem:YanBai}
Assume that there exist a set $\mathcal{C}$ and functions $W: \mathsf{X} \to (0, +\infty)$
and $\mathbf{r} : {\mathbb{N}} \to (0, +\infty)$ such that $\mathbf{r}$ is non-decreasing,
$P_\theta W \leq W $ on $\mathcal{C}^c$ and
\[
\sup_{\mathcal{C} \times \Theta} P_\theta W< +\infty \;, \qquad \qquad \sup_l
\sup_{\mathcal{C}\times \Theta } \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbf{r}(\tau_\mathcal{C}) \right]
< +\infty \;, \qquad \qquad \sum_k \{1/\mathbf{r}(k) \} < +\infty \;.
\]
For any probability distributions $\xi_1, \xi_2$ resp. on $\mathsf{X},\Theta$
$\{W(X_n), n\geq 0 \}$ is bounded in probability for the probability
$\mathbb{P}_{\xi_1,\xi_2}$.
\end{lemma}
\begin{proof}
Let $\epsilon >0$. We prove that there exists $M_\epsilon, N_\epsilon$ such
that for any $M \geq M_\epsilon$ and $n \geq N_\epsilon$, $\mathbb{P}_{x,\theta}\left(
W(X_n) \geq M \right) \leq \epsilon$. Choose $N_\epsilon$ such that for any
$n \geq N_\epsilon$
\[
\mathbb{E}_{\xi_1,\xi_2}\left[\frac{ \tau_{\mathcal{C}}}{n} \wedge 1 \right] \leq
\epsilon/3\;, \qquad \qquad \sup_l \sup_{\mathcal{C} \times \Theta}
\mathbb{E}_{x,\theta}^{(l)}\left[ \mathbf{r}(\tau_\mathcal{C}) \right]\ \sum_{k \geq n}
\{1/\mathbf{r}(k) \} \leq \epsilon/3 \;,
\]
and choose $M_\epsilon $ such that for any $M \geq M_\epsilon$,
$ N_\epsilon \ \sup_{\mathcal{C} \times \Theta} P_\theta W \leq \epsilon M /3$. We write
\[
\mathbb{P}_{\xi_1,\xi_2}\left( W(X_n) \geq M \right) = \sum_{k=0}^{n-1} \mathbb{P}_{\xi_1,\xi_2}
\left( W(X_n) \geq M,X_k \in \mathcal{C}, X_{k+1:n} \notin \mathcal{C} \right) +
\mathbb{P}_{\xi_1,\xi_2} \left( W(X_n) \geq M, X_{0:n} \notin \mathcal{C} \right) \;.
\]
By the Markov inequality, for $n \geq N_\epsilon$,
\[
\mathbb{P}_{\xi_1,\xi_2}\left( W(X_n) \geq M , X_{0:n} \notin \mathcal{C} \right) \leq
\mathbb{P}_{\xi_1,\xi_2}\left( X_{0:n} \notin \mathcal{C} \right) \leq
\mathbb{P}_{\xi_1,\xi_2}\left( \tau_\mathcal{C} > n \right) \leq
\mathbb{E}_{\xi_1,\xi_2}\left[\frac{ \tau_{\mathcal{C}}}{n} \wedge 1 \right] \leq \epsilon/3
\;.
\]
Furthermore, for $n \geq N_\epsilon$,
\begin{multline*}
\sum_{k=0}^{n-N_\epsilon} \mathbb{P}_{\xi_1,\xi_2} \left( W(X_n) \geq M,X_k \in
\mathcal{C}, X_{k+1:n} \notin \mathcal{C} \right) \leq \sum_{k=0}^{n-N_\epsilon}
\mathbb{P}_{\xi_1,\xi_2} \left(X_k \in \mathcal{C}, X_{k+1:n} \notin \mathcal{C} \right) \\
\leq \sum_{k=0}^{n-N_\epsilon} \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k) \
\sup_{l} \sup_{\mathcal{C} \times \Theta} \mathbb{P}_{x,\theta}^{(l)}\left( X_{1:n-k}
\notin \mathcal{C} \right)\right] \leq \sum_{k=0}^{n-N_\epsilon} \sup_l
\sup_{\mathcal{C} \times \Theta} \mathbb{P}_{x,\theta}^{(l)} \left( \tau_{\mathcal{C}} \geq n-k
\right) \\
\leq \sum_{k=N_\epsilon}^{n} \frac{1}{\mathbf{r}(k)} \sup_l \sup_{\mathcal{C} \times
\Theta} \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbf{r}(\tau_\mathcal{C}) \right] \leq \epsilon/3
\;.
\end{multline*}
Finally, for $n \geq N_\epsilon$ we write
\begin{multline*}
\sum_{k=n-N_\epsilon+1}^n \mathbb{P}_{x,\theta} \left( W(X_n) \geq M,X_k \in \mathcal{C},
X_{k+1:n} \notin \mathcal{C} \right) \\
\leq \sum_{k=n-N_\epsilon+1}^n \mathbb{E}_{x,\theta} \left[ \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k) \
\mathbb{P}_{X_k,\theta_k}^{(k)}\left(W(X_{n-k}) \geq M, X_{1:n-k} \notin \mathcal{C} \right)
\right]
\end{multline*}
We have, for any $k \in \{n-N_\epsilon+1, \cdots, n \}$ and $(x,\theta) \in
\mathcal{C} \times \Theta$
\begin{multline*}
\mathbb{P}_{x,\theta}^{(k)}\left(W(X_{n-k}) \geq M, X_{1:n-k} \notin \mathcal{C} \right)
\leq \frac{1}{M} \mathbb{E}_{x,\theta}^{(k)}\left[ W(X_{n-k}) \ensuremath{\mathbbm{1}}_{\mathcal{C}^c}(
X_{1:n-k-1}) \right] \leq \frac{1}{M} \mathbb{E}_{x,\theta}^{(k)}\left[ W(X_1)
\right]
\end{multline*}
where, in the last inequality, we used the drift inequality on $W$ outside
$\mathcal{C}$. Hence,
\[
\sum_{k=n-N_\epsilon+1}^n \mathbb{P}_{x,\theta} \left( W(X_n) \geq M,X_k \in \mathcal{C},
X_{k+1:n} \notin \mathcal{C} \right) \leq \frac{N_\epsilon}{M} \sup_{\mathcal{C} \times
\Theta} P_\theta W(x) \leq \epsilon /3 \;.
\]
The proof is concluded.
\end{proof}
\subsection{Proof of Theorem~\ref{theo:SLLNUnboundedUnifCase}}
By using the function $\hat{g}_a^{(l)}$ introduced in
Section~\ref{sec:GeneralPoisson} and by Proposition~\ref{prop:QuasiPoissonEq},
we write $\mathbb{P}_{x,\theta}-\text{a.s.} $
\begin{multline*}
n^{-1} \sum_{k=1}^n \bar f(X_k) = n^{-1} \sum_{k=1}^n \left( (1-a)^{-1}\hat
g_a^{(k)}(X_k,\theta_k) -\mathbb{E}_{X_k,\theta_k}^{(k)} \left[ \hat
g_a^{(k+1)}\left(X_1, \theta_1 \right) \right] \right) \\
= n^{-1} (1-a)^{-1} \; \sum_{k=1}^n \left\{ \hat g_a^{(k)}(X_k,\theta_k) -
\mathbb{E}_{x,\theta}\left[\hat g_a^{(k)}(X_k,\theta_k) \vert \mathcal{F}_{k-1}
\right]
\right\} \\
+ n^{-1} (1-a)^{-1} \sum_{k=1}^n \left\{ \mathbb{E}_{x,\theta}\left[\hat
g_a^{(k)}(X_k,\theta_k) \vert \mathcal{F}_{k-1} \right] - (1-a)
\mathbb{E}_{x,\theta} \left[ \hat g_a^{(k+1)}\left(X_{k+1}, \theta_{k+1} \right)
\vert \mathcal{F}_{k} \right]
\right\} \\
= n^{-1} (1-a)^{-1} \; \sum_{k=1}^n \left\{ \hat g_a^{(k)}(X_k,\theta_k) -
\mathbb{E}_{x,\theta}\left[\hat g_a^{(k)}(X_k,\theta_k) \vert \mathcal{F}_{k-1}
\right]
\right\} \\
+ n^{-1} (1-a)^{-1} \left\{ \mathbb{E}_{x,\theta}\left[\hat g_a^{(1)}(X_1,\theta_1)
\vert \mathcal{F}_{0} \right] - \mathbb{E}_{x,\theta}\left[\hat
g_a^{(n+1)}(X_{n+1},\theta_{n+1}) \vert
\mathcal{F}_{n} \right] \right\} \\
+ a \; n^{-1} (1-a)^{-1} \; \sum_{k=1}^n \mathbb{E}_{x,\theta} \left[ \hat
g_a^{(k+1)}\left(X_{k+1}, \theta_{k+1} \right) \vert \mathcal{F}_{k}
\right].
\end{multline*}
We apply the above inequalities with $a = a_n$ and consider the different terms
in turn. We show that they tend $\mathbb{P}_{x,\theta}-\text{a.s.} $ to zero when the
deterministic sequence $\{a_n, n \geq 1 \}$ satisfies conditions which are
verified e.g. with $a_n = (n+1)^{-\zeta}$ for some $\zeta$ such that
\[
\zeta >0 \;, \qquad 2\zeta < 1 - \left(0.5 \vee \beta(1-\alpha)^{-1} \right)
\;, \qquad \zeta < 1 - \beta (1-\alpha)^{-1} \;.
\]
To prove that each term converges a.s. to zero, we use the following
characterization
\[
\left[ \forall \epsilon>0 \;, \quad \lim_{n \to +\infty} \mathbb{P}\left(\sup_{m
\geq n} |X_m| \geq \epsilon \right) = 0 \right] \Longleftrightarrow \left[
\{X_n, n \geq 0 \} \to 0 \qquad \mathbb{P}-\text{a.s.} \right] \;.
\]
Hereafter, we assume that $|f|_{V^\beta} =1$. In the following, $c$ (and below, $c_1,c_2$) are constants whose values may vary upon each appearance.
\paragraph{\tt Convergence of Term 1.} Set $p \eqdef (1-\alpha) /\beta$.
We prove that
\[
n^{-1} (1-a_n)^{-1} \sum_{k=1}^n \left\{ \hat g_{a_n}^{(k)}(X_k,\theta_k) -
\mathbb{E}_{\xi_1,\xi_2}\left[\hat g_{a_n}^{(k)}(X_k,\theta_k) \vert
\mathcal{F}_{k-1} \right] \right\} \longrightarrow 0 \;,
\mathbb{P}_{\xi_1,\xi_2}-\text{a.s.}
\]
provided the sequence $\{a_n, n\geq 0\}$ is non increasing, $ \lim_{n\to\infty}
\ n^{\max(1/p,1/2)-1} /a_n = 0$, $\sum_n n^{-1} [n^{\max(1/p,1/2)-1} /a_n]^p <
+\infty$ and $\sum_n |a_n -a_{n-1}| a_{n-1}^{-2} \ [n^{\max(1/p,1/2)-1} /a_n] <
+\infty$.
\begin{proof}
Define $D_{n,k} \eqdef \hat g_{a_n}^{(k)}(X_k,\theta_k) -
\mathbb{E}_{\xi_1,\xi_2}\left[\hat g_{a_n}^{(k)}(X_k,\theta_k) \vert
\mathcal{F}_{k-1} \right]$; $S_{n,k} \eqdef \sum_{j=1}^kD_{n,j}$, if $k\leq
n$ and $S_{n,k} \eqdef \sum_{j=1}^nD_{n,j}+\sum_{j=n+1}^kD_{j,j}$ if $k>n$;
and $R_{n} \eqdef \sum_{j=1}^{n-1}D_{n,j}-D_{n-1,j}$. Then for each $n$,
$\{(S_{n,k},\mathcal{F}_k),\;k\geq 1\}$ is a martingale. For $k>n$ and by Lemma
\ref{lem1martingales}, there exists a universal constant $C$ such that
\begin{multline}\mathbb{E}_{\xi_1,\xi_2}\left[|S_{n,k}|^p\right]\leq Ck^{\max(p/2,1)-1}\left(\sum_{j=1}^n\mathbb{E}_{\xi_1,\xi_2}\left[|D_{n,j}|^p\right]+\sum_{j=n+1}^k\mathbb{E}_{\xi_1,\xi_2}\left[|D_{j,j}|^p\right]\right)\\
\leq c_1 \ |\bar f|_{V^\beta} \
k^{\max(p/2,1)-1}a_k^{-p}\sum_{j=1}^k\mathbb{E}_{\xi_1,\xi_2}\left[V(X_j)\right]\leq
c_1 \ |\bar f|_{V^\beta} \ k^{\max(p/2,1)}a_k^{-p}\xi_1(V),
\label{eq:BoundOnSnk} \end{multline} where we used (\ref{eq:MajoRem}) and
Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG2}). It follows that for any
$n\geq 1$, $\lim_{N\to\infty}
N^{-p}\mathbb{E}_{\xi_1,\xi_2}\left(|S_{n,N}|^p\right)\leq
c_1\lim_{N\to\infty}\left(N^{\max(1/p,1/2)-1} /a_N\right)^p=0$. Then by the
martingale array extension of the Chow-Birnbaum-Marshall's inequality
(Lemma~\ref{lem:Birnbaum}),
\begin{multline*}
2^{-p}\delta^p\mathbb{P}_{\xi_1,\xi_2}\left(\sup_{m \geq n} m^{-1} (1-a_m)^{-1} \left|\sum_{j=1}^nD_{n,j}\right|>\delta\right)\\
\leq
\sum_{k=n}^\infty\left(k^{-p}-(k+1)^{-p}\right)\mathbb{E}_{\xi_1,\xi_2}\left[|S_{n,k}|^p\right]+
\left(\sum_{k=n+1}^\infty
k^{-1}\mathbb{E}_{\xi_1,\xi_2}^{1/p}\left[|R_k|^p\right]\right)^p
\;.
\end{multline*} Under the assumptions on the sequence $\{a_n, n\geq 0\}$ and given the bound (\ref{eq:BoundOnSnk}), the first term in the rhs tends to zero as $n \to +\infty$.
To bound the second term, we first note that
$\{(\sum_{j=1}^kD_{n,j}-D_{n-1,j},\mathcal{F}_k),\;k\geq 1\}$ is a martingale for each
$n$. Therefore, by Lemma \ref{lem1martingales} and the definition of $D_{n,j}$
\begin{multline*}
\mathbb{E}_{\xi_1,\xi_2}\left[|R_n|^p\right]\leq
C \ n^{\max(p/2,1)-1}\sum_{j=1}^{n-1}\mathbb{E}_{\xi_1,\xi_2}\left[|D_{n,j}-D_{n-1,j}|^p\right] \\
\leq 2 C \ n^{\max(p/2,1)-1}\mathrm{s}um_{j=1}^{n-1}\mathbb{E}_{\xi_1,\xi_2}\left[|\hat
g_{a_n}^{(j)}(X_j,\theta_j) -\hat g_{a_{n-1}}^{(j)}(X_j,\theta_j)|^p\right]
\;.\end{multline*} Then, using (\ref{bounddiffGa}) (with $q=\infty$) and
the usual argument of bounding moments of $V^\beta(X_j)$, we get
\[\mathbb{E}_{\xi_1,\xi_2}^{1/p}\left[|R_n|^p\right]\leq c_1 \ |\bar f|_{V^\beta} \ n^{\max(1/2,1/p)} \ |a_n - a_{n-1}| \ a_n^{-1} a_{n-1}^{- 2}\xi_1(V).\]
Under the assumptions, $\sum_n n^{-1}
\mathbb{E}_{\xi_1,\xi_2}^{1/p}\left[|R_n|^p\right] < +\infty$ and this concludes the proof.
\end{proof}
\paragraph{\tt Convergence of Term 2.}
We prove that
\[
n^{-1} (1-a_n)^{-1} \mathbb{E}_{\xi_1,\xi_2}\left[\hat g_{a_n}^{(1)}(X_1,\theta_1)
\vert \mathcal{F}_{0} \right] \longrightarrow 0 \;,
\]
provided $\lim_n n a_n = +\infty$ and $\lim_n a_n =0$.
\begin{proof}
By Theorem~\ref{theo:controleG} applied with $q= +\infty$, it may be
proved that there exist constants $c,N$ such that
\[ \left|
\mathbb{E}_{\xi_1,\xi_2}\left[\hat g_{a_n}^{(1)}(X_1,\theta_1) \vert \mathcal{F}_{0}
\right] \right| \leq c a_n^{-1} \xi_1(V) + c \left(1-(1-a_n)^N \right)^{-1} N
\]
Multiplied by $n^{-1} (1-a_n)^{-1}$, the rhs tends to zero as $n \to +\infty$.
\end{proof}
\paragraph{\tt Convergence of Term 3.}
We prove that
\[
n^{-1} (1-a_n)^{-1} \mathbb{E}_{\xi_1,\xi_2}\left[\hat
g_{a_n}^{(n+1)}(X_{n+1},\theta_{n+1}) \vert \mathcal{F}_{n} \right]
\longrightarrow 0 \;, \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.}
\]
provided the sequence $\{n^{-1} a_n^{-1}, n\geq 1 \}$ is non-increasing, $\lim_n
n^{1-\beta(1-\alpha)^{-1}} a_n = +\infty$, $\sum_n (n a_n)^{-(1-\alpha)\beta^{-1}}
< +\infty$ and $\lim_n a_n =0$.
\begin{proof}
There exist constants $c_1,c_2,N$ such that for any $n$ large enough (i.e.
such that $1-a_n \geq 1/2$) and $p \eqdef (1-\alpha) \beta^{-1} >1$
\begin{multline*}
\mathbb{P}_{\xi_1,\xi_2} \left( \sup_{m \geq n} m^{-1} (1-a_m)^{-1} \; \left|
\mathbb{E}_{\xi_1,\xi_2}\left[ \hat g_{a_m}^{(m+1)}(X_{m+1},\theta_{m+1}) \vert
\mathcal{F}_{m} \right] \right| \geq \delta \right) \\
\leq 2^p \delta^{-p} \; \mathbb{E}_{\xi_1,\xi_2}\left[ \sup_{m \geq n} m^{-p}
\left| \mathbb{E}_{\xi_1,\xi_2}\left[ \hat g_{a_m}^{(m+1)}(X_{m+1},\theta_{m+1})
\vert
\mathcal{F}_{m} \right] \right|^p \right] \\
\leq 2^p \delta^{-p} \; \sum_{m \geq n} m^{-p} \; \mathbb{E}_{\xi_1,\xi_2}\left[
\left| \mathbb{E}_{\xi_1,\xi_2}\left[ \hat g_{a_m}^{(m+1)}(X_{m+1},\theta_{m+1})
\vert \mathcal{F}_{m} \right]
\right|^p \right] \\
\leq 2^p \delta^{-p} \; \sum_{m \geq n} m^{-p} \; \mathbb{E}_{\xi_1,\xi_2}\left[
\left| \hat g_{a_m}^{(m+1)}(X_{m+1},\theta_{m+1}) \right|^p \right] \\
\leq 2^{2p-1} \; \delta^{-p} \; \sum_{m \geq n} m^{-p} \; \left\{
\frac{c_1}{a_m^p} \; \mathbb{E}_{\xi_1,\xi_2}\left[ V^{\beta p}(X_{m+1}) \right]
+ c_2 \left(\frac{N}{(1-(1-a_m)^{N})}\right)^p \right\}
\end{multline*}
where we used Theorem~\ref{theo:controleG} with $q = +\infty$. Furthermore
by Propositions~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) and
\ref{prop:ComparaisonGal2} and the drift inequality,
\begin{multline*}
\mathbb{P}_{\xi_1,\xi_2} \left( \sup_{m \geq n} m^{-1} (1-a_m)^{-1} \; \left|
\mathbb{E}_{\xi_1,\xi_2}\left[ \hat g_{a_m}^{(m+1)}(X_{m+1},\theta_{m+1})
\vert
\mathcal{F}_{m} \right] \right| \geq \delta \right) \\
\leq \frac{2^p c_3}{\delta^{p}} \; \left\{ n^{-p} a_n^{-p}
\mathbb{E}_{\xi_1,\xi_2}[V(X_n)] + \sum_{m \geq n} m^{-p} a_m^{-p} + \sum_{m
\geq n} m^{-p} \;
\left(\frac{N}{(1-(1-a_m)^{N})}\right)^p \right\} \\
\leq \frac{2^pc_3}{\delta^{p}} \; \left\{ n^{-p} a_n^{-p} \left(\xi_1(V) + n
\bar b \right) + \bar b \sum_{m \geq n} m^{-p} a_m^{-p} + \sum_{m \geq
n} m^{-p} \; \left(\frac{N}{(1-(1-a_m)^{N})}\right)^p \right\} \;.
\end{multline*}
Under the stated conditions on $\{a_n, n\geq 1
\}$, the rhs tends to zero as $n \to +\infty$.
\end{proof}
\paragraph{\tt Convergence of Term 4.}
We prove that
\[
a_n n^{-1} (1-a_n)^{-1} \sum_{k=1}^n \mathbb{E}_{\xi_1,\xi_2}\left[\hat
g_{a_n}^{(k+1)}(X_{k+1},\theta_{k+1}) \vert \mathcal{F}_{k} \right]
\longrightarrow 0 \;, \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.}
\]
provided $\{a_n^{1 \wedge [(1-\alpha-\beta)/\alpha]} \; n^{-1}, n\geq 1\}$ is
non-increasing, $\sum_n a_n^{1 \wedge [(1-\alpha-\beta)/\alpha]} \; n^{-1} <
+\infty$, and $\lim_n a_n =0$.
\begin{proof} Choose $q \geq 1$ such that $\beta + \alpha/q \leq 1-\alpha$. Fix $\epsilon >0$. From Theorem~\ref{theo:controleG}, there exist constants $C,N$ such that for any $n\geq1$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$,
\[
\left| \hat g_{a_n}^{(l)}(x,\theta) \right| \leq C \ a_n^{1/q-1}\ V^{\beta +
\alpha /q}(x) + 4 \epsilon N (1-(1-a_n)^N)^{-1} \;.
\]
Hence for $n$ large enough such that $(1-a_n) \geq 1/2$
\begin{multline*}
\left| a_n n^{-1} (1-a_n)^{-1} \mathrm{s}um_{k=1}^n \mathbb{E}_{\xi_1,\xi_2}\left[\hat
g_{a_n}^{(k+1)}(X_{k+1},\theta_{k+1}) \vert \mathcal{F}_{k} \right] \right| \\
\leq 8 a_n \epsilon N (1-(1-a_n)^N)^{-1} + 2 C \ a_n^{1/q } n^{-1} \;
\sum_{k=1}^n \mathbb{E}_{\xi_1,\xi_2} \left[ V^{\beta +
\alpha /q}(X_{k+1}) \vert \mathcal{F}_k\right] \\
\leq 8 a_n \epsilon N (1-(1-a_n)^N)^{-1} + 2 C \ a_n^{1/q} n^{-1} \;
\sum_{k=1}^n V^{1-\alpha}(X_k) + 2 C\; \ a_n^{1/q} \bar b \;,
\end{multline*}
where we used $\beta + \alpha/q \leq 1-\alpha$ and
Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) in the last inequality.
Since $\lim_n a_n =0$ and $\lim_n a_n \epsilon N (1-(1-a_n)^N)^{-1} =
\epsilon$, we only have to prove that $a_n^{1/q} \; n^{-1} \sum_{k=1}^n
V^{1-\alpha}(X_k)$ converges to zero $\mathbb{P}_{\xi_1,\xi_2}$-\text{a.s.} By the Kronecker
Lemma (see e.g \cite[Section 2.6]{halletheyde80}), this amounts to prove that
$\sum_{k \geq 1} a_k^{1/q} k^{-1} \; V^{1-\alpha}(X_k)$ is finite \text{a.s.} This
property holds upon noting that by Proposition~\ref{prop:ComparaisonGal2} and
Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1})
\begin{multline*}
\mathbb{E}_{\xi_1,\xi_2} \left[ \sum_{k \geq n} a_k^{1/q} k^{-1} \;
V^{1-\alpha}(X_k) \right] \leq a_n^{1/q} n^{-1} \; \mathbb{E}_{\xi_1,\xi_2} \left[
V(X_n)\right] +
\sum_{k \geq n} a_k^{1/q} k^{-1} \\
\leq a_n^{1/q} n^{-1} \; \left( \xi_1(V) + \bar b n \right)+ \sum_{k \geq n}
a_k^{1/q} k^{-1},
\end{multline*}
and the rhs tends to zero under the stated assumptions.
\end{proof}
\subsection{Proof of Proposition~\ref{prop:SLLNUnboundedUnifCaseBounded}}
We only give the sketch of the proof since the proof is very
similar to that of Theorem~\ref{theo:SLLNUnboundedUnifCase}. We start with
proving a result similar to Theorem~\ref{theo:controleG}. Since $\mathcal{D} =
\mathsf{X}$, the sequence $\{\tau^k, k\geq 0\}$ is deterministic and $\tau^{k+1} =
\tau^k + N +1$. By adapting the proof of Theorem~\ref{theo:controleG} ($f$ is
bounded and $\mathcal{D} = \mathsf{X}$), we establish that for any $\epsilon>0$, there
exists an integer $n \geq 2$ such that for any $0<a<1$, any bounded function
$f$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$
\[
\left( |\bar f|_{1} \right)^{-1} \; \left| \hat g_{a}^{(l)}(x,\theta) \right|
\leq n+ \epsilon \; \left(1-(1-a)^n \right)^{-1} \; n \;.
\]
We then introduce the martingale decomposition as in the proof of
Theorem~\ref{theo:SLLNUnboundedUnifCase} and follow the same lines (with any
$p>1$).
\appendix
\section{Explicit control of convergence}
\label{app:UniformControl}
We provide sufficient conditions for the assumptions
A\ref{A-VCset}(\ref{A4rev}) and A\ref{A6}. The technique relies on the
explicit control of convergence of a transition kernel $P$ on a general state
space $(\mathbb{T}, \mathcal{B}(\mathbb{T}))$ to its stationary distribution
$\pi$.
\begin{prop}
\label{prop:ExplicitControlCvg}
Let $P$ be a $\phi$-irreducible and aperiodic transition kernel on
$(\mathbb{T}, \mathcal{B}(\mathbb{T}))$.
\begin{enumerate}[(i)]
\item \label{block1} Assume that there exist a probability measure $\nu$ on
$\mathbb{T}$, positive constants $\varepsilon, b,c$, a measurable set
$\mathcal{C}$, a measurable function $V: \mathbb{T} \to [1, +\infty)$ and $0<
\alpha \leq 1$ such that
\begin{equation}
\label{eq:HypPropExplicitControlCvg}
P(x,\cdot) \geq \ensuremath{\mathbbm{1}}_\mathcal{C}(x) \; \varepsilon \ \nu(\cdot) \;, \qquad \qquad PV
\leq V - c \ V^{1-\alpha} +b \ \ensuremath{\mathbbm{1}}_\mathcal{C} \;.
\end{equation}
Then $P$ possesses an invariant probability measure $\pi$ and
$\pi(V^{1-\alpha})< + \infty$.
\item \label{block2} Assume in addition that $ c \ \inf_{\mathcal{C}^c} V^{1-\alpha}
\geq b$, $\sup_\mathcal{C} V < + \infty$ and $\nu(\mathcal{C})>0$. Then there exists a
constant $C$ depending upon $\sup_\mathcal{C} V$, $\nu(\mathcal{C})$ and $\varepsilon,
\alpha,b,c$ such that for any $0 \leq \beta \leq 1-\alpha$ and $1 \leq \kappa
\leq\alpha^{-1}(1-\beta)$,
\begin{equation}
\label{eq:ConcPropExplicitControlCvg}
(n+1)^{\kappa-1} \; \| P^n(x,\cdot) - \pi(\cdot) \|_{V^\beta} \leq C \ V^{\beta +
\alpha \kappa}(x).
\end{equation}
\end{enumerate}
\end{prop}
\begin{proof}
The conditions (\ref{eq:HypPropExplicitControlCvg}) imply that $V$ is
unbounded off petite sets and that $P$ is recurrent. They also imply that $\{V<
+\infty \}$ is full and absorbing: hence there exists a level set $\mathcal{D}$ of
$V$ large enough such that $\nu(\mathcal{D}) >0$. Following the same lines as in
the proof of Proposition~\ref{prop:TimeFiniteAS}, we prove that $\sup_\mathcal{D}
\mathbb{E}_x[\tau_\mathcal{D}] < +\infty$. The proof of (\ref{block1}) is concluded by
\cite[Theorems 8.4.3., 10.0.1]{meynettweedie93}. The proof of (\ref{block2})
is given in e.g. \cite{gersendeetmoulines03} (see also
\cite{andrieu:fort:2005,douc:moulines:soulier:2007}).
\end{proof}
When $b \leq c$, $ c \ \inf_{\mathcal{C}^c} V^{1-\alpha} \geq b$. Otherwise, it is
easy to deduce the conditions of (\ref{block2}) from conditions of the form
(\ref{block1}).
\begin{coro}
Let $P$ be a $\phi$-irreducible and aperiodic transition kernel on $(\mathbb{T},
\mathcal{B}(\mathbb{T}))$. Assume that there exist positive constants $ b,c$,
a measurable set $\mathcal{C}$, an unbounded measurable function $V: \mathbb{T} \to
[1, +\infty)$ and $0< \alpha \leq 1$ such that $P V \leq V - c V^{1-\alpha}
+b \ensuremath{\mathbbm{1}}_\mathcal{C}$. Assume in addition that the level sets of $V$ are $1$-small.
Then there exist a level set $\mathcal{D}$ of $V$, positive constants
$\varepsilon_\mathcal{D}$, $c_\mathcal{D}$ and a probability measure $\nu_\mathcal{D}$ such
that
\[
P(x,\cdot) \geq \ensuremath{\mathbbm{1}}_\mathcal{D}(x) \; \varepsilon_\mathcal{D} \ \nu_\mathcal{D}(\cdot) \;,
\qquad \qquad PV \leq V - c_\mathcal{D} \ V^{1-\alpha} +b \ \ensuremath{\mathbbm{1}}_\mathcal{D} \;,
\]
and $\sup_\mathcal{D} V < +\infty$, $\nu_\mathcal{D}(\mathcal{D}) >0$, and $c_\mathcal{D} \
\inf_{\mathcal{D}^c} V^{1-\alpha} \geq b$.
\end{coro}
\begin{proof}
For any $0 < \gamma <1$, $PV \leq V - \gamma \; c \ V^{1-\alpha} +b \
\ensuremath{\mathbbm{1}}_{\mathcal{D}_\gamma}$ with ${\mathcal{D}_\gamma} \eqdef \{ V^{1-\alpha} \leq b [c
(1-\gamma)]^{-1} \}$. Hence, $\sup_{\mathcal{D}_\gamma} V < +\infty$; and for
$\gamma$ close to $1$, we have $\gamma c \; \inf_{\mathcal{D}^c_\gamma}
V^{1-\alpha} \geq b$. Finally, the drift condition
(\ref{eq:HypPropExplicitControlCvg}) implies that the set $\{V < +\infty \}$
is full and absorbing and thus the level sets $\{V \leq d \}$ are accessible
for any $d$ large enough.
\end{proof}
The $1$-smallness assumption is usually made for convenience and is not
restrictive. In the case the level sets are petite (and thus $m$-small for some
$m \geq 1$), the explicit upper bounds get intricate and are never detailed in
the literature (at least in the polynomial case). Nevertheless, it is a
recognized fact that the bounds derived in the case $m=1$ can be extended to the
case $m>1$.
\section{$L^p$-martingales and the Chow-Birnbaum-Marshall's inequality}
We deal with martingales and martingale arrays in the paper using the following two results.
\begin{lemma}\label{lem1martingales}
Let $\{(D_k,\mathcal{F}_k),\; k\geq 1\}$ be a martingale difference sequence and $M_n=\sum_{k=1}^nD_k$. For any $p>1$,
\begin{equation}
\mathbb{E}\left[\left|M_n\right|^p\right]\leq Cn^{\max(p/2,1)-1}\sum_{k=1}^n \mathbb{E}\left(\left|D_k\right|^p\right),\end{equation}
where $C=\left(18pq^{1/2}\right)^p$, $p^{-1}+q^{-1}=1$.
\end{lemma}
\begin{proof}
By Burkholder's inequality (\cite{halletheyde80}, Theorem 2.10) applied to the martingale $\{(M_n,\mathcal{F}_n),\;n\geq 1\}$, we get
\[
\mathbb{E}\left(\left|M_n\right|^p\right)\leq C\mathbb{E}\left[\left(\sum_{k=1}^n|D_k|^2\right)^{p/2}\right],\]
where $C=\left(18pq^{1/2}\right)^p$, $p^{-1}+q^{-1}=1$. The proof follows by noting that
\begin{equation}\label{eq:prop1}\left(\sum_{k=1}^n|D_k|^2\right)^{p/2}\leq n^{\max(p/2,1)-1}\sum_{k=1}^n\left|D_k\right|^p.\end{equation}
To prove (\ref{eq:prop1}), note that if $1<p\leq 2$, the convexity inequality $(a+b)^\alpha\leq a^\alpha+b^\alpha$, which holds true for all $a,b\geq 0$ and $0\leq \alpha\leq 1$, implies that $\left(\sum_{k=1}^n|D_k|^2\right)^{p/2}\leq \sum_{k=1}^n |D_k|^p$. If $p>2$, H\"older's inequality gives $\left(\sum_{k=1}^n|D_k|^2\right)^{p/2}\leq n^{p/2-1}\left(\sum_{k=1}^n|D_k|^p\right)$.
\end{proof}
Lemma~\ref{lem:Birnbaum} can be found in \cite{Atchade:2009} and provides a generalization to the classical Chow-Birnbaum-Marshall's
inequality.
\begin{lemma}
\label{lem:Birnbaum}
Let $\{D_{n,i},\mathcal{F}_{n,i},\;1\leq i\leq n\}$, $n\geq 1$ be a martingale-difference array and $\{c_n,\;n\geq 1\}$ a non-increasing sequence of positive numbers. Assume that $\mathcal{F}_{n,i}=\mathcal{F}_i$ for all $i,n$. Define
\[S_{n,k} \eqdef \sum_{i=1}^k D_{n,i},\;\; \mbox{ if }1\; \leq k\leq n \;\; \mbox{ and }\;\;\; S_{n,k} \eqdef \sum_{i=1}^n D_{n,i}+\sum_{j=n+1}^kD_{j,j},\;\;\;k>n;\]
\[R_n \eqdef \sum_{j=1}^{n-1}\left(D_{n,j}-D_{n-1,j}\right).\]
For $n\leq m\leq N$, $p\geq 1$ and $\lambda>0$
\begin{multline}2^{-p}\lambda^p\mathbb{P}\left(\max_{n\leq m\leq N}c_m|S_{m,m}|>\lambda\right)\leq c_N^p\mathbb{E}\left(|S_{n,N}|^p\right)+\sum_{j=n}^{N-1}\left(c_j^p-c_{j+1}^p\right)\mathbb{E}\left(|S_{n,j}|^p\right) \\
+ \mathbb{E}\left[\left(\sum_{j=n+1}^N c_j|R_j|\right)^p\right].\end{multline}
\end{lemma}
\section{Proofs of Section~\ref{sec:ex2}}
In the proofs, $C$ will denote a generic finite constant whose actual value
might change from one appearance to the next. The proofs below differ from
earlier works (see e.g. \cite{gersendeetmoulines00,doucetal04}) since $q$ is
not assumed to be compactly supported.
\subsection{Proof of Lemma~\ref{driftRWM}}
\begin{lemma}
\label{lem:tool1:proof:driftRWM}
Assume D\ref{D1}-\ref{D2}. For all $x$ large enough and $|z| \leq \eta
|x|^\upsilon$, $t \mapsto V_s(x+tz)$ is twice continuously differentiable on
$[0,1]$. There exist a constant $C < +\infty$ and a positive function
$\varepsilon$ such that $\lim_{|x| \to\infty} \varepsilon(x) = 0$, such that
for all $x$ large enough, $|z| \leq \eta |x|^\upsilon$ and $s \leq s_\star$,
\[
\sup_{t \in [0,1]} |\nabla^2 V_s(x+tz)| \leq C \; s V_s(x) |x|^{2(m-1)} \left
(s + \varepsilon(x) \right) \;.
\]
\end{lemma}
\begin{proof}
$|x +z | \geq |x| -\eta |x|^\upsilon \geq (1-\eta) |x|^\upsilon$ so that $t
\mapsto V_s(x+tz)$ is twice continuously differentiable on $[0,1]$ for $|x|$
large enough. We have
\begin{multline*}
|\nabla^2 V_s(x+tz)| \leq s V_s(x) \ \ \frac{V_s(x+tz)}{V_s(x)} | \nabla
\ln \pi(x+tz) \nabla \ln \pi(x+tz)^T | \cdots \\
\left( s + \frac{|\nabla^2 \ln \pi(x+tz) |}{| \nabla \ln \pi(x+tz) \nabla
\ln \pi(x+tz)^T |}\right)
\end{multline*}
Under the stated assumptions, there exists a constant $C$ such that for any
$x$ large enough and $|z| \leq \eta |x|^\upsilon$
\[
\sup_{t \in [0,1] } \left( s + \frac{|\nabla^2 \ln \pi(x+tz) |}{| \nabla \ln
\pi(x+tz) \nabla \ln \pi(x+tz)^T |}\right) \leq s +
\frac{D_2}{d_1^2(1-\eta)} |x|^{-m \upsilon} \;,
\]
and
\[
\sup_{t \in [0,1] } | \nabla \ln \pi(x+tz) \nabla \ln \pi(x+tz)^T | \leq
|x|^{2(m-1)} D_1^2 \left( 1 -\eta |x|^{\upsilon-1} \right)^{2(m-1)} \;.
\]
Finally,
\[
\sup_{t \in [0,1] ,s \leq s_\star}\left(\frac{\pi(x+tz)}{\pi(x)} \right)^{-s}
\leq 1 + s_\star D_1 \ |z| \sup_{t \in [0,1] } |x+tz|^{m-1} \sup_{t \in [0,1],s
\leq s_\star }\left(\frac{\pi(x+tz)}{\pi(x)} \right)^{-s}
\]
which yields the desired result upon noting that $|z| |x+tz|^{m-1} \leq \eta
|x|^{\upsilon+m-1} (1-\eta |x|^{\upsilon -1})$ is arbitrarily small for $x$
large enough.
\end{proof}
We now turn to the proof of Lemma~\ref{driftRWM}. For $x\in\mathsf{X}$, define
$R(x):=\{y\in\mathsf{X}:\; \pi(y)<\pi(x)\}$ and $R(x)-x \eqdef \{y-x:\; y\in
R(x)\}$. We have:
\begin{eqnarray*}
P_\theta V_s(x)-V_s(x)&=&\int\left(V_s(x+z)-V_s(x)\right)q_\theta(z) \ \mu_{Leb}(dz) \\
&&+ \int_{R(x)-x}\left(V(x+z)-V(x)\right)\left(\frac{\pi(x+z)}{\pi(x)}-1\right) q_\theta(z) \ \mu_{Leb}(dz) \;.
\end{eqnarray*}
If $x$ remains in a compact set $\mathcal{C}$, using D\ref{D2}(\ref{D2a}) and the
continuity of $x \mapsto V_s(x)$, we have $V_s(x+z)\leq C(1+ \exp(s D_0
|z|^m))$. It follows that
\[
\sup_{\theta \in \Theta} \sup_{x \in \mathcal{C}} \{ P_\theta V_s(x) - V_s(x) \} \leq
C \ \sup_{\theta \in \Theta} \int_{R(x)-x} (1+ \exp(s D_0 |z|^m)) \ q_\theta(z) \
\mu_{Leb}(dz) < +\infty \;.
\]
More generally, let $x$ large enough. Define $l(x) \eqdef \log\pi(x)$,
$R_V(x,z)\eqdef V_s(x+z)-V_s(x)+ s V_s(x) \pscal{z}{\nabla l(x)}$,
$R_\pi(x,z)\eqdef \pi(x+z)(\pi(x))^{-1}-1-\pscal{z}{\nabla l(x)}$. Using the
fact that the mean of $q_\theta$ is zero, we can write: $P_\theta V_s(x) -
V_s(x)=I_1(x,\theta,s)+I_2(x,\theta,s)+I_3(x,\theta,s) $ where
\[I_1(x,\theta,s)\eqdef - s V_s(x) \int_{R(x)-x} \pscal{z}{\nabla l(x)}^2 \ q_\theta(z) \ \mu_{Leb}(dz) \;,\]
\[I_2(x,\theta,s)\eqdef \int R_V(x,z) \; q_\theta(z) \ \mu_{Leb}(dz)+\int_{R(x)-x}R_V(x,z)\left(\frac{\pi(x+z)}{\pi(x)}-1\right) \ q_\theta(z) \ \mu_{Leb}(dz) \;,\]
and
\[I_3(x,\theta,s) \eqdef - s V_s(x) \ \int_{R(x)-x} R_\pi(x,z)\pscal{z}{\nabla l(x)} \ q_\theta(z) \ \mu_{Leb}(dz) \;.\]
\subsubsection{First term}
It follows from \cite[Lemma B.3. and proof of Proposition
3]{gersendeetmoulines00} that, under D\ref{D2}(\ref{D2z}), there exists $b>0$,
such that for all $\theta \in \Theta$,
\[
\int_{R(x)-x} \pscal{z}{\nabla l(x)}^2 \ q_\theta(z) \ \mu_{Leb}(dz) \geq b \;
|\nabla l(x) |^2 \;.
\]
Hence, $\sup_{\theta \in \Theta} I_1(x,\theta,s) \leq -s \; V_s(x) \ b \; d_1^2
|x|^{2(m-1)}$.
\subsubsection{Second term}
For $z\in R(x)-x$, $\pi(x+z)<\pi(x)$. Therefore $|I_2(x,\theta,s)|\leq 2\int
|R_V(x,z)|q_\theta(z) \ \mu_{Leb}(dz)$. By
Lemma~\ref{lem:tool1:proof:driftRWM}, there exists $C< + \infty$ - independent
of $s$ for $s \leq s_\star$ - such that for any $|z| \leq \eta |x|^\upsilon$,
\[
|R_V(x,z) |\leq C \; s \; V_s(x) \ |x|^{2(m-1)} \ |z|^2 \ \left(s +
\varepsilon(x) \right)\;.
\]
This implies that there exists a constant $C< +\infty$ - independent of $s$ for
$s \leq s_\star$ - such that
\begin{multline*}
\int |R_V(x,z)|q_\theta(z) \ \mu_{Leb}(dz) \leq C \; s \; V_s(x) \
|x|^{2(m-1)} \ \left(s +
\varepsilon(x) \right) \ \int |z|^2 q_\theta(z) \mu_{Leb}(dz) \\
+ V_s(x) \; \int_{\{z, |z| \geq \eta |x|^\upsilon \}}
\frac{V_s(x+z)}{V_s(x)} \ q_\theta(z) \mu_{Leb}(dz) \\
+ C \; V_s(x) \; |x|^{m-1} \int_{\{z, |z| \geq \eta |x|^\upsilon \}} |z| \;
q_\theta(z) \mu_{Leb}(dz) \;.
\end{multline*}
There exists a constant $C$ such that for $\theta \in \Theta$ and $s \leq
s_\star$, the first term in the rhs is upper bounded by $C \; s \; V_s(x) \
|x|^{2(m-1)} \left(s + \varepsilon(x) \right)$. Under D\ref{D3}, the second
term is upper bounded by $V_s(x) \; |x|^{2(m-1)} \; \varepsilon(x) $ with
$\lim_{|x| \to +\infty} \varepsilon(x) = 0$ uniformly in $\theta$ for $\theta
\in \Theta$, and in $s$ for $s \leq s_\star$. Since $q_\theta$ is a
multivariate Gaussian distribution, there exists $\lambda_\star>0$ such that
$\sup_{\theta \in\Theta} \int \exp(\lambda_\star |z|^2) q_\theta(z)
\mu_{Leb}(dz)< +\infty$. Under D\ref{D3}, the third term is upper bounded by $C
\; V_s(x) \; |x|^{2(m-1)} \; \exp(-\lambda \eta^2 |x|^{2 \upsilon})$ for some
$\lambda \in (0,\lambda_\star)$, uniformly in $\theta$ for $\theta \in \Theta$,
and in $s$ for $s \leq s_\star$. Hence, we proved that there exists $C_\star<
\infty$ such that for any $s \leq s_\star$,
\[
\sup_{\theta \in \Theta} |I_2(x,\theta,s)|\leq C_\star \; V_s(x) \;
|x|^{2(m-1)} \; \left(s^2+ \varepsilon(x) \right) \;,
\]
for a positive function $\varepsilon$ independent of $s$ and such that
$\lim_{|x| \to +\infty} \varepsilon(x) = 0$.
\subsubsection{Third term} Following the same lines as in the control of $I_2(x,\theta,s)$, it may be proved that
\begin{multline*}
I_3(x,\theta,s) \leq s V_s(x) D_1 |x|^{m-1} \int_{\{z, |z| \geq \eta
|x|^\upsilon \}} |z | \left( 1 + D_1 |z| |x|^{m-1} \right) q_\theta(z) \mu_{Leb}(dz) \\
+ C \ V_s(x) |x|^{3(m-1)} \; \int_{\{z, |z| \leq \eta |x|^\upsilon \}} |z|^3
\ q_\theta(z) \mu_{Leb}(dz) \leq C \ V_s(x) |x|^{2(m-1)} \varepsilon(x)
\end{multline*}
for a positive function $\varepsilon$ independent of $s,\theta$ and such that
$\lim_{|x| \to +\infty} \varepsilon(x) = 0$.
\subsubsection{Conclusion}
Let $\alpha \in (0,1)$. By combining the above calculations, we prove that by
choosing $s$ small enough such that $c_\star \eqdef b d_1^2 - C_\star s>0$, we
have
\begin{align}
\sup_{\theta \in \Theta} P_\theta V_s(x) & \leq V_s(x) - c_\star V_s(x)
|x|^{2(m-1)} + b_\star \ensuremath{\mathbbm{1}}_\mathcal{C}(x) \label{eq:drift:sous-geom} \\
& \leq V_s(x) - 0.5 c_\star V_s^{1-\alpha}(x) + b_\star \ensuremath{\mathbbm{1}}_\mathcal{C}(x)
\end{align}
for a compact set $\mathcal{C}$. This proves A\ref{Adrift}(ii) and A\ref{A5}.
A\ref{A6} follows from the results of Appendix~\ref{app:UniformControl}.
A\ref{Adrift}(iii) and A\ref{A2} follow from Lemma~\ref{lem:example:smallset}.
\subsection{Proof of Lemma~\ref{ex:lem:HypB}}
An easy modification in the proof of \cite[Proposition 11]{andrieuetal06} (to
adjust for the difference in the drift function) shows that
$D(\theta,\theta')\leq 2\int_\mathsf{X} |q_{e^c\Sigma}(x)-q_{e^{c'}\Sigma'}(x)|
\mu_{Leb}(dx)$. We then apply \cite[Lemma 12]{andrieuetal06} to obtain that
$D(\theta,\theta')\leq C \, |e^c \Sigma-e^{c'}\Sigma'|_\mathrm{s}$ where $C$ is a
finite constant depending upon the compact $\Theta$. Hereafter, $C$ is finite
and its value may change upon each appearance. For any $l,n\geq 0$,
$\epsilon>0$, $x \in \mathbb R^p$ and $\theta\in\Theta$, we have
\begin{eqnarray*}
\mathbb{P}^{(l)}_{x,\theta} \left(D(\theta_n,\theta_{n+1})\geq \epsilon\right) &\leq& \epsilon^{-1}\mathbb{E}^{(l)}_{x,\theta}\left[D(\theta_n, \theta_{n+1})\right]\\
&\leq& C \, \mathbb{E}^{(l)}_{x,\theta}\left[ |c_{n+1}-c_n| + |\Sigma_{n+1} - \Sigma_n|_\mathrm{s} \right]\\
&\leq& C \, (l+n+1)^{-1}\left(1+\mathbb{E}^{(l)}_{x,\theta}\left[|X_{n+1}|^2\right]+\sqrt{\mathbb{E}^{(l)}_{x,\theta}\left[|X_{n+1}|^2\right]}\right) \;.
\end{eqnarray*}
D\ref{D2}(\ref{D2a}) implies that we can find $C<\infty$ such that $|x|^2\leq C
\; \phi(V_s(x))$ for all $x\in\mathsf{X}$ where $\phi(t) = [\ln t]^{2/m}$. From the
drift condition (Lemma~\ref{driftRWM}),
Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) and the concavity of
$\phi$, we deduce that there exists $C$ such that
$\mathbb{E}^{(l)}_{x,\theta}\left[|X_n|^2\right]\leq C\; [\ln V_s(x) ]^{2/m} \; [\ln
n]^{2/m}$. We conclude that for any probability $\xi_1$ such that $\xi_1([\ln
V_s]^{2/m}) < +\infty$, $\lim_n \mathbb{P}_{\xi_1,\xi_2}
\left(D(\theta_n,\theta_{n+1}) \geq \epsilon\right)=0$ and for any level set
$\mathcal{D}$ of $V_s$,
\[
\lim_{n\to\infty}\sup_{l\geq 0}\sup_{\mathcal{D}\times
\Theta}\mathbb{P}^{(l)}_{x,\theta} \left(D(\theta_n,\theta_{n+1}) \geq
\epsilon\right)=0 \;.\]
{\bf Acknowledgment:} We would like to thank Michael Woodroofe for helpful
discussions on the resolvent approach to limit theorems and Prof. Pierre
Priouret and Christophe Andrieu for helpful discussions. We also thank M.
Vihola for helpful comments.
\end{document} |
\begin{document}
\title[SMALL BALL PROBABILITIES FOR SPDE WITH COLORED NOISE]{SMALL BALL PROBABILITIES FOR THE FRACTIONAL STOCHASTIC HEAT EQUATION DRIVEN BY A COLORED NOISE}
\author{Jiaming Chen}
\address{Dept. of Mathematics
\\University of Rochester
\\Rochester, NY 14627}
\email{jchen143@ur.rochester.edu}
\keywords{fractional stochastic heat equation, colored noise, small ball probability.}
\subjclass[2020]{Primary, 60H15; Secondary, 60G60.}
\begin{abstract}
We consider the fractional stochastic heat equation on the $d$-dimensional torus $\mathbb{T}^d:=[-1,1]^d$, $d\geq 1$, with periodic boundary conditions:
$$
\partial_t u(t,x)= -(-\Delta)^{\alpha/2}u(t,x)+\sigma(t,x,u)\dot{F}(t,x)\quad x\in \mathbb{T}^d,t\in\mathbb{R}^+
,$$
where $\alpha\in(1,2]$ and $\dot{F}(t,x)$ is a white in time and colored in space noise. We assume that $\sigma$ is Lipschitz in $u$ and uniformly bounded. We provide small ball probabilities for the solution $u$ when $u(0,x)\equiv 0$.
\end{abstract}
\maketitle
\section{Introduction}
In this paper we consider small ball probabilities for solutions to the fractional stochastic heat equation of the type:
\begin{equation}
\label{SHE}
\partial_t u(t,{\bf x})=-(-\Delta)^{\alpha/2}u(t,{\bf x})+\sigma(t,{\bf x},u)\dot{F}(t,{\bf x})\quad {\bf x}\in \mathbb{T}^d,t\in\mathbb{R}^+,
\end{equation}
with given initial profile $u(0,\cdot)=u_0:\mathbb{T}^d\to \mathbb{R}$ where $\mathbb{T}^d:=[-1,1]^d$ is a $d$-dimensional torus. The operator $-(-\Delta)^{\alpha/2}$, where $1<\alpha\leq 2$, is the fractional power Laplacian on $\mathbb{T}^d$. The centered Gaussian noise $\dot{F}$ is white in time and colored in space, i.e.,
\begin{equation*}
\mathbb{E}\left(\dot{F}(t,{\bf x}),\dot{F}(s,{\bf y})\right)=\delta_0(t-s)\Lambda({\bf x-y}),
\end{equation*}
where $\delta_0$ is the Dirac delta generalized function and $\Lambda:\mathbb{T}^d\to\mathbb{R}_+$ is a nonnegative generalized function whose Fourier series is given by
\begin{equation}
\label{lambdafourier}
\Lambda({\bf x})=\sum_{{\bf n}\in\mathbb{Z}^d}\lambda({\bf n})\exp(\pi i \, {\bf n\cdot x})
\end{equation}
where ${\bf n}\cdot{\bf x}$ represents the dot product of two $d$-dimensional vectors. We will need the following two assumptions on the function $\sigma:\mathbb{R}_+\times\mathbb{T}^d\times\mathbb{R}\to \mathbb{R}$.
\begin{hyp}
There exists a constant $\mathcal{D}>0$ such that for all $t\geq 0$, ${\bf x}\in\mathbb{T}^d$, $u,v\in\mathbb{R}$,
\begin{equation}
\label{hypothesis1}
\vert \sigma(t,{\bf x},u)-\sigma(t,{\bf x},v)\vert\leq\mathcal{D}|u-v|.
\end{equation}
\end{hyp}
\begin{hyp}
There exist constants $\mathcal{C}_1$, $\mathcal{C}_2>0$ such that for all $t\geq 0$, ${\bf x}\in\mathbb{T}^d$, $u\in\mathbb{R}$,
\begin{equation}
\label{hypothesis2}
\mathcal{C}_1\leq \sigma(t,{\bf x},u)\leq \mathcal{C}_2.
\end{equation}
\end{hyp}
In fact, \eqref{SHE} is not well-posed since the solution $u$ is not differentiable and $\dot{F}$ exists as a generalized function. However, under the assumptions \eqref{hypothesis1} and \eqref{hypothesis2}, we define the mild solution $u(t,{\bf x})$ to \eqref{SHE} in the sense of Walsh \cite{walsh1986anintroductiontostochastic} satisfying
\begin{equation}
\label{mild}
u(t,{\bf x})=\int_{\mathbb{T}^d}\bar{p}(t,{\bf x-y})u_0({\bf y})d{\bf y}+\int_{[0,t]\times\mathbb{T}^d}\bar{p}(t-s,{\bf x-y})\sigma(s,{\bf y},u(s,{\bf y}))F(dsd{\bf y}),
\end{equation}
where $\bar{p}:\mathbb{R}_+\times\mathbb{T}^d\to \mathbb{R}_+$ is the fundamental solution of the fractional heat equation on $\mathbb{T}^d$
\begin{equation}
\label{equation1}
\begin{split}
\partial_t&\bar{p}(t,{\bf x})=-(-\Delta)^{\alpha/2}\bar{p}(t,{\bf x})\\
&\bar{p}(0,{\bf x})=\delta_0({\bf x}).
\end{split}
\end{equation}
Following \cite{dalang1999extending}, it is well known (see also \cite{dalang2009minicourse}) that if $\lambda({\bf n})$, the Fourier coefficients of $\Lambda({\bf x})$, satisfy
\begin{equation}
\label{Lambda}
\sum_{{\bf n}\in\mathbb{Z}^d}\frac{\lambda({\bf n})}{1+\vert {\bf n}\vert^\alpha}<\infty,
\end{equation}
where $\vert \cdot\vert$ is the Euclidean norm, then there exists a unique random field solution $u(t,{\bf x})$ to equation \eqref{mild}. Examples of spatial correlation satisfying \eqref{Lambda} are:
\begin{enumerate}
\item[1.] The Riesz kernel $\Lambda({\bf x})=\vert {\bf x}\vert^{-\beta}$, $0<\beta<d$. In this case, there exist positive constants $c_1$, $c_2$ such that for all ${\bf n}\in\mathbb{Z}^d$,
\begin{equation}
\label{lambdabound}
c_1\vert {\bf n}\vert^{-(d-\beta)}\leq\lambda({\bf n})\leq c_2\vert {\bf n}\vert^{-(d-\beta)},
\end{equation}
and it is easy to check that condition \eqref{Lambda} holds whenever $\beta<\alpha$.
\item[2.] The space-time white noise case $\Lambda({\bf x})=\delta_0({\bf x})$. In this case, $\lambda({\bf n})$ is a constant and \eqref{Lambda} is only satisfied when $\alpha>d$, that is, $d=1$ and $1<\alpha\leq 2$.
\end{enumerate}
Small ball probability problems have a long history, and one can see \cite{li2001gaussian} for more surveys. In short, we are interested in the probability that a stochastic process $X_t$ starting at 0 stays in a small ball for a long time period, i.e.,
$$P\left(\sup_{0\leq t\leq T}\vert X_t\vert<\varepsilon\right)$$
where $\varepsilon>0$ is small. A recent paper \cite{athreya2021small} has studied this problem when $X_t$ is the solution of the stochastic heat equation with $d=1$, $\alpha=2$ and $\Lambda=\delta_0$. The objective of this paper is to generalize their results with the Riesz kernel.
\section{Main Result}
\begin{theorem}
\label{thm}
Under the assumptions \eqref{hypothesis1} and \eqref{hypothesis2}, if $u(t,{\bf x})$ is the solution to \eqref{SHE} with $u_0({\bf x})\equiv 0$, then there are positive constants $\textbf{C}_0,\textbf{C}_1,\textbf{C}_2,\textbf{C}_3,\mathcal{D}_0$ depending only on $\mathcal{C}_1,\mathcal{C}_2$, $\alpha$, $\beta$ and $d$, such that for all $\mathcal{D}<\mathcal{D}_0,\varepsilon_0>\varepsilon>0$, $T>1$, we have
\begin{enumerate}
\item[(a)] when $d=1$ and $\alpha\geq 2\beta$,
\begin{equation*}
\textbf{C}_0\exp\left(-\frac{\textbf{C}_1T}{\varepsilon^{\frac{2(2\alpha-\beta)}{\beta}}}\right)< P\left(\sup_{\substack{0\leq t\leq T\\ {\bf x}\in\mathbb{T}^d}}\vert u(t,{\bf x})\vert\leq \varepsilon\right)<\textbf{C}_2\exp\left(-\frac{\textbf{C}_3T}{\varepsilon^{\frac{2(\alpha+\beta)}{\alpha-\beta}}}\right),
\end{equation*}
\item[(b)] or in other cases,
\begin{equation*}
0\leq P\left(\sup_{\substack{0\leq t\leq T\\ {\bf x}\in\mathbb{T}^d}}\vert u(t,{\bf x})\vert\leq \varepsilon\right)<{\bf C_2}\exp\left(-\frac{{\bf C_3}T}{\varepsilon^{\frac{2\alpha}{\alpha-\beta}\left(\left(1+\frac{\beta}{\alpha d}\right)\wedge\left(\frac{2\alpha-\beta}{\alpha}\right)\right)}}\right).
\end{equation*}
\end{enumerate}
\end{theorem}
Here we make a couple of remarks. These could be of independent interests.
\begin{remark}
\begin{enumerate}
\item[(a)] The lack of lower bound for small ball probability in part $(b)$ is due to an exponential growth number of grids in space.
\item[(b)] When $d\geq 2$ and $\Lambda({\bf x})=\delta_0({\bf x})$, the solution exists as a distribution. Is there a way to estimate the small probability for some norm of this solution?
\item[(c)] The small ball probability estimation has a close relation with the Chung's type Law of the Iterated Logarithm (see \cite{li2001gaussian} for more details). Can we follow the idea from \cite{lee2021chung} to get a similar result for non-Gaussian random fields/strings?
\end{enumerate}
\end{remark}
Here is the organization of this paper. In Section 3 we state the key proposition and how this proposition relates to the main result. In Section 4 we give some useful estimations. In Section 5 we prove the key proposition.
Throughout the entire paper, $C$ and $C'$ denote positive constants whose values may vary from line to line. The dependence of constants on parameters will be denoted by mentioning the parameters in parenthesis.
\section{Key Proposition}
We decompose $[-1,1]$ into intervals of length $\varepsilon^2$ on each dimension and divide $[0,T]$ into intervals of length $c_0\varepsilon^4$ where $c_0$ satisfies
\begin{equation}
\label{c0bound1}
0<c_0<\min\left\lbrace 1, \left(\frac{C_6}{36\mathcal{C}_2^2\ln C_5}\right)^{\frac{\alpha}{\alpha-\beta}}\right\rbrace
\end{equation}
where $C_5,C_6$ are constants specified in Lemma \ref{larged}. Moreover, for every $\varepsilon>0$, with $\mathcal{C}$ as specified in Lemma \ref{coeffbound}, we require
\begin{equation}
\label{c0}
0<c_0<\mathcal{C}\varepsilon^{\frac{2\alpha d-4\beta}{\beta}}.
\end{equation}
\begin{remark}
Unlike the white noise case in \cite{athreya2021small}, $c_0$ needs to be selected depending on $\varepsilon$ in this paper. Indeed, $c_0$ does not appear in the bounds for small ball probability.
\end{remark}
Define $t_i=ic_0\varepsilon^4,x_{j}=j\varepsilon^2$ and
\begin{equation*}
n_1:=\min\{n\in\mathbb{Z}:n\varepsilon^2>1\},
\end{equation*}
where $i\in\mathbb{N}$ and $j\in\mathbb{Z}$. Consider a sequence of sets $R_{i,j}\subset \mathbb{R}\times\mathbb{R}^d$ as
\begin{equation}
\label{Pij}
R_{i,j}=\left\lbrace(t_i,x_{j_1},x_{j_2},...x_{j_d})\vert -n_1+1\leq j_k\leq j,k=1,2,...,d\right\rbrace.
\end{equation}
By symmetry, $(x_{j_1},x_{j_2},...x_{j_d})$ lies in $[-1,1]^d$ when
\begin{equation}
\label{jbound}
-n_1+1\leq j_k\leq n_1-1 \text{~for $k=1,2,\dots,d$}.
\end{equation}
For $n\geq 0$, we define a sequence of events that we can use for the upper bound in Theorem \ref{thm},
\begin{equation}
\label{Fn}
F_{n}=\left\lbrace\vert u(t,{\bf x})\vert\leq t_1^{\frac{\alpha-\beta}{2\alpha}}\text{~for all $(t,{\bf x})\in R_{n,n_1-1}$}\right\rbrace.
\end{equation}
In addition, let $E_{-1}=\Omega$ and for $n\geq 0$, we define a sequence of events that we can use for the lower bound in Theorem \ref{thm},
\begin{equation}
\label{En}
E_{n}=\left\lbrace\vert u(t_{n+1},{\bf x})\vert\leq \frac{1}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\text{~and~} \vert u(t,{\bf x})\vert\leq \varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\text{~for all~} t\in[t_n,t_{n+1}],{\bf x}\in[-1,1]^d\right\rbrace.
\end{equation}
The following proposition along with the Markov property will lead to Theorem \ref{thm}.
\begin{prop}
\label{prop}
Consider the solution to \eqref{SHE} with $u_0({\bf x})\equiv 0$. Then, there exist $\varepsilon_1>0$ and ${\bf C_4,C_5,C_6,C_7},\mathcal{D}_0>0$ depending only on $\mathcal{C}_1$, $\mathcal{C}_2$, $\alpha$, $\beta$, and $d$ such that for any $0<\varepsilon<\varepsilon_1$ and $\mathcal{D}<\mathcal{D}_0$,
\begin{enumerate}
\item[(a)]
\begin{equation*}
P\left(F_n\bigg| \bigcap_{k=0}^{n-1}F_k\right)\leq{\bf C_4}\exp\left(-\frac{{\bf C_5}}{\varepsilon^2+\mathcal{D}^2t_1^\frac{\alpha-\beta}{\alpha}}\right),
\end{equation*}
\item[(b)] and when $d=1$ and $\alpha\geq 2\beta$,
\begin{equation*}
P\left(E_n\bigg| \bigcap_{k=-1}^{n-1}E_k\right)\geq {\bf C_6}\exp\left(-\frac{{\bf C_7}}{\varepsilon^{\frac{4(\alpha-\beta)^2}{\alpha\beta}}}\right).
\end{equation*}
\end{enumerate}
\end{prop}
Next we show how Theorem \ref{thm} follows from Proposition \ref{prop}.
\textbf{Proof of Theorem \ref{thm}}: The event $F_{n}$ deals with $u(t,{\bf x})$ at the time $t_n$, so putting these events together indicates
$$F:=\bigcap_{n=0}^{\left\lfloor\frac{T}{t_1}\right\rfloor}F_{n}\supset \left\lbrace\vert u(t,{\bf x})\vert\leq t_1^{\frac{\alpha-\beta}{2\alpha}}, t\in[0,T],{\bf x}\in[-1,1]^d\right\rbrace,$$
and
$$P(F)=P\left(\bigcap_{n=0}^{\left\lfloor\frac{T}{t_1}\right\rfloor}F_{n}\right)=P(F_{0})\prod_{n=1}^{\left\lfloor\frac{T}{t_1}\right\rfloor}P\left(F_{n}\bigg| \bigcap_{k=0}^{n-1}F_{k}\right).$$
With $u_0({\bf x})\equiv 0$, Proposition \ref{prop} immediately yields
\begin{align*}
P(F)&\leq \left[{\bf C_4}\exp\left(-\frac{{\bf C_5}}{\varepsilon^2+\mathcal{D}^2t_1^\frac{\alpha-\beta}{\alpha}}\right)\right]^{\left\lfloor\frac{T}{t_1}\right\rfloor}\leq {\bf C_4'}\exp\left(-\frac{{\bf C_5'}T}{\varepsilon^2t_1+\mathcal{D}^2t_1^\frac{2\alpha-\beta}{\alpha}}\right)\\
&\leq {\bf C_2}\exp\left(-\frac{{\bf C_3}T}{t_1^{\left(1+\frac{\beta}{\alpha d}\right)\wedge\left(\frac{2\alpha-\beta}{\alpha}\right)}}\right).
\end{align*}
The last inequality follows from the inequality of $c_0$ in \eqref{c0} and $\mathcal{D}<\mathcal{D}_0$. Therefore we have
$$
P\left(\left\lbrace\vert u(t,{\bf x})\vert\leq t_1^\frac{\alpha-\beta}{2\alpha}, t\in[0,T],{\bf x}\in[-1,1]^d\right\rbrace\right)<{\bf C_2}\exp\left(-\frac{{\bf C_3}T}{t_1^{\left(1+\frac{\beta}{\alpha d}\right)\wedge\left(\frac{2\alpha-\beta}{\alpha}\right)}}\right),
$$
then replacing $t_1^\frac{\alpha-\beta}{2\alpha}$ with $\varepsilon$ and adjusting $\varepsilon_1$ to $\varepsilon_0$ give the upper bound in Theorem \ref{thm}.
For the lower bound, the event $E_{n}$ deals with $u(t,{\bf x})$ in the time interval $[t_n,t_{n+1}]$, so putting these events together indicates
$$E:=\bigcap_{n=-1}^{\left\lfloor\frac{T}{t_1}\right\rfloor-1}E_{n}\subset \left\lbrace\vert u(t,{\bf x})\vert\leq \varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}, t\in[0,T],{\bf x}\in[-1,1]^d\right\rbrace,$$
and
$$P(E)=P\left(\bigcap_{n=-1}^{\left\lfloor\frac{T}{t_1}\right\rfloor-1}E_{n}\right)=P(E_{-1})\prod_{n=0}^{\left\lfloor\frac{T}{t_1}\right\rfloor-1}P\left(E_{n}\bigg| \bigcap_{k=-1}^{n-1}E_{k}\right).$$
With $u_0({\bf x})\equiv 0$, Proposition \ref{prop} immediately yields
$$P(E)\geq \left[{\bf C_6}\exp\left(-\frac{{\bf C_7}}{\varepsilon^{\frac{4(\alpha-\beta)^2}{\alpha\beta}}}\right)\right]^\frac{T}{t_1}\geq {\bf C_0}\exp\left(-\frac{{\bf C_1}T}{t_1\varepsilon^{\frac{4(\alpha-\beta)^2}{\alpha\beta}}}\right).$$
Therefore, from the inequality of $c_0$ in \eqref{c0bound2}, we have
$$
P\left(\left\lbrace\vert u(t,{\bf x})\vert\leq \varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}, t\in[0,T],{\bf x}\in[-1,1]^d\right\rbrace\right)>{\bf C_0}\exp\left(-\frac{{\bf C_1}T}{\varepsilon^{\frac{4(\alpha-\beta)(2\alpha-\beta)}{\alpha\beta}}}\right),
$$
then replacing $\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}$ with $\varepsilon$ and adjusting $\varepsilon_1$ to $\varepsilon_0$ give the lower bound in Theorem \ref{thm}.
\section{Preliminary}
In this section, we provide some preliminary results that are used to prove the key Proposition~\ref{prop}.
\subsection{Heat Kernel Estimates} For ${\bf x}\in \mathbb{R}^d$, $p(t,{\bf x})$ is the smooth function determined by its Fourier transform in ${\bf x}$
\begin{equation*}
\hat{p}(t,\nu):=\int_{\mathbb{R}^d}p(t,{\bf x})\exp(2\pi i\nu\cdot {\bf x})d{\bf x}=\exp(-t(2\pi\vert \nu\vert)^\alpha),\quad \nu\in\mathbb{R}^d.
\end{equation*}
For ${\bf x}\in \mathbb{T}^d$, from the standard Fourier decomposition we have
\begin{equation}
\label{pfourier}
\bar{p}(t,{\bf x})=2^{-d}\sum_{{\bf n}\in\mathbb{Z}^d}\exp\left(-\pi^\alpha\vert {\bf n}\vert^\alpha t\right)\exp(\pi i{\bf n}\cdot {\bf x}).
\end{equation}
The following lemma gives an estimation on heat kernel $\bar{p}(t,{\bf x})$, which is similar to Lemma 2.1 and Lemma 2.2 in \cite{li2017holder}.
\begin{lemma}\label{pdiff} There exist constants $C,C'>0$ depending only on $\alpha,d$ such that for all $t\geq s>0$ and ${\bf x,y}\in \mathbb{T}^d$,
\begin{equation}
\label{pdiffspace}
\int_{\mathbb{T}^d}\vert \bar{p}(t,{\bf y-x})-\bar{p}(t,{\bf y})\vert d{\bf y}\leq C\left(\frac{\vert {\bf x}\vert}{t^{1/\alpha}}\wedge 1\right),
\end{equation}
\begin{equation}
\label{pdifftime}
\int_{\mathbb{T}^d}\vert \bar{p}(t,{\bf x})-\bar{p}(s,{\bf x})\vert d{\bf x}\leq C'\left(\log\left(\frac{t}{s}\right)\wedge 1\right).
\end{equation}
\end{lemma}
\begin{proof}
We begin with inequality \eqref{pdiffspace},
\begin{equation}
\begin{split}
\label{pdiffproof}
\int_{\mathbb{T}^d}\vert \bar{p}(t,{{\bf y-x}})-\bar{p}(t,{\bf y})\vert d{\bf y}&=\int_{\mathbb{T}^d}\left\vert \sum_{{\bf n}\in \mathbb{Z}^d}\left[p(t,{\bf y-x}+2{\bf n})-p(t,{\bf y}+2{\bf n})\right]\right\vert d{\bf y}\\
&\leq \int_{\mathbb{T}^d}\sum_{{\bf n}\in \mathbb{Z}^d}\left\vert p(t,{\bf y-x}+2{\bf n})-p(t,{\bf y}+2{\bf n})\right\vert d{\bf y}\\
&=\int_{\mathbb{R}^d}\vert p(t,{\bf y-x})-p(t,{\bf y})\vert d{\bf y}\\
&\leq \int_{\mathbb{R}^d} \vert {\bf x}\vert\cdot\sup_{c_0\in[0,1]}\left\vert\nabla_{{\bf z}} p(t,{\bf y}-c_0{\bf x})\right\vert d{\bf y}.
\end{split}
\end{equation}
By Lemma 5 in \cite{bogdan2007estimates} and $(2.3)$ of \cite{jakubowski2016stable}, we have
\begin{equation}
\label{pgradiant}
\left\vert\nabla_{{\bf z}} p(t,{\bf z})\right\vert \leq C(d,\alpha)\vert {\bf z}\vert\left(\frac{t}{\vert {\bf z}\vert^{d+2+\alpha}}\wedge t^{-(d+2)/\alpha}\right)\leq C(d,\alpha)\frac{t\vert {\bf z}\vert}{(t^{1/\alpha}+\vert {\bf z}\vert)^{d+2+\alpha}}.
\end{equation}
We put \eqref{pgradiant} into \eqref{pdiffproof} to get
\begin{equation*}
\begin{split}
\int_{\mathbb{T}^d}\vert \bar{p}(t,{{\bf y-x}})-\bar{p}(t,{\bf y})\vert d{\bf y}&\leq C(d,\alpha)\vert {\bf x}\vert\int_{\mathbb{R}^d}\frac{t\vert {\bf y}\vert}{(t^{1/\alpha}+\vert {\bf y}\vert)^{d+2+\alpha}}d{\bf y}\\
&\leq C(d,\alpha)\vert {\bf x}\vert\int_0^{\infty}\frac{tx}{(t^{1/\alpha}+x)^{d+2+\alpha}}x^{d-1}dx\\
&=\frac{C(d,\alpha)\vert {\bf x}\vert}{t^{1/\alpha}}\int_0^{\infty}\frac{w^d}{(1+w)^{d+2+\alpha}}dw\\
&\leq \frac{C(d,\alpha)\vert {\bf x}\vert}{t^{1/\alpha}}.
\end{split}
\end{equation*}
Clearly, $\int_{\mathbb{T}^d}\vert \bar{p}(t,{{\bf y-x}})-\bar{p}(t,{\bf y})\vert d{\bf y}\leq 2$, so that \eqref{pdiffspace} follows. For inequality \eqref{pdifftime}, we have
\begin{equation}
\label{pdiffproof2}
\begin{split}
\int_{\mathbb{T}^d}\vert \bar{p}(t,{{\bf x}})-\bar{p}(s,{\bf x})\vert d{\bf x}&=\int_{\mathbb{T}^d}\left\vert \sum_{{\bf n}\in \mathbb{Z}^d}\left[p(t,{\bf x}+2{\bf n})-p(s,{\bf x}+2{\bf n})\right]\right\vert d{\bf x}\\
&\leq \int_{\mathbb{T}^d}\sum_{{\bf n}\in \mathbb{Z}^d}\left\vert p(t,{\bf x}+2{\bf n})-p(s,{\bf x}+2{\bf n})\right\vert d{\bf x}\\
&=\int_{\mathbb{R}^d}\vert p(t,{\bf x})-p(s,{\bf x})\vert d{\bf x}\\
&\leq \int_{\mathbb{R}^d}\int_s^t\vert\partial_r p(r,{\bf x})\vert drd{\bf x}.
\end{split}
\end{equation}
Proposition 2.1 in \cite{vazquez2017classical} shows
\begin{equation}
\label{ptimegradient}
\vert(-\Delta)^{\alpha/2} p(r,{\bf x})\vert\leq \frac{C(d,\alpha)}{(r^{2/\alpha}+\vert {\bf x}\vert^2)^{\frac{d+\alpha}{2}}}.
\end{equation}
Applying \eqref{equation1}, \eqref{ptimegradient} and Fubini's theorem to \eqref{pdiffproof2} yields that
\begin{align*}
\int_{\mathbb{T}^d}\vert \bar{p}(t,{{\bf x}})-\bar{p}(s,{\bf x})\vert d{\bf x}&\leq C(d,\alpha)\int_{\mathbb{R}^d}\int_s^t\frac{1}{(r^{2/\alpha}+\vert {\bf x}\vert^2)^{\frac{d+\alpha}{2}}} drd{\bf x}\\
&=C(d,\alpha)\int_s^t\int_0^\infty \frac{x^{d-1}}{(r^{2/\alpha}+x^2)^{\frac{d+\alpha}{2}}} dxdr\\
&=C(d,\alpha)\int_s^t\frac{dr}{r}\int_0^\infty \frac{w^{d-1}}{(1+w^2)^{\frac{d+\alpha}{2}}} dw\\
&\leq C(d,\alpha)\left(\log(t)-\log(s)\right).
\end{align*}
Similarly, $\int_{\mathbb{T}^d}\vert \bar{p}(t,{{\bf x}})-\bar{p}(s,{\bf x})\vert d{\bf x}\leq 2$, so that \eqref{pdifftime} follows.
\end{proof}
\subsection{Noise Term Estimates}
We denote the second integral of \eqref{mild}, i.e., the noise term, by
\begin{equation}
\label{noiseterm}
N(t,{\bf x}):=\int_{[0,t]\times\mathbb{T}^d} \bar{p}(t-s,{\bf x-y})\sigma(s,{\bf y},u(s,{\bf y}))F(dsd{\bf y}).
\end{equation}
We will now estimate the regularity of $N(t,{\bf x})$ in the following two lemmas.
\begin{lemma}\label{spatialregularity} There exists a constant $C>0$ depending only on $\alpha,\beta,d$ and $\mathcal{C}_2$ in \eqref{hypothesis2} such that for any $\xi\in\left(0,\frac{1}{\alpha}\wedge\frac{\alpha-\beta}{\alpha}\right)$, $t\in[0,1]$ and ${\bf x,y}\in \mathbb{T}^d$, we have
\begin{equation*}
\mathbb{E}\left[(N(t,{\bf x})-N(t,{\bf y}))^2\right]\leq C\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}.
\end{equation*}
\end{lemma}
\begin{proof}
To simplify our computation, fix $t,s,{\bf x},{\bf y}$ and we denote
$$K({\bf z}):=\bar{p}(t-s,{\bf x-z})-\bar{p}(t-s,{\bf y-z}).$$
Using Fubini's theorem, \eqref{hypothesis2} and the triangle inequality, we have
\begin{equation}
\label{spatialregularityproof}
\begin{split}
&\mathbb{E}\left[(N(t,{\bf x})-N(t,{\bf y}))^2\right]\\
&=\int_0^t\int_{\mathbb{T}^d}\int_{\mathbb{T}^d}K({\bf z})K({\bf w})\Lambda({\bf w-z})\mathbb{E}[\sigma(s,{\bf z},u(s,{\bf z}))\sigma(s,{\bf w},u(s,{\bf w}))]d{\bf w}d{\bf z}ds\\
&\leq \sup_{r,{\bf u}}\mathbb{E}\left[\sigma(r,{\bf u},u(r,{\bf u}))^2\right]\int_0^t\int_{\mathbb{T}^d}\int_{\mathbb{T}^d}\vert K({\bf z})\vert \vert K({\bf w})\vert\Lambda({\bf w-z})d{\bf w}d{\bf z}ds\\
&\leq \mathcal{C}_2^2\int_0^t\int_{\mathbb{T}^d}\int_{\mathbb{T}^d}\vert K({\bf z})\vert[\bar{p}(t-s,{\bf x-w})+\bar{p}(t-s,{\bf y-w})]\Lambda({\bf w-z})d{\bf w}d{\bf z}ds.
\end{split}
\end{equation}
Then we use the standard Fourier decomposition \eqref{pfourier} to estimate the spatial convolution,
\begin{equation}
\label{pconv}
\begin{split}
\int_{\mathbb{T}^d}\bar{p}(t-s,{\bf x-w})\Lambda({\bf w-z})d{\bf w}&=C(d)\sum_{{\bf n}\in\mathbb{Z}^d}\lambda({\bf n})\exp(-\pi^\alpha\vert {\bf n}\vert^\alpha(t-s))\exp(\pi i{\bf n}\cdot({\bf x-z}))\\
&\leq C(d)\sum_{{\bf n}\in\mathbb{Z}^d}\lambda({\bf n})\exp(-\pi^\alpha\vert {\bf n}\vert^\alpha(t-s))\\
&\leq C(d)\sum_{{\bf n}\in\mathbb{Z}^d}\vert {\bf n}\vert^{-d+\beta}\exp(-\pi^\alpha\vert {\bf n}\vert^\alpha(t-s))\\
&\leq C(d)\int_0^\infty x^{-d+\beta}\exp(-\pi^\alpha x^\alpha(t-s))x^{d-1}dx\\
&=C(d)(t-s)^{-\beta/\alpha}\int_0^\infty x^{\frac{\beta-\alpha}{\alpha}}\exp(-x)dx\\
&=C(\alpha,\beta,d)(t-s)^{-\beta/\alpha}.
\end{split}
\end{equation}
We can get a similar result for $\int_{\mathbb{T}^d}\bar{p}(t-s,{\bf y-w})\Lambda({\bf w-z})d{\bf w}$. Applying \eqref{pconv} and Lemma \ref{pdiff} to \eqref{spatialregularityproof}, and since $1\wedge x<x^{\alpha\xi}$ for all $x>0,\xi\in(0,1/\alpha)$, we get
\begin{equation*}
\begin{split}
\mathbb{E}\left[(N(t,{\bf x})-N(t,{\bf y}))^2\right]&\leq C(\alpha,\beta,d)\mathcal{C}_2^2\int_0^t\int_{\mathbb{T}^d}\vert K({\bf z})\vert(t-s)^{-\beta/\alpha}d{\bf z}ds\\
&\leq C(\alpha,\beta,d)\mathcal{C}_2^2\int_0^t(t-s)^{-\beta/\alpha}\left(\frac{\vert {\bf x-y}\vert}{(t-s)^{1/\alpha}}\wedge 1\right)ds\\
&\leq C(\alpha,\beta,d)\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}\int_0^t(t-s)^{-\xi-\beta/\alpha}ds\\
&\leq C(\alpha,\beta,d)\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}.
\end{split}
\end{equation*}
Note that the integral $\int_0^t(t-s)^{-\xi-\beta/\alpha}ds$ converges provided $\xi\in\left(0,\frac{1}{\alpha}\wedge\frac{\alpha-\beta}{\alpha}\right)$.
\end{proof}
\begin{lemma}\label{timeregularity} There exists a constant $C>0$ depending only on $\alpha,\beta,d$ and $\mathcal{C}_2$ in \eqref{hypothesis2} such that for any $\zeta\in\left(0,\frac{\alpha-\beta}{\alpha}\right)$, $1\geq t\geq s>0$ and ${\bf x,y}\in \mathbb{T}^d$, we have
\begin{equation*}
\mathbb{E}\left[(N(t,{\bf x})-N(s,{\bf x}))^2\right]\leq C\mathcal{C}_2^2\vert t-s\vert^{\zeta}.
\end{equation*}
\end{lemma}
\begin{proof}Using Fubini's theorem, \eqref{hypothesis2} and the triangle inequality, we have
\begin{equation}
\label{timeregularityproof}
\begin{split}
\mathbb{E}&[(N(t,{\bf x})-N(s,{\bf x}))^2]\\
&= \mathbb{E}\left[\left(\int_0^s\int_{\mathbb{T}^d}\left[\bar{p}(t-r,{\bf x-z})-\bar{p}(s-r,{\bf x-z})\right]\sigma(r,{\bf z},u(r,{\bf z}))F(d{\bf z}dr)\right.\right.\\
&\hspace{5cm}\left.\left.+\int_s^t\int_{\mathbb{T}^d}\bar{p}(t-r,{\bf x-z})\sigma(r,{\bf z},u(r,{\bf z}))F(d{\bf z}dr)\right)^2\right]\\
&\leq \sup_{r,{\bf u}}\mathbb{E}\left[\sigma(r,{\bf u},u(r,{\bf u}))^2\right]\left(\int_0^s\int_{\mathbb{T}^d}\int_{\mathbb{T}^d}\vert\bar{p}(t-r,{\bf x-z})-\bar{p}(s-r,{\bf x-z})\vert \cdot\right.\\
&\hspace{4cm}[\bar{p}(t-r,{\bf x-w})+\bar{p}(s-r,{\bf x-w})]\Lambda({\bf w-z}) d{\bf w}d{\bf z}dr\\
&\hspace{2cm}\left.+\int_s^t\int_{\mathbb{T}^d}\int_{\mathbb{T}^d}\bar{p}(t-r,{\bf x-z})\bar{p}(t-r,{\bf x-w})\Lambda({\bf w-z})d{\bf w}d{\bf z}dr\right)\\
&=:\mathcal{C}_2^2\left(I_1+I_2\right).
\end{split}
\end{equation}
Applying \eqref{pconv} and Lemma \ref{pdiff} to $I_1$ and since $1\wedge \log(1+x)<x^{\zeta}$ for all $x>0,\zeta\in(0,1)$, we get
\begin{equation}
\begin{split}
\label{timeregularityproof2}
I_1&\leq C(\alpha,\beta,d)\int_0^s\left(\log\left(\frac{t-r}{s-r}\right)\wedge 1\right)\cdot[(t-r)^{-\beta/\alpha}+(s-r)^{-\beta/\alpha}]dr\\
&\leq C(\alpha,\beta,d)\int_0^s\left(\log\left(\frac{t-s+x}{x}\right)\wedge 1\right)x^{-\beta/\alpha}dx\\
&\leq C(\alpha,\beta,d)(t-s)^\zeta\int_0^sx^{-\beta/\alpha-\zeta}dx\\
&\leq C(\alpha,\beta,d)(t-s)^\zeta.
\end{split}
\end{equation}
Note that the integral $\int_0^sx^{-\beta/\alpha-\zeta}dx$ converges provided $\zeta\in\left(0,\frac{\alpha-\beta}{\alpha}\right)$. In order to estimate $I_2$, we use the standard Fourier decomposition \eqref{pfourier} to bound the spatial convolution,
\begin{equation*}
\begin{split}
\int_{\mathbb{T}^d}\int_{\mathbb{T}^d}\bar{p}(t-r,{\bf x-z})\bar{p}(t-r,{\bf x-w})&\Lambda({\bf w-z})d{\bf w}=C(d)\sum_{{\bf n}\in\mathbb{Z}^d}\lambda({\bf n})\exp(-2\pi^\alpha\vert {\bf n}\vert^\alpha(t-r))\\
&\leq C(d)\sum_{{\bf n}\in\mathbb{Z}^d}\vert {\bf n}\vert^{-d+\beta}\exp(-2\pi^\alpha\vert {\bf n}\vert^\alpha(t-r))\\
&\leq C(d)\int_0^\infty x^{-d+\beta}\exp(-2\pi^\alpha x^\alpha(t-r))x^{d-1}dx\\
&=C(d)(t-r)^{-\beta/\alpha}\int_0^\infty x^{\frac{\beta-\alpha}{\alpha}}\exp(-x)dx\\
&=C(\alpha,\beta,d)(t-r)^{-\beta/\alpha}.
\end{split}
\end{equation*}
Then for $I_2$ in \eqref{timeregularityproof}, we have
\begin{equation}
\label{I2equ}
I_2\leq C(\alpha,\beta,d)\int_s^t(t-r)^{-\beta/\alpha}dr=C(\alpha,\beta,d)(t-s)^{\frac{\alpha-\beta}{\alpha}}.
\end{equation}
By \eqref{timeregularityproof}, \eqref{timeregularityproof2} and \eqref{I2equ}, we conclude
\begin{equation*}
\label{timereg}
\mathbb{E}\left[(N(t,{\bf x})-N(s,{\bf x}))^2\right]\leq C\mathcal{C}_2^2(t-s)^{\zeta}.
\end{equation*}
\end{proof}
\begin{lemma}There exist constants $C_1,C_2,C_3,C_4>0$ depending only on $\alpha,\beta,d$ and $\mathcal{C}_2$ in \eqref{hypothesis2} such that for all $0\leq s<t\leq 1$, ${\bf x,y}\in\mathbb{T}^d$, $\xi\in\left(0,\frac{1}{\alpha}\wedge\frac{\alpha-\beta}{\alpha}\right),\zeta\in\left(0,\frac{\alpha-\beta}{\alpha}\right)$, and $\kappa>0$,
\begin{equation}
\label{spacep}
P(\vert N(t,{\bf x})-N(t,{\bf y})\vert>\kappa)\leq C_1\exp\left(-\frac{C_2\kappa^2}{\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}}\right),
\end{equation}
\begin{equation}
\label{timep}
P(\vert N(t,{\bf x})-N(s,{\bf x})\vert>\kappa)\leq C_3\exp\left(-\frac{C_4\kappa^2}{\mathcal{C}_2^2\vert t-s\vert^{\zeta}}\right).
\end{equation}
\end{lemma}
\begin{proof}
For a fixed $t$, define
$$N_t(s,{\bf x}):=\int_{[0,s]\times\mathbb{T}^d} \bar{p}(t-r,{\bf x-y})\sigma(r,{\bf y},u(r,{\bf y}))F(drd{\bf y}).$$
Note that $N_t(t,{\bf x})=N(t,{\bf x})$ and $N_t(s,{\bf x})$ is a continuous $\mathcal{F}_s^F$ adapted martingale in $s\leq t$ since the integrand does not depend on $s$. For fixed $t,{\bf x}$ and ${\bf y}$, let
$$M_s:=N_t(s,{\bf x})-N_t(s,{\bf y})=\int_{[0,s]\times\mathbb{T}^d} [\bar{p}(t-r,{\bf x-z})-\bar{p}(t-r,{\bf y-z})]\sigma(r,{\bf z},u(r,{\bf z}))F(drd{\bf z}),$$
and it is easy to check that $M_t=N(t,{\bf x})-N(t,{\bf y})$. As $M_s$ is a continuous $\mathcal{F}_s^F$ adapted martingale with $M_0=0$, it is a time changed Brownian motion. In particular, we have
$$M_t=B_{\langle M\rangle_t},$$
and Lemma \ref{spatialregularity} gives a uniform bound on the time change as
$$\langle M\rangle_t\leq C\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}.$$
Therefore, by the reflection principle for the Brownian motion $B_{\langle M\rangle_t}$,
\begin{align*}
P(N(t,{\bf x})-N(t,{\bf y})>\kappa)&=P(M_t>\kappa)=P(B_{\langle M\rangle_t}>\kappa)\\
&\leq P\left(\sup_{s\leq C\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}}B_s>\kappa\right)=2P\left(B_{C\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}}>\kappa\right)\\
&\leq C_1\exp\left(-\frac{C_2\kappa^2}{\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}}\right).
\end{align*}
Switching ${\bf x}$ and ${\bf y}$ gives
\begin{align*}
P(-N(t,{\bf x})+N(t,{\bf y})>\kappa)&=P(M_t<-\kappa)\leq 2P\left(B_{C\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}}<-\kappa\right)\\
&\leq C_1\exp\left(-\frac{C_2\kappa^2}{\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}}\right).
\end{align*}
Consequently, for all $\xi\in\left(0,\frac{1}{\alpha}\wedge\frac{\alpha-\beta}{\alpha}\right)$,
$$
P(\vert N(t,{\bf x})-N(t,{\bf y})\vert>\kappa)\leq C_1\exp\left(-\frac{C_2\kappa^2}{\mathcal{C}_2^2\vert {\bf x-y}\vert^{\alpha\xi}}\right),
$$
which completes the proof of \eqref{spacep}. For a fixed ${\bf x}$, we define
$$U_{q_1}=\int_{[0,{q_1}]\times\mathbb{T}^d} [\bar{p}(t-r,{\bf x-y})-\bar{p}(s-r,{\bf x-y})]\sigma(r,{\bf y},u(r,{\bf y}))F(drd{\bf y})$$
where $0\leq q_1\leq s$. Note $U_{q_1}$ is a continuous $\mathcal{F}_{q_1}^F$ adapted martingale with $U_0=0$. Also define
$$V_{q_2}=\int_{[s,s+{q_2}]\times\mathbb{T}^d}\bar{p}(t-r,{\bf x-y})\sigma(r,{\bf y},u(r,{\bf y}))F(drd{\bf y})$$
where $0\leq q_2\leq t-s$. Note $V_{q_2}$ is a continuous $\mathcal{F}_{q_2}^F$ adapted martingale with $V_0=0$. Thus, both $U_{q_1}$ and $V_{q_2}$ are time changed Brownian motions, i.e.,
$$U_s=B_{\langle U\rangle_s}\text{~and~}V_{t-s}=B'_{\langle V\rangle_{t-s}}$$
where $B$, $B'$ are two different Brownian motions. Note that $N(t,{\bf x})-N(s,{\bf x})=U_s+V_{t-s}$, then
$$P(N(t,{\bf x})-N(s,{\bf x})>\kappa)\leq P(U_s>\kappa/2)+P(V_{t-s}>\kappa/2).$$
Lemma \ref{timeregularity} provides a uniform bound on the time changes as
$$\langle U\rangle_s\leq C\mathcal{C}_2^2(t-s)^{\zeta}\text{~and~}\langle V\rangle_{t-s}\leq C\mathcal{C}_2^2(t-s)^{\zeta}.$$
By the reflection principle for the Brownian motions $B_{\langle U\rangle_s}$ and $B'_{\langle V\rangle_{t-s}}$,
\begin{align*}
P(N(t,{\bf x})-N(s,{\bf x})>\kappa)&\leq P\left(B_{\langle U\rangle_s}>\kappa/2\right)+P\left(B'_{\langle V\rangle_{t-s}}>\kappa/2\right)\\
&\leq 2P\left(\sup_{r\leq C\mathcal{C}_2^2\vert t-s\vert^{\zeta}}B_r>\frac{\kappa}{2}\right)=4P\left(B_{C\mathcal{C}_2^2\vert t-s\vert^{\zeta}}>\frac{\kappa}{2}\right)\\
&\leq C_3\exp\left(-\frac{C_4\kappa^2}{\mathcal{C}_2^2\vert t-s\vert^{\zeta}}\right).
\end{align*}
In addition,
\begin{align*}
P(-N(t,{\bf x})+N(s,{\bf x})>\kappa)&\leq P(U_s<-\kappa/2)+P(V_{t-s}<-\kappa/2)\\
&\leq 4P\left(B_{C\mathcal{C}_2^2\vert t-s\vert^{\zeta}}<-\frac{\kappa}{2}\right)\\
&\leq C_3\exp\left(-\frac{C_4\kappa^2}{\mathcal{C}_2^2\vert t-s\vert^{\zeta}}\right).
\end{align*}
Consequently, for all $\zeta\in\left(0,\frac{\alpha-\beta}{\alpha}\right)$,
$$
P(\vert N(t,{\bf x})-N(s,{\bf x})\vert>\kappa)\leq C_3\exp\left(-\frac{C_4\kappa^2}{\mathcal{C}_2^2\vert t-s\vert^{\zeta}}\right),
$$
which completes the proof of \eqref{timep}.
\end{proof}
\begin{definition}
\label{definition}
Given a grid
$$\mathbb{G}_n=\left\lbrace\left(\frac{j}{2^{2n}},\frac{k_1}{2^{n}},...,\frac{k_d}{2^{n}}\right): 0\leq j\leq 2^{2n},0\leq k_1,...,k_d\leq 2^n,j,k_1,...,k_d\in\mathbb{Z}\right\rbrace,$$
we write
$$\left(t_j^{(n)},x_{k_1}^{(n)},...,x_{k_d}^{(n)}\right)=\left(\frac{j}{2^{2n}},\frac{k_1}{2^n},...,\frac{k_d}{2^n}\right).$$
Two points $\left(t_j^{(n)},x_{k_1}^{(n)},...,x_{k_d}^{(n)}\right),\left(t_{j'}^{(n)},x_{k'_1}^{(n)},...,x_{k'_d}^{(n)}\right)$ are called \textbf{nearest neighbors} if either
\begin{enumerate}
\item[{\bf 1}.] $j=j',\vert k_i-k'_i\vert=1 \text{~for only one~} i \text{~and~} k_l=k'_l \text{~for the other indices~}l, or$
\item[{\bf 2}.] $\vert j-j'\vert=1~\text{and}~k_i=k'_i~\forall i.$
\end{enumerate}
\end{definition}
The following lemma generalizes Lemma 3.4 in \cite{athreya2021small}, which plays a key role in estimating the small ball probability.
\begin{lemma}\label{larged}There exist constants $C_5,C_6>0$ depending on $\alpha,\beta,d$ and $\mathcal{C}_2$ in \eqref{hypothesis2} such that for all $\gamma,\kappa,\varepsilon>0$ and $\gamma\varepsilon^4\leq 1$, we have
\begin{equation*}
P\left(\sup_{\substack{0\leq t\leq\gamma\varepsilon^4\\ {\bf x}\in [0,\varepsilon^2]^d}}\vert N(t,{\bf x})\vert>\kappa\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right)\leq \frac{C_5}{1\wedge \sqrt{\gamma^d}}\exp\left(-\frac{C_6\kappa^2}{\mathcal{C}_2^2\gamma^{\frac{\alpha-\beta}{\alpha}}}\right).
\end{equation*}
\end{lemma}
\begin{proof}
Fix $\gamma\geq 1$, and consider the grid
$$\mathbb{G}_n=\left\lbrace\left(\frac{j}{2^{2n}},\frac{k_1}{2^{n}},...,\frac{k_d}{2^{n}}\right): 0\leq j\leq \gamma\varepsilon^42^{2n},0\leq k_1,...,k_d\leq \varepsilon^22^n,j,k_1,...,k_d\in\mathbb{Z}\right\rbrace.$$
Let
\begin{equation}
\label{n0}
n_0=\left\lceil \log_2\left(\gamma^{-1/2}\varepsilon^{-2}\right)\right\rceil,
\end{equation}
and for $n< n_0$, $\mathbb{G}_n$ contains only the origin. For $n\geq n_0$, the grid $\mathbb{G}_n$ has at most $\left(\gamma\varepsilon^4 2^{2n}+1\right)\cdot\left(\varepsilon^2 2^n+1\right)^d\leq 2^{d+1+(2+d)n}\varepsilon^{2d+4}\gamma\leq 2^{2d+3}2^{(2+d)(n-n_0)}$ many points. We will choose two parameters $0<\delta_1(\alpha,\beta)<\delta_0(\alpha,\beta)<\frac{\alpha-\beta}{\alpha}$ satisfying the following constraint
\begin{equation}
\label{deltaconstraint}
2\zeta\wedge\alpha\xi=\frac{2(\alpha-\beta)}{\alpha}+2\delta_1-2\delta_0,
\end{equation}
where $\xi\in\left(0,\frac{1}{\alpha}\wedge\frac{\alpha-\beta}{\alpha}\right),\zeta\in\left(0,\frac{\alpha-\beta}{\alpha}\right)$. Fix the constant
$$\mathcal{M}=\frac{1-2^{-\delta_1}}{(3+d)2^{(\delta_0-\delta_1)n_0}},$$
and consider the event
$$A(n,\kappa)=\left\lbrace\vert N(p)-N(q)\vert\leq \kappa \mathcal{M}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}} 2^{-\delta_1n}2^{\delta_0n_0}\text{ for all $p,q\in \mathbb{G}_n$ nearest neighbors}\right\rbrace.$$
If $p,q\in \mathbb{G}_n$ are the case {\bf 1} nearest neighbors in the Definition \ref{definition}, \eqref{spacep} implies
$$P\left(\vert N(p)-N(q)\vert> \kappa \mathcal{M}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}} 2^{-\delta_1n}2^{\delta_0n_0}\right)\leq C_1\exp\left(-\frac{C_2\kappa^2\mathcal{M}^2\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}}}{2^{-n\alpha\xi}\mathcal{C}_2^2}2^{-2\delta_1n}2^{2\delta_0n_0}\right).$$
If $p,q\in \mathbb{G}_n$ are the case {\bf 2} nearest neighbors in the Definition \ref{definition}, \eqref{timep} implies
$$P\left(\vert N(p)-N(q)\vert> \kappa \mathcal{M}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}} 2^{-\delta_1n}2^{\delta_0n_0}\right)\leq C_3\exp\left(-\frac{C_4\kappa^2\mathcal{M}^2\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}}}{2^{-2n\zeta}\mathcal{C}_2^2}2^{-2\delta_1n}2^{2\delta_0n_0}\right).$$
Therefore, a union bound gives
\begin{align*}
&P(A^c(n,\kappa))\leq \sum_{\substack{p,q\in \mathbb{G}_n\\ \text{nearest neighbors}}}P\left(\vert N(p)-N(q)\vert> \kappa \mathcal{M}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}} 2^{-\delta_1n}2^{\delta_0n_0}\right)\\
&\leq C2^{(2+d)(n-n_0)}\exp\left(-\frac{C'\kappa^2\mathcal{M}^2\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}}}{\mathcal{C}_2^2}2^{n(2\zeta\wedge \alpha\xi)}2^{-2\delta_1n}2^{2\delta_0n_0}\right)\\
&=C2^{(2+d)(n-n_0)}\exp\left(-\frac{C'\kappa^2\mathcal{M}^2}{\mathcal{C}_2^2}\left(\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}} 2^{\frac{2n_0(\alpha-\beta)}{\alpha}}\right)2^{n(2\zeta\wedge \alpha\xi)}2^{-2\delta_1n}2^{2\delta_0n_0}\right)\\
&\leq C2^{(2+d)(n-n_0)}\exp\left(-\frac{C'\kappa^2\mathcal{M}^2}{\mathcal{C}_2^2\gamma^{\frac{(\alpha-\beta)}{\alpha}}}2^{(2\zeta\wedge \alpha\xi-2\delta_1)(n-n_0)}\right),
\end{align*}
where $C,C'$ are positive constants depending only on $\alpha,\beta,T,d$. The last inequality follows from that $\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}} 2^\frac{2n_0(\alpha-\beta)}{\alpha}\geq\gamma^{-\frac{(\alpha-\beta)}{\alpha}}$ by the definition of $n_0$ in \eqref{n0}, and our choice of $\delta_0,\delta_1$ in \eqref{deltaconstraint}.
Let $A(\kappa)=\bigcap\limits_{n\geq n_0}A(n,\kappa)$ and we can bound $P(A^c(\kappa))$ by summing all $P(A^c(n,\kappa))$,
\begin{align*}
P\left(A^c(\kappa)\right)\leq\sum_{n\geq n_0}P\left(A^c(n,\kappa)\right)&\leq \sum_{n\geq n_0}C2^{(2+d)(n-n_0)}\exp\left(-\frac{C'\kappa^2\mathcal{M}^2}{\mathcal{C}_2^2\gamma^{\frac{(\alpha-\beta)}{\alpha}}}2^{(2\zeta\wedge \alpha\xi-2\delta_1)(n-n_0)}\right)\\
&\leq C_5\exp\left(-\frac{C_6\kappa^2\mathcal{M}^2}{\mathcal{C}_2^2\gamma^{\frac{(\alpha-\beta)}{\alpha}}}\right).\\
\end{align*}
Now we consider a point $(t,{\bf x})$, which is in a grid $\mathbb{G}_n$ for some $n \geq n_0$. From arguments similar to page 128 of \cite{dalang2009minicourse}, we can find a sequence of points from the origin to $(t,{\bf x})$ as $(0,{\bf 0})= p_0,p_1,...,p_k = (t,{\bf x})$ such that each pair is the nearest neighbor in some grid $\mathbb{G}_m, n_0\leq m \leq n$, and at most $(3+d)$ such pairs are nearest neighbors in any given grid. On the event $A(\kappa)$, we have
\begin{align*}
\vert N(t,{\bf x})\vert\leq \sum_{j=0}^{k-1}\vert N(p_j)-N(p_{j+1})\vert\leq (3+d)\sum_{n\geq n_0}\kappa \mathcal{M}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}} 2^{-\delta_1n}2^{\delta_0n_0}\leq\kappa\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}} .
\end{align*}
Points in $\mathbb{G}_n$ are dense in $[0,\gamma\varepsilon^4]\times[0,\varepsilon^2]^d$, and we may extend $N(t,{\bf x})$ to a continuous version. Therefore, for $\gamma\geq 1$,
$$
P\left(\sup_{\substack{0\leq t\leq\gamma\varepsilon^4\\ {\bf x}\in [0,\varepsilon^2]^d}}\vert N(t,{\bf x})\vert>\kappa\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right)\leq C_5\exp\left(-\frac{C_6\kappa^2}{\mathcal{C}_2^2\gamma^{\frac{\alpha-\beta}{\alpha}}}\right).
$$
For $0<\gamma<1$, a union bound and stationarity in ${\bf x}$ imply that
$$\sqrt{\gamma^d}P\left(\sup_{\substack{0\leq t\leq\gamma\varepsilon^4\\ {\bf x}\in[0,\varepsilon^2]^d}}\vert N(t,{\bf x})\vert>\kappa\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right)\leq P\left(\sup_{\substack{0\leq t\leq\gamma\varepsilon^4\\ {\bf x}\in[0,\sqrt{\gamma}\varepsilon^2]^d}}\vert N(t,{\bf x})\vert>\kappa\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right)$$
$$=P\left(\sup_{\substack{0\leq t\leq(\sqrt{\gamma}\varepsilon^2)^2\\ {\bf x}\in[0,\sqrt{\gamma}\varepsilon^2]^d}}\vert N(t,{\bf x})\vert>\frac{\kappa}{\gamma^{\frac{\alpha-\beta}{2\alpha}}}\left(\gamma^{1/4}\varepsilon\right)^{\frac{2(\alpha-\beta)}{\alpha}}\right)\leq C_5\exp\left(-\frac{C_6\kappa^2}{ \mathcal{C}_2^2\gamma^{\frac{\alpha-\beta}{\alpha}}}\right).$$
As a result,
$$
P\left(\sup_{\substack{0\leq t\leq\gamma\varepsilon^4\\ {\bf x}\in [0,\varepsilon^2]^d}}\vert N(t,{\bf x})\vert>\kappa\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right)\leq \frac{C_5}{1\wedge \sqrt{\gamma^d}}\exp\left(-\frac{C_6\kappa^2}{\mathcal{C}_2^2\gamma^{\frac{\alpha-\beta}{\alpha}}}\right).
$$
\end{proof}
\begin{remark}
\label{largeremark}
If we suppose $\sigma$ in \eqref{noiseterm} satisfies $\vert \sigma(s,{\bf y},u(s,{\bf y}))\vert\leq C\left(\gamma\varepsilon^4\right)^{\frac{\alpha-\beta}{2\alpha}}$, then the probability in Lemma \ref{larged} is bounded above by
$$\frac{C_5}{1\wedge \sqrt{\gamma^d}}\exp\left(-\frac{C_6\kappa^2}{C^2(\gamma\varepsilon^2)^{\frac{2(\alpha-\beta)}{\alpha}}}\right),$$
which can be proved similarly to the above lemma.
\end{remark}
\section{Proof of Proposition \ref{prop}}
The following lemma gives a lower bound for variance of the noise term $N(t_1,{\bf x})$ and an upper bound on the decay of covariance between two random variables $N(t_1,{\bf x})$, $N(t_1,{\bf y})$ as $\vert {\bf x}-{\bf y}\vert$ increases.
\begin{lemma}
\label{varbound}
Consider noise terms $N(t_1,{\bf x})$, $N(t_1,{\bf y})$ with a deterministic $\sigma(t,{\bf x},u)=\sigma(t,{\bf x})$, then there exist constants $C_7,C_8>0$ depending only on $\mathcal{C}_1$, $\mathcal{C}_2$, $d$, $\alpha$, and $\beta$ such that
\begin{equation*}
C_7t_1^{\frac{\alpha-\beta}{\alpha}}\leq {\rm Var}[N(t_1,{\bf x})],
\end{equation*}
\begin{equation*}
{\rm Cov}[N(t_1,{\bf x}),N(t_1,{\bf y})]\leq C_8t_1\left\vert {\bf x}-{\bf y}\right\vert^{-\beta}.
\end{equation*}
\end{lemma}
\begin{proof} We use Fubini's theorem, \eqref{lambdabound}, the expression \eqref{lambdafourier} and \eqref{pfourier} to show that
\begin{align*}
\textrm{Var}[N(t_1,{\bf x})]&=\int_0^{t_1}\int_{\mathbb{T}^d}\int_{\mathbb{T}^d}\bar{p}(t_1-s,{\bf x}-{\bf y})\bar{p}(t_1-s,{\bf x}-{\bf z})\sigma(s,{\bf y})\sigma(s,{\bf z})\Lambda({\bf y}-{\bf z})d{\bf y}d{\bf z}ds\\
&\geq C(d)\mathcal{C}_1^2\left(\sum_{{\bf n}\in\mathbb{Z}^d} \lambda({\bf n})\int_0^{t_1}e^{-2\pi^\alpha \vert {\bf n}\vert^\alpha(t_1-s)}ds\right)\\
&=C(d)\mathcal{C}_1^2\left(\lambda({\bf 0})t_1+\sum_{{\bf n}\in\mathbb{Z}^d,{\bf n}\neq {\bf 0}} \lambda({\bf n})\frac{1-e^{-2\pi^\alpha \vert {\bf n}\vert^\alpha t_1}}{2\pi^\alpha \vert {\bf n}\vert^\alpha}\right)\\
&\geq C(d,\mathcal{C}_1)\int_1^\infty\frac{1-e^{-2\pi^\alpha x^\alpha t_1}}{2\pi^\alpha x^{d+\alpha-\beta}}x^{d-1}dx.
\end{align*}
The last inequality follows from that $\int_0^{t_1}e^{-2\pi^\alpha \vert {\bf n}\vert^\alpha(t_1-s)}ds$ decreases as $\vert {\bf n}\vert$ increases. Changing variable to $w=2\pi^\alpha x^\alpha t_1$ yields
$$\int_1^\infty\frac{1-e^{-2\pi^\alpha x^\alpha t_1}}{2\pi^\alpha x^{\alpha-\beta+1}}dx=C(\alpha,\beta,d)t_1^{\frac{\alpha-\beta}{\alpha}}\int_{2\pi^\alpha t_1}^\infty \frac{1-e^{-w}}{w^{2-\beta/\alpha}}dw\geq C(\alpha,\beta,d)t_1^{\frac{\alpha-\beta}{\alpha}}\int_{2\pi^\alpha }^\infty \frac{1-e^{-w}}{w^{2-\beta/\alpha}}dw.$$
The last integral converges with $0<\beta<\alpha\wedge d$, which completes the proof of the first part. In addition, we use the definition of $\Lambda(x)$ in \eqref{lambdafourier} and the fact $1-e^{-x}\leq x$ to derive the upper bound of covariance between $N(t_1,{\bf x})$ and $N(t_1,{\bf y})$ when ${\bf x}\neq {\bf y}$,
\begin{align*}
&\textrm{Cov}[N(t_1,{\bf x}),N(t_1,{\bf y})]=\mathbb{E}[N(t_1,{\bf x})N(t_1,{\bf y})]\\
&\leq C(d)\mathcal{C}_2^2\left(\sum_{{\bf n}\in \mathbb{Z}^d} \lambda({\bf n})\exp(\pi i{\bf n}\cdot ({\bf x}-{\bf y}))\int_0^{t_1}e^{-2\pi^\alpha \vert {\bf n}\vert^\alpha(t_1-s)}ds\right)\\
&\leq C_8t_1\sum_{{\bf n}\in \mathbb{Z}^d} \lambda({\bf n})\exp(\pi i{\bf n}\cdot ({\bf x}-{\bf y}))= C_8t_1\left\vert {\bf x}-{\bf y}\right\vert^{-\beta}.
\end{align*}
\end{proof}
\textbf{Proof of Proposition \ref{prop}(a)} The Markov property of $u(t,\cdot)$ (see page 247 in \cite{da2014stochastic}) implies
\begin{equation*}
P\left(F_{j}\vert\sigma\{u(t_i,\cdot)\}_{0\leq i<j}\right)=P\left(F_{j}\vert u(t_{j-1},\cdot)\right).
\end{equation*}
If we can prove that $P\left(F_{j}\vert u(t_{j-1},\cdot)\right)$ has a uniform bound ${\bf C_4}\exp\left(-\frac{{\bf C_5}}{\varepsilon^2+\mathcal{D}^2t_1^\frac{\alpha-\beta}{\alpha}}\right)$, then it is still a bound for the conditional probability $P\left(F_{j}\vert \bigcap_{k=0}^{j-1}F_{k}\right)$, which is conditioned on a realization of $u(t_k,\cdot),0\leq k<j$. Thus, it is enough to show that
$$
P\left(F_{1}\right)\leq{\bf C_4}\exp\left(-\frac{{\bf C_5}}{\varepsilon^2+\mathcal{D}^2t_1^\frac{\alpha-\beta}{\alpha}}\right),
$$
where ${\bf C_4}$, ${\bf C_5}$ do not depend on $u_0$. Consider the truncated function
$$f_\varepsilon({\bf x})=\begin{cases}
{\bf x} & \vert {\bf x}\vert\leq t_1^{\frac{\alpha-\beta}{2\alpha}}\\
\frac{{\bf x}}{\vert {\bf x}\vert}\cdot t_1^{\frac{\alpha-\beta}{2\alpha}} & \vert {\bf x}\vert> t_1^{\frac{\alpha-\beta}{2\alpha}}
\end{cases},$$
and, particularly, we have $\vert f_\varepsilon({\bf x})\vert\leq t_1^{\frac{\alpha-\beta}{2\alpha}}$. Consider the following two equations
\begin{equation*}
\label{shef}
\partial_tv(t,{\bf x})=-(-\Delta)^{\alpha/2}v(t,{\bf x})+\sigma(t,{\bf x},f_\varepsilon(v(t,{\bf x}))) \dot{F}(t,{\bf x}),
\end{equation*}
and
\begin{equation*}
\partial_tv_g(t,{\bf x})=-(-\Delta)^{\alpha/2}v_g(t,{\bf x})+\sigma(t,{\bf x},f_\varepsilon(u_0({\bf x}))) \dot{F}(t,{\bf x})
\end{equation*}
with the same initial $u_0({\bf x})$. We can decompose $v(t,{\bf x})$ by
$$v(t,{\bf x})=v_g(t,{\bf x})+D(t,{\bf x})$$
with
$$D(t,{\bf x})=\int_{[0,t]\times\mathbb{T}^d} \bar{p}(t-s,{\bf x}-{\bf y})[\sigma(s,{\bf y},f_\varepsilon(v(s,{\bf y})))-\sigma(s,{\bf y},f_\varepsilon(u_0({\bf y})))]F(dsd{\bf y}).$$
The Lipschitz property on the third variable of $\sigma(t,{\bf x},u)$ in \eqref{hypothesis1} gives
\begin{equation}
\label{sigmadiff1}
\begin{split}
\vert \sigma(s,{\bf y},f_\varepsilon(v(s,{\bf y})))-\sigma(s,{\bf y},f_\varepsilon(u_0({\bf y})))\vert&\leq \mathcal{D}\vert f_\varepsilon(v(s,{\bf y}))-f_\varepsilon(u_0({\bf y}))\vert\\
&\leq 2\mathcal{D}t_1^{\frac{\alpha-\beta}{2\alpha}}.
\end{split}
\end{equation}
Recall that $R_{i,j}$ in \eqref{Pij} and define a new sequence of events,
$$H_j=\left\lbrace\vert v(t,{\bf x})\vert\leq t_1^{\frac{\alpha-\beta}{2\alpha}},\forall (t,{\bf x})\in R_{1,j}\setminus R_{1,j-1}\right\rbrace.$$
Clearly, the property of $f_\varepsilon({\bf x})$ and \eqref{Fn} imply
$$F_{1}=\bigcap_{j=-n_1+1}^{n_1-1}H_j.$$
Also, define another two sequences of events
$$A_j=\left\lbrace\vert v_g(t,{\bf x})\vert\leq 2t_1^{\frac{\alpha-\beta}{2\alpha}},\forall (t,{\bf x})\in R_{1,j}\setminus R_{1,j-1}\right\rbrace,$$
$$B_j=\left\lbrace\vert D(t,{\bf x})\vert>t_1^{\frac{\alpha-\beta}{2\alpha}}, \exists(t,{\bf x})\in R_{1,j}\setminus R_{1,j-1}\right\rbrace.$$
It is straightforward to check that
$$H_j^c\supset A_j^c\cap B_j^c,$$
which implies
\begin{equation}
\label{sumprob}
\begin{split}
P(F_{1})&= P\left(\bigcap_{j=-n_1+1}^{n_1-1}H_j\right)\leq P\left(\bigcap_{j=-n_1+1}^{n_1-1}[A_j\cup B_j]\right)\\
&\leq P\left(\left(\bigcap_{j=-n_1+1}^{n_1-1}A_j\right)\bigcup\left(\bigcup_{j=-n_1+1}^{n_1-1}B_j\right)\right)\\
&\leq P\left(\bigcap_{j=-n_1+1}^{n_1-1}A_j\right)+P\left(\bigcup_{j=-n_1+1}^{n_1-1}B_j\right)\\
&\leq P\left(\bigcap_{j=-n_1+1}^{n_1-1}A_j\right)+\sum_{j=-n_1+1}^{n_1-1} P(B_j).
\end{split}
\end{equation}
The second inequality can be showed by using induction. Moreover, for $j=-n_1+1$,
\begin{equation}
\label{b1}
B_j \subseteq\left\lbrace\sup_{\substack{0\leq s\leq c_0\varepsilon^4\\ {\bf y}\in [(-n_1+1)\varepsilon^2,(-n_1+2)\varepsilon^2]^d}}\vert D(s,{\bf y})\vert>t_1^{\frac{\alpha-\beta}{2\alpha}}\right\rbrace,
\end{equation}
and for $j>-n_1+1$,
\begin{equation}
\label{bj}
\begin{split}
B_j&\subseteq \left\lbrace\sup_{(t,{\bf x})\in R_{1,j}\setminus R_{1,j-1}}\vert D(t,{\bf x})\vert>t_1^{\frac{\alpha-\beta}{2\alpha}}\right\rbrace\\&\subseteq\bigcup_{(t,{\bf x})\in R_{1,j-1}\setminus R_{1,j-2}}\left\lbrace\sup_{\substack{0\leq s\leq c_0\varepsilon^4\\ {\bf y}\in {\bf x}+[0,\varepsilon^2]^d}}\vert D(s,{\bf y})\vert>t_1^{\frac{\alpha-\beta}{2\alpha}}\right\rbrace.\\
\end{split}
\end{equation}
From \eqref{sigmadiff1} and Remark \ref{largeremark}, we get
\begin{equation}
\label{bsig}
P\left(\sup_{\substack{0\leq s\leq c_0\varepsilon^4\\ {\bf y}\in {\bf x}+[0,\varepsilon^2]^d}}\vert D(s,{\bf y})\vert>t_1^{\frac{\alpha-\beta}{2\alpha}}\right)\leq \frac{C_5}{1\wedge \sqrt{{c_0}^d}}\exp\left(-\frac{C_6}{4\mathcal{D}^2t_1^{\frac{\alpha-\beta}{\alpha}}}\right),
\end{equation}
where the proof does not rely on ${\bf x}$ since $u_0({\bf x})\equiv 0$. Therefore, \eqref{b1} implies
\begin{equation}
\label{b1upper}
P(B_{-n_1+1})\leq P\left(\sup_{\substack{0\leq s\leq c_0\varepsilon^4\\ {\bf y}\in [0,\varepsilon^2]^d}}\vert D(s,{\bf y})\vert>t_1^{\frac{\alpha-\beta}{2\alpha}}\right),
\end{equation}
and \eqref{bj} implies, for $j>-n_1+1$,
\begin{equation}
\label{bjupper}
P(B_j)\leq \left[(j+n_1-1)^d-(j+n_1-2)^d\right]P\left(\sup_{\substack{0\leq s\leq c_0\varepsilon^4\\ {\bf y}\in [0,\varepsilon^2]^d}}\vert D(s,{\bf y})\vert>t_1^{\frac{\alpha-\beta}{2\alpha}}\right).
\end{equation}
Hence, using \eqref{bsig}, \eqref{b1upper} and \eqref{bjupper}, we conclude that
\begin{equation}
\label{probB}
\sum_{j=-n_1+1}^{n_1-1}P(B_j)\leq \left((2n_1-2)^d+1\right)P\left(\sup_{\substack{0\leq s\leq c_0\varepsilon^4\\ {\bf y}\in [0,\varepsilon^2]^d}}\vert D(s,{\bf y})\vert>t_1^{\frac{\alpha-\beta}{2\alpha}}\right)\leq\frac{C(d)}{\varepsilon^{2d}}\cdot\frac{C_5}{1\wedge \sqrt{{c_0}^d}}\exp\left(-\frac{C_6}{4\mathcal{D}^2t_1^{\frac{\alpha-\beta}{\alpha}}}\right).
\end{equation}
To compute the upper bound for $P\left(\bigcap_{j=-n_1+1}^{n_1-1}A_j\right)$, we define a sequence of events involving $v_g$,
$$I_j=\left\lbrace\vert v_g(t,{\bf x})\vert\leq 2t_1^{\frac{\alpha-\beta}{2\alpha}}, \forall(t,{\bf x})\in R_{1,j}\right\rbrace\quad\text{and}\quad I_{-n_1}=\Omega.$$
Then we can write $P\left(\bigcap_{j=-n_1+1}^{n_1-1}A_j\right)$ in terms of conditional probability as
\begin{equation}
\label{Acond}
P\left(\bigcap_{j=-n_1+1}^{n_1-1}A_j\right)=P(I_{n_1-1})=P(I_{-n_1})\prod_{j=-n_1+1}^{n_1-1}\frac{P(I_j)}{P(I_{j-1})}=\prod_{j=-n_1+1}^{n_1-1}P(I_j\vert I_{j-1}).
\end{equation}
Let $\mathcal{G}_j$ be the $\sigma-$algebra generated by
$$N_\varepsilon(t,{\bf x})=\int_0^{t}\int_{\mathbb{T}^d} p(t-s,{\bf x}-{\bf y})\sigma(s,{\bf y},f_\varepsilon(u_0({\bf y})))F(d{\bf y}ds),~~(t,{\bf x})\in R_{1,j}.$$
If we can show that there is a uniform bound for $P\left(I_j\vert \mathcal{G}_{j-1}\right)$, then it is still a bound for the conditional probability $P\left(I_j\vert I_{j-1}\right)$. Notice that $\sigma(s,{\bf y},f_\varepsilon(u_0({\bf y})))$ is deterministic and uniformly bounded, then by Lemma \ref{varbound}, we have
\begin{equation}
\label{varboundelp}
\textrm{Var}[N_\varepsilon(t_1,{\bf x})]\geq C_7 t_1^{\frac{\alpha-\beta}{\alpha}},
\end{equation}
and for $(t,{\bf x})\in R_{1,j}\setminus R_{1,j-1}$, one can decompose
\begin{equation}
\label{vgdecom}
v_g(t,{\bf x})=\int_{\mathbb{T}^d}p(t,{\bf x}-{\bf y})u_0({\bf y})d{\bf y}+X+Y,
\end{equation}
where $X=\mathbb{E}[N_\varepsilon(t,{\bf x})\vert \mathcal{G}_{j-1}]$ is a Gaussian random variable, which can be written as
\begin{equation}
\label{Xdecom}
X=\sum_{(t,{\bf x})\in R_{1,j-1}}\eta^{(j)}(t,{\bf x})N_\varepsilon(t,{\bf x}),
\end{equation}
for some coefficients $\left(\eta^{(j)}(t,{\bf x})\right)_{(t,{\bf x})\in R_{1,j-1}}$. Then the conditional variance equals
\begin{align*}
&\textrm{Var}(Y\vert\mathcal{G}_{j-1})=\mathbb{E} [(N_\varepsilon(t,{\bf x})-X\vert\mathcal{G}_{j-1})^2]-(\mathbb{E}[N_\varepsilon(t,{\bf x})-X\vert\mathcal{G}_{j-1}])^2\\
&=\mathbb{E}[(N_\varepsilon(t,{\bf x})-\mathbb{E}[N_\varepsilon(t,{\bf x})\vert \mathcal{G}_{j-1}]\vert\mathcal{G}_{j-1})^2]=\textrm{Var}[N_\varepsilon(t,{\bf x})\vert \mathcal{G}_{j-1}].
\end{align*}
Since $Y=N_\varepsilon(t,{\bf x})-X$ is independent of $\mathcal{G}_{j-1}$, we write $\textrm{Var}(Y)$ as
$$\textrm{Var}(Y)=\textrm{Var}(Y\vert\mathcal{G}_{j-1})=\textrm{Var}[N_\varepsilon(t,{\bf x})\vert \mathcal{G}_{j-1}].$$
In fact, for a Gaussian random variable $Z \sim N(\mu,\sigma^2)$ and any $a > 0$, the probability $P(\vert Z\vert \leq a)$ is maximized when $\mu = 0$, thus
\begin{equation*}
\begin{split}
P\left(I_j\vert \mathcal{G}_{j-1}\right)&\leq P\left(\vert v_g(t,{\bf x})\vert\leq 2t_1^{\frac{\alpha-\beta}{2\alpha}}, (t,{\bf x})\in R_{1,j}\setminus R_{1,j-1}\bigg| \mathcal{G}_{j-1}\right)\\
&\leq P\left(\vert Z'\vert\leq \frac{2t_1^\frac{\alpha-\beta}{2\alpha}}{\sqrt{\textrm{Var}[N_\varepsilon(t,{\bf x})\vert \mathcal{G}_{j-1}]}}\right)
\end{split}
\end{equation*}
where $Z'\sim N(0,1)$. We use the notation $\textrm{SD}$ to denote the standard deviation of a random variable. By the Minkowski inequality,
$$\textrm{SD}(X)\leq \sum_{(t,{\bf x})\in R_{1,j-1}}\left\vert\eta^{(j)}(t,{\bf x})\right\vert\cdot \textrm{SD}[N_\varepsilon(t,{\bf x})],$$
and
$$\textrm{SD}[N_\varepsilon(t,{\bf x})]\leq \textrm{SD}(X)+\textrm{SD}(Y).$$
If we can control coefficients by restricting
$$\sum_{(t,{\bf x})\in R_{1,j-1}}\left\vert\eta^{(j)}(t,{\bf x})\right\vert\leq \frac{1}{2},$$
then the standard deviation of $X$ is less than one half the standard deviation of $N_\varepsilon(t,{\bf x})$,
$$
\textrm{SD}[N_\varepsilon(t,{\bf x})]\leq \textrm{SD}(X)+\textrm{SD}(Y)\leq \frac{1}{2} \textrm{SD}[N_\varepsilon(t,{\bf x})]+\textrm{SD}(Y).
$$
From \eqref{varboundelp}, $\textrm{Var}(Y)$ is bounded below by $C_7 t_1^{\frac{\alpha-\beta}{\alpha}}$, so that we can derive the uniform upper bound of $P(I_j\vert \mathcal{G}_{j-1})$,
\begin{equation*}
\label{probA}
\begin{split}
P(I_j\vert \mathcal{G}_{j-1})&\leq P\left(\vert Z'\vert\leq \frac{2t_1^{\frac{\alpha-\beta}{2\alpha}}}{\sqrt{\textrm{Var}[N_\varepsilon(t,{\bf x})\vert \mathcal{G}_{j-1}]}}\right)\\
&\leq P\left(\vert Z'\vert\leq \frac{2t_1^{\frac{\alpha-\beta}{2\alpha}}}{\sqrt{C_7 t_1^{\frac{\alpha-\beta}{\alpha}}}}\right)\\
&=P(\vert Z'\vert\leq C')<1,
\end{split}
\end{equation*}
where $C'$ depends only on $\mathcal{C}_1$, $d$, $\alpha$, and $\beta$. A bound \eqref{jbound} on $j$ and \eqref{Acond} together yield
\begin{equation}
\label{probAbound}
P\left(\bigcap_{j=-n_1+1}^{n_1-1}A_j\right)\leq C^{2\varepsilon^{-2}}\leq C\exp\left(-\frac{C'}{\varepsilon^2}\right),
\end{equation}
where $C,C'$ depends only on $\mathcal{C}_1$, $d$, $\alpha$, and $\beta$. The following lemma shows how to select $c_0$ to make $\sum\limits_{(t,{\bf x})\in R_{1,j-1}}\vert\eta^{(j)}(t,{\bf x})\vert\leq \frac{1}{2}$, which completes the proof.
\begin{lemma}\label{coeffbound}
For a given $\varepsilon>0$, we may choose $c_0>0$ in \eqref{c0} such that
\begin{equation*}
\sum_{(t,{\bf x})\in R_{1,j-1}}\vert\eta^{(j)}(t,{\bf x})\vert\leq \frac{1}{2}.
\end{equation*}
\end{lemma}
\begin{proof}
Let $X$ and $Y$ be random variables defined in \eqref{vgdecom} and \eqref{Xdecom}. Since $Y$ and $\mathcal{G}_{j-1}$ are independent, for all $(t,{\bf x})\in R_{1,j-1}$,
$$\textrm{Cov}[Y,N_\varepsilon(t,{\bf x})]=0$$
and for $(t,{\bf y})\in R_{1,j}\setminus R_{1,j-1}$, we have
\begin{equation}
\label{noisecov}
\textrm{Cov}[N_\varepsilon(t,{\bf x}),N_\varepsilon(t,{\bf y})]=\textrm{Cov}[N_\varepsilon(t,{\bf x}),X]=\sum_{(t,{\bf x'})\in R_{1,j-1}}\eta^{(j)}(t,{\bf x'}) \textrm{Cov}[N_\varepsilon(t,{\bf x}),N_\varepsilon(t,{\bf x'})].
\end{equation}
We write the equation \eqref{noisecov} in a matrix form as
\begin{equation}
\label{matrixform}
{\bf X} ={\bf \Sigma}{\bf \eta},
\end{equation}
where the vector ${\bf \eta}=\left(\eta^{(j)}(t,{\bf x})\right)_{(t,{\bf x})\in R_{1,j-1}}^T$, the vector
${\bf X}=\left\lbrace\textrm{Cov}[N_\varepsilon(t,{\bf x}),N_\varepsilon(t,{\bf y})]\right\rbrace_{(t,{\bf x})\in R_{1,j-1}}^T,$
and ${\bf \Sigma}$ is the covariance matrix of $\left(N_\varepsilon(t,{\bf x})\right)_{(t,{\bf x})\in R_{1,j-1}}$. Let $\vert\vert\cdot\vert\vert_{1,1}$ be the matrix norm induced by the $\vert\vert\cdot\vert\vert_{l_1}$ norm, that is for a matrix ${\bf A}$,
$$\vert\vert{\bf A}\vert\vert_{1,1}:=\sup_{{\bf x\neq 0}}\frac{\vert\vert {\bf Ax}\vert\vert_{l_1}}{\vert\vert{\bf x}\vert\vert_{l_1}}.$$
It can be shown that $\vert\vert {\bf A}\vert\vert_{1,1}=\max\limits_j\sum\limits_{i=1}^n\vert a_{ij}\vert$ (see page 259 of \cite{rao2000linear}). Therefore, we have
\begin{equation*}
\vert\vert{\bf \eta}\vert\vert_{l_1}=\vert\vert{\bf \Sigma}^{-1}{\bf X}\vert\vert_{l_1}\leq\vert\vert{\bf \Sigma}^{-1}\vert\vert_{1,1}\vert\vert{\bf X}\vert\vert_{l_1}.
\end{equation*}
We rewrite ${\bf \Sigma}={\bf D}{\bf T}{\bf D}$, where ${\bf D}$ is a diagonal matrix with diagonal entries $\sqrt{\textrm{Var}[N_\varepsilon(t,{\bf x})]}$, and ${\bf T}$ is the correlation matrix with entries
$$e_{{\bf xx'}}=\frac{\textrm{Cov}[N_\varepsilon(t,{\bf x}),N_\varepsilon(t,{\bf x'})]}{\sqrt{\textrm{Var}[N_\varepsilon(t,{\bf x})]}\cdot\sqrt{\textrm{Var}[N_\varepsilon(t,{\bf x'})]}}.$$
Thanks to Lemma \ref{varbound}, for ${\bf x}\neq {\bf x'}$, $\vert e_{{\bf xx'}}\vert$ can be bounded above by
$$ \vert e_{{\bf xx'}}\vert\leq \frac{C_8t_1\left\vert {\bf x}-{\bf x'}\right\vert^{-\beta}}{C_7t_1^{1-\beta/\alpha}}.$$
Define ${\bf A}={\bf I-T}$. Because ${\bf A}$ has zero diagonal entries, we can bound $\vert\vert{\bf A}\vert\vert_{1,1}$ by
\begin{align*}
\vert\vert{\bf A}\vert\vert_{1,1}&=\max_{{\bf x}}\sum_{{\bf x}\neq{\bf x'}}\vert e_{{\bf xx'}}\vert\leq \sum_{(t,{\bf x})\in R_{1,n_1-1}}\vert e_{{\bf 0x}}\vert=\frac{C_8t_1^{\beta/\alpha}}{C_7}\sum_{(t,{\bf x})\in R_{1,n_1-1}}\vert {\bf x}\vert^{-\beta}\\
&\leq\frac{C(d)C_8t_1^{\beta/\alpha}}{C_7\varepsilon^{2\beta}}\int_0^{\sqrt{d}\varepsilon^{-2}}r^{d-\beta-1}dr=\frac{C(d,\beta)C_8}{C_7}\cdot\frac{(c_0\varepsilon^4)^{\beta/\alpha}}{\varepsilon^{2d}}.
\end{align*}
For any $\varepsilon>0$, we denote $\mathcal{C}=\left(\frac{C_7}{3C(d,\beta)C_8}\right)^{\alpha/\beta}$ and choose $c_0<\mathcal{C}\varepsilon^{\frac{2\alpha d-4\beta}{\beta}}$ in \eqref{c0}, which makes $\vert\vert{\bf A}\vert\vert_{1,1}<\frac{1}{3}.$ Therefore, summing the geometric series gives that
$$\vert\vert{\bf T}^{-1}\vert\vert_{1,1}=\vert\vert{\bf (I-A)}^{-1}\vert\vert_{1,1}\leq \frac{1}{1-\vert\vert{\bf A}\vert\vert_{1,1}}\leq \frac{3}{2},$$
and $\vert\vert{\bf \Sigma}^{-1}\vert\vert_{1,1}\leq \vert\vert{\bf D}^{-1}\vert\vert_{1,1}\cdot\vert\vert{\bf T}^{-1}\vert\vert_{1,1}\cdot\vert\vert{\bf D}^{-1}\vert\vert_{1,1}\leq \frac{3}{2}C_7^{-1}t_1^{-\frac{\alpha-\beta}{\alpha}}$. Substituting the bounds into \eqref{matrixform} and choosing $c_0$ as in \eqref{c0}, we obtain
$$\vert\vert{\bf \eta}\vert\vert_{l_1}\leq \frac{3}{2}C_7^{-1}t_1^{-\frac{\alpha-\beta}{\alpha}}\vert\vert{\bf X}\vert\vert_{l_1}<\frac{3}{2}\cdot\frac{1}{3}=\frac{1}{2}.$$
\end{proof}
Combining \eqref{sumprob}, \eqref{probB} and \eqref{probAbound} yields
\begin{equation*}
\begin{split}
P(F_{1})&\leq \frac{C(d)C_5}{(1\wedge \sqrt{{c_0}^d})\varepsilon^{2d}}\exp\left(-\frac{C_6}{4\mathcal{D}^2t_1^{\frac{\alpha-\beta}{\alpha}}}\right)+C\exp\left(-\frac{C'}{\varepsilon^2}\right)\\
&\leq C_5'\exp\left(-\frac{d}{2}\ln t_1-\frac{C_6}{4\mathcal{D}^2t_1^{\frac{\alpha-\beta}{\alpha}}}\right)+C\exp\left(-\frac{C'}{\varepsilon^2}\right)
\end{split}
\end{equation*}
We choose a $\mathcal{D}_0$ depending only on $\alpha,\beta$ and $d$ such that for any $\mathcal{D}<\mathcal{D}_0$,
\begin{align*}
P(F_1)&\leq C_5'\exp\left(-\frac{C_6'}{\mathcal{D}^2t_1^{\frac{\alpha-\beta}{\alpha}}}\right)+C\exp\left(-\frac{C'}{\varepsilon^2}\right)\\
&\leq{\bf C_4}\exp\left(-\frac{{\bf C_5}}{\varepsilon^2+\mathcal{D}^2t_1^\frac{\alpha-\beta}{\alpha}}\right),
\end{align*}
which completes the proof of Proposition \ref{prop} (a).
\textbf{Proof of Proposition \ref{prop} (b)} We first state the Gaussian correlation inequality, which is crucial to the proof of Proposition \ref{prop} (b).
\begin{lemma}\label{Gaussiancorr}For any convex symmetric sets $K, L$ in $\mathbb{R}^d$ and any centered Gaussian measure $\mu$ on $\mathbb{R}^d$, we have
$$
\mu(K\cap L)\geq \mu(K)\mu(L).
$$
\end{lemma}
\begin{proof}
See \cite{royen2014simple} and \cite{latala2017royen}.
\end{proof}
By the Markov property of $u(t,\cdot)$, the behavior of $u(t,\cdot)$ in the interval $[t_n,t_{n+1}]$ depends only on $u(t_n,\cdot)$ and $\dot{F}(t,{\bf x})$ on $[t_n,t]\times[-1,1]^d$. Therefore, it is enough to show that
$$
P\left(E_{0}\right)\geq {\bf C_6}\exp\left(-\frac{{\bf C_7}}{\varepsilon^{\frac{2d(\alpha d-\beta)}{\beta}}}\right),
$$
where ${\bf C_6}$, ${\bf C_7}$ do not depend on $u_0$ and $\vert u_0(x)\vert\leq \frac{1}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}$. Now we are ready to compute the lower bound for the small ball probability with a smooth and deterministic $\sigma(s,y,u)=\sigma(s,y)$, which is a Gaussian case. For $n \geq 0$, define a sequence of events
\begin{equation}
\label{Dsequence}
D_n=\left\lbrace\vert u(t_{n+1},{\bf x})\vert\leq \frac{1}{6}\varepsilon^\frac{2(\alpha-\beta)}{\alpha},\text{and}~\vert u(t,{\bf x})\vert\leq\frac{2}{3}\varepsilon^\frac{2(\alpha-\beta)}{\alpha},~\forall t\in[t_n,t_{n+1}],{\bf x}\in[-1,1]^d\right\rbrace.
\end{equation}
Denote
$$\bar{p}_t(u_0)({\bf x})=\bar{p}(t,\cdot)*u_0({\bf x})=\int_{\mathbb{T}^d}\bar{p}(t,{\bf x-y})u_0({\bf y})d{\bf y},$$
and we have
\begin{equation}
\label{u0convolute}
\bar{p}_t(u_0)({\bf x})\leq \sup_{{\bf x}}\vert u_0({\bf x})\vert\leq \frac{1}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}.
\end{equation}
We consider the measure $Q$ given by
$$\frac{dQ}{dP}=\exp\left(Z_{t_1}-\frac{1}{2}\langle Z\rangle_{t_1}\right)$$
where
\begin{equation*}
Z_{t_1}=-\int_{[0,t_1]\times\mathbb{T}^d}f(s,{\bf y})F(dsd{\bf y}).
\end{equation*}
If $Z_{t_1}$ satisfies Novikov's condition in \cite{allouba1998different}, then
\begin{equation*}
\widetilde{F}(t,{\bf x}):=F(t,{\bf x})-\langle F(\cdot,{\bf x}),Z\rangle_{t}
\end{equation*}
is a centered spatially homogeneous Wiener process under the measure $Q$ (see \cite{allouba1998different} for more details). Therefore, for ${\bf x}\in [-1,1]^d$, Fubini's Theorem with the covariance structure of $\dot{F}(t,{\bf x})$ gives
\begin{align*}
\dot{\widetilde{F}}(t,{\bf x})=\dot{F}(t,{\bf x})+\int_{\mathbb{T}^d}f(t,{\bf y})\Lambda({\bf x-y})d{\bf y},
\end{align*}
which is a colored noise under measure $Q$. Since $\frac{\bar{p}_t(u_0)({\bf x})}{t_1\sigma(t,{\bf x})}$ is a smooth and bounded function, and $\Lambda({\bf x})$ is the Riesz kernel on $\mathbb{T}^d$, \cite{roncal2016fractional} shows that there is a continuous formula for the fractional Laplacian of $\frac{\bar{p}_t(u_0)({\bf x})}{t_1\sigma(t,{\bf x})}$ on $\mathbb{T}^d$, so that one may assume that there is a function $f(t,{\bf y})$ such that,
$$\int_{\mathbb{T}^d}f(t,{\bf y})\Lambda({\bf x-y})d{\bf y}=\frac{\bar{p}_t(u_0)({\bf x})}{t_1\sigma(t,{\bf x})}.$$
Moreover, $\Lambda({\bf x})\geq d^{-\beta/2}$ and \eqref{u0convolute} imply
\begin{equation}
\label{novikov}
\begin{split}
&\mathbb{E}\left[(Z_{t_1})^2\right]=\mathbb{E}\left[\int_{[0,t_1]\times\mathbb{T}^d}\int_{[0,t_1]\times\mathbb{T}^d}f(s,{\bf y})f(t,{\bf z})F(dsd{\bf y})F(dtd{\bf z})\right]\\
&=\int_0^{t_1}\int_{\mathbb{T}^d}\int_{\mathbb{T}^d}f(s,{\bf y})f(s,{\bf z})\Lambda({\bf y-z})d{\bf y}d{\bf z}ds=\int_0^{t_1}\int_{\mathbb{T}^d}f(s,{\bf y})\frac{\bar{p}_s(u_0)({\bf y})}{t_1\sigma(s,{\bf y})}d{\bf y}ds\\
&\leq\int_0^{t_1}\int_{\mathbb{T}^d}f(s,{\bf y})\left(\frac{1}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right)\cdot\frac{1}{t_1\mathcal{C}_1}d{\bf y}ds\leq\frac{C(d,\beta)\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}}{\mathcal{C}_1t_1}\int_0^{t_1}\int_{\mathbb{T}^d}f(s,{\bf y})\Lambda({\bf 1-y})d{\bf y}ds\\
&=\frac{C(d,\beta)\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}}{\mathcal{C}_1t_1}\int_0^{t_1}\frac{\bar{p}_s(u_0)({\bf 1})}{t_1\sigma(s,{\bf 1})}ds\leq C(d,\beta,\mathcal{C}_1)\frac{\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}}}{t_1}<\infty,
\end{split}
\end{equation}
which satisfies Novikov's condition with a deterministic $f$. Thus, we can rewrite equation $(1.1)$ with deterministic $\sigma$ as
\begin{align*}
u(t,{\bf x})&=\bar{p}_t(u_0)({\bf x})+\int_{[0,t]\times\mathbb{T}^d}\bar{p}(t-s,{\bf x-y})\sigma(s,{\bf y})\left[\widetilde{F}(dsd{\bf y})-\frac{\bar{p}_s(u_0)({\bf y})}{t_1\sigma(s,{\bf y})}dsd{\bf y}\right]\\
&=\bar{p}_t(u_0)({\bf x})-\frac{t\bar{p}_t(u_0)({\bf x})}{t_1}+\int_{[0,t]\times\mathbb{T}^d}\bar{p}(t-s,{\bf x-y})\sigma(s,{\bf y})\widetilde{F}(dsd{\bf y})\\
&=\left(1-\frac{t}{t_1}\right)\bar{p}_t(u_0)({\bf x})+\int_{[0,t]\times\mathbb{T}^d}\bar{p}(t-s,{\bf x-y})\sigma(s,{\bf y})\widetilde{F}(dsd{\bf y}).
\end{align*}
Since the first term vanishes at $t=t_1$ and $\vert u_0({\bf x})\vert\leq \frac{1}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}$, we have
\begin{equation}
\label{initialbound}
\left|\left(1-\frac{t}{t_1}\right)\bar{p}_t(u_0)({\bf x})\right|\leq\frac{1}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}, {\bf x}\in[-1,1]^d,t<t_1.
\end{equation}
Define
$$\widetilde{N}(t,{\bf x})=\int_{[0,t]\times\mathbb{T}^d}\bar{p}(t-s,{\bf x-y})\sigma(s,{\bf y})\widetilde{F}(d{\bf y}ds),$$
and suppose $c_0<1$, then applying Lemma \ref{larged} to $\widetilde{F}$ gives
$$Q\left(\sup_{\substack{0\leq t\leq c_0\varepsilon^4\\ {\bf x}\in[0,c_0\varepsilon^2]^d}}\vert \widetilde{N}(t,{\bf x})\vert>\frac{1}{6}\varepsilon^\frac{2(\alpha-\beta)}{\alpha}\right)\leq C_5\exp\left(-\frac{C_6}{36\mathcal{C}_2^2c_0^{\frac{\alpha-\beta}{\alpha}}}\right),$$
where $\gamma = c_0^{-1}>1$ and $\kappa=\left(6c_0^{\frac{\alpha-\beta}{\alpha}}\right)^{-1}$. To make sure that the right hand side is strictly less than 1, we require
\begin{equation*}
c_0<\min\left\lbrace 1, \left(\frac{C_6}{36\mathcal{C}_2^2\ln C_5}\right)^{\frac{\alpha}{\alpha-\beta}}\right\rbrace,
\end{equation*}
which is mentioned in \eqref{c0bound1}. By the Gaussian correlation inequality in Lemma \ref{Gaussiancorr}, we obtain
\begin{align*}
Q\left(\sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert \widetilde{N}(t,{\bf x})\vert\leq\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right)&\geq Q\left(\sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[0,c_0\varepsilon^2]^d}}\vert \widetilde{N}(t,{\bf x})\vert\leq\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right)^{\left(\frac{2}{c_0\varepsilon^2}\right)^d}\\
&\geq \left[1-C_5\exp\left(-\frac{C_6}{36\mathcal{C}_2^2c_0^{\frac{\alpha-\beta}{\alpha}}}\right)\right]^{\left(\frac{2}{c_0\varepsilon^2}\right)^d}.
\end{align*}
From \eqref{Dsequence} and \eqref{initialbound}, we get
$$Q(D_0)\geq Q\left(\sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert \widetilde{N}(t,{\bf x})\vert\leq\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right),$$
and if we replace $f(s,y)$ with $2f(s,y)$ for $Z_{t_1}$,
\begin{equation}
\label{radon}
1=\mathbb{E}\left[\frac{dQ}{dP}\right]=\mathbb{E}\left[\exp\left(Z_{t_1}-\frac{1}{2}\langle Z\rangle_{t_1}\right)\right]=\mathbb{E}[\exp\left(2Z_{t_1}-2\langle Z\rangle_{t_1}\right)].
\end{equation}
Because $f(s,y)$ is deterministic, we may estimate the Radon-Nikodym derivative,
\begin{align*}
\mathbb{E}\left[\left(\frac{dQ}{dP}\right)^2\right]&=\mathbb{E}[\exp\left(2Z_{t_1}-\langle Z\rangle_{t_1}\right)]=\mathbb{E}[\exp\left(2Z_{t_1}-2\langle Z\rangle_{t_1}\right)\cdot\exp(\langle Z\rangle_{t_1})]\\
&\leq \exp\left(C(d,\beta,\mathcal{C}_1)\frac{\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}}}{t_1}\right).
\end{align*}
The last inequality follows from \eqref{novikov} and \eqref{radon}. The Cauchy-Schwarz inequality implies
$$Q(D_0)\leq \sqrt{\mathbb{E}\left[\left(\frac{dQ}{dP}\right)^2\right]}\cdot\sqrt{P(D_0)},$$
and as a consequence, we get
\begin{equation}
\label{probD}
P(D_0)\geq \exp\left(-\frac{C\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}}}{t_1}\right)\exp\left(\frac{C'}{c_0^d\varepsilon^{2d}}\ln\left[1-C_5\exp\left(-\frac{C_6}{36\mathcal{C}_2^2c_0^{\frac{\alpha-\beta}{\alpha}}}\right)\right]\right),
\end{equation}
where $C,C'$ depend only on $d,\beta,\mathcal{C}_1$. For the lower bound with a non-deterministic $\sigma(t,{\bf x},u)$, we write
$$u(t,{\bf x})=u_g(t,{\bf x})+D(t,{\bf x})$$
where $u_g(t,{\bf x})$ satisfies the equation
$$\partial_t u_g(t,{\bf x})=-(-\Delta)^{\alpha/2}u_g(t,{\bf x})+\sigma(t,{\bf x},u_0({\bf x}))\dot{F}(t,{\bf x})$$
and
$$D(t,{\bf x})=\int_{[0,t]\times\mathbb{T}^d}\bar{p}(t-s,{\bf x-y})[\sigma(s,{\bf y},u(s,{\bf y}))-\sigma(s,{\bf y},u_0({\bf y}))]F(dsd{\bf y})$$
with an initial profile $u_0$. Since $u_g$ is Gaussian, for an event defined as
$$\widetilde{D}_0=\left\lbrace\vert u_g(t_{1},{\bf x})\vert\leq \frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}},\text{and}~\vert u_g(t,{\bf x})\vert\leq\frac{2}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}},~\forall t\in[0,t_{1}],{\bf x}\in[-1,1]^d\right\rbrace,$$
we can apply \eqref{probD} to it and get
\begin{equation}
\label{probD0}
P(\widetilde{D}_0)\geq\exp\left(-\frac{C\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}}}{t_1}\right)\exp\left(\frac{C'}{c_0^d\varepsilon^{2d}}\ln\left[1-C_5\exp\left(-\frac{C_6}{36\mathcal{C}_2^2c_0^{\frac{\alpha-\beta}{\alpha}}}\right)\right]\right).
\end{equation}
Define the stopping time
$$\tau=\inf\left\lbrace t:\vert u(t,{\bf x})-u_0({\bf x})\vert>2\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\text{~for some ${\bf x}\in[-1,1]^d$}\right\rbrace,$$
and clearly we have $\tau>t_1$ on the event $E_{0}$ in \eqref{En} since $\vert u_0(x)\vert\leq \frac{1}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}$, and $\vert u(t,x)\vert\leq \varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}$ for all $t\in[0,t_1]$ on the event $E_{0}$. We make another definition
$$\widetilde{D}(t,x)=\int_{[0,t]\times\mathbb{T}^d}\bar{p}(t-s,{\bf x-y})[\sigma(s,{\bf y},u(s\wedge \tau,{\bf y}))-\sigma(s,{\bf y},u_0({\bf y}))]F(dsd{\bf y}),$$
and $D(t,{\bf x})=\widetilde{D}(t,{\bf x})$ for $t\leq t_1$ on the event $\{\tau>t_1\}$. Moreover, from \eqref{En}, we have
\begin{equation}
\label{E0bound}
\begin{split}
P(E_{0})&\geq P\left(\widetilde{D}_0\bigcap \left\lbrace \sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert D(t,{\bf x})\vert\leq\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right\rbrace\right)\\
&=P\left(\left(\widetilde{D}_0\bigcap \left\lbrace \sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert D(t,{\bf x})\vert\leq\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right\rbrace\bigcap\{\tau>t_1\}\right)\right.\\
&\hspace{3cm}\bigcup\left.\left(\widetilde{D}_0\bigcap \left\lbrace \sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert D(t,{\bf x})\vert\leq\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right\rbrace\bigcap\{\tau\leq t_1\}\right)\right).
\end{split}
\end{equation}
On the event $\{\tau>t_1\}$, we have
$$\sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert D(t,{\bf x})\vert=\sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert \widetilde{D}(t,{\bf x})\vert,$$
and on the event $\widetilde{D}_0\cap\{\tau\leq t_1\}$, we have, for some ${\bf x}$,
$$\vert u_g(\tau,{\bf x})\vert\leq \frac{2}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}},~\vert u(\tau,{\bf x})-u_0({\bf x})\vert>2\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}} ~\text{and}~ \vert u_0({\bf x})\vert\leq\frac{1}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}.$$
The above leads to
\begin{align*}
\sup_{\substack{{\bf x}}}\vert D(\tau,{\bf x})\vert&=\sup_{\substack{{\bf x}}}\vert u(\tau,{\bf x})-u_g(\tau,{\bf x})\vert\geq \sup_{\substack{{\bf x}}}(\vert u(\tau,{\bf x})\vert-\vert u_g(\tau,{\bf x})\vert)\\
&\geq \sup_{\substack{{\bf x}}}\vert u(\tau,{\bf x})\vert-\frac{2}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\geq 2\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}-\frac{1}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}-\frac{2}{3}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\\
&\geq \varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}>\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}},
\end{align*}
which implies
$$\widetilde{D}_0\cap \left\lbrace \sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert D(t,{\bf x})\vert\leq\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right\rbrace\cap\{\tau\leq t_1\}=\emptyset.$$
Combining the above with \eqref{E0bound} yields
\begin{equation}
\label{E0boundsim}
\begin{split}
P(E_{0})&\geq P\left(\widetilde{D}_0\bigcap \left\lbrace \sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert \widetilde{D}(t,{\bf x})\vert\leq\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right\rbrace\bigcap\{\tau>t_1\}\right)\\
&\geq P(\widetilde{D}_0)-P\left( \sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert \widetilde{D}(t,{\bf x})\vert>\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right),
\end{split}
\end{equation}
and $\vert u(t,{\bf x})-u_0({\bf x})\vert\leq 2\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}$ for all $t\in[0,t_1]$ and ${\bf x}\in[-1,1]^d$. We apply the Lipschitz property on the third variable of $\sigma(t,{\bf x},u)$ in \eqref{hypothesis1} to Remark \ref{largeremark} and use a union bound from \eqref{probB} to get
\begin{equation}
\label{probDtilde}
P\left( \sup_{\substack{0\leq t\leq t_1\\ {\bf x}\in[-1,1]^d}}\vert \widetilde{D}(t,{\bf x})\vert>\frac{1}{6}\varepsilon^{\frac{2(\alpha-\beta)}{\alpha}}\right)\leq \frac{C_5}{(c_0\varepsilon^4)^{d/2}}\exp\left(-\frac{C_6}{144\mathcal{D}^2 (c_0\varepsilon^4)^{\frac{\alpha-\beta}{\alpha}}}\right).
\end{equation}
Consequently, from \eqref{probD0}, \eqref{E0boundsim} and \eqref{probDtilde}, we conclude that,
\begin{equation}
\begin{split}
\label{E_0}
P(E_{0})&\geq \exp\left(-\frac{C\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}}}{t_1}\right)\exp\left(\frac{C'}{c_0^d\varepsilon^{2d}}\ln\left[1-C_5\exp\left(-\frac{C_6}{36\mathcal{C}_2^2c_0^{\frac{\alpha-\beta}{\alpha}}}\right)\right]\right)\\
&\hspace{3cm}-\frac{C_5}{(c_0\varepsilon^4)^{d/2}}\exp\left(-\frac{C_6}{144\mathcal{D}^2 (c_0\varepsilon^4)^{\frac{\alpha-\beta}{\alpha}}}\right)\\
&=\exp\left(-\frac{C\varepsilon^{\frac{4(\alpha-\beta)}{\alpha}}}{t_1}+\frac{C'}{c_0^d\varepsilon^{2d}}\ln\left[1-C_5\exp\left(-\frac{C_6}{36\mathcal{C}_2^2c_0^{\frac{\alpha-\beta}{\alpha}}}\right)\right]\right)\\
&\hspace{3cm}-C_5\exp\left(-\frac{d}{2}\ln t_1-\frac{C_6}{144\mathcal{D}^2 t_1^{\frac{\alpha-\beta}{\alpha}}}\right).
\end{split}
\end{equation}
When $d=1$ and $\alpha\geq 2\beta$, we may choose $c_0$ in \eqref{c0} satisfying
\begin{equation}
\label{c0bound2}
\mathcal{C}'\varepsilon^{\frac{4\alpha-8\beta}{\beta}}<c_0<\mathcal{C}\varepsilon^{\frac{2\alpha-4\beta}{\beta}},
\end{equation}
where $0<\mathcal{C}'<\mathcal{C}$ and $\varepsilon$ is small enough. Then choose a $\mathcal{D}_0$ depending only on $\alpha,\beta$ and $d$ such that for any $\mathcal{D}<\mathcal{D}_0$, we have
\begin{align*}
P(E_{0})&\geq \exp\left(-C\varepsilon^{\frac{-4(\alpha-\beta)^2}{\alpha\beta}}\right)-C_5'\exp\left(-\frac{C_6'}{\mathcal{D}^2t_1^{\frac{\alpha-\beta}{\alpha}}}\right)\\
&\geq C\exp\left(-C'\varepsilon^{\frac{-4(\alpha-\beta)^2}{\alpha\beta}}\right)-C_5'\exp\left(-\frac{C_6'}{\mathcal{D}^2\varepsilon^{\frac{4(\alpha-\beta)^2}{\alpha\beta}}}\right)\\
&\geq {\bf C_6}\exp\left(-\frac{{\bf C_7}}{\varepsilon^{\frac{4(\alpha-\beta)^2}{\alpha\beta}}}\right).
\end{align*}
When $d=1$ and $\alpha<2\beta$, we may choose $c_0$ in \eqref{c0bound1}. However, the second term could exceed the first term in \eqref{E_0} for small enough $\varepsilon$ and we may not achieve a lower bound for small probability for any $0<\varepsilon<\varepsilon_0$. Similarly, for $d\geq 2$, the first term decays exponentially from \eqref{probD0} and the second term grows exponentially from \eqref{probDtilde}, hence we cannot achieve a lower bound for small probability for any $0<\varepsilon<\varepsilon_0$, which completes the proof.
\section{Acknowledgment}
The author would like to thank his advisor Carl Mueller for useful discussions and productive comments.
\end{document} |
\begin{document}
\title[Addendum BMO]{Addendum to \textquotedblleft BMO: OSCILLATIONS, SELF IMPROVEMENT, GAGLIARDO
COORDINATE SPACES AND REVERSE HARDY INEQUALITIES"}
\author{Mario Milman}
\email{mario.milman@gmail.com}
\urladdr{https://sites.google.com/site/mariomilman}
\begin{abstract}
We provide a precise statement and self contained proof of a Sobolev
inequality (cf. \cite[ page 236 and page 237]{A}) stated in the original
paper. Higher order and fractional inequalities are treated as well.
\end{abstract}
\maketitle
\section{Introduction}
One of the purposes of the original paper (cf. \cite{A}) was to highlight some
connections between interpolation theory, and inequalities connected with the
theory of $BMO$ and Sobolev spaces. This resulted in a somewhat lengthy paper
and as consequence many known results were only stated, and the reader was
referred to the relevant literature for proofs. It has become clear, however,
that a complete account of some of the results could be useful. In this
expository addendum we update and correct one paragraph of the original text
by providing a precise statement and proof of a Sobolev inequality which was
stated in the original paper (cf. \cite[(13) page 236, and line 10, page
237]{A}). Included as well are the corresponding results for higher order and
fractional inequalities.
All the results discussed in this note are known\footnote{In presenting the
results yet again we have followed in part advice from Rota \cite{82}.}: The
only novelty is perhaps in the unified presentation.
We shall follow the notation and the ordering of references of the original
paper \cite{A} to which we shall also refer for background, priorities,
historical comments, etc. Newly referenced papers will be labeled with letters.
\section{The Hardy-Littlewood-Sobolev-O'Neil program}
We let
\begin{equation}
\left\Vert f\right\Vert _{L(p,q)}=\left\{
\begin{array}
[c]{cc}
\left\{ \int_{0}^{\infty}\left( f^{\ast}(t)t^{1/p}\right) ^{q}\frac{dt}
{t}\right\} ^{1/q} & 1\leq p<\infty,1\leq q\leq\infty\\
\left\Vert f\right\Vert _{L(\infty,q)} & 1\leq q\leq\infty,
\end{array}
\right. \label{berta}
\end{equation}
where
\begin{equation}
\left\Vert f\right\Vert _{L(\infty,q)}:=\left\{ \int_{0}^{\infty}(f^{\ast
\ast}(t)-f^{\ast}(t))^{q}\frac{dt}{t}\right\} ^{1/q}. \label{berta1}
\end{equation}
In particular we note that in this notation
\[
L(\infty,\infty)=\{f:\sup_{t>0}\{f^{\ast\ast
}(t)-f^{\ast}(t)\}<\infty\},
\]
\[
L(1,1)=L^{1}.
\]
Moreover, if $f$ has bounded support
\[
\left\Vert f\right\Vert _{L(\infty,1)}=\left\Vert f\right\Vert _{L^{\infty}}.
\]
In \cite[(13) page 236]{A} we stated that \textquotedblleft it was shown in
\cite{7} that
\begin{equation}
\left\Vert f\right\Vert _{L(\bar{p},q)}\leq c_{n}\left\Vert \nabla
f\right\Vert _{L(p,q)},1\leq p\leq n,\frac{1}{\bar{p}}=\frac{1}{p}-\frac{1}
{n},\text{ }1\leq q\leq\infty,f\in C_{0}^{\infty}(R^{n})." \label{sobolev}
\end{equation}
However, to correctly state what was actually shown in \cite{7}, the indices
in the displayed formula need to be restricted when $p=1$. The precise
statement reads as follows:
\begin{theorem}
\label{teo1}Let $n>1.$ Let $1\leq p\leq n,$ $1\leq q\leq\infty,$ and define
$\frac{1}{\bar{p}}=\frac{1}{p}-\frac{1}{n}.$ Then, if $(p,q)\in(1,n]\times
\lbrack1,\infty]$ \textbf{or if} $p=q=1,$ we have
\begin{equation}
\left\Vert f\right\Vert _{L(\bar{p},q)}\leq c_{n}(p,q)\left\Vert \left\vert
\nabla f\right\vert \right\Vert _{L(p,q)},\text{ }f\in C_{0}^{\infty}(R^{n}).
\label{sobol1}
\end{equation}
\end{theorem}
\begin{remark}
If $n=1,$ then $p=q=1,$ and (\ref{sobol1}) is an easy consequence of the
fundamental theorem of Calculus.
\end{remark}
The corresponding higher order result (cf. \cite[line 10, page 237]{A}) reads
as follows,
\begin{theorem}
\label{teo2}Let $k\in N$, $k\leq n,$ $1\leq p\leq\frac{n}{k},$ $1\leq
q\leq\infty.$ Define $\frac{1}{\bar{p}}=\frac{1}{p}-\frac{k}{n}.$ Then, (i) if
$k<n,$ and $(p,q)\in(1,\frac{n}{k}]\times\lbrack1,\infty],$ or $(p,q)\in
\{1\}\times\{1\},$ or (ii) if $n=k,$ and $p=q=1,$ we have
\[
\left\Vert f\right\Vert _{L(\bar{p},q)}\leq c_{n,k}(p,q)\left\Vert \left\vert
D^{k}f\right\vert \right\Vert _{L(p,q)},\text{ }f\in C_{0}^{\infty}(R^{n}),
\]
where $\left\vert D^{k}f\right\vert $ is the length of the vector whose
components are all the partial derivatives of order $k.$
\end{theorem}
\begin{remark}
Observe that when $p=\frac{n}{k},$ $p>1$, and $q=1,$ we have
\[
\left\Vert f\right\Vert _{L^{\infty}}=\left\Vert f\right\Vert _{L(\infty
,1)}\preceq\left\Vert \left\vert D^{k}f\right\vert \right\Vert _{L(\frac{n}
{k},1)},\text{ }f\in C_{0}^{\infty}(R^{n}).
\]
We also obtain an $L^{\infty}$ estimate when $p=\frac{n}{k}=1,$ and $q=1,$
\begin{equation}
\left\Vert f\right\Vert _{L^{\infty}}=\left\Vert f\right\Vert _{L(\infty
,1)}\preceq\left\Vert D^{n}f\right\Vert _{L^{1}},\text{ }f\in C_{0}^{\infty
}(R^{n}). \label{deacuerdo}
\end{equation}
\end{remark}
In the particular case when we are working with $L^{p}$ spaces, i.e. $p=q$,
\textbf{there is no need to separate the cases }$p=1$\textbf{ and }$p>1,$ and
Theorems \ref{teo1} and \ref{teo2} give us what we could call the
\textquotedblleft completion'' of the Hardy-Littlewood-Sobolev-O'Neil program, namely
\begin{corollary}
Let $1\leq k\leq n,1\leq p\leq\frac{n}{k},\frac{1}{\bar{p}}=\frac{1}{p}
-\frac{k}{n}.$ Then
\begin{equation}
\left\Vert f\right\Vert _{L(\bar{p},p)}\leq c_{n}(p)\left\Vert D^{k}
f\right\Vert _{L(p,p)},\text{ }f\in C_{0}^{\infty}(R^{n}). \label{sobsup}
\end{equation}
\end{corollary}
\begin{proof}
\textbf{(of Theorem \ref{teo1})}. \textbf{The case }$1<p\leq n.$ We start with
the inequality (cf. \cite[(58) page 263]{A}),
\begin{equation}
f^{\ast\ast}(t)-f^{\ast}(t)\leq c_{n}t^{1/n}(\nabla f)^{\ast\ast}(t),
\label{sob2}
\end{equation}
which yields
\begin{equation}
\left( f^{\ast\ast}(t)-f^{\ast}(t)\right) t^{1/p}t^{-1/n}\leq c_{n}
t^{1/p}(\nabla f)^{\ast\ast}(t). \label{sobol2}
\end{equation}
If $q<\infty,$ we integrate (\ref{sobol2}) and find
\begin{align*}
\left\{ \int_{0}^{\infty}[\left( f^{\ast\ast}(t)-f^{\ast}(t)\right)
t^{1/\bar{p}}]^{q}\frac{dt}{t}\right\} ^{1/q} & \leq c_{n}\left\{ \int
_{0}^{\infty}[t^{1/p}(\nabla f)^{\ast\ast}(t)]^{q}\frac{dt}{t}\right\}
^{1/q}\\
& \leq C_{n}(p,q)\left\Vert \nabla f\right\Vert _{L(p,q)},
\end{align*}
where in the last step we used Hardy's inequality (cf. \cite[Appendix 4, page
272]{St}). To identify the left hand side we consider two cases. If $p=n,$
then $\bar{p}=\infty$ and the desired result follows directly from the
definitions (cf. (\ref{berta})). If $p<n,$ then we can write
\begin{equation}
f^{\ast\ast}(t)=\int_{t}^{\infty}\left( f^{\ast\ast}(s)-f^{\ast}(s)\right)
\frac{ds}{s}, \label{from}
\end{equation}
and use Hardy's inequality (cf. \cite[Appendix 4, page 272]{St}) to get
\[
\left\Vert f\right\Vert _{L(\bar{p},q)}\leq\left\{ \int_{0}^{\infty}
[f^{\ast\ast}(t)t^{1/\bar{p}}]^{q}\frac{dt}{t}\right\} ^{1/q}\preceq\left\{
\int_{0}^{\infty}[\left( f^{\ast\ast}(t)-f^{\ast}(t)\right) t^{1/\bar{p}
}]^{q}\frac{dt}{t}\right\} ^{1/q}.
\]
The case $q=\infty$ is easier. Indeed, if $p=n,$ the desired result follows
taking a sup in (\ref{sobol2})$,$ while if $p<n,$ from (\ref{from}) we find
\begin{align*}
f^{\ast\ast}(t) & \leq\int_{t}^{\infty}\left( f^{\ast\ast}(s)-f^{\ast
}(s)\right) s^{1/\bar{p}}s^{-1/\bar{p}}\frac{ds}{s}\\
& \preceq t^{-1/\bar{p}}\sup_{s}\left( f^{\ast\ast}(s)-f^{\ast}(s)\right)
s^{1/\bar{p}}.
\end{align*}
Consequently
\[
\left\Vert f\right\Vert _{L(\bar{p},\infty)}\preceq\sup_{s}\left( f^{\ast
\ast}(s)-f^{\ast}(s)\right) s^{1/\bar{p}}.
\]
Therefore, combining the estimates we have obtained for the right and left
hand sides, we obtain
\[
\left\Vert f\right\Vert _{L(\bar{p},\infty)}\preceq\left\Vert \nabla
f\right\Vert _{L(p,q)},1<p\leq n,1\leq q\leq\infty.
\]
Finally, we consider the case when\textbf{ }$p=q=1.$ In this case we have
$\frac{1}{\bar{p}}=1-\frac{1}{n}.$ At this point recall the inequality (cf.
\cite[page 264]{A})
\begin{align}
\int_{0}^{t}\left( f^{\ast\ast}(s)-f^{\ast}(s)\right) s^{1/\bar{p}}\frac
{ds}{s} & =\int_{0}^{t}\left( f^{\ast\ast}(s)-f^{\ast}(s)\right)
s^{1-1/n}\frac{ds}{s}\nonumber\\
& \preceq\int_{0}^{t}(\nabla f)^{\ast}(s)ds. \label{dav1}
\end{align}
Let $t\rightarrow\infty,$ to find
\[
\int_{0}^{\infty}\left( f^{\ast\ast}(s)-f^{\ast}(s)\right) s^{1/\bar{p}
}\frac{ds}{s}\preceq c_{n}\int_{0}^{\infty}(\nabla f)^{\ast}(s)ds=c_{n}
\left\Vert \nabla f\right\Vert _{L^{1}}=c_{n}\left\Vert \nabla f\right\Vert
_{L(1,1)}.
\]
We conclude the proof remarking that, as we have seen before,
\[
\left\Vert f\right\Vert _{L(\bar{p},1)}\preceq\int_{0}^{\infty}\left(
f^{\ast\ast}(s)-f^{\ast}(s)\right) s^{1/\bar{p}}\frac{ds}{s}.
\]
\end{proof}
\section{Higher Order}
We will only deal in detail with the case $k=2$ (i.e. the case of second order
derivatives) since the general case follows by induction, \textit{mutatis
mutandis}.
\begin{proof}
(i) Suppose first that $n>2.$ Let $\bar{p}_{1}$ and $\bar{p}_{2}$ be defined
by $\frac{1}{\bar{p}_{1}}=\frac{1}{p}-\frac{1}{n}$ and $\frac{1}{\bar{p}_{2}
}=\frac{1}{\bar{p}_{1}}-\frac{1}{n}=\frac{1}{p}-\frac{2}{n}=\frac{1}{\bar{p}
}.$ The first step of the iteration is to observe (cf. \cite{75}) the
elementary fact:
\[
\left\vert \nabla(\nabla f)\right\vert \leq\left\vert D^{2}(f)\right\vert .
\]
Therefore, by (\ref{sobol1}) we have
\begin{align*}
(\nabla f)^{\ast\ast}(t)-(\nabla f)^{\ast}(t) & \preceq t^{1/n}
[\nabla(\nabla f)]^{\ast\ast}(t)\\
& \preceq t^{1/n}\left\vert D^{2}(f)\right\vert ^{\ast\ast}(t).
\end{align*}
Consequently, we find
\begin{equation}
\left( (\nabla f)^{\ast\ast}(t)-(\nabla f)^{\ast}(t)\right) t^{1/\bar{p}
_{1}}\preceq t^{\frac{1}{p}}\left\vert D^{2}(f)\right\vert ^{\ast\ast}(t).
\label{sob4}
\end{equation}
Suppose that $1<p\leq\frac{n}{2},$ and let $1\leq q<\infty.$ Then, from
(\ref{sob4}) and a familiar argument, we get
\begin{align*}
\left\Vert \nabla f\right\Vert _{L(\bar{p}_{1},q)} & \preceq\left\{
\int_{0}^{\infty}[\left( (\nabla f)^{\ast\ast}(t)-(\nabla f)^{\ast
}(t)\right) t^{1/\bar{p}_{1}}]^{q}\frac{dt}{t}\right\} ^{1/q}\\
& \preceq\left\{ \int_{0}^{\infty}[t^{1/p}\left\vert D^{2}(f)\right\vert
^{\ast\ast}(t)]^{q}\frac{dt}{t}\right\} ^{1/q}.
\end{align*}
Thus,
\[
\left\Vert \nabla f\right\Vert _{L(\bar{p}_{1},q)}\preceq\left\Vert \left\vert
D^{2}(f)\right\vert \right\Vert _{L(p,q)}.
\]
Now, combining the previous inequality with the already established first
order case (cf. Theorem \ref{teo1}) we find,
\begin{align*}
\left\Vert f\right\Vert _{L(\bar{p}_{2},q)} & \preceq\left\Vert \nabla
f\right\Vert _{L(\bar{p}_{1},q)}\\
& \preceq\left\Vert \left\vert D^{2}(f)\right\vert \right\Vert _{L(p,q)}.
\end{align*}
Likewise we can treat the case when $q=\infty.$ The analysis also works in the
case $p=1=q.$ In this case we replace (\ref{sob4}) with (\ref{dav1}):
\[
\int_{0}^{t}\left( (\nabla f)^{\ast\ast}(s)-(\nabla f)^{\ast}(s)\right)
s^{1-1/n}\frac{ds}{s}\preceq\int_{0}^{t}(D^{2}f)^{\ast}(s)ds,
\]
which yields
\[
\int_{0}^{\infty}\left( (\nabla f)^{\ast\ast}(s)-(\nabla f)^{\ast}(s)\right)
s^{1-1/n}\frac{ds}{s}\preceq\int_{0}^{\infty}(D^{2}f)^{\ast}(s)ds.
\]
Therefore
\[
\left\Vert \nabla f\right\Vert _{L(\bar{p}_{1},1)}\preceq\int_{0}^{\infty
}(D^{2}f)^{\ast}(s)ds.
\]
At this point recall that the first order case gives us
\[
\left\Vert f\right\Vert _{L(\bar{p}_{2},1)}\preceq\left\Vert \nabla
f\right\Vert _{L(\bar{p}_{1},1)}.
\]
Thus,
\[
\left\Vert f\right\Vert _{L(\bar{p}_{2},1)}\preceq\left\Vert D^{2}f\right\Vert
_{L^{1}}.
\]
Finally consider the case when $n=2=k,$ which means that $p=\frac{2}{2}=1,$
and we let $q=1.$ Then, from
\[
\int_{0}^{t}\left( (Df)^{\ast\ast}(s)-\left( Df\right) ^{\ast}(s)\right)
s^{1-1/2}\frac{ds}{s}\preceq\int_{0}^{t}(D^{2}f)^{\ast}(s)ds
\]
we once again derive
\[
\left\Vert \nabla f\right\Vert _{L(2,1)}\preceq\left\Vert D^{2}f\right\Vert
_{L^{1}}.
\]
Moreover, since
\[
\left( f^{\ast\ast}(t)-f^{\ast}(t)\right) \preceq t^{1/2}\left( \nabla
f\right) ^{\ast\ast}(t)
\]
integrating we get
\[
\left\Vert f\right\Vert _{L(\infty,1)}\preceq\left\Vert \nabla f\right\Vert
_{L(2,1)},
\]
consequently, we see that,
\[
\left\Vert f\right\Vert _{L^{\infty}}=\left\Vert f\right\Vert _{L(\infty
,1)}\preceq\left\Vert D^{2}f\right\Vert _{L^{1}}.
\]
\end{proof}
\begin{example}
In the case $n>2,p=\frac{n}{2},$ $q=1,$ we have
\[
\left\Vert f\right\Vert _{L(\infty,1)}\preceq\left\Vert \nabla f\right\Vert
_{L(n,1)}\preceq\left\Vert D^{2}f\right\Vert _{L(\frac{n}{2},1)},
\]
in other words
\begin{equation}
\left\Vert f\right\Vert _{L^{\infty}}\preceq\left\Vert D^{2}f\right\Vert
_{L(\frac{n}{2},1)}. \label{steine}
\end{equation}
\end{example}
\begin{remark}
Sobolev inequalities involving only the Laplacian are usually referred to as
\emph{reduced Sobolev inequalities} and there is a large literature devoted to
them. For example, in the context of the previous Example, since $n/2>1$ it is
possible to replace $D^{2}$ by the Laplacian in (\ref{steine}) (cf. the
discussion in \cite[Chapter V]{St}). The correct \emph{reduced} analog of
(\ref{steine}) when $n=2$ involves a stronger condition on the Laplacian, as
was recently shown by Steinerberger \cite{Stef}, who, in particular, shows
that for a domain $\Omega\subset R^{2}$ of finite measure, and $f\in
C^{2}(\Omega)\cap C(\bar{\Omega}),$ there exists an absolute constant $c>0$
such that
\[
\max_{x\in\Omega}\left\vert f(x)\right\vert \leq\max_{x\in\partial\Omega
}\left\vert f(x)\right\vert +c\max_{x\in\Omega}\int_{\Omega}\max\{1,\log
\frac{\left\vert \Omega\right\vert }{\left\vert x-y\right\vert ^{2}
}\}\left\vert \Delta f(y)\right\vert dy.
\]
In particular, when $f$ is zero at the boundary, Steinerberger's result gives
\begin{equation}
\max_{x\in\Omega}\left\vert f(x)\right\vert \leq c\max_{x\in\Omega}
\int_{\Omega}\max\{1,\log\frac{\left\vert \Omega\right\vert }{\left\vert
x-y\right\vert ^{2}}\}\left\vert \Delta f(y)\right\vert dy. \label{steine2}
\end{equation}
By private correspondence Steinerberger showed the author that (\ref{steine2})
implies an inequality of the form
\begin{equation}
\left\Vert f\right\Vert _{L^{\infty}(\Omega)}\preceq\left\Vert \Delta
f\right\Vert _{L^{1}(\Omega)}+\left\Vert \Delta f\right\Vert _{L(LogL)(\Omega
)}. \label{steine3}
\end{equation}
Let us informally put forward here that one can develop an approach to
Steinerberger's result (\ref{steine3}) using the symmetrization techniques of
this paper, if one uses a variant of symmetrization inequalities for the
Laplacian, originally obtained by Maz'ya-Talenti, that was recorded in
\cite[Theorem 13 (ii), page 178]{Mm}. We hope to give a detailed discussion elsewhere.
\end{remark}
\section{The Fractional Case}
In this section we remark that a good deal of the analysis can be also adapted
to the fractional case (cf. \cite{59}). Let us go through the details. Let
$X(R^{n})$ be a rearrangement invariant space, and let $\phi_{X}(t)=\left\Vert
\chi_{(0,t)}\right\Vert _{X},$ be its fundamental function. Let $w_{X}$ be the
modulus of continuity associated with $X:$
\[
w_{X}(t,f)=\sup_{\left\vert h\right\vert \leq t}\left\Vert f(\circ
+h)-f(\circ)\right\Vert _{X}.
\]
Our basic inequality will be (cf. \cite{50} and \cite{59})
\begin{equation}
f^{\ast\ast}(t)-f^{\ast}(t)\leq c_{n}\frac{w_{X}(t^{1/n},f)}{\phi_{X}(t)},f\in
C_{0}^{\infty}(R^{n}). \label{nueva}
\end{equation}
Let $\alpha\in(0,1),1\leq p\leq\frac{n}{\alpha},$ $1\leq q\leq\infty.$ Let
(with the usual modification if $q=\infty)$
\[
\left\Vert f\right\Vert _{\mathring{B}_{p}^{\alpha,q}}=\left\{ \int
_{0}^{\infty}[t^{-\alpha}w_{L^{p}}(t,f)]^{q}\frac{dt}{t}\right\} ^{1/q}.
\]
\begin{theorem}
Suppose that $\alpha\in(0,1),1\leq p\leq\frac{n}{\alpha},\frac{1}{\bar{p}
}=\frac{1}{p}-\frac{\alpha}{n}.$ Then, we have
\[
\left\Vert f\right\Vert _{L(\bar{p},q)}\preceq\left\Vert f\right\Vert
_{\mathring{B}_{p}^{\alpha,q}},f\in C_{0}^{\infty}(R^{n}).
\]
\end{theorem}
\begin{proof}
Consider first the case $q<\infty.$ Let $X=L^{p},$ then $\phi_{X}(t)=t^{1/p},$
consequently (\ref{nueva}) becomes
\[
f^{\ast\ast}(t)-f^{\ast}(t)\leq c_{n}\frac{w_{L^{p}}(t^{1/n},f)}{t^{1/p}},f\in
C_{0}^{\infty}(R^{n}),
\]
which yields
\begin{align*}
\left\{ \int_{0}^{\infty}[(f^{\ast\ast}(t)-f^{\ast}(t))t^{\frac{1}{\bar{p}}
}]^{q}\frac{dt}{t}\right\} ^{1/q} & =\left\{ \int_{0}^{\infty}[(f^{\ast
\ast}(t)-f^{\ast}(t))t^{-\alpha/n}t^{1/p}]^{q}\frac{dt}{t}\right\} ^{1/q}\\
& \leq c_{n}\left\{ \int_{0}^{\infty}[t^{-\alpha/n}w_{L^{p}}(t^{1/n}
,f)]^{q}\frac{dt}{t}\right\} ^{1/q}\\
& \simeq\left\{ \int_{0}^{\infty}[t^{-\alpha}w_{L^{p}}(t,f)]^{q}\frac{dt}
{t}\right\} ^{1/q}\\
& \simeq\left\Vert f\right\Vert _{\mathring{B}_{p}^{\alpha,q}}.
\end{align*}
It follows readily that
\[
\left\Vert f\right\Vert _{L(\bar{p},q)}\preceq\left\Vert f\right\Vert
_{\mathring{B}_{p}^{\alpha,q}},\text{ }f\in C_{0}^{\infty}(R^{n}).
\]
For the case $q=\infty$ we simply go back to
\begin{equation}
(f^{\ast\ast}(t)-f^{\ast}(t))t^{\frac{1}{\bar{p}}}\leq c_{n}t^{-\alpha/n}
w_{L^{p}}(t^{1/n},f),\label{antigua}
\end{equation}
and take a sup over all $t>0$.
\end{proof}
\begin{example}
Note that when $p=\frac{n}{\alpha},$ then $\frac{1}{\bar{p}}=0,$ consequently
if $1\leq q\leq\infty$, we have that for $f\in C_{0}^{\infty}(R^{n}),$
\begin{align}
\left\Vert f\right\Vert _{L(\infty,q)} & =\left\{ \int_{0}^{\infty
}[(f^{\ast\ast}(t)-f^{\ast}(t))]^{q}\frac{dt}{t}\right\} ^{1/q}
\label{nueva2}\\
& \leq c_{n}\left\Vert f\right\Vert _{\mathring{B}_{\frac{n}{\alpha}}
^{\alpha,q}}.\nonumber
\end{align}
In particular, if $q=1,$
\[
\left\Vert f\right\Vert _{L^{\infty}}=\left\Vert f\right\Vert _{L(\infty
,1)}\leq c_{n}\left\Vert f\right\Vert _{\mathring{B}_{\frac{n}{\alpha}
}^{\alpha,1}},f\in C_{0}^{\infty}(R^{n}).
\]
\end{example}
The corresponding result for Besov spaces anchored on Lorentz spaces follows
the same analysis. Let $1\leq p<\infty,1\leq r\leq\infty,1\leq q\leq
\infty,0<\alpha<1.$ We let (with the usual modification if $q=\infty$)
\[
\left\Vert f\right\Vert _{\mathring{B}_{L(p,r)}^{\alpha,q}}=\left\{ \int
_{0}^{\infty}[t^{-\alpha}w_{L(p,r)}(t,f)]^{q}\frac{dt}{t}\right\} ^{1/q}.
\]
Note that since
\[
\phi_{L(p,r)}(t)\sim t^{1/p},1\leq p<\infty,1\leq r\leq\infty,
\]
our basic inequality now takes the form
\begin{equation}
f^{\ast\ast}(t)-f^{\ast}(t)\leq c_{n}\frac{w_{L(p,r)}(t^{1/n},f)}{t^{1/p}
},f\in C_{0}^{\infty}(R^{n}),1\leq p<\infty,1\leq r\leq\infty. \label{denueva}
\end{equation}
Then, \textit{mutatis mutandis} we have
\begin{theorem}
Suppose that $\alpha\in(0,1),1\leq p\leq\frac{n}{\alpha},\frac{1}{\bar{p}
}=\frac{1}{p}-\frac{\alpha}{n}.$ Then, if $p>1,1\leq r\leq\infty,$ or $p=r=1,$
we have
\[
\left\Vert f\right\Vert _{L(\bar{p},q)}\preceq\left\Vert f\right\Vert
_{\mathring{B}_{L(p,r)}^{\alpha,q}},f\in C_{0}^{\infty}(R^{n}).
\]
\end{theorem}
\section{More Examples and Remarks}
\subsection{On the role of the $L(\infty,q)$ spaces}
In the range $1<p<n,$ (\ref{sobol1}) and (\ref{sobsup}) yield the classical
Sobolev inequalities. Suppose that $p=n.$ Then $\frac{1}{\bar{p}}=0,$ and
(\ref{sobol1}) becomes
\begin{equation}
\left\Vert f\right\Vert _{L(\infty,q)}\preceq\left\Vert \nabla f\right\Vert
_{L(n,q)},1\leq q\leq\infty. \label{hbr}
\end{equation}
When dealing with domains $\Omega$ with $\left\vert \Omega\right\vert
<\infty,$ from (\ref{sob2}) we get, $1\leq q\leq\infty,$
\begin{equation}
\left\{ \int_{0}^{\left\vert \Omega\right\vert }\left( f^{\ast\ast
}(s)-f^{\ast}(s)\right) ^{q}\frac{ds}{s}\right\} ^{1/q}\preceq\left\Vert
\nabla f\right\Vert _{L(n,q)},\text{ }f\in C_{0}^{\infty}(\Omega).
\label{hbr1}
\end{equation}
To compare this result with classical results it will be convenient to
normalize the \emph{norm} as follows
\[
\left\Vert f\right\Vert _{L(\infty,q)(\Omega)}=\left\{ \int_{0}^{\left\vert
\Omega\right\vert }\left( f^{\ast\ast}(s)-f^{\ast}(s)\right) ^{q}\frac
{ds}{s}\right\} ^{1/q}+\frac{1}{\left\vert \Omega\right\vert }\int_{\Omega
}\left\vert f(x)\right\vert dx.
\]
\begin{remark}
Note that this does not change the nature of (\ref{hbr1}) since if $f$ has
compact support on $\Omega,$ then if we let $t\rightarrow\left\vert
\Omega\right\vert $ in
\[
f^{\ast\ast}(t)-f^{\ast}(t)\leq c_{n}t^{1/n}(\nabla f)^{\ast\ast}(t)
\]
we find that
\begin{align*}
\frac{1}{\left\vert \Omega\right\vert }\int_{\Omega}\left\vert f(x)\right\vert
dx & =f^{\ast\ast}(\left\vert \Omega\right\vert )\leq\left\vert
\Omega\right\vert ^{1/n-1}\left\Vert \nabla f\right\Vert _{L^{1}(\Omega)}\\
& \leq\left\Vert \nabla f\right\Vert _{L(n,q)}.
\end{align*}
\end{remark}
Let us consider the case $q=n.$ It was shown in \cite[page 1227]{7} (the so
called Hansson-Brezis-Wainger-Maz'ya embedding) that
\begin{align*}
\left\{ \int_{0}^{\left\vert \Omega\right\vert }\left( \frac{f^{\ast\ast
}(s)}{1+\log\frac{\left\vert \Omega\right\vert }{s}}\right) ^{n}\frac{ds}
{s}\right\} ^{1/n} & \preceq\left\Vert f\right\Vert _{L(\infty,n)(\Omega
)}\\
& \preceq\left\Vert \nabla f\right\Vert _{L(n,q)}+\left\Vert f\right\Vert
_{L^{1}(\Omega)}.
\end{align*}
Therefore, (\ref{hbr}) implies an improvement on the
Hansson-Brezis-Wainger-Maz'ya embedding. The connection with $BMO$ appears
when $q=\infty,$ for then we have
\[
\left\Vert f\right\Vert _{L(\infty,\infty)}\preceq\left\Vert \nabla
f\right\Vert _{L(n,\infty)},f\in C_{0}^{\infty}(R^{n}).
\]
In the case $p=n,q=1.$ Then, (\ref{sobol1}) gives
\begin{equation}
\left\Vert f\right\Vert _{L(\infty,1)}\preceq\left\Vert \nabla f\right\Vert
_{L(n,1)},f\in C_{0}^{\infty}(R^{n}), \label{comparada}
\end{equation}
which ought to be compared with the following (cf. \cite{St1})
\begin{equation}
\left\Vert f\right\Vert _{L^{\infty}}\preceq\left\Vert \nabla f\right\Vert
_{L(n,1)},f\in C_{0}^{\infty}(R^{n}). \label{comparada1}
\end{equation}
Indeed, let us show that (\ref{comparada}) gives (\ref{comparada1}). From
\[
\frac{d}{dt}(tf^{\ast\ast}(t))=\frac{d}{dt}(\int_{0}^{t}f^{\ast}
(s)ds)=f^{\ast}(t),
\]
it follows (by the product rule of Calculus) that
\[
\frac{d}{dt}(f^{\ast\ast}(t))=-\left( \frac{f^{\ast\ast}(t)-f^{\ast}(t)}
{t}\right) .
\]
Therefore, if $f$ has compact support$,$
\begin{align*}
\left\Vert f\right\Vert _{L(\infty,1)} & =\lim_{t\rightarrow\infty}\int
_{0}^{t}\left( f^{\ast\ast}(s)-f^{\ast}(s)\right) \frac{ds}{s}
=\lim_{t\rightarrow\infty}\left( f^{\ast\ast}(0)-f^{\ast\ast}(t)\right) \\
& =\left\Vert f\right\Vert _{L^{\infty}}-\lim_{t\rightarrow\infty}\frac{1}
{t}\left\Vert f\right\Vert _{L^{1}}\\
& =\left\Vert f\right\Vert _{L^{\infty}}.
\end{align*}
\subsection{The Gagliardo-Nirenberg Inequality and Weak type vs Strong Type}
It is well known that the Sobolev inequalities have remarkable self improving
properties. In this section we wish to discuss the connections of these self
improving effects with symmetrization. The study is important when trying to
extend Sobolev inequalities to more general contexts.
We consider three forms of \ the Gagliardo-Nirenberg inequality. The strong
form of the Gagliardo-Nirenberg inequality
\begin{equation}
\left\Vert f\right\Vert _{L(n^{\prime},1)}\preceq\left\Vert \nabla
f\right\Vert _{L^{1}},f\in C_{0}^{\infty}(R^{n}), \label{via}
\end{equation}
which implies the classical version of the Gagliardo-Nirenberg inequality
\begin{equation}
\left\Vert f\right\Vert _{L^{n^{\prime}}}\preceq\left\Vert \nabla f\right\Vert
_{L^{1}},f\in C_{0}^{\infty}(R^{n}). \label{via2}
\end{equation}
which in turn implies the weaker version of the Gagliardo-Nirenberg inequality
\begin{equation}
\left\Vert f\right\Vert _{L(n^{\prime},\infty)}\preceq\left\Vert \nabla
f\right\Vert _{L^{1}},f\in C_{0}^{\infty}(R^{n}). \label{via1}
\end{equation}
Let us now show that (\ref{via1}) implies (\ref{via}). In \cite[(55) page
261]{A} we showed that (\ref{via1}) implies the symmetrization inequality
\begin{equation}
f^{\ast\ast}(t)-f^{\ast}(t)\preceq t^{1/n}(\nabla f)^{\ast\ast}(t). \label{v2}
\end{equation}
Conversely, (\ref{v2}) can be rewritten as
\begin{equation}
(f^{\ast\ast}(t)-f^{\ast}(t))t^{1-1/n}\preceq\int_{0}^{t}(\nabla f)^{\ast
}(s)ds. \label{v3}
\end{equation}
Consequently, taking a sup over all $t>0$ we see that (\ref{v2}) in turn
implies (\ref{via1}). Moreover, let us show that (\ref{v2}) implies the
isoperimetric inequality (here we ignore the issue of constants to simplify
the considerations). To see this suppose that $E$ is a bounded set with smooth
border and let $f_{n}$ be \ a sequence of smooth functions with compact
support such that $f_{n}\rightarrow\chi_{E}$ in $L^{1},$ with
\[
\left\Vert \nabla f_{n}\right\Vert _{L^{1}}\rightarrow Per(E)
\]
where $Per(E)$ is the perimeter of $E.$ Selecting $t>\left\vert E\right\vert
,$ we see that $(f_{n}^{\ast\ast}(t)-f_{n}^{\ast}(t))\rightarrow\frac{1}
{t}\left\vert E\right\vert ,$ therefore from (\ref{v3}) we find
\[
\frac{1}{t}\left\vert E\right\vert t^{1-1/n}\preceq Per(E)
\]
therefore letting $t\rightarrow\left\vert E\right\vert ,$ gives
\[
\left\vert E\right\vert ^{1-1/n}\preceq Per(E).
\]
This concludes our proof that (\ref{via1}) is equivalent to (\ref{via}) since
it is a well known consequence of the co-area formula that the isoperimetric
inequality is equivalent to (\ref{via}) (cf. \cite{67}). At the level of
symmetrization inequalities we have shown in \cite[page 263]{A} that
(\ref{via}) implies the symmetrization inequality
\begin{equation}
\int_{0}^{t}(f^{\ast\ast}(s)-f^{\ast}(s))s^{1-1/n}\frac{ds}{s}\preceq\int
_{0}^{t}(\nabla f)^{\ast}(s)ds. \label{v4}
\end{equation}
Moreover, conversely, taking a sup over all $t>0$ in (\ref{v4})$,$ shows that
(\ref{v4}) implies (\ref{via}).
A direct proof of the fact that (\ref{v4}) implies (\ref{v2}) is
straightforward. Indeed, starting with
\[
\int_{t/2}^{t}\left( f^{\ast\ast}(s)-f^{\ast}(s)\right) s^{1-1/n}\frac
{ds}{s}\preceq\int_{0}^{t}(\nabla f)^{\ast}(s)ds,
\]
and using the fact that $\left( f^{\ast\ast}(t)-f^{\ast}(t)\right)
t=\int_{f^{\ast}(t)}^{\infty}\lambda_{f}(s)ds$ increases, we see that
\[
\left( f^{\ast\ast}(t/2)-f^{\ast}(t/2)\right) t^{1-1/n}\preceq\int_{0}
^{t}(\nabla f)^{\ast}(s)ds,
\]
and (\ref{v2}) follows readily. The proof that we give now, showing that
(\ref{v2}) implies (\ref{v4}) is indirect. First, as we have seen (\ref{v2})
is equivalent to the validity of (\ref{via1}) which in turn implies the
following inequality\footnote{Note that by P\'{o}lya-Szeg\"{o}, $f^{\ast}$ is
absolutely continuous} due to Maz'ya-Talenti (cf. \cite{65}),
\begin{equation}
t^{1-1/n}[-f^{\ast}(t)]^{\prime}\preceq\frac{d}{dt}(\int_{\{\left\vert
f(x)\right\vert >f^{\ast}(t)\}}\left\vert \nabla f(x)\right\vert
dx).\label{v5}
\end{equation}
To proceed further we need a new expression for $f^{\ast\ast}(t)-f^{\ast}(t),$
which we derive integrating by parts:
\begin{align}
f^{\ast\ast}(t)-f^{\ast}(t) & =\frac{1}{t}\int_{0}^{t}[f^{\ast}(s)-f^{\ast
}(t)]ds\nonumber\\
& =\frac{1}{t}\left. (s[f^{\ast}(s)-f^{\ast}(t)])\right\vert _{s=0}
^{s=t}+\frac{1}{t}\int_{0}^{t}s[-f^{\ast}(s)]^{\prime}ds\nonumber\\
& =\frac{1}{t}\int_{0}^{t}s[-f^{\ast}(s)]^{\prime}ds.\label{numer}
\end{align}
Therefore,
\begin{align*}
\int_{0}^{t}\left( f^{\ast\ast}(s)-f^{\ast}(s)\right) s^{-1/n}ds &
=\int_{0}^{t}\frac{1}{s}\int_{0}^{s}u[-f^{\ast}(u)]^{\prime}dus^{-1/n}ds\\
& =-n\int_{0}^{t}\left( \int_{0}^{s}u[-f^{\ast}(u)]^{\prime}du\right)
ds^{-1/n}\\
& =\left. -n\left( \int_{0}^{s}u[-f^{\ast}(u)]^{\prime}du\right)
s^{-1/n}\right\vert _{s=0}^{s=t}+n\int_{0}^{t}s[-f^{\ast}(s)]^{\prime}
s^{-1/n}ds.
\end{align*}
We claim that we can discard the integrated term since its contribution makes
the right hand side smaller. To see this note that, since (\ref{v2}) holds,
(\ref{numer}) implies
\[
\left( \int_{0}^{s}u[-f^{\ast}(u)]^{\prime}du\right) s^{-1/n}=\left(
f^{\ast\ast}(s)-f^{\ast}(s)\right) s^{1-1/n}\preceq\int_{0}^{s}(\nabla
f)^{\ast}(u)du,
\]
which in turn implies that $\left( \int_{0}^{s}u[-f^{\ast}(u)]^{\prime
}du\right) s^{-1/n}\rightarrow0$ when $s\rightarrow0.$ Consequently, we can
continue our estimates to obtain,
\begin{align*}
\int_{0}^{t}\left( f^{\ast\ast}(s)-f^{\ast}(s)\right) s^{-1/n}ds & \preceq
n\int_{0}^{t}s[-f^{\ast}(s)]^{\prime}s^{-1/n}ds\\
& \preceq\int_{0}^{t}[-f^{\ast}(s)]^{\prime}s^{1-1/n}ds\\
& \preceq\int_{0}^{t}\frac{d}{dt}(\int_{\{\left\vert f(x)\right\vert
>f^{\ast}(s)\}}\left\vert \nabla f(x)\right\vert dx)ds\text{ \ \ (by(\ref{v5}
))}\\
& \leq\int_{\{\left\vert f(x)\right\vert >f^{\ast}(t)\}}\left\vert \nabla
f(x)\right\vert dx\\
& \leq\int_{0}^{t}\left( \nabla f\right) ^{\ast}(s)ds.
\end{align*}
Underlying these equivalences between weak and strong inequalities is the
Maz'ya truncation principle (cf. \cite{34}) which, informally, shows that,
contrary to what happens for most other inequalities in analysis, in the case
of Sobolev inequalities: weak implies strong!
In \cite{A} we showed the connection of the truncation method to a certain
form of extrapolation of inequalities initiated by Burkholder and Gundy. The
import of these considerations is that the symmetrization inequalities hold in
a very general context and allow for some unification of Sobolev inequalities.
For example, the preceding analysis and the corresponding symmetrization
inequalities can be extended for gradients defined in metric measure spaces
using a variety of methods. One method, often favored by probabilists, goes
via defining the gradient by suitable limits, in this case, under suitable
assumptions, we can use isoperimetry to reformulate the symmetrization
inequalities and embeddings (cf. \cite{63}, \cite{64}, and the references
therein). In the context of metric probability spaces with concave
isoperimetric profile $I,$ the basic inequality takes the form
\begin{equation}
f^{\ast\ast}(t)-f^{\ast}(t)\leq\frac{t}{I(t)}\left\vert \nabla f\right\vert
^{\ast\ast}(t).\label{v6}
\end{equation}
For example, if we consider $R^{n}$ with Gaussian measure, the isoperimetric
profile satisfies
\[
I(t)\sim t(\log\frac{1}{t})^{1/2},\text{ }t\text{ near zero.}
\]
Thus in the Gaussian case (\ref{v6}) yields logarithmic Sobolev inequalities
(cf. \cite{Mm}, \cite{63}, \cite{64}, for more on this story). A somewhat
different approach, which yields however similar symmetrization inequalities,
obtains if we define the gradient indirectly via Poincar\'{e} inequalities and
then derive the symmetrization inequalities using maximal inequalities. The
analysis here depends on a large body of classical research on maximal
functions and Poincar\'{e} inequalities (for the symmetrization inequalities
that result we refer to \cite{47}, and Kalis' 2007 PhD thesis at FAU).
\end{document} |
\begin{document}
\title{ Hausdorff operators on Bergman spaces of the upper half plane}
\author{Georgios Stylogiannis}
\maketitle
\begin{abstract}
In this paper we study Hausdorff operators on the Bergman spaces $A^{p}(\mathbb{U})$ of the upper half plane.
\end{abstract}
\section{Introduction}
Given a $\sigma$-finite positive Borel measure
$\mu$ on $ (0,\infty)$, the associated Hausdorff operator $\mathcal{H}_\mu$,
for suitable functions $f$ is given by
\begin{equation}\label{Df Hausd 2}
\mathcal{H}_{\mu}(f)(z):=\int_{0}^{\infty}\frac{1}{t}f\left(\frac{z}{t}\right)\,d\mu(t),\quad z\in \mathbb{U}
\end{equation}
where $\mathbb{U}=\{z\in\mathbb{C}: \mbox{Im}\,z>0\}$ is the upper half plane.
Its formal adjoint, the quasi-Hausdorff operator $\mathcal{H}^{*}_{\mu}$ in the case of real Hardy spaces $H^p(\mathbb{R})$ is
\begin{equation}\label{Df Hausd 1}
\mathcal{H}_{\mu}^{*}(f)(z):=\int_{0}^{\infty}f(tz)\,d\mu(t).
\end{equation}
Moreover for appropriate functions $f$ and measures $\mu$ they satisfy the fundamental identity:
\begin{equation}\label{Df Hausd 3}
\widehat{\mathcal{H}_{\mu}(f)}(x)=\int_{0}^{\infty}\widehat{f}(tx)\, d\mu(t)=
\mathcal{H}_{\mu}^{*}(\widehat{f})(x),\quad x\in\mathbb{R},
\end{equation}
where $\widehat{f}$ denotes the Fourier transform of $f$.
The theory of Hausdorff summability of Fourier series
started with the paper of Hausdorff \cite{Ha21} in 1921. Much later
Hausdorff summability of power series of analytic functions was considered in \cite{Si87} and \cite{Si90} on composition operators and the Ces\'{a}ro means in Hardy $H^p$ spaces. General Hausdorff matrices were
considered in \cite{GaSi01} and \cite{GaPa06}. In \cite{GaPa06} the authors studied Hausdorff matrices on a large class of analytic function spaces such as Hardy spaces, Bergman spaces, BMOA, Bloch etc. They characterized those Hausdorff matrices which induce bounded operators on these spaces.\\
Results on Hausdorff operators on spaces of analytic functions were extended in the Fourier
transform setting on the real line, starting with \cite{LiMo00} and \cite{Ka01}. There are
many classical operators in analysis which are special cases of the Hausdorff operator
if one chooses suitable measures $\mu$ such as the classical Hardy operator, its
adjoint operator, the Ces\'{a}ro type operators and the Riemann-Liouville fractional integral
operator. See the survey article \cite{Li013} and the references therein. In recent years, there
is an increasing interest on the study of boundedness of the Hausdorff operator on the
real Hardy spaces and Lebesgue spaces (see for example \cite{An03}, \cite{BaGo19}, \cite{FaLi14}, \cite{LiMo01} and \cite{HuKyQu18}).
Motivated by the paper of Hung et al. \cite{HuKyQu18} we describe the measures $\mu$ that will induce bounded operators on the Bergman spaces $A^{p}(\mathbb{U})$ of the upper half-plane. The next theorem summarizes the main results (see Theorems \ref{B Haus Berg} and \ref{Norm Haus Berg}):
\begin{Th}
Let $1\leq p< \infty$ and $\mu$ be a $\sigma$-finite positive measure on $(0,\infty)$. The Hausdorff operator $\mathcal{H}_{\mu}$ is bounded on $A^{p}(\mathbb{U})$ if and only if
$$
\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t)<\infty.
$$
Moreover
$$
||\mathcal{H}_{\mu}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}=\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t).
$$
\end{Th}
\section{Preliminaries}
To define single-valued functions, the principal value of the argument is chosen to
be in the interval $(-\pi, \pi]$. For $1\leq p < \infty$, we denote by $L^{p}(dA)$
the Banach space of all measurable functions on $\mathbb{U}$ such that
$$
||f||_{L^{p}(dA)}:=\left(\frac{1}{\pi}\int_{\mathbb{U}}|f|^p dA\right)^{1/p}<\infty,
$$
where $dA$ is the area measure.
The Bergman space $A^{p}(\mathbb{U})$ consists of all
holomorphic functions $f$ on $\mathbb{U}$ that belong to $L^{p}(dA)$.
Sub-harmonicity yields a constant $C> 0$ such that
\begin{equation}\label{Grouth in A(p,a)}
|f(z)|^{p} \leq \frac{C}{(\mbox{Im}(z))^{2}}||f||^{p}_{A^{p}(\mathbb{U})},\quad z\in \mathbb{U},
\end{equation}
for $f \in A^{p}(\mathbb{U})$ and
$$
\lim_{z\to \partial{\widehat{\mathbb{U}}}} (\mbox{Im}(z))^{2}|f(z)|^p = 0
$$
for functions in $A^{p}(\mathbb{U})$, where $\widehat{\mathbb{U}} :=
\overline{\mathbb{U}}\cup\{\infty\}$ (see \cite{ChKoSm17}). In particular, this
shows that each point evaluation is a continuous linear functional on
$A^{p}(\mathbb{U})$.
The duality properties of Bergman spaces are well known in the literature, see \cite{Zh90} and \cite{BaBoMiMi16}.
It is proved that for $1 < p < \infty$, $\frac{1}{p}+\frac{1}{q}=1$, the dual space of the Bergman space $A^{p}(\mathbb{U})$ is
$(A^{p}(\mathbb{U}))^{*}\sim A^{q}(\mathbb{U})$ under the duality pairing,
$$
\langle f,g\rangle= \frac{1}{\pi}\int_{\mathbb{U}}f(z)\overline{g(z)}\,dA(z).
$$
\section{Main results}
In what follows, unless otherwise stated, $\mu$ is a positive $\sigma$-finite
measure on $(0,\infty)$. We start by giving a condition under which
$\mathcal{H}_{\mu}$ is well defined.
\begin{Lem}\label{Well defined lemma 1}
Let $1\leq p<\infty$ and $f\in A^{p}(\mathbb{U})$. If $
\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t)<\infty, $ then
$$
\mathcal{H}_{\mu}(f)(z)=\int_{0}^{\infty}\frac{1}{t}f\left(\frac{z}{t}\right)\,d\mu(t)
$$
is a well defined holomorphic function on $\mathbb{U}$.
\end{Lem}
\begin{proof}
For $f\in A^{p}(\mathbb{U})$, using (\ref{Grouth in A(p,a)}) we have
\begin{align*}
|\mathcal{H}_{\mu}(f)(z)|&\leq \int_{0}^{\infty}\frac{1}{t}\left|f\left(\frac{z}{t}\right)\right|\,d\mu(t)\\
&\leq C\,\frac{||f||_{A^{p}(\mathbb{U})}}{\mbox{Im}(z)^{\frac{2}{p}}}\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t)<\infty.
\end{align*}
Thus $\mathcal{H}_{\mu}f$ is well defined, and is given by an absolutely convergent integral, so it is holomorphic.
\end{proof}
\begin{Lem}\label{Grouth f(e)}
Let $\lambda>0$ and $\delta>0$. If $g_{\lambda, \delta}(z)=|z+\delta i|^{-\frac{2+\lambda}{p}}$, then
$$
\left(\frac{1}{2}\right)^{2+\lambda} \cdot \frac{1}{\lambda \delta^{\lambda}}\leq ||g_{\lambda,\delta}||_{L^{p}(dA)}^{p}\leq2^{\frac{2+\lambda}{2}} \cdot\frac{1}{\lambda\delta^{\lambda}}.
$$
\end{Lem}
\begin{proof}
Using polar coordinates for the integral over $\mathbb{U}$ we find
\begin{align*}
||g_{\lambda,\delta}||_{L^{p}(dA)}^{p}
&=\frac{1}{\pi}\int_{\mathbb{U}}\left|\frac{1}{z+ \delta i}\right|^{2+\lambda}dA(z)\\
&=\frac{1}{\pi}\int_{0}^{\pi}\int_{0}^{\infty}\left(\frac{1}{r^{2}+\delta^{2}+2r\delta\sin(\theta)}\right)^{\frac{2+\lambda}{2}}r\,drd\theta.
\end{align*}
Denote by $I$ the last double integral. Then
\begin{align*}
I&\leq \int_{0}^{\infty}\left(\frac{1}{r^{2}+\delta^{2}}\right)^{\frac{2+\lambda}{2}}rdr\\
&\leq 2^{\frac{2+\lambda}{2}} \int_{0}^{\infty}\left(\frac{1}{r+\delta}\right)^{2+\lambda}(r+\delta)dr\\
&=2^{\frac{2+\lambda}{2}}\frac{1}{\lambda\delta^{\lambda}}.
\end{align*}
On the other hand,
\begin{align*}
I&\geq \int_{0}^{\infty}\left(\frac{1}{r+\delta}\right)^{2+\lambda}rdr\\
&\geq \int_{\delta}^{\infty}\left(\frac{1}{r+\delta}\right)^{2+\lambda}rdr\\
&\geq \left(\frac{1}{2}\right)^{2+\lambda} \int_{\delta}^{\infty}\left(\frac{1}{r}\right)^{2+\lambda}rdr\\
&=\left(\frac{1}{2}\right)^{2+\lambda} \frac{1}{\lambda \delta^{\lambda}},
\end{align*}
and the assertion follows.
\end{proof}
\subsection{Test functions} We now consider the test functions which are defined as follows. Let
$z=x+iy\in \mathbb{U}$ and
$$
\varphi_{\varepsilon}(z)=\frac{\overline{z+\varepsilon i}}{|z+\varepsilon i|}
=\frac{x-i(y+\varepsilon)}{\sqrt{x^2+(y+\varepsilon)^2}}
$$
and
$$
f_{\varepsilon}(z)=\frac{1}{(z+ \varepsilon i)^{\frac{2}{p}+\varepsilon}},
$$
with $\varepsilon>0$ small enough. Note that $|f_{\varepsilon}|\equiv
g_{p\varepsilon,\varepsilon}$ with respect to the notation of Lemma \ref{Grouth
f(e)}, and that $\varphi_{\varepsilon}(z)$ lies on the unit circle with
$-\pi<\arg(\varphi_{\varepsilon}(z))< 0$, and the following identity
holds
\begin{align*}
f_{\varepsilon}(z)=\varphi_{\varepsilon}(z)^{\frac{2}{p}+\varepsilon}|f_{\varepsilon}(z)|.
\end{align*}
Let $a,b \in (-\pi,\pi]$ and set $A_{[a,b]}= \{ z\in\mathbb{U}: a\leq \arg(z)\leq b\}$, with obvious modifications in the case of $A_{(a,b)}, A_{(a,b]}$ and $A_{[a,b)}$.
\begin{Lem}\label{Grouth f(e) 1}
The following holds:\\
\noindent $(i)$ If $2<p<\infty$ and $\frac{2}{p}+\varepsilon\leq1$, then
\begin{equation*}
|\mbox{Re}\,f_{\varepsilon}(z)|\geq |\mbox{Re}\,\varphi_{\varepsilon}(z)|
|f_{\varepsilon}(z)|
\end{equation*}
for every $z\in A_{(0,\frac{\pi}{2}]}$.\\
\noindent $(ii)$ If $1<p\leq 2$ and $1<\frac{2}{p}+\varepsilon<2$, then
\begin{equation*}
|\mbox{Im}\,f_{\varepsilon}(z)|> C(p)|\mbox{Im}\,\varphi_{\varepsilon}(z)|
|f_{\varepsilon}(z)|
\end{equation*}
for every $z\in A_{[\frac{\pi}{4},\frac{\pi}{2}]}$.\\
\noindent $(iii)$ If $p=1$, $0<\theta_0<\frac{\pi}{16}$ and $(2+\varepsilon)(\frac{\pi}{2}+\theta_0)<\frac{5\pi}{4}$, then
\begin{equation*}
|\mbox{Re}\,f_{\varepsilon}(z)|> |\mbox{Re}\,\varphi_{\varepsilon}(z)|
|f_{\varepsilon}(z)|
\end{equation*}
for every $z\in A_{[\frac{\pi}{2},\frac{\pi}{2}+\theta_{0}]}$.
\end{Lem}
\begin{proof}
Taking real and imaginary parts we have
\begin{align*}
\mbox{Re}\,f_{\varepsilon}(z)=|f_{\varepsilon}(z)|\mbox{Re}\,\varphi_{\varepsilon}(z)^{\frac{2}{p}+\varepsilon}=
|f_{\varepsilon}(z)|\cos((\frac{2}{p}+\varepsilon)\theta)
\end{align*}
and
\begin{align*}
\mbox{Im}\,f_{\varepsilon}(z)=|f_{\varepsilon}(z)|\mbox{Im}\,\varphi_{\varepsilon}(z)^{\frac{2}{p}+\varepsilon}=
|f_{\varepsilon}(z)|\sin((\frac{2}{p}+\varepsilon)\theta),
\end{align*}
where $\theta:=\theta(z,\varepsilon)=\arg\varphi_{\varepsilon}(z)$. \\
\noindent $(i)$: It is easy to see that
\begin{align*}
|\mbox{Re}\,f_{\varepsilon}(z)|&=|f_{\varepsilon}(z)|\cos((\frac{2}{p}+\varepsilon)\theta)\\
&\geq |f_{\varepsilon}(z)|\cos(\theta)=|\mbox{Re}\,\varphi_{\varepsilon}(z)|
|f_{\varepsilon}(z)|.
\end{align*}
\noindent $(ii)$: Let $a=a(p)>0$ such that $1<\frac{2}{p}+\varepsilon<a<2$. Since $z\in A_{[\frac{\pi}{4}, \frac{\pi}{2}]}$, simple geometric arguments imply that $\theta \in [-\frac{\pi}{2}, -\frac{\pi}{4})$. Moreover $-\pi<- \frac{a\pi}{2}<(\frac{2}{p}+\varepsilon) \theta < -\frac{\pi}{4}$. This implies that
$$
\min\{\sin(\frac{a\pi}{2}),\frac{\sqrt{2}}{2}\}<\left|\frac{\sin((\frac{2}{p}+\varepsilon) \theta)}{\sin(\theta)}\right|< \sqrt{2}
$$
for every $z\in A_{[\frac{\pi}{4}, \frac{\pi}{2}]}$. We calculate
\begin{align*}
&|\mbox{Im}\,f_{\varepsilon}(z)|=|f_{\varepsilon}(z)||\sin((\frac{2}{p}+\varepsilon)\theta)|> \min\{\sin(\frac{a\pi}{2}),\frac{\sqrt{2}}{2}\} |f_{\varepsilon}(z)||\sin(\theta)|\\
&= \min\{\sin(\frac{a\pi}{2}),\frac{\sqrt{2}}{2}\}|\mbox{Im}\,\varphi_{\varepsilon}(z)|
|f_{\varepsilon}(z)|.
\end{align*}
This proves $(ii)$ with $C(p)=\min\{\sin(\frac{a(p)\pi}{2}),\frac{\sqrt{2}}{2}\}$.\\
\noindent $(iii)$: Since $z\in A_{[\frac{\pi}{2},\frac{\pi}{2}+\theta_{0}]}$ we have that $\theta\in (-\frac{\pi}{2}-\theta_{0}, -\frac{\pi}{2}]$. Thus
$$
-\frac{5\pi}{4}<-(\frac{\pi}{2}+\theta_0)(2+\varepsilon)< (2+\varepsilon)\theta\leq -\frac{\pi}{2}(2+\varepsilon)<-\pi.
$$
This implies that $|\cos((2+\varepsilon)\theta)|>|\cos(\theta)|$ and therefore
\begin{align*}
|\mbox{Re}\,f_{\varepsilon}(z)|&>|\mbox{Re}\,\varphi_{\varepsilon}(z)|
|f_{\varepsilon}(z)|.
\end{align*}
\end{proof}
\subsection{Growth estimates}
Let $a,b \in (-\pi,\pi]$ and set
$$
S_{[a,b]}= \{ z\in\mathbb{U}: a\leq \arg(z)\leq b, \, |z|\geq1\},
$$
a truncated sector, with obvious modifications in the case of $S_{(a,b)}, S_{(a,b]}$ and $S_{[a,b)}$.
Since $\mu$ is positive,
$$\mbox{Re}\, \mathcal{H}_{\mu}(f_{\varepsilon})=\mathcal{H}_{\mu}(\mbox{Re}\,f_{\varepsilon})\quad \mbox{and} \quad
\mbox{Im}\, \mathcal{H}_{\mu}(f_{\varepsilon})=\mathcal{H}_{\mu}(\mbox{Im}\,f_{\varepsilon}).
$$
Note that if $\mbox{Re}\,f_{\varepsilon}$ or $\mbox{Im}\,f_{\varepsilon}$ have constant sign on some sector $A$, then
$$
|\mathcal{H}_{\mu}(\mbox{Re}\,f_{\varepsilon})(z)|=\mathcal{H}_{\mu}(|\mbox{Re}\,f_{\varepsilon}|)(z) \quad \mbox{and} \quad|\mathcal{H}_{\mu}(\mbox{Im}\,f_{\varepsilon})(z)|=\mathcal{H}_{\mu}(|\mbox{Im}\,f_{\varepsilon}|)(z)
$$
for every $z\in A$.
\begin{Lem}\label{Grouth Hf(e) 1}
Let $1\leq p<\infty$ and suppose that $\mathcal{H}_{\mu}$ is bounded on
$A^{p}(\mathbb{U})$. Then there are positive constants $\varepsilon(p)$ and $k(p)$ such that
$$
||\mathcal{H}_{\mu}(f_{\varepsilon})||_{A^{p}(\mathbb{U})}^{p}\geq k(p)\left(\int_{0}^{\frac{1}{\varepsilon}} \frac{1}{t^{1-\frac{2}{p}-\varepsilon}}d\mu(t)\right)^{p}\frac{1}{p\varepsilon},
$$
for every $\varepsilon$ in $(0,\varepsilon(p)]$.
\end{Lem}
\begin{proof}
We will consider three cases for the range of $p$. Note that if $z$ is in a truncated sector $S$ then $z/t$ belongs to the corresponding sector $A$ for every $t>0$. \\
\noindent \textbf{Case I}. Let $2<p<\infty$ and $\varepsilon(p)$ such that $\frac{2}{p}+\varepsilon(p)<1$. Then for every $\varepsilon$ in $(0,\varepsilon(p)]$
\begin{align*}
||\mathcal{H}_{\mu}(f_{\varepsilon})||_{A^{p}(\mathbb{U})}^{p}&\geq ||\mbox{Re}\,\mathcal{H}_{\mu}(f_{\varepsilon})||_{A^{p}(\mathbb{U})}^{p}
=||\mathcal{H}_{\mu}(\mbox{Re}\, f_{\varepsilon})||_{A^{p}(\mathbb{U})}^{p}\\
&\geq\frac{1}{\pi}\int_{S_{(0,\frac{\pi}{2}]}}\left|\int_{0}^{\infty}\frac{1}{t}\mbox{Re}\, f_{\varepsilon}(z/t)\,d\mu(t)\right|^{p} dA(z)\\
&= \frac{1}{\pi}\int_{S_{(0,\frac{\pi}{2}]}}\left(\int_{0}^{\infty}\frac{1}{t}|\mbox{Re}\, f_{\varepsilon}(z/t)|\,d\mu(t)\right)^{p} dA(z).
\end{align*}
Denote by $I$ the last integral on $S_{(0,\frac{\pi}{2}]}$.
By $(i)$ of Lemma \ref{Grouth f(e) 1} we have
\begin{align*}
&I\geq \frac{1}{\pi}\int_{S_{(0,\frac{\pi}{2}]}}\left(\int_{0}^{\frac{1}{\varepsilon}}\frac{1}{t}|\mbox{Re}\, \varphi_{\varepsilon}(z/t)|\,|f_{\varepsilon}(z/t)|\,d\mu(t)\right)^{p} dA(z)\\
&=\frac{1}{\pi}\int_{S_{(0,\frac{\pi}{2}]}}\left(\int_{0}^{\frac{1}{\varepsilon}} \left(\frac{1}{\sqrt{x^2+ (y+t\varepsilon)^2}}\right)^{\frac{2}{p}+\varepsilon+1}\frac{d\mu(t)}{t^{1-\frac{2}{p}-\varepsilon}}\right)^{p} x^{p}dxdy.
\end{align*}
Using polar coordinates and noting that $t\varepsilon <|z|$ for $|z|\geq1$ and $t\leq \varepsilon^{-1}$, we have
\begin{align*}
&I\geq\int_{1}^{\infty} \int_{0}^{\frac{\pi}{2}}\left(\int_{0}^{\frac{1}{\varepsilon}} \left(\frac{1}{\sqrt{r^2+ 2rt\varepsilon\sin(\theta)+t^{2}\varepsilon^2}}\right)^{\frac{2}{p}+\varepsilon+1}\frac{d\mu(t)}{t^{1-\frac{2}{p}-\varepsilon}}\right)^{p} r^{p+1}(\cos(\theta))^{p}\frac{d\theta}{\pi} dr\\
&\geq\int_{1}^{\infty} \int_{0}^{\frac{\pi}{2}}\left(\int_{0}^{\frac{1}{\varepsilon}} \left(\frac{1}{\sqrt{r^2+ 2rt\varepsilon+t^{2}\varepsilon^2}}\right)^{\frac{2}{p}+\varepsilon+1}\frac{d\mu(t)}{t^{1-\frac{2}{p}-\varepsilon}}\right)^{p} r^{p+1}(\cos(\theta))^{p}\frac{d\theta}{\pi} dr\\
&\geq k(p)\left(\int_{0}^{\frac{1}{\varepsilon}} \frac{1}{t^{1-\frac{2}{p}-\varepsilon}}d\mu(t)\right)^{p}\int_{1}^{\infty}\frac{1}{r^{1+p\varepsilon}}dr\\
&=k(p)\left(\int_{0}^{\frac{1}{\varepsilon}} \frac{1}{t^{1-\frac{2}{p}-\varepsilon}}d\mu(t)\right)^{p}\frac{1}{p\varepsilon},
\end{align*}
where $k(p)=2^{-p(\varepsilon+1)}\int_{0}^{\frac{\pi}{2}}(\cos(\theta))^{p}\frac{d\theta}{4\pi}$.\\
\noindent \textbf{Case II}. Let $1<p\leq 2$ and $\varepsilon(p)$ such that $1<\frac{2}{p}+\varepsilon(p)<2$. Then for every $\varepsilon$ in $(0,\varepsilon(p)]$
\begin{align*}
||\mathcal{H}_{\mu}(f_{\varepsilon})||_{A^{p}(\mathbb{U})}^{p}&
\geq\frac{1}{\pi}\int_{S_{[\frac{\pi}{4},\frac{\pi}{2}]}}\left|\int_{0}^{\infty}\frac{1}{t}\mbox{Im}\, f_{\varepsilon}(z/t)\,d\mu(t)\right|^{p} dA(z)\\
&= \frac{1}{\pi}\int_{S_{[\frac{\pi}{4},\frac{\pi}{2}]}}\left(\int_{0}^{\infty}\frac{1}{t}|\mbox{Im}\, f_{\varepsilon}(z/t)|\,d\mu(t)\right)^{p} dA(z).
\end{align*}
Denote by $I$ the last integral on $S_{[\frac{\pi}{4},\frac{\pi}{2}]}$.
By $(ii)$ of Lemma \ref{Grouth f(e) 1} we have
\begin{align*}
&I\geq \frac{C(p)}{\pi}\int_{S_{[\frac{\pi}{4},\frac{\pi}{2}]}}\left(\int_{0}^{\frac{1}{\varepsilon}}\frac{1}{t}|\mbox{Im}\, \varphi_{\varepsilon}(z/t)|\,|f_{\varepsilon}(z/t)|\,d\mu(t)\right)^{p} dA(z)\\
&\geq\frac{C(p)}{\pi}\int_{S_{[\frac{\pi}{4},\frac{\pi}{2}]}}\left(\int_{0}^{\frac{1}{\varepsilon}} \left(\frac{1}{\sqrt{x^2+ (y+t\varepsilon)^2}}\right)^{\frac{2}{p}+\varepsilon+1}\frac{d\mu(t)}{t^{1-\frac{2}{p}-\varepsilon}}\right)^{p} y^{p}dxdy.
\end{align*}
Using polar coordinates and working as in Case I, we arrive at the desired conclusion with constant $k(p)=C(p)2^{-p(\varepsilon+1)}\int_{\frac{\pi}{4}}^{\frac{\pi}{2}}(\sin(\theta))^{p}\frac{d\theta}{4\pi}$.\\
\noindent \textbf{Case III}. Let $p=1$ and $\theta_0$ as in Lemma \ref{Grouth f(e) 1}. Let $\varepsilon(1)$ be such that $(2+\varepsilon(1))(\frac{\pi}{2}+\theta_{0})<\frac{5\pi}{4}$. Then for every $\varepsilon$ in $(0,\varepsilon(1)]$
\begin{align*}
||\mathcal{H}_{\mu}(f_{\varepsilon})||_{A^{1}(\mathbb{U})}&
\geq\frac{1}{\pi}\int_{S_{[\frac{\pi}{2},\frac{\pi}{2}+\theta_{0}]}}\left|\int_{0}^{\infty}\frac{1}{t}\mbox{Re}\, f_{\varepsilon}(z/t)\,d\mu(t)\right| dA(z)\\
&= \frac{1}{\pi}\int_{S_{[\frac{\pi}{2},\frac{\pi}{2}+\theta_{0}]}}\int_{0}^{\infty}\frac{1}{t}|\mbox{Re}\, f_{\varepsilon}(z/t)|\,d\mu(t)\, dA(z).
\end{align*}
Denote by $I$ the last integral on $S_{[\frac{\pi}{2},\frac{\pi}{2}+\theta_{0}]}$.
By $(iii)$ of Lemma \ref{Grouth f(e) 1} we have
\begin{align*}
&I\geq \frac{1}{\pi}\int_{S_{[\frac{\pi}{2},\frac{\pi}{2}+\theta_{0}]}}\int_{0}^{\frac{1}{\varepsilon}}\frac{1}{t}|\mbox{Re}\, \varphi_{\varepsilon}(z/t)|\,|f_{\varepsilon}(z/t)|\,d\mu(t)\, dA(z)\\
&=\frac{1}{\pi}\int_{S_{[\frac{\pi}{2},\frac{\pi}{2}+\theta_{0}]}}\int_{0}^{\frac{1}{\varepsilon}} \left(\frac{1}{\sqrt{x^2+ (y+t\varepsilon)^2}}\right)^{3+\varepsilon}\frac{d\mu(t)}{t^{-1-\varepsilon}} (-x)dxdy.
\end{align*}
Using polar coordinates and working as in Case I, we arrive at the desired conclusion with constant $k(1)=-2^{-(\varepsilon+1)}\int_{\frac{\pi}{2}}^{\frac{\pi}{2}+\theta_{0}}\cos(\theta)\frac{d\theta}{4\pi}$.\\
\end{proof}
\begin{Th}\label{B Haus Berg}
Let $1\leq p<\infty$. The operator $\mathcal{H}_{\mu}$ is bounded on
$A^{p}(\mathbb{U})$ if and only if
$$
\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t)<\infty.
$$
\end{Th}
\begin{proof}
Suppose that
$$
\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t)<\infty;
$$
then Lemma \ref{Well defined lemma 1} implies that $\mathcal{H}_{\mu}(f)$ is well defined and holomorphic in $\mathbb{U}$.
An easy computation involving the Minkowski inequality shows that for all
$1\leq p<\infty$
\begin{align*}
||\mathcal{H}_{\mu}(f)||_{A^{p}(\mathbb{U})}&=\left(\int_{\mathbb{U}}\left|\int_{0}^{\infty}\frac{1}{t}f\left(\frac{z}{t}\right)\,d\mu(t)\right|^{p}dA(z)\right)^{1/p}\\
&\leq ||f||_{A^{p}(\mathbb{U})}\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\, d\mu(t)<\infty.
\end{align*}
Thus $\mathcal{H}_{\mu}$ is bounded on $A^{p}(\mathbb{U})$.\\
Conversely, suppose that $\mathcal{H}_{\mu}$ is bounded. Let $f_{\varepsilon}(z)=(z+ \varepsilon i)^{-(\frac{2}{p}+\varepsilon)}$ with $\varepsilon>0$ small enough. By Lemma \ref{Grouth f(e)}
\begin{equation}\label{norm f(e)}
||f_\varepsilon||^{p}_{A^{p}(\mathbb{U})}\sim \frac{1}{p\varepsilon \varepsilon^{p\varepsilon}}.
\end{equation}
Moreover Lemma \ref{Grouth Hf(e) 1} implies that there is a constant $k=k(p)>0$ such that
\begin{align*}
||f_\varepsilon||^{p}_{A^{p}(\mathbb{U})} ||\mathcal{H}_{\mu}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}&\geq ||\mathcal{H}_{\mu}(f_{\varepsilon})||_{A^{p}(\mathbb{U})}^{p}\\
&\geq k \left(\int_{0}^{\frac{1}{\varepsilon}} \frac{1}{t^{1-\frac{2}{p}-\varepsilon}}d\mu(t)\right)^{p}\frac{1}{p\varepsilon}.
\end{align*}
Thus by letting $\varepsilon\to 0$, we have in comparison to (\ref{norm f(e)})
$$
\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t)<\infty.
$$
\end{proof}
On our way to computing the norm of $\mathcal{H}_{\mu}$ we will first compute the norm of the truncated Hausdorff operator $\mathcal{H}_{\mu}^{\delta}$ given by
$$
\mathcal{H}_{\mu}^{\delta}(f)(z):=\int_{0}^{\infty}\frac{1}{t}f\left(\frac{z}{t}\right)X_{[\delta,1/\delta]}(t)\,d\mu(t)=
\int_{\delta}^{\frac{1}{\delta}}\frac{1}{t}f\left(\frac{z}{t}\right)\,d\mu(t).
$$
\begin{Prop}\label{Norm Haus Berg Truncate}
Let $1\leq p<\infty$ and $0<\delta<1$. If
$$
\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t)<\infty,
$$
then $\mathcal{H}_{\mu}^{\delta}$ is bounded with
$$
||\mathcal{H}_{\mu}^{\delta}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}=\int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t).
$$
\end{Prop}
\begin{proof}
As in Theorem \ref{B Haus Berg}, an application of the Minkowski inequality gives
\begin{equation}\label{Cut Hausd}
||\mathcal{H}_{\mu}^{\delta}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}\leq \int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t).
\end{equation}
Let $f_{\varepsilon}(z)=(z+i)^{-\frac{2}{p}-\varepsilon}$ with $\varepsilon>0$ small enough. We calculate
\begin{align*}
\mathcal{H}_{\mu}^{\delta}(f_{\varepsilon})(z)&-f_{\varepsilon}(z)\int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t)\\
&=\int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}\left(\varphi_{\varepsilon,z}(t)-\varphi_{\varepsilon,z}(1)\right)d\mu(t),
\end{align*}
where
$$
\varphi_{\varepsilon,z}(t)=\frac{t^{\varepsilon}}{(z+ti)^{\frac{2}{p}+\varepsilon}}.
$$
For any $t \in [\delta, 1/\delta]$, calculus gives
\begin{align*}
\left|\varphi_{\varepsilon,z}(t)-\varphi_{\varepsilon,z}(1)\right|&\leq|t-1|\sup\{|\varphi_{\varepsilon,z}'(s)|:s\in [\delta, 1/\delta]\}\\
&\leq \frac{1}{\delta}\left(\frac{\varepsilon \delta^{\varepsilon-1}}{|z+i\delta|^{\frac{2}{p}+\varepsilon}}+
\frac{(\frac{2}{p}+\varepsilon) (1/\delta)^{\varepsilon}}{|z+i\delta|^{\frac{2}{p}+\varepsilon+1}} \right)\\
&=\varepsilon \delta ^{\varepsilon-2}g_{p\varepsilon,\delta}(z)+ (\frac{2}{p}+\varepsilon) (1/\delta)^{\varepsilon+1}g_{p(\varepsilon+1),\delta}(z),
\end{align*}
where above we followed the notation of Lemma \ref{Grouth f(e)}.
Thus by an easy application of the Minkowski inequality followed by the triangle inequality we have
\begin{align*}
||\mathcal{H}_{\mu}^{\delta}(f_{\varepsilon})(z)&-f_{\varepsilon}(z)\int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t)||_{A^{p}(\mathbb{U})}\\
&\leq\int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}||\left(\varphi_{\varepsilon,z}(t)-\varphi_{\varepsilon,z}(1)\right)||_{A^{p}(\mathbb{U})}d\mu(t)\\
&\leq \int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t)\left(\varepsilon \delta ^{\varepsilon-2}||g_{p\varepsilon,\delta}||_{L^{p}(dA)}+(\frac{2}{p}+\varepsilon) (1/\delta)^{\varepsilon+1}||g_{p(\varepsilon+1),\delta}||_{L^{p}(dA)}\right).
\end{align*}
This, together with Lemma \ref{Grouth f(e)} (recall that $|f_{\varepsilon}|=g_{p\varepsilon,\varepsilon}$), yields
\begin{align*}
&\frac{||\mathcal{H}_{\mu}^{\delta}(f_{\varepsilon})(z)-f_{\varepsilon}(z)\int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}
d\mu(t)||_{A^{p}(\mathbb{U})}}{||f_{\varepsilon}||_{A^{p}(\mathbb{U})}}\\
&\leq \int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t)\times
\frac{\varepsilon \delta ^{\varepsilon-2}||g_{p\varepsilon,\delta}||_{L^{p}(dA)}+(\frac{2}{p}+\varepsilon) (1/\delta)^{\varepsilon+1}||g_{p(\varepsilon+1),\delta}||_{L^{p}(dA)}}{||f_{\varepsilon}||_{A^{p}(\mathbb{U})}}\to 0
\end{align*}
as $\varepsilon \to 0$. This and (\ref{Cut Hausd}) imply that
$$
\int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t)= ||\mathcal{H}_{\mu}^{\delta}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}.
$$
\end{proof}
\begin{Th}\label{Norm Haus Berg}
Let $1\leq p<\infty$. If
$$
\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t)<\infty,
$$
then
$$
||\mathcal{H}_{\mu}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}=\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\,d\mu(t).
$$
\end{Th}
\begin{proof}
By Theorem \ref{B Haus Berg} we have that
$$
||\mathcal{H}_{\mu}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}\leq \int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}\, d\mu(t).
$$
The Minkowski inequality implies that
\begin{equation}\label{Diff Hausd}
||\mathcal{H}_{\mu}-\mathcal{H}_{\mu}^{\delta}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}\leq \int_{(0,\delta)\cup (\frac{1}{\delta},\infty)}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t).
\end{equation}
By Proposition \ref{Norm Haus Berg Truncate},
$$
\int_{\delta}^{\frac{1}{\delta}}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t)= ||\mathcal{H}_{\mu}^{\delta}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}.
$$
This, combined with (\ref{Diff Hausd}), allows us to conclude that
$$
||\mathcal{H}_{\mu}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}\geq \int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t)-
2\int_{(0,\delta)\cup(1/\delta,\infty)}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t)\to\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t)
$$
as $\delta\to 0$. Hence,
$$
||\mathcal{H}_{\mu}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}=\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t).
$$
\end{proof}
\subsection{The quasi-Hausdorff operator}
Let $f,g\in A^{2}(\mathbb{U})$ and assume that $\mathcal{H}_{\mu}$ is bounded on $A^{2}(\mathbb{U})$. Thus
$$
\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t)<\infty.
$$
We have
\begin{align*}
\int_{\mathbb{U}}\int_{0}^{\infty}\frac{1}{t}\left|f\left(\frac{z}{t}\right)\right||g(z)|\, d\mu(t) dA(z)&\leq
\left(\int_{\mathbb{U}}\left(\int_{0}^{\infty}\frac{1}{t}\left|f\left(\frac{z}{t}\right)\right|\, d\mu(t)\right)^{2} dA(z)\right)^{1/2}||g||_{A^{2}(\mathbb{U})}\\
&\leq \left(\int_{0}^{\infty}\frac{1}{t^{1-\frac{2}{p}}}d\mu(t)\right)^{1/2} ||f||_{A^{2}(\mathbb{U})}||g||_{A^{2}(\mathbb{U})}<\infty,
\end{align*}
where we applied the Cauchy-Schwarz and Minkowski inequalities. Therefore
\begin{align*}
\langle \mathcal{H}_{\mu}(f),g\rangle &=\frac{1}{\pi}\int_{\mathbb{U}}\mathcal{H}_{\mu}(f)(z)\overline{g(z)}dA(z)\\
&=\frac{1}{\pi}\int_{\mathbb{U}}\int_{0}^{\infty}\frac{1}{t}f(\frac{z}{t})\, d\mu(t) \overline{g(z)}dA(z)\\
&=\frac{1}{\pi}\int_{0}^{\infty}\int_{\mathbb{U}}\frac{1}{t}f(\frac{z}{t}) \overline{g(z)}\,dA(z)\, d\mu(t)\\
&=\frac{1}{\pi}\int_{\mathbb{U}}f(z) \overline{\int_{0}^{\infty} t g(tz)\, d\mu(t)}\,dA(z),
\end{align*}
where we applied a change of variables and Fubini's Theorem twice. This means that the adjoint $\mathcal{H}_{\mu}^{*}$ of $\mathcal{H}_{\mu}$ on $A^{2}(\mathbb{U})$ is
$$
\mathcal{H}_{\mu}^{*}(f)(z)=\int_{0}^{\infty} t f(tz)\, d\mu(t).
$$
We will consider $\mathcal{H}_{\mu}^{*}$ on $A^{p}(\mathbb{U})$ and suppose for a moment that it is well defined for functions in $A^{p}(\mathbb{U})$.
Let $\lambda(t)=t^{-1}, t>0$; then $\lambda$ maps $(0,\infty)$ onto $(0,\infty)$ and is measurable. Set $f(tz)=f_{z}(t)$; then
\begin{align*}
\mathcal{H}_{\mu}^{*}(f)(z)&=\int_{0}^{\infty} t f(tz)\, d\mu(t)\\
&=\int_{0}^{\infty} t f_{z}(t)\, d\mu(t)\\
&=\int_{0}^{\infty} \frac{1}{\lambda(t)} f_{z}\left(\frac{1}{\lambda(t)}\right)\, d\mu(t)\\
&=\int_{0}^{\infty} \frac{1}{t} f\left(\frac{z}{t}\right)\, d\nu(t)\\
&=\mathcal{H}_{\nu}(f)(z),
\end{align*}
where $d\nu =d \lambda_{*}(\mu)(t)$ and $\lambda_{*}(\mu)$ is the push-forward measure of $\mu$ with respect to $\lambda$.
We can now apply the results of the first part of the paper to have:
\begin{Th}
Let $1\leq p< \infty$. The quasi-Hausdorff operator $\mathcal{H}_{\mu}^{*}$ is bounded on $A^{p}(\mathbb{U})$ if and only if
$$
\int_{0}^{\infty}t^{1-\frac{2}{p}}\,d\mu(t)<\infty.
$$
Moreover
$$
||\mathcal{H}_{\mu}^{*}||_{A^{p}(\mathbb{U})\to A^{p}(\mathbb{U})}=\int_{0}^{\infty}t^{1-\frac{2}{p}}\,d\mu(t).
$$
\end{Th}
\begin{thebibliography}{99}
\bibitem[An03]{An03} K. F. Andersen, Boundedness of Hausdorff operators on $L^{p}(\mathbb{R}^{n})$, $H^{1}(\mathbb{R}^{n})$, and $BMO(\mathbb{R}^{n})$. Acta Sci. Math. (Szeged) 69 (2003), no. 1-2, 409-418.\\
\bibitem[BaGo19]{BaGo19} R. Bandaliyev, and P. G\'{o}rka, Hausdorff operator in Lebesgue spaces. Math. Inequal. Appl. 22 (2019), no. 2, 657-676.\\
\bibitem[BaBoMiMi16]{BaBoMiMi16} S. Ballamoole, J. O. Bonyo, T. L. Miller and V. G. Miller, Ces\'{a}ro-like operators on the Hardy and Bergman spaces of the half plane. Complex Anal. Oper. Theory 10 (2016), no. 1, 187-203.\\
\bibitem[ChDaFa18]{ChDaFa18} J. Chen, J. Dai, D. Fan and X. Zhu, Boundedness of Hausdorff operators on Lebesgue spaces and Hardy spaces. Sci. China Math. 61 (2018), no. 9, 1647-1664.\\
\bibitem[ChFaZh16]{ChFaZh16} J. Chen, D. Fan and X. Zhu, The Hausdorff operator on the Hardy space $H^{1}(\mathbb{R}^{1})$. Acta Math. Hungar. 150 (2016), no. 1, 142-152.\\
\bibitem[ChKoSm17]{ChKoSm17} Choe B. R., Koo H. and Smith W. Difference of composition operators over the half-plane. Trans. Amer. Math. Soc. 369 (2017), no. 5, 3173-3205.\\
\bibitem[FaLi14]{FaLi14} D. Fan and X. Lin, Hausdorff operator on real Hardy spaces. Analysis (Berlin) 34 (2014), no. 4, 319-337.\\
\bibitem[GaSi01]{GaSi01} P. Galanopoulos and A. G. Siskakis, Hausdorff matrices and composition operators. Illinois J. Math. 45 (2001), no. 3, 757-773.\\
\bibitem[GaPa06]{GaPa06} P. Galanopoulos and M. Papadimitrakis, Hausdorff and quasi-Hausdorff matrices on spaces of analytic
functions. Canad. J. Math. 58 (2006), no. 3, 548-579.\\
\bibitem[Ga07]{Ga07} J. B. Garnett, Bounded analytic functions. Revised first edition. Graduate Texts in Mathematics, 236. Springer, New York, 2007.\\
\bibitem[Ge92]{Ge92} C. Georgakis, The Hausdorff mean of a Fourier-Stieltjes transform. Proc. Amer. Math. Soc. 116 (1992), no. 2, 465-471. \\
\bibitem[GiMo95]{GiMo95} D. V. Giang and F. M\'{o}ricz, The Ces\'{a}ro operator is bounded on the Hardy space $H^1$. Acta Sci. Math. (Szeged) 61 (1995), no. 1-4, 535-544.\\
\bibitem[Go59]{Go59} R. R. Goldberg, Averages of Fourier coefficients. Pacific J. Math. 9 (1959), 695-699.\\
\bibitem[Go98]{Go98} B. I. Golubov, Boundedness of the Hardy and Hardy-Littlewood operators in the spaces Re$H^1$
and BMO (in Russian). Mat. Sb. 188(1997), 93-106. - English transl. in Russian Acad. Sci. Sb.
Math. 86 (1998).\\
\bibitem[Ha21]{Ha21} F. Hausdorff, Summationsmethoden und Momentfolgen I. Math. Z. 9(1921), 74-109.\\
\bibitem[HuSi17]{HuSi17} W. A. Hurwitz and L. L. Silverman, The consistency and equivalence of certain definitions of summability.
Trans. Amer. Math. Soc. 18(1917), 1-20.\\
\bibitem[HuKyQu18]{HuKyQu18} H. D. Hung, L. D. Ky, and T. T. Quang, Norm of the Hausdorff operator on the real Hardy space $H^1(\mathbb{R})$. Complex Anal. Oper. Theory 12 (2018), no. 1, 235-245.\\
\bibitem[Ka01]{Ka01} Y. Kanjin, The Hausdorff operators on the real Hardy spaces $H^p(\mathbb{R})$. Studia Math. 148 (2001), no. 1, 37-45.\\
\bibitem[Ko80]{Ko80} P. Koosis, Introduction to $H^p$ spaces. With an appendix on Wolff's proof of the corona theorem. London Mathematical Society Lecture
Note Series, 40. Cambridge University Press, Cambridge-New York, 1980.\\
\bibitem[Li013]{Li013} E. Liflyand, Hausdorff operators on Hardy spaces. Eurasian Math. J. 4 (2013), no. 4, 101-141.\\
\bibitem[LiMi09]{LiMi09} E. Liflyand and A. Miyachi, Boundedness of the Hausdorff operators in $H^p$ spaces, $0<p<1$. Studia Math. 194 (2009), no. 3, 279-292.\\
\bibitem[LiMo02]{LiMo02} E. Liflyand, F. M\'{o}ricz, Commuting relations for Hausdorff operators and Hilbert transforms on real Hardy spaces. Acta Math. Hungar. 97 (2002), no. 1-2, 133-143. \\
\bibitem[LiMo01]{LiMo01} E. Liflyand, F. M\'{o}ricz, The multi-parameter Hausdorff operator is bounded on the product Hardy space $H^{11}(\mathbb{R}\times\mathbb{R})$. Analysis (Munich) 21 (2001), no. 2, 107-118. \\
\bibitem[LiMo00]{LiMo00} E. Liflyand and F. M\'{o}ricz, The Hausdorff operator is bounded on the real Hardy space $H^1(\mathbb{R})$, Proc. Amer. Math. Soc. 128 (2000), no. 5, 1391-1396.\\
\bibitem[Mo05]{Mo05} F. M\'{o}ricz, Multivariate Hausdorff operators on the spaces $H^{1}(\mathbb{R}^{n})$ and $BMO(\mathbb{R}^{n})$. Anal. Math. 31 (2005), no. 1, 31-41.\\
\bibitem[RuFa16]{RuFa16} J. Ruan and D. Fan, Hausdorff operators on the power weighted Hardy spaces. J. Math. Anal. Appl. 433 (2016), no. 1, 31-48.\\
\bibitem[Si87]{Si87} A. G. Siskakis, Composition operators and the Ces\'{a}ro operator on $H^p$. J. London Math. Soc. (2) 36 (1987), no. 1, 153-164.\\
\bibitem[Si90]{Si90} A. G. Siskakis, The Ces\'{a}ro operator is bounded on $H^1$. Proc. Amer. Math. Soc. 110 (1990), no. 2, 461-462.\\
\bibitem[Zh90]{Zh90} K. H. Zhu, Operator theory in function spaces. Monographs and Textbooks in Pure and Applied Mathematics, 139. Marcel Dekker, Inc., New York, 1990.
\end{thebibliography}
\end{document}
\begin{document}
\title[ORTHOGONALITY PRESERVING]
{ON VOLTERRA AND ORTHOGONALITY PRESERVING QUADRATIC STOCHASTIC
OPERATORS}
\author{Farrukh Mukhamedov}
\address{Farrukh Mukhamedov\\
Department of Computational \& Theoretical Sciences\\
Faculty of Science, International Islamic University Malaysia\\
P.O. Box, 141, 25710, Kuantan\\
Pahang, Malaysia}
\email{{\tt far75m@yandex.ru} {\tt farrukh\_m@iium.edu.my}}
\author{Muhammad Hafizuddin Bin Mohd Taha}
\address{Muhammad Hafizuddin Bin Mohd Taha\\
Department of Computational \& Theoretical Sciences\\
Faculty of Science, International Islamic University Malaysia\\
P.O. Box, 141, 25710, Kuantan\\
Pahang, Malaysia}
\subjclass{Primary 37E99; Secondary 37N25, 39B82, 47H60, 92D25}
\keywords{Quadratic stochastic operator, Volterra operator,
orthogonal preserving.}
\begin{abstract}
A quadratic stochastic operator (in short QSO) is usually used to
present the time evolution of differing species in biology. Some
quadratic stochastic operators have been studied by Lotka and
Volterra. In the present paper, we first give a simple
characterization of Volterra QSO in terms of absolute continuity
of discrete measures. Moreover, we provide its generalization in
the continuous setting. Further, we introduce a notion of orthogonal
preserving QSO, and describe such kind of operators defined on the
two-dimensional simplex. It turns out that orthogonal preserving QSOs
are permutations of Volterra QSO. The associativity of genetic
algebras generated by orthogonal preserving QSO is studied too.
\end{abstract}
\maketitle
\section{Introduction}
The history of quadratic stochastic operators (QSO) can be traced
back to Bernstein's work \cite{B} where such kind of operators
appeared from the problems of population genetics (see also
\cite{Ly2}). Such operators, which describe the time evolution of
a variety of species in biology, are represented by so-called
Lotka--Volterra (LV) systems \cite{L,V1,V2}.
A quadratic stochastic operator is usually used to present the time
evolution of species in biology, which arises as follows. Consider a
population consisting of $m$ species (or traits) $1,2,\cdots,m$. We
denote a set of all species (traits) by $I=\{1,2,\cdots,m\}$. Let
$x^{(0)}=\left(x_1^{(0)},\cdots,x_m^{(0)}\right)$ be a probability
distribution of species at an initial state and $P_{ij,k}$ be a
probability that individuals in the $i^{th}$ and $j^{th}$ species
(traits) interbreed to produce an individual from the $k^{th}$ species
(trait). Then a probability distribution
$x^{(1)}=\left(x_{1}^{(1)},\cdots,x_{m}^{(1)}\right)$ of the species
(traits) in the first generation can be found as a total
probability, i.e.,
\begin{equation*} x_k^{(1)}=\sum_{i,j=1}^m P_{ij,k} x_i^{(0)} x_j^{(0)}, \quad k=\overline{1,m}.
\end{equation*}
This means that the association $x^{(0)} \to x^{(1)}$ defines a
mapping $V$ called \textit{the evolution operator}. The population
evolves by starting from an arbitrary state $x^{(0)},$ then passing
to the state $x^{(1)}=V(x^{(0)})$ (the first generation), then to
the state
$x^{(2)}=V(x^{(1)})=V(V(x^{(0)}))=V^{(2)}\left(x^{(0)}\right)$ (the
second generation), and so on. Therefore, the evolution states of
the population system are described by the following discrete dynamical
system
$$x^{(0)}, \quad x^{(1)}=V\left(x^{(0)}\right), \quad x^{(2)}=V^{(2)}\left(x^{(0)}\right), \quad x^{(3)}=V^{(3)}\left(x^{(0)}\right), \cdots$$
In other words, a QSO describes a distribution of the next
generation if the distribution of the current generation was given.
The fascinating applications of QSO to population genetics were
given in \cite{Ly2}. Furthermore, the quadratic stochastic operator
was considered an important source of analysis for the study of
dynamical properties and modelings in various fields such as biology
\cite{HHJ,HS,May,MO,NSE}, physics \cite{PL,T}, economics and
mathematics \cite{G,Ly2,T,U,V}.
In \cite{11}, a self-contained exposition of the
recent achievements and open problems in the theory of the QSO was
given. The main problem in the nonlinear operator theory is to study the
behavior of nonlinear operators. This problem was not fully finished
even in the class of QSO (the QSO is the simplest nonlinear
operator). The difficulty of the problem depends on the given cubic
matrix $(P_{ijk})_{i,j,k=1}^m$. An asymptotic behavior of the QSO
even on the small dimensional simplex is complicated \cite{MSQ,V,Z}.
In the present paper, we first give a simple characterization of
Volterra QSO (see \cite{G}) in terms of absolute continuity of
discrete measures (see Section 3). Further, in Section 4 we
introduce a notion of orthogonal preserving QSO, and describe such
kind of operators defined on the two-dimensional simplex. It turns out
that orthogonal preserving QSOs are permutations of Volterra QSO. In
Section 5, we study the associativity of genetic algebras generated by
orthogonal preserving QSO.
\section{Preliminaries}
An evolutionary operator of a free population is a (quadratic)
mapping of the simplex
\begin{equation}\label{1.2}
S^{m-1}=\{\mathbf{x}=(x_1,\ldots,x_m)\in \mathbb{R}^m|x_i\geq0, \ \
\sum_{i=1}^mx_i=1\}
\end{equation}
into itself of the form
\begin{equation}\label{1.3}
V:x_k^{\prime}=\sum_{i,j=1}^mP_{ij,k}x_ix_j, \ \ k=1,2,\ldots,m
\end{equation}
where $P_{ij,k}$ are coefficients of heredity and
\begin{equation}\label{1.4}
P_{ij,k}\geq0, \ \ P_{ij,k}=P_{ji,k}, \ \ \sum_{k=1}^{m}P_{ij,k}=1,
\ \ i,j,k=1,2,\ldots,m
\end{equation}
Such a mapping is called a \textit{quadratic stochastic operator
(QSO)}.
Note that every element $\mathbf{x}\in S^{m-1}$ is a probability
distribution on $E=\{1,\ldots,m\}$. The population evolves starting
from an arbitrary initial state $\mathbf{x}\in S^{m-1}$ (probability
distribution on $E$) to the state
$\mathbf{x}^{\prime}=V(\mathbf{x})$ in the next generation, then to
the state
$\mathbf{x}^{\prime\prime}=V^2(\mathbf{x})=V(V(\mathbf{x}))$, and so
on.
For a given $\mathbf{x}^{(0)}\in S^{m-1}$, the trajectory
$$\{\mathbf{x}^{(m)}\}, \ \ m=0,1,2,\ldots$$
of $\mathbf{x}^{(0)}$ under the action of the QSO \eqref{1.3} is defined
by
$$\mathbf{x}^{(m+1)}=V(\mathbf{x}^{(m)}), \ \ m=0,1,2,\ldots$$
A QSO $V$ defined by \eqref{1.3} is called a \textit{Volterra
operator} \cite{G} if one has
\begin{equation}\label{1.5}
P_{ij,k}=0 \ \ \mbox{if} \ \ k\not\in \{i,j\}, \ \ \forall i,j,k\in
E.
\end{equation}
Note that the biological meaning of condition
\eqref{1.5} is that the offspring repeats one of its parents'
genotypes (see \cite{G,11}).
\begin{defn}
Let $\mathbf{x}=(x_1,\ldots,x_n)$ and $\mathbf{y}=(y_1,\ldots,y_n)$.
$\mathbf{x}$ is \textit{equivalent} to $\mathbf{y}$
($\mathbf{x}\sim\mathbf{y}$) if
\begin{itemize}
\item[(i)] $\mathbf{x}\prec\mathbf{y}$ ($\mathbf{x}$ is \textit{absolutely
continuous} with respect to $\mathbf{y}$) if $y_k=0\Rightarrow
x_k=0$,
\item[(ii)] $\mathbf{y}\prec\mathbf{x}$ if $x_k=0\Rightarrow
y_k=0$.
\end{itemize}
\end{defn}
\begin{defn}
Let $I=\{1,2,\ldots,n\}$ and $Supp(x)=\{i\in I|x_i\neq0\}$. Then
$\mathbf{x}$ is \textit{singular} or \textit{orthogonal} to
$\mathbf{y}$ ($\mathbf{x}\bot\mathbf{y}$) if $Supp(\mathbf{x})\cap
Supp(\mathbf{y})=\emptyset$.
\end{defn}
Note that $\mathbf{x}\bot\mathbf{y}$ implies
$\mathbf{x}\cdot\mathbf{y}=0$, whenever $\mathbf{x},\mathbf{y}\in S^{n-1}$. Here
$\mathbf{x}\cdot\mathbf{y}$ stands for the usual scalar product in
$\mathbb{R}^n$.
\section{On Volterra QSO}
In this section we are going to give a characterization of Volterra
quadratic operators in terms of the above given order. Note that the
dynamics of Volterra QSO was investigated in \cite{G}. Certain other
properties of such kind of operators have been studied in \cite{MS}.
Some generalizations of Volterra QSO were studied in
\cite{MS1,RN,RZ}.
Recall that the vertices of the simplex $S^{m-1}$ are described by
the elements $e_k=(\delta_{1k},\delta_{2k},\dots,\delta_{mk})$,
where $\delta_{ik}$ is the Kronecker delta.
\begin{thm}
Let $V:S^{n-1}\rightarrow S^{n-1}$ be a QSO. Then the following
conditions are equivalent:
\begin{itemize}
\item[(i)] $V$ is a Volterra QSO;
\item[(ii)] one has $V(\mathbf{x})\prec \mathbf{x}$ for all $\mathbf{x}\in
S^{n-1}$.
\end{itemize}
\end{thm}
\begin{proof} (i)$\Rightarrow$ (ii). It is known \cite{G} that any
Volterra QSO can be represented as follows:
\begin{equation}\label{1v}
(V(x))_k=x_k\left(1+\sum\limits_{i=1}^{m}a_{ki}x_i\right), \
k=\overline{1,m},
\end{equation}
where $a_{ki}=-a_{ik}$, $|a_{ki}|\le 1$.
From this equality we immediately get $V(\mathbf{x})\prec \mathbf{x}$
for all $\mathbf{x}\in S^{n-1}$.
(ii)$\Rightarrow$ (i). Let $\mathbf{x}=e_k$, $(k\in\{1,\dots,n\})$.
Then due to $V(\mathbf{x})\prec \mathbf{x}$ from \eqref{1.3} one
finds
\begin{equation}\label{3v}
P_{kk,k}=1 \qquad P_{kk,i}=0, \ i\neq k.
\end{equation}
Now assume that $\mathbf{x}=\lambda e_i+(1-\lambda)e_j$, where $\lambda\in(0,1)$.
Let $k\notin \{i,j\}$, then from \eqref{1.3} one finds that
\begin{equation}\label{2v}
V(\mathbf{x})_k=P_{ii,k}\lambda^2+2\lambda(1-\lambda)P_{ij,k}+P_{jj,k}(1-\lambda)^2
\end{equation}
Taking into account \eqref{3v} and the relation $V(\mathbf{x})\prec
\mathbf{x}$ with \eqref{2v} one gets $P_{ij,k}=0$. This completes
the proof.
\end{proof}
The proved theorem characterizes Volterra QSO in terms of absolute
continuity of distributions. Therefore, this theorem allows us to
define such kind of operators in abstract settings. Let us
demonstrate it.
Assume that $(E,{\mathcal F})$ is a measurable space and $S(E,{\mathcal F})$ is the
set of all probability measures on $(E,{\mathcal F})$.
Recall that a mapping $V :S(E,{\mathcal F})\to S(E,{\mathcal F})$ is called a
\textit{quadratic stochastic operator (QSO)} if, for an arbitrary
measure $\lambda\in S(E,{\mathcal F})$, the measure $\lambda'= V(\lambda)$ is defined as
follows
\begin{equation}\label{VQ}
\lambda'(A)=\int_E\int_E P(x,y,A)d\lambda(x)d\lambda(y), \ \ A\in{\mathcal F},
\end{equation}
where $P(x,y,A)$ satisfies the following conditions:
\begin{enumerate}
\item[(i)] $P(x,y,\cdot)\in S(E,{\mathcal F})$ for any fixed $x,y\in E$;
\item[(ii)] for any fixed $A\in{\mathcal F}$ the function $P(x,y,A)$ is
measurable in the two variables $x$ and $y$ on $(E\times
E,{\mathcal F}\otimes{\mathcal F})$;
\item[(iii)] the function $P(x,y,A)$ is symmetric, i.e.
$P(x,y,A)=P(y,x,A)$ for any $x,y\in E$ and $A\in{\mathcal F}$.
\end{enumerate}
Note that when $E$ is finite, i.e. $E=\{1,\dots,m\}$, then a QSO on
$S(E,{\mathcal F})= S^{m-1}$ is defined as in \eqref{1.3} with $P_{ij,k}=
P(i, j, k)$.
A certain construction of QSO in the general setting was studied in
\cite{GR}.
We recall that a measure $\mu\in S(E,{\mathcal F})$ is \textit{absolutely
continuous} w.r.t. a measure $\nu\in S(E,{\mathcal F})$ if $\nu(A)=0$ implies
$\mu(A)=0$, and this is denoted by $\mu\prec\nu$. Put
$$
\textrm{null}(\mu)=\bigcup\limits_{\mu(A)=0} A.
$$
Then the support of the measure $\mu$ is defined by
$supp(\mu)=E\setminus\textrm{null}(\mu)$. Two measures $\mu,\nu\in
S(E,{\mathcal F})$ are called \textit{singular} if $supp(\mu)\cap
supp(\nu)=\emptyset$, and this is denoted by $\mu\perp\nu$.
\begin{defn}
A QSO given by \eqref{VQ} is called \textit{Volterra} if
$V\lambda\prec\lambda$ for all $\lambda\in S(E,{\mathcal F})$.
\end{defn}
\begin{thm} Let $V$ be given by \eqref{VQ}. Then $V$ is a Volterra QSO
if and only if $P(x,y,A)=0$ for all $x,y\notin A$.
\end{thm}
\begin{proof} First we assume that $V$ is a Volterra QSO. Take any
$x,y\in E$ and consider the measure $\nu=\frac{1}{2}(\delta_x+\delta_y)$,
where $\delta_x$ is a delta-measure, i.e. $\delta_x(A)=\chi_A(x)$. Then from
\eqref{VQ} one finds that
$$
V(\nu)(A)=\frac{1}{4}\big(P(x,x,A)+P(y,y,A)+2P(x,y,A)\big).
$$
From $V\nu\prec\nu$ and $\nu(A)=0$ (if $x,y\notin A$) we infer that
$V(\nu)(A)=0$, which yields that $P(x,x,A)=P(y,y,A)=P(x,y,A)=0$ if
$x,y\notin A$.
Let us suppose that $P(x,y,A)=0$ is valid for all $x,y\notin A$.
Assume that for $\mu\in S(E,{\mathcal F})$ one has $\mu(B)=0$ for some
$B\in{\mathcal F}$. Let us show that $V(\mu)(B)=0$. Indeed, from \eqref{VQ}
and the conditions one gets
\begin{align*}
V(\mu)(B)&=\int_E\int_E P(x,y,B)d\mu(x)d\mu(y)\\
&=\int_{E\setminus B}\int_{E\setminus B} P(x,y,B)d\mu(x)d\mu(y)+
\int_{E\setminus B}\int_{B}
P(x,y,B)d\mu(x)d\mu(y)\\
&\quad+\int_{B}\int_{E\setminus B} P(x,y,B)d\mu(x)d\mu(y)+\int_{B}\int_{B}
P(x,y,B)d\mu(x)d\mu(y)\\
&=\int_{E\setminus B}\int_{E\setminus B} P(x,y,B)d\mu(x)d\mu(y)=0
\end{align*}
This completes the proof.
\end{proof}
\section{Orthogonal Preserving (OP) QSO in 2D Simplex}
We recall that two vectors $\mathbf{x}$ and $\mathbf{y}$ belonging
to $S^{n-1}$ are called \textit{singular} or \textit{orthogonal} if
$\mathbf{x}\cdot\mathbf{y}=0$.
A mapping $V:S^{n-1}\to S^{n-1}$ is called \textit{Orthogonal
Preserving (O.P.)} if one has $V(\mathbf{x})\perp V(\mathbf{y})$
whenever $\mathbf{x}\perp\mathbf{y}$.
In this section we are going to describe orthogonal preserving QSO
defined on the 2D simplex.
Let us assume that $V:S^2\to S^2$ is an orthogonal preserving QSO.
This means that
\begin{equation*}
V(1,0,0)\perp V(0,1,0) \perp V(0,0,1)
\end{equation*}
Now from the definition of QSO, we immediately get
\begin{equation*}
(P_{11,1},P_{11,2},P_{11,3})\perp (P_{22,1},P_{22,2},P_{22,3})\perp
(P_{33,1},P_{33,2},P_{33,3})
\end{equation*}
Since in the simplex $S^2$ there are 3 mutually orthogonal vectors, which are
$$(1,0,0),\ \ (0,1,0), \ \ (0,0,1),$$ we conclude that the vectors
$$(P_{11,1},P_{11,2},P_{11,3}), \ \ (P_{22,1},P_{22,2},P_{22,3}), \ \
(P_{33,1},P_{33,2},P_{33,3})$$ must be a permutation of the given
orthogonal vectors. Therefore we have 6 possibilities and we
consider each of these possibilities one by one.
Let us first assume that
\begin{equation*}
\begin{matrix}
P_{11,1}=0 & P_{11,2}=0 & P_{11,3}=1 \\
P_{22,1}=0 & P_{22,2}=1 & P_{22,3}=0 \\
P_{33,1}=1 & P_{33,2}=0 & P_{33,3}=0
\end{matrix}
\end{equation*}
Now our aim is to find conditions for the other coefficients of the
given QSO. Let us consider the following vectors
\begin{equation*}
\mathbf{x}=\left(\frac{1}{2},\frac{1}{2},0 \right) \qquad
\mathbf{y}=(0,0,1)
\end{equation*}
which are clearly orthogonal. One can see that
\begin{align*}
V(\mathbf{x})&=\frac{1}{4}(2P_{12,1},2P_{12,2}+1,1+2P_{12,3}) \\
V(\mathbf{y})&=(1,0,0)
\end{align*}
Therefore, the orthogonal preservability of $V$ yields $P_{12,1}=0$.
From $\sum^3_{i=1}P_{12,i}=1$ one gets
\begin{equation*}
P_{12,2}+P_{12,3}=1
\end{equation*}
Now consider
\begin{equation*}
\mathbf{x}=\left(0,\frac{1}{2},\frac{1}{2} \right) \qquad
\mathbf{y}=(1,0,0)
\end{equation*}
Then we have
\begin{align*}
V(\mathbf{x})&=\frac{1}{4}(2P_{23,1}+1,1+2P_{23,2},2P_{23,3}), \\
V(\mathbf{y})&=(0,0,1)
\end{align*}
Again the orthogonal preservability of $V$ implies $P_{23,3}=0$ and
hence we get
\begin{equation*}
P_{23,1}+P_{23,2}=1
\end{equation*}
Now consider
\begin{equation*}
\mathbf{x}=\left(\frac{1}{2},0,\frac{1}{2} \right) \qquad
\mathbf{y}=(0,1,0)
\end{equation*}
Then one has
\begin{align*}
V(\mathbf{x})&=\frac{1}{4}(1+2P_{13,1},2P_{13,2},1+2P_{13,3}), \\
V(\mathbf{y})&=(0,1,0)
\end{align*}
Hence, we conclude that $P_{13,2}=0$ and get
\begin{equation*}
P_{13,1}+P_{13,3}=1
\end{equation*}
Taking into account the obtained equations, we denote
\begin{equation*}
P_{12,2}=\alpha \qquad P_{23,1}=\beta \qquad P_{13,1}=\gamma
\end{equation*}
Correspondingly one gets
\begin{equation*}
P_{12,3}=1-\alpha \qquad P_{23,2}=1-\beta \qquad P_{13,3}=1-\gamma
\end{equation*}
Therefore $V$ has the following form
\begin{equation*}
V^{(1)}_{\alpha,\beta,\gamma}: \left\{
\begin{array}{l}
x'=z^2+2\gamma xz+2\beta yz\\
y'=y^2+2\alpha xy+2(1-\beta)yz\\
z'=x^2+2(1-\alpha) xy+2(1-\gamma)xz
\end{array} \right.
\end{equation*}
Similarly, considering other possibilities we obtain the following
operators:
\begin{equation*}
V^{(2)}_{\alpha,\beta,\gamma} : \left\{
\begin{array}{l}
x'=x^2+2\alpha xy+2\gamma xz\\
y'=y^2+2(1-\alpha) xy+2\beta yz\\
z'=z^2+2(1-\gamma) xz+2(1-\beta)yz
\end{array} \right.
\end{equation*}
\begin{equation*}
V^{(3)}_{\alpha,\beta,\gamma}: \left\{
\begin{array}{l}
x'=x^2+2\alpha xy+2\gamma xz\\
y'=z^2+2(1-\gamma) xz+2\beta yz\\
z'=y^2+2(1-\alpha) xy+2(1-\beta)yz
\end{array} \right.
\end{equation*}
\begin{equation*}
V^{(4)}_{\alpha,\beta,\gamma}: \left\{
\begin{array}{l}
x'=y^2+2\alpha xy+2\beta yz\\
y'=z^2+2\gamma xz+2(1-\beta)yz\\
z'=x^2+2(1-\alpha) xy+2(1-\gamma)xz
\end{array} \right.
\end{equation*}
\begin{equation*}
V^{(5)}_{\alpha,\beta,\gamma}: \left\{
\begin{array}{l}
x'=y^2+2\alpha xy+2\beta yz\\
y'=x^2+2(1-\alpha) xy+2\gamma xz\\
z'=z^2+2(1-\gamma) xz+2(1-\beta)yz
\end{array} \right.
\end{equation*}
\begin{equation*}
V^{(6)}_{\alpha,\beta,\gamma}:\left\{
\begin{array}{l}
x'=z^2+2\gamma xz+2\beta yz\\
y'=x^2+2\alpha xy+2(1-\gamma)xz\\
z'=y^2+2(1-\alpha) xy+2(1-\beta)yz
\end{array} \right.
\end{equation*}
So, if $V$ is an OP QSO, then it can be one of the above given
operators. Now we are going to show that the obtained operators are
indeed orthogonal preserving.
\begin{thm}\label{OP} Let $V$ be an orthogonal preserving QSO. Then
$V$ has one of the following forms:
\begin{equation}\label{list}
V^{(1)}_{\alpha,\beta,\gamma}, \ V^{(2)}_{\alpha,\beta,\gamma}, \
V^{(3)}_{\alpha,\beta,\gamma}, \ V^{(4)}_{\alpha,\beta,\gamma}, \
V^{(5)}_{\alpha,\beta,\gamma}, \ V^{(6)}_{\alpha,\beta,\gamma}.
\end{equation}
\end{thm}
\begin{proof}
According to the above calculations we have six listed
operators. Now we show that these operators are indeed OP. Without loss
of generality, we may consider the operator
$V^{(1)}_{\alpha,\beta,\gamma}$.
Assume that $\mathbf{x}\perp \mathbf{y}$. Then there are the following
possibilities:
\begin{equation*}
\mathbf{x}\perp \mathbf{y}\Longleftrightarrow \left\{
\begin{array}{l}
\quad \mathbf{x}=(x,y,0) \quad \mathbf{y}=(0,0,1), \\
\quad \mathbf{x}=(x,0,z) \quad \mathbf{y}=(0,1,0), \\
\quad \mathbf{x}=(0,y,z) \quad \mathbf{y}=(1,0,0). \\
\end{array} \right.
\end{equation*}
Let $\mathbf{x}=(x,y,0)$ and $\mathbf{y}=(0,0,1)$. Then one gets
\begin{equation*}
V^{(1)}_{\alpha,\beta,\gamma}(\mathbf{x})=(0,y^2+2\alpha
xy,x^2+2(1-\alpha)xy),\qquad
V^{(1)}_{\alpha,\beta,\gamma}(\mathbf{y})=(1,0,0).
\end{equation*}
It is clear that they are orthogonal. By the same argument, for the other
two cases, we can establish the orthogonality of
$V^{(1)}_{\alpha,\beta,\gamma}(\mathbf{x})$ and
$V^{(1)}_{\alpha,\beta,\gamma}(\mathbf{y})$. This completes the
proof.
\end{proof}
\begin{rem}\label{permut} We note that the operators given in \eqref{list} are
permutations of Volterra QSO. In \cite{GE} it was proved that
permutations of Volterra operators are automorphisms of the simplex.
\end{rem}
\begin{rem} It is well-known that linear stochastic operators are
orthogonal preserving if and only if they are permutations of the
simplex. We point out that if $\alpha=\beta=\gamma=1/2$, then the operators
\eqref{list} reduce to such kind of permutations.
\end{rem}
To investigate the dynamics of the obtained operators, it is usual to
proceed by means of conjugacy.
Let us recall that two QSO $V^{(1)}$ and $V^{(2)}$ are conjugate
if there exists a permutation
$T_{\pi}:(x,y,z)\rightarrow(\pi(x),\pi(y),\pi(z))$ such that
$T_{\pi}^{-1}V^{(1)}T_{\pi}=V^{(2)}$, and we denote this by
$V^{(1)}\sim^\pi V^{(2)}$.
In our case, we need to consider only the permutations of $(x,y,z)$
given by:
\begin{equation*}
\pi=
\begin{bmatrix}
x & y & z\\
y & z & x
\end{bmatrix} \qquad
\pi_1=
\begin{bmatrix}
x & y & z \\
x & z & y
\end{bmatrix}
\end{equation*}
Note that all other permutations can be derived from the given two ones.
\begin{thm}\label{OP-K} Orthogonal
preserving QSO can be divided into three non-conjugate classes
\begin{align*}
K_1&=\{V^{(1)}_{\alpha,\beta,\gamma},V^{(5)}_{\alpha,\beta,\gamma},V^{(3)}_{\alpha,\beta,\gamma}\} \\
K_2&=\{V^{(4)}_{\alpha,\beta,\gamma},V^{(6)}_{\alpha,\beta,\gamma}\} \\
K_3&=\{V^{(2)}_{\alpha,\beta,\gamma}\}
\end{align*}
\end{thm}
\begin{proof}
Let us consider $V^{(1)}_{\alpha,\beta,\gamma}$. Then one has
\begin{align*}
&T_{\pi}^{-1}V^{(1)}_{\alpha,\beta,\gamma}T_{\pi}(x,y,z)=T_{\pi}^{-1}V^{(1)}_{\alpha,\beta,\gamma}(y,z,x) \\
&=(y^2+2(1-\alpha)yz+2(1-\gamma)yx,x^2+2\gamma yx+2\beta zx,z^2 +2\alpha yz+2(1-\beta)zx) \\
&=V^{(5)}_{1-\gamma,1-\alpha,\beta}
\end{align*}
This means that $V^{(1)}_{\alpha,\beta,\gamma}\sim^\pi
V^{(5)}_{1-\gamma,1-\alpha,\beta}$.
Similarly, we have
$T_{\pi}^{-1}V^{(5)}_{\alpha,\beta,\gamma}T_{\pi}(x,y,z)=V^{(3)}_{1-\gamma,\alpha,1-\beta}$.
Hence, $V^{(5)}_{\alpha,\beta,\gamma}\sim^\pi
V^{(3)}_{1-\gamma,\alpha,1-\beta}$. By the same argument one finds
$T_{\pi}^{-1}V^{(3)}_{\alpha,\beta,\gamma}T_{\pi}(x,y,z)=V^{(1)}_{\gamma,1-\alpha,1-\beta}$,
which means $V^{(3)}_{\alpha,\beta,\gamma}\sim^\pi
V^{(1)}_{\gamma,1-\alpha,1-\beta}$.
This implies that the operators
$V^{(1)}_{\alpha,\beta,\gamma},V^{(5)}_{\alpha,\beta,\gamma},V^{(3)}_{\alpha,\beta,\gamma}$
are conjugate and we put them into one class denoted by $K_1$.
One can obtain that $V^{(2)}_{\alpha,\beta,\gamma}\sim^\pi
V^{(2)}_{1-\gamma,\alpha,1-\beta}$,
$V^{(4)}_{\alpha,\beta,\gamma}\sim^\pi
V^{(4)}_{1-\gamma,1-\alpha,\beta}$ and
$V^{(6)}_{\alpha,\beta,\gamma}\sim^\pi
V^{(6)}_{\gamma,1-\alpha,1-\beta}$. Therefore we need to consider
the other permutation $\pi_1$.
Consequently, one finds $V^{(2)}_{\alpha,\beta,\gamma}\sim^{\pi_1}
V^{(2)}_{\gamma,1-\beta,\alpha}$ and
$V^{(4)}_{\alpha,\beta,\gamma}\sim^{\pi_1}
V^{(6)}_{1-\gamma,\beta,\alpha}$.
Thus, by $K_2$ we denote the class containing
$V^{(4)}_{\alpha,\beta,\gamma}$ and $V^{(6)}_{\alpha,\beta,\gamma}$,
and by $K_3$ the class containing only $V^{(2)}_{\alpha,\beta,\gamma}$.
This completes the proof.
\end{proof}
\begin{rem} One can see that the
operator $V^{(2)}_{\alpha,\beta,\gamma}$ is a Volterra QSO, and its
dynamics was investigated in \cite{G}. From the results of \cite{V,Z} one
can conclude that even the dynamics of a Volterra QSO is very complicated.
We note that if $\alpha,\beta,\gamma\in\{0,1\}$ then the dynamics of operators
taken from the classes $K_1$, $K_2$ were investigated in
\cite{MJ,MSJ}. In \cite{GE} certain general properties of the dynamics
of permuted Volterra QSO were studied.
\end{rem}
\begin{rem} We can also define orthogonality preserving QSO
in a general setting. Namely, we call a QSO given by \eqref{VQ}
\textit{orthogonal preserving} if $V(\mu)\perp V(\nu)$ whenever
$\mu\perp\nu$, where $\mu,\nu\in S(E,{\mathcal F})$. Taking into account Remark
\ref{permut} we can formulate the following
\begin{conj} Let $V$ be a QSO given by \eqref{VQ}. Then $V$ is orthogonal preserving
if and only if there is a measurable automorphism $\alpha:E\to E$ (i.e.
$\alpha^{-1}({\mathcal F})\subset{\mathcal F}$) and a Volterra QSO $V_0$ such that
$V\mu=V_0(\mu\circ\alpha^{-1})$.
\end{conj}
\end{rem}
\section{Associativity of Orthogonality Preserving QSO}
In this section we state basic definitions and properties of
genetic algebras.
Let $V$ be a QSO and suppose that $\mathbf{x},\mathbf{y}\in
\mathbb{R}^n$ are arbitrary vectors. We introduce a multiplication
rule on $\mathbb{R}^n$ by
\begin{equation*}
\mathbf{x}\circ
\mathbf{y}=\frac{1}{4}\big(V(\mathbf{x}+\mathbf{y})-V(\mathbf{x}-\mathbf{y})\big).
\end{equation*}
This multiplication can be written as follows:
\begin{equation}\label{4.1}
(\mathbf{x}\circ \mathbf{y})_k=\sum^n_{i,j=1}P_{ij,k}x_iy_j,
\end{equation}
where $\mathbf{x}=(x_1,\ldots ,x_n),\mathbf{y}=(y_1,\ldots ,y_n)\in
\mathbb{R}^n$.
The pair $(\mathbb{R}^n,\circ)$ is called a \textit{genetic algebra}. We
note that this algebra is commutative. This means
$\mathbf{x}\circ\mathbf{y}=\mathbf{y}\circ\mathbf{x}$. Certain
algebraic properties of such kinds of algebras were investigated in
\cite{W-B,Ly2}. In general, a genetic algebra need not be
associative. Therefore, we introduce the following
\begin{defn} A QSO $V$ is called \textit{associative} if the corresponding
multiplication given by \eqref{4.1} is associative, i.e.
\begin{equation}\label{4.2}
(\mathbf{x}\circ\mathbf{y})\circ \mathbf{z}=\mathbf{x}\circ
(\mathbf{y}\circ\mathbf{z})
\end{equation} holds for all
$\mathbf{x},\mathbf{y},\mathbf{z}\in\mathbb{R}^n$.
\end{defn}
In this section we are going to find associative orthogonality
preserving QSO. According to the previous section, we have only
three classes of OP QSO. Now we are interested in whether these
operators are associative. Note that the associativity of some
classes of QSO has been investigated in \cite{G2008}.
\begin{thm} The QSO $V^{(2)}_{\alpha,\beta,\gamma}$ is associative
if and only if one of the following conditions is satisfied:
\begin{align*}
(1)\quad \alpha=0,\quad\beta=0,\quad \gamma=0 \\
(2)\quad \alpha=1,\quad\beta=0,\quad \gamma=0 \\
(3)\quad \alpha=0,\quad\beta=1,\quad \gamma=1 \\
(4)\quad \alpha=1,\quad\beta=1,\quad \gamma=1 \\
(5)\quad \alpha=1,\quad\beta=1,\quad \gamma=0 \\
(6)\quad \alpha=0,\quad\beta=1,\quad \gamma=0
\end{align*}
\end{thm}
\begin{proof}
To show the associativity we will check the equality \eqref{4.2},
which can be rewritten as follows:
\begin{equation}\label{4.3}
\sum^3_{i,j=1}P_{ij,u}x_{i}\left(\sum^3_{m,k=1}P_{mk,j}y_{m}z_{k}\right)=\sum^3_{i,j=1}P_{ij,u}\left(\sum^3_{m,k=1}P_{mk,i}x_{m}y_{k}\right)z_j, \qquad u=1,2,3,
\end{equation}
where we have used the following equalities:
\begin{align*}
(x\circ y)\circ z&=\sum^3_{i,j=1}P_{ij,l}x_{i}\left(\sum^3_{m,k=1}P_{mk,j}y_{m}z_{k}\right), \\
x\circ (y\circ
z)&=\sum^3_{i,j=1}P_{ij,l}\left(\sum^3_{m,k=1}P_{mk,i}x_{m}y_{k}\right)z_j,
\qquad l=1,2,3.
\end{align*}
For $V^{(2)}_{\alpha,\beta,\gamma}$ the equality \eqref{4.3} can be
written as follows:
\begin{align*}
x_1(y_1z_1+\alpha y_1z_2+\gamma y_1z_3+\alpha y_2z_1+\gamma y_3z_1) \\
+\alpha x_1((1-\alpha)y_1z_2+(1-\alpha)y_2z_1+y_2z_2+\beta y_2z_3+\beta y_3z_2) \\
+\gamma x_1((1-\gamma)y_1z_3+(1-\beta)y_2z_3+(1-\gamma)y_3z_1+(1-\beta)y_3z_2+y_3z_3) \\
+\alpha x_2(y_1z_1+\alpha y_1z_2+\gamma y_1z_3+\alpha y_2z_1+\gamma y_3z_1) \\
+\gamma x_3(y_1z_1+\alpha y_1z_2+\gamma y_1z_3+\alpha y_2z_1+\gamma y_3z_1) \\
=z_1(x_1y_1+\alpha x_1y_2+\gamma x_1y_3+\alpha x_2y_1+\gamma x_3y_1) \\
+\alpha z_2(x_1y_1+\alpha x_1y_2+\gamma x_1y_3+\alpha x_2y_1+\gamma x_3y_1) \\
+\gamma z_3(x_1y_1+\alpha x_1y_2+\gamma x_1y_3+\alpha x_2y_1+\gamma x_3y_1) \\
+\alpha z_1((1-\alpha)x_1y_2+(1-\alpha)x_2y_1+x_2y_2+\beta x_2y_3+\beta x_3y_2) \\
+\gamma
z_1((1-\gamma)x_1y_3+(1-\beta)x_2y_3+(1-\gamma)x_3y_1+(1-\beta)x_3y_2+x_3y_3);
\end{align*}
\begin{align*}
(1-\alpha)x_1((1-\alpha)y_1z_2+(1-\alpha)y_2z_1+y_2z_2+\beta y_2z_3+\beta y_3z_2) \\
+(1-\alpha)x_2(y_1z_1+\alpha y_1z_2+\gamma y_1z_3+\alpha y_2z_1+\gamma y_3z_1) \\
+x_2((1-\alpha)y_1z_2+(1-\alpha)y_2z_1+y_2z_2+\beta y_2z_3+\beta y_3z_2) \\
+\beta x_2((1-\gamma)y_1z_3+(1-\beta)y_2z_3+(1-\gamma)y_3z_1+(1-\beta)y_3z_2+y_3z_3) \\
+\beta x_3((1-\alpha)y_1z_2+(1-\alpha)y_2z_1+y_2z_2+\beta y_2z_3+\beta y_3z_2) \\
=(1-\alpha)z_2(x_1y_1+\alpha x_1y_2+\gamma x_1y_3+\alpha x_2y_1+\gamma x_3y_1) \\
+(1-\alpha)z_1((1-\alpha)x_1y_2+(1-\alpha)x_2y_1+x_2y_2+\beta x_2y_3+\beta x_3y_2) \\
+z_2((1-\alpha)x_1y_2+(1-\alpha)x_2y_1+x_2y_2+\beta x_2y_3+\beta x_3y_2) \\
+\beta z_3((1-\alpha)x_1y_2+(1-\alpha)x_2y_1+x_2y_2+\beta x_2y_3+\beta x_3y_2) \\
+\beta
z_2((1-\gamma)x_1y_3+(1-\beta)x_2y_3+(1-\gamma)x_3y_1+(1-\beta)x_3y_2+x_3y_3);
\end{align*}
\begin{align*}
(1-\gamma)x_1((1-\gamma)y_1z_3+(1-\beta)y_2z_3+(1-\gamma)y_3z_1+(1-\beta)y_3z_2+y_3z_3) \\
+(1-\beta)x_2((1-\gamma)y_1z_3+(1-\beta)y_2z_3+(1-\gamma)y_3z_1+(1-\beta)y_3z_2+y_3z_3) \\
+(1-\gamma)x_3(y_1z_1+\alpha y_1z_2+\gamma y_1z_3+\alpha y_2z_1+\gamma y_3z_1) \\
+(1-\beta)x_3((1-\alpha)y_1z_2+(1-\alpha)y_2z_1+y_2z_2+\beta y_2z_3+\beta y_3z_2) \\
+x_3((1-\gamma)y_1z_3+(1-\beta)y_2z_3+(1-\gamma)y_3z_1+(1-\beta)y_3z_2+y_3z_3) \\
=(1-\gamma)z_3(x_1y_1+\alpha x_1y_2+\gamma x_1y_3+\alpha x_2y_1+\gamma x_3y_1) \\
+(1-\beta)z_3((1-\alpha)x_1y_2+(1-\alpha)x_2y_1+x_2y_2+\beta x_2y_3+\beta x_3y_2) \\
+(1-\gamma)z_1((1-\gamma)x_1y_3+(1-\beta)x_2y_3+(1-\gamma)x_3y_1+(1-\beta)x_3y_2+x_3y_3) \\
+(1-\beta)z_2((1-\gamma)x_1y_3+(1-\beta)x_2y_3+(1-\gamma)x_3y_1+(1-\beta)x_3y_2+x_3y_3) \\
+z_3((1-\gamma)x_1y_3+(1-\beta)x_2y_3+(1-\gamma)x_3y_1+(1-\beta)x_3y_2+x_3y_3).
\end{align*}
Now, equalizing the corresponding terms and simplifying the obtained
expressions, one gets:
\begin{equation*}
\begin{matrix}
\beta(1-\beta)=0 & \alpha(1-\gamma)=(\alpha-\gamma)(1-\beta) & \alpha(1-\alpha)=0 \\
\alpha(\gamma-\beta)=0 & \gamma(1-\gamma)=0 & \gamma(1-\beta)=0\\
(\beta-\gamma)(1-\alpha)=\beta(1-\gamma)
\end{matrix}
\end{equation*}
Solving these equations we get the desired equalities, which
completes the proof.
\end{proof}
By the same argument one can prove the following
\begin{thm} \label{Theorem:box} The operators $V^{(1)}_{\alpha,\beta,\gamma}$ and
$V^{(4)}_{\alpha,\beta,\gamma}$ are not associative for any values
of $\alpha,\beta,\gamma$.
\end{thm}
\section*{Acknowledgement} The author acknowledges the MOHE Grant ERGS13-024-0057. He also
thanks the Junior Associate scheme of the Abdus Salam International
Centre for Theoretical Physics, Trieste, Italy.
\begin{thebibliography}{99}
\bibitem{B} Bernstein S.N., The solution of a mathematical problem concerning the theory of heredity.
\emph{Ucheniye-Zapiski N.-I. Kaf. Ukr. Otd. Mat.} {\bf 1} (1924),
83-115 (Russian).
\bibitem{G2008} Ganikhodjaev N, Hisamuddin H.H., Associativity in inheritance or are there associative populations.
{\it Malaysian Journal of Science} {\bf 27(2)} (2008), 131--136.
\bibitem{GR} Ganikhodjaev N, Rozikov U., On quadratic stochastic operators generated by
Gibbs distributions, {\it Reg. Chaot. Dyn.} {\bf 11} (2006),
467--473.
\bibitem{G} Ganikhodzhaev R.N., Quadratic stochastic operators,
Lyapunov functions and tournaments. \emph{Russian Acad. Sci.
Sbornik. Math.} \textbf{76} (1993), 489-506.
\bibitem{GE} Ganikhodzhaev, R. N., Eshmamatova, D. B. Quadratic automorphisms
of a simplex and the asymptotic behavior of their trajectories. {\it
Vladikavkaz. Math. Jour.} {\bf 8} (2006), no. 2, 12--28.
\bibitem{11} Ganikhodzhaev R., Mukhamedov F., Rozikov U.,
Quadratic stochastic operators and processes: results and open
problems, \textit{Infin. Dimens. Anal. Quantum Probab. Relat. Top.}
{\bf 14}(2011) 270--335.
\bibitem{HHJ} Hofbauer J., Hutson V., Jansen W., Coexistence for systems
governed by difference equations of Lotka--Volterra type,
\textit{J. Math. Biol.} \textbf{25} (1987) 553--570.
\bibitem{HS} Hofbauer J., Sigmund K., Evolutionary Games and Population Dynamics,
\textit{Cambridge University Press}, Cambridge, 1998.
\bibitem{HS2} Hofbauer J., Sigmund K., The theory of evolution and dynamical
systems, \textit{Cambridge Univ. Press}, (1988).
\bibitem{L} Lotka A.J., Undamped oscillations derived from the law of mass action,
\textit{J. Amer. Chem. Soc.} {\bf 42} (1920), 1595--1599.
\bibitem{Ly2} Lyubich Yu.I., Mathematical structures in population genetics,
{\sl Biomathematics}, Springer-Verlag, {\bf 22} (1992).
\bibitem{May} May R.M., Simple mathematical models with very complicated
dynamics, \textit{Nature} {\bf 261} (1976) 459--467.
\bibitem{MO} May R.M., Oster G.F., Bifurcations and dynamic complexity in
simple ecological models, \textit{Am. Nat.} \textbf{110} (1976)
573--599.
\bibitem{Moran} Moran P.A.P., Some remarks on animal population dynamics,
\textit{Biometrics} \textbf{6} (1950) 250--258.
\bibitem{MS} Mukhamedov F., Saburov M., On homotopy of volterrian
quadratic stochastic operator, {\it Appl. Math. \& Inform. Sci.}
{\bf 4}(2010) 47--62.
\bibitem{MS1} Mukhamedov F., Saburov M., On Dynamics of Lotka-Volterra
type operators, \textit{Bull. Malay. Math. Sci. Soc.} {\bf
37}(2014), 59--64.
\bibitem{MJ} Mukhamedov F., Jamal A.H. M., On $\xi^s$-quadratic stochastic
operators in 2-dimensional simplex, In book: \textit{Proc. the 6th
IMT-GT Conf. Math., Statistics and its Applications (ICMSA2010)},
Kuala Lumpur, 3-4 November 2010, Universiti Tunku Abdul Rahman,
Malaysia, 2010, pp. 159--172.
\bibitem{MSJ} Mukhamedov F., Saburov M., Jamal A.H.M., On dynamics of $\xi^s$-quadratic stochastic
operators, \textit{Inter. Jour. Modern Phys.: Conference Series}
{\bf 9} (2012), 299--307.
\bibitem{MSQ} Mukhamedov F., Saburov M., Qaralleh I. On
$\xi^{(s)}$-quadratic stochastic operators on two dimensional
simplex and their behavior, \textit{Abst. Appl. Anal.} {\bf 2013}
(2013), Article ID 942038, 12 p.
\bibitem{NSE} Narendra S.G., Samaresh C.M., Elliott W.M., On the
Volterra and other nonlinear models of interacting
populations, \textit{Rev. Mod. Phys.} {\bf 43} (1971), 231--276.
\bibitem{PL} Plank M., Losert V.,
Hamiltonian structures for the n-dimensional Lotka-Volterra
equations, \textit{J. Math. Phys.} {\bf 36} (1995) 3520--3543.
\bibitem{RN} Rozikov U.A., Nazir S. Separable quadratic stochastic operators,
{\it Lobachevskii Jour. Math.} {\bf 31}(2010) 215--221.
\bibitem{RZ} Rozikov U.A., Zada A. On $\ell$-Volterra Quadratic stochastic
operators. {\it Inter. Journal Biomath.} {\bf 3} (2010), 143--159.
\bibitem{16} Stein, P.R., Ulam S.M.,
{\it Non-linear transformation studies on electronic computers},
1962, Los Alamos Scientific Lab., N. Mex.
\bibitem{T} Takeuchi Y., Global dynamical properties of Lotka--Volterra systems,
\textit{World Scientific}, 1996.
\bibitem{UR} Udwadia F.E., Raju N., Some global properties of a pair of
coupled maps: quasi-symmetry, periodicity and syncronicity,
\textit{Physica D} \textbf{111} (1998) 16--26.
\bibitem{U} Ulam S.M., A collection of mathematical problems.
{\it Interscience Publ. New York-London}, 1960.
\bibitem{V} Vallander S.S., On the limit behavior of iteration
sequence of certain quadratic transformations. \textit{Soviet Math.
Doklady}, {\bf 13}(1972), 123-126.
\bibitem{V1} Volterra V., Lois de fluctuation de la
population de plusieurs esp\`{e}ces coexistant dans le m\^{e}me
milieu, \textit{Association Franc. Lyon} {\bf 1926} (1927), 96--98
(1926).
\bibitem{V2} Volterra V., Lecons sur la theorie mathematique de la lutte pour la vie,
{\it Gauthiers-Villars, Paris}, 1931.
\bibitem{W-B} Worz-Busekros, A., \textit{Algebras in Genetics},
Lecture Notes in Biomathematics, Vol. 36, Springer-Verlag, Berlin,
1980.
\bibitem{Z} Zakharevich M.I., On a limit behavior and ergodic hypothesis for quadratic mappings of a simplex.
{\it Russian Math. Surveys} {\bf 33} (1978), 207-208.
\end{thebibliography}
\end{document}
\begin{document}
\twocolumn[
\mlsystitle{Using Python for Model Inference in Deep Learning}
\mlsyssetsymbol{equal}{*}
\begin{mlsysauthorlist}
\mlsysauthor{Zachary DeVito}{fair}
\mlsysauthor{Jason Ansel}{fair}
\mlsysauthor{Will Constable}{fair}
\mlsysauthor{Michael Suo}{fair}
\mlsysauthor{Ailing Zhang}{fair}
\mlsysauthor{Kim Hazelwood}{fair}
\end{mlsysauthorlist}
\mlsysaffiliation{fair}{Facebook AI Research, Menlo Park, California, USA}
\mlsyscorrespondingauthor{Zachary DeVito}{zdevito@cs.stanford.edu}
\mlsyskeywords{Machine Learning, MLSys}
\vskip 0.3in
\begin{abstract}
Python has become the de-facto language for training deep neural networks, coupling a large suite of scientific computing libraries with efficient libraries for tensor
computation such as PyTorch~\cite{pytorch} or TensorFlow~\cite{tensorflow}. However, when models are used for inference they are typically extracted from Python as TensorFlow graphs or TorchScript~\cite{torchscript} programs in order to meet performance and packaging constraints. The extraction process can be time consuming, impeding fast prototyping. We show how it is possible to meet these performance and packaging constraints while performing inference in Python. In particular, we present a way of using multiple Python interpreters within a single process to achieve scalable inference and describe a new container format for models that contains both native Python code and data. This approach simplifies the model deployment story by eliminating the model extraction step, and makes it easier to integrate existing performance-enhancing Python libraries. We evaluate our design on a suite of popular PyTorch models on Github, showing how they can be packaged in our inference format, and comparing their performance to TorchScript. For larger models, our packaged Python models perform the same as TorchScript, and for smaller models where there is some Python overhead, our multi-interpreter approach ensures inference is still scalable.
\end{abstract}
]
\printAffiliationsAndNotice{}
\section{Introduction}
\label{introduction}
Over the past few years, Python has become the de-facto language for training deep learning models with major deep learning frameworks such as PyTorch~\cite{pytorch}, TensorFlow~\cite{tensorflow}, MxNet/TVM~\cite{mxnet, tvm} and JAX~\cite{jax} primarily providing Python interfaces. Python's ease of writing code and distributing it via package managers such as conda~\cite{conda} make it easy to share libraries and research results. Its ecosystem of libraries for data science such as numpy~\cite{numpy}, pandas~\cite{pandas}, and Jupyter notebooks~\cite{jypter} make analyzing training results easy.
However, once a model is trained, using the model resembles traditional software engineering more than data science. It is desirable to have a stand-alone and reproducible artifact that can run the model independently from its training harness. These artifacts are typically then used as part of a service such as TorchServe~\cite{torch_serve} that manages inference of the model, or as a library call made within an application.
The process of extracting a model into a stand-alone artifact can be time consuming. Approaches such as partially evaluating the model~\cite{autograph}, or compiling a subset of Python to a stand-alone language (TorchScript~\cite{torchscript}) often require manual intervention to refactor the model into a more limited programming model than the one offered by Python and are a common source of confusion for users of deep learning frameworks. Often features that made the model easy to work with in Python, such as dynamic typing or object-oriented configuration of model layers have to be removed in this step. Frequently the user extracting the model is not the same as the original model author, making the kind of whole-program refactorings needed to get the program extracted difficult.
We offer an alternative workflow by revisiting the assumption that it is not feasible to run Python for model inference. Reticence to running Python as a production language stems from problems of running Python as a platform for web services such as Django~\cite{django}. In these settings, there are lots of small objects and millions of lines of code. Deep learning models have vastly smaller amounts of code and fewer but bigger objects. In many cases it is possible to simply use the existing CPython interpreter as a platform for model inference. We show how it is possible to organize CPython such that multiple independent interpreters can co-exist in the same process. We also develop a new hermetic packaging format that makes it possible to easily create self-contained model artifacts from existing Python code and trained weights. In particular, we present the following contributions:
\begin{itemize}
\item An analysis of the challenges of using Python for model inference.
\item A scalable approach to serving models using multiple Python interpreters in the same process that can share weight tensors.
\item A hermetic packaging format for Python, \ic{torch.package}, that can create self-contained archives of model code and weights.
\item A library-based C++ API, \ic{torch::deploy}, for using our multi-interpreter approach.
\item An evaluation of the performance of this approach compared to TorchScript, showing model performance for many models is nearly the same as TorchScript, and for smaller models with Python overhead, our multi-interpreter approach is still scalable.
\end{itemize}
\section{Background}
In most deep learning frameworks, model inference is accomplished by first exporting the model into a stand-alone format. TensorFlow represents models using files containing protocol buffers describing the graph of the model. In TensorFlow 2.0, an eager mode was introduced which runs directly in Python. An export process, autograph~\cite{autograph} can partially evaluate the eager-mode program to derive a graph that can be exported. PyTorch's default mode is eager evaluation in Python. To export a model, users can trace its execution (if it does not contain control-flow), producing an exportable ONNX graph that can be loaded from other inference frameworks~\cite{onnx}. For more complicated models, users can convert the model to TorchScript~\cite{torchscript}, a process that requires writing the model in a statically-typed subset of the Python language. A separate TorchScript interpreter can be used to deploy the model. In any of these approaches, the developer may need to spend significant time refactoring a Python-based model to be ready for export. For instance, the user might need to refactor the model to typecheck TorchScript's static type system, or to remove aggregate data types that cannot be represented in a TensorFlow graph.
While it is possible to simply run inference in Python, deep learning frameworks have not previously recommended it. One possible explanation is the history of using Python in production settings. Python is a popular language for writing webservers with Django~\cite{django} being the most commonly used library. However, as individual web servers grow more complex, they face problems with the performance of the standard CPython implementation, leading to efforts to improve Python's performance. This includes Shedskin~\cite{shedskin} a project developed at Google, Pyston~\cite{pyston} developed at Dropbox, and modifications to the CPython interpreter used by Instagram~\cite{instagram}. Eventually, developers sought other approaches to their problem such as migrating to other languages such as Go~\cite{pyston}. While faster Python interpreters such as PyPy~\cite{pypy} exist, they are not commonly used because they do not have the same extension framework as CPython, making the use of common libraries like numpy or TensorFlow or PyTorch difficult.
\section{Challenges using CPython}
Running CPython for deep learning inference is met with skepticism due to these well known challenges in efficiently running Python code using the CPython interpreter. Naively running PyTorch Python programs for inference would run into similar scaling problems, so this initial skepticism is warranted. However by breaking down the challenges in running Python code efficiently, we can separate the problems we have to address for deep learning inference from ones that can be mitigated in other ways.
\subsection{For performance}
\begin{figure}
\caption{A visualization of the library dependencies of our approach. Our library, \ic{libtorch_deploy.so}
\label{libraries}
\end{figure}
\paragraph{Global interpreter lock} The most widely understood challenge with using CPython is that its design requires a single global context, and only a single instance of a Python interpreter can use that context at once. This constraint is enforced using a global interpreter lock (GIL) that must be held by the thread using the Python context. This design simplifies the implementation of core infrastructure such as garbage collection and the memory format of objects. Languages that do allow for multiple threads to run at once have to carefully reason about the implementation, and provide a detailed contract (e.g. the Java memory model~\cite{java_memory_model}) to users of threads about how objects behave when accessed in multiple threads. Attempts to remove the GIL~\cite{attempts_to_remove_GIL} have faced challenges in providing an implementation that does not degrade single-threaded performance, defining a reasonable memory model, and then updating library code to work within this memory model.
For server applications handling multiple requests at once such as a server for deep learning inference, the Python interpreter becomes a bottleneck to scaling and experiments in our evaluation section confirm this is a significant issue for some of our models.
Recent proposals for Python will eventually allow multiple separate interpreters~\cite{python_interps}. While objects cannot be shared among interpreters, each interpreter would have its own GIL, allowing for parallel execution. However, for practical purposes it will be some time before this becomes a viable option because popular extensions in the Python ecosystem such as numpy, scipy, or PyTorch are coded with the assumption that there is a single Python context and will need to be modified before they can be used with multiple interpreters.
A popular workaround is to use the Python \ic{multiprocessing} library which provides primitives to allow multiple Python \emph{processes} to cooperate. In this model each CPU core might have its own Python process. Since each process has its own Python state, the processes cannot directly share Python objects. Instead, inter-process communication is implemented using Python's \ic{pickle} protocol to serialize and deserialize objects that are passed between interpreters. Non-python data, such as the numbers inside a PyTorch Tensor can be shared using the OS's facilities to share memory between processes. Several aspects of this setup are not ideal. First, having to manage a process per CPU core (frequently up to 64 cores for big machines) can be cumbersome. Tools like debuggers often do not understand that the collection of processes is a single application, making it hard to use these tools. Task management and logging become more challenging as well, especially when these interpreters are only a small part of a bigger application. Second, sharing non-python resources is not always possible without special OS support. For instance, memory needs to be specially allocated to share it across processes. In PyTorch this manifests as needing to copy tensors to make them sharable. Sharing special memory such as CUDA allocated memory or memory allocated by libraries for other custom accelerator chips is also difficult or impossible.
\paragraph{A highly customizable object model}
CPython objects can be defined using a C extension API. This API exposes the details of the object layout directly to extension writers and allows nearly every aspect of objects to be defined in an object-specific way including custom behavior for common operators and how they are scanned for garbage collection. Common types are also defined using this API. This makes it challenging to use JIT compilation techniques to accelerate Python that have been successful in other dynamically-typed scripting languages such as Javascript~\cite{jit_in_javascript}. Attempts to compile the interpreter bytecodes in Python yield little speedup because most of the time is spent in functions like \ic{PyObject_Add} that have to look up how to dynamically dispatch an add using details of the object model~\cite{python_interpreter_overhead1, python_interpreter_overhead2}. Since most of the complexity is in the object model, attempts like PyPy~\cite{pypy} to restructure the object model allow JITs to accelerate the speed of Python code, but since they do not expose the same extension API, the common parts of the Python ecosystem such as numpy or PyTorch do not natively work. The result is that the speed of Python on small micro-benchmark programs such as those found in the programming language benchmark game~\cite{benchmark_game} are a median of 37 times slower than native execution in C and 13 times slower than Javascript.
\paragraph{Memory Management}
CPython reference counts an object each time an object is saved in the heap or interpreter stack. This design requires a lot more writes to occur than a pure garbage collector, which can decrease the performance of caches, and it complicates efforts to remove the interpreter lock since reference counting would then need to become more costly atomic operators. Since reference counting would leak cycles, Python also includes a cycle collector to find unreachable cycles. While it runs less frequently than a fully-deferred garbage collector, latency can still become variable for programs that create a lot of cycles for small objects. In practice for server uses of Python, this has led large deployments like Instagram to use tricks like forking the Python process from a clean start state and disabling the garbage collector entirely~\cite{instagram}.
\subsection{For deep learning inference}
Deep learning inference has its own unique properties that present additional challenges to using CPython.
\paragraph{Model Packaging} Model code, unlike normal source code, is closely coupled with large parameters (model weights) normally stored as separate binary data during the training process. For the model to work correctly, the code that executes the model has to be the same as when it was trained. Small changes might cause losses in accuracy because of the way gradient descent tunes the weights for any particular quirks of the implementation. Existing packaging solutions typically store the model code and weights together. For instance, TensorFlow serializes the graph and its weights into a protobuf format~\cite{tensorflow}. Similarly, TorchScript~\cite{torchscript} uses a zip file format that contains compiled TorchScript code (extracted from the original model code), and embedded weight files.
Python code is typically stored separate from data, either as source files in some source control system, or in versioned packages in managers such as conda~\cite{conda}. Neither approach is ideal for packaging models. Using source control systems to store large binaries, sometimes including gigabytes of weights, is not practical because they retain all versions of the data. Furthermore, models are frequently produced using a search over some hyperparameter space with training that may include manual tweaking of the model code. Asking modellers to upstream those tweaks into a shared code repository is not something that is currently expected, even if it might be good software engineering practice in the future.
Storing Python models as pip or conda packages allows the storage of additional binary data like weights, but also requires that models provide globally unique names for themselves and any components that they include. This can become cumbersome since many models share very similarly named things (e.g. a ResNet block) that nevertheless may have slight differences. This would make it difficult for a multi-tenant inference server to manage a suite of similar but possibly mutually incompatible Python packages.
\paragraph{Embedability}
Model inference is commonly done in two ways: as a function call within a user's existing application (such as a desktop app, or language translation service), or as a remote procedure call to a dedicated server that runs many different kinds of models such as TorchServe~\cite{torch_serve}. In both cases, the application does not care about the details of the implementation of the model, only that it can be run efficiently and return a result.
A single global Python instance like that provided by CPython makes it difficult to embed libraries that use the Python interpreter in other applications. For instance, assume we provide an inference library \ic{libtorchserving.so} that internally uses the Python interpreter. If the application itself also uses Python in some way then the Python environment in our library will leak into the environment of the application, potentially leading to name conflicts such as having two different versions of the same Python library among other unexpected behaviors. Problems with Python multiprocessing are exacerbated in this setup as well since the application may already have its own pool of worker threads. That pool might then end up contending with the multiprocessing pool in ways that degrade performance.
Python itself also typically requires its standard library to be present on the filesystem of the machine in order for most commonly used Python modules to function correctly. For an embedded inference library, needing to also ship a set of discoverable Python files presents a challenge for integration into arbitrary applications.
\section{Approach}
\begin{figure}
\caption{An example showing how to use the \ic{torch::deploy} API.}
\label{api}
\end{figure}
To use Python models for inference, we need to address the performance and packaging challenges presented in the previous section.
\subsection{Mitigating performance factors}
Some aspects of performance are mitigated by the unique properties of deep learning models compared to general purpose Python programs.
A Python webserver such as Django might have many small objects representing the components of a web application. Each object might have 10s of fields, stored as Python objects in a dictionary. Working with these objects is slow because of the Python overheads of unpacking each dynamically typed object. In contrast, PyTorch programs primarily work with Tensors that typically contain greater than 256 elements and frequently contain many more.
For instance, the size of the activation Tensors in an unbatched ResNet50 ranges from 25k to 100k floats. This means that there are fewer Python objects overall compared to general Python programs and the objects themselves are much larger.
The difference in object number and size has a number of mitigating effects on Python performance. First, programs spend relatively little time in the Python interpreter and its object model. As an example, if we run an unbatched ResNet50 model on the CPU, replacing all tensors with dummy objects that do no compute, we find that Python execution only takes 13\% of runtime.
Having fewer objects also makes memory management less concerning. Reference counting, while expensive for small objects, occurs infrequently in deep learning programs due to the larger object size.
Having few but large tensor objects also opens up new possibilities for data sharing across inference. For instance, it is relatively inexpensive to copy an entire PyTorch model object as long as the Tensors are still shared across copies.
Another mitigating property of model inference is the inherent parallelism of handling multiple inference requests in parallel. Requests can be fulfilled entirely in independent threads, possibly working on different models. Within a model, data-parallelism can be exploited by doing batched inference. General uses of Python multiprocessing face the overhead of pickling and unpickling Python objects to pass them between processes. In model inference, no data needs to be exchanged between requests so the speed of transferring Python objects is less of a concern.
\subsection{Strategy}
The unique properties of model inference mean that we can work around Python's relatively slow object and memory management model. However, we still need to address the global interpreter lock, the embeddability of the interpreter itself, and the challenges of packaging model parameters with Python code. To address the GIL and embeddability, we propose a way of building the CPython library so that it can be loaded privately multiple times in the same process. To address packaging, we present a new container format for Python code and parameter data along with a way of loading these containers that ensures its code does not conflict with other containers.
\subsection{An embedded CPython}
\label{multi_python}
Our approach for working around the GIL is to create a version of the CPython interpreter that can be loaded privately multiple times in a single process. On top of this interpreter, we build a model serving infrastructure that can hand off requests to different interpreters in order to scale to multiple threads without GIL contention. We first look at how we construct the private interpreter and then how we use it to build an inference engine.
To construct an interpreter that can be loaded independently, we create a special shared library \ic{libinterp.so} that contains the CPython implementation \ic{libpython.a}, any extensions modules that bind to it such as PyTorch's Python bindings(\ic{libtorch_python.a}), and a small API for transferring data into and out of the private interpreter. These components are linked together statically in the shared library and a linker script is used to mark the visibility of the symbols in the library as hidden. Dependencies that do not use the Python APIs such as PyTorch's C++ library (\ic{libtorch.so}) are linked against dynamically. An inference application can then load a copy of interpreter using \ic{dlopen}. Passing the \ic{RTLD_LOCAL} flag ensures that the symbols loaded in this library will not be visible to the rest of the application.
However, this packaging only provides a single copy of Python because the normal shared library system will ensure any particular shared library is only loaded once. To work around this, we first \emph{copy} the shared library to a temporary file on the file system before loading with \ic{dlopen}; by doing this we ensure we get unique copies of the library. On load, the library will resolve its shared library dependencies to the symbols already loaded in the process. Figure~\ref{libraries} illustrates the result. Everything loaded in \ic{libdeploy.so} will have multiple copies, but shared library dependencies will be resolved globally ensuring there is only a single copy of libraries like \ic{libtorch.so}.
To ensure that the embedded Python library has access to the Python standard library, we pre-compile the library to Python bytecodes and then embed those bytecodes into the data section of our library, ensuring that the interpreters will not need access to the filesystem.
Because the \ic{libdeploy.so} privatizes the Python APIs, the application cannot directly access the Python state of the interpreters. Instead we provide a minimal C++ API, \ic{torch::deploy} for getting data into and out of the private interpreter. Example uses of the API are shown in Listing~\ref{api}.
We represent each copy of the interpreter as an \textbf{Interpreter} object in C++. An \textbf{InterpreterManager} is a pool of interpreters that can be handed out to different threads when requested. For instance, for an \ic{N} core machine, we create a pool of \ic{N} interpreters, treating each interpreter as a resource that can be acquired when the inference library needs to run Python code to either load or run a model. Having one interpreter per real hardware thread ensures we do not end up with a situation where we have a CPU ready to do work but no free interpreter to perform the job.
A \textbf{Package} object is a C++ wrapper around the \ic{torch.package} format described in the next section and is used to load packaged models onto the private interpreters.
To load a model, the user of the library will typically open a package file and load a model from it. This process uses a Python interpreter which is internally acquired from the InterpreterManager. While it would be possible to use the package API to load the model multiple times onto each interpreter, it is not optimal for two reasons. First, loading the model multiple times would result in different copies of the weight tensors. For large models, it is not possible to fit many copies of the model in GPU memory. Second, it is frequently the case that users will want to do some pre-processing to each model, such as wrapping it in a container to manage the movement of data to the GPU, or to connect the model with data post-processing code. Re-doing this preprocessing would be expensive.
To allow models to share weights and pre-processing, we load the model on a single interpreter and then use Python's \ic{pickle} protocol to move the model's Python objects to other interpreters as needed. Because \ic{libtorch.so} is global to the process, we can share the Tensor data between interpreters by customizing the pickling process. The \ic{multiprocessing} framework in Python uses a similar approach but unlike \ic{multiprocessing} the shared data lives in a single process so we do not have to make any special OS calls to create this shared relationship and any use of accelerator libraries such as CUDA works without further modifications.
We abstract this process of loading and then moving a model in our C++ API as a \textbf{MovableObject} that can be created from a Python object after it is loaded. When the application makes an inference call, an interpreter is allocated out of the pool and we check to see if it has loaded the MovableObject into its Python state. If not, it is loaded from the pickled copy. The loaded object is then used to run inference.
To make an inference call, or otherwise directly interact with an instance of the Python interpreter, we provide an \textbf{InterpreterSession} object. This object acts as a resource guard in C++ that permits direct access to the Python interpreter using \textbf{PyObj} handles which are valid only for the lifetime of the session. A minimal API is exposed to access global values, call functions, and translate primitive values (including Tensors) between C++ and Python. The second example in Figure~\ref{api} shows an example of using this session API directly but typically users will use syntax sugar that encapsulates these details.
The inference library intentionally does not have a thread pool. Instead, we expect the inference application to call the library from multiple request threads if desired. The InterpreterManager, rather than being a pool of worker threads, serves as a load balancer for handing out interpreters. This choice is subtle but important. It is typical for applications to be doing other work such as serving models in other formats such as TorchScript or handling non-inference work. It is likely the application already has a threadpool which would fight with an internal threadpool in the inference library.
We also intentionally load multiple models onto a single interpreter rather than use one interpreter per model. This is because each interpreter is relatively expensive. It requires that a copy of the Python interpreter and PyTorch's bindings be made in RAM, and it requires the initialization of Python and PyTorch. By limiting the total number of interpreters to the number of CPU threads, we ensure that we have enough available parallelism to avoid GIL contention but bound the amount of resources the interpreters consume. Instead our packaging format ensures that multiple models do not interfere with each other.
\subsection{Hermetically packaging models}
We propose a new way to package code and model data together to create a self-contained archive for models. Conceptually, it extends Python's existing pickling format to also store the code depended on by the pickled objects. We couple this with a \emph{hermetic} importer that loads archives without polluting the global module table, and ensures that the loaded code only depends on explicitly-declared external dependencies.
\begin{figure*}
\caption{Packaged model structure}
\label{packaging_zip}
\caption{Model export}
\label{example_export}
\caption{Model import}
\label{importing_api}
\caption{An example of the structure of our model packaging format and the code used to export it and import it.}
\label{fig:three graphs}
\end{figure*}
\subsubsection{Format}
The on-disk format is a zip-file archive. PyTorch already stores its serialized data in this form in order to keep the tensor data as separate mmap-able files. Similarly, Python supports loading code from zip archives such as the "egg" format~\cite{python_egg}. The archive stores several kinds of files:
\begin{itemize}
\item Python source files, laid out in a package hierarchy the same way they are stored in the file-system or in egg files.
\item Pickled data, stored as an individual file. Class references inside the pickled files are resolved using the source in the archive.
\item Tensor data files, stored in the \ic{data/} folder with the contents of tensors.
\item \ic{external_modules}, a special file that lists the Python modules that should be resolved using the system's built-in module import system rather than from the package.
\end{itemize}
Figure~\ref{packaging_zip} shows what this layout looks like for the Tacotron 2 application.
We provide a \ic{PackageExporter} that helps create archives in this format, and a \ic{PackageImporter} that loads them in a hermetic way.
\subsubsection{Exporting models}
Figure~\ref{example_export} shows example code for exporting a model using our packaging format. It uses the method \ic{save_pickle} to save a pickled version of the file along with some annotations to describe how to treat dependencies, which we describe below.
In normal PyTorch, it is possible to serialize a \ic{torch.nn.Module} using the \ic{torch.save} function. This saves the object using Python's pickle protocol for the Python objects, and separate files for the tensor data combined into a single zip file. Python's pickle format saves objects, but does not save the \emph{code} used to define the behavior of objects. For example if a user saves an object of class \ic{my_package.MyClass} which has an attribute \ic{my_object.my_int = 1}, then Pickle will write out bytecode in the pickling language that says ``create a \ic{my_package.MyClass} object, setting its attributes to \mintinline{Python}|{'my_int': 1}|''. When loading the pickled object the system will use Python's \ic{import} infrastructure to load \ic{my_package} to get the code for the object.
Libraries such as \ic{cloudpickle}~\cite{cloudpickle} extend the pickle format to include the definition of the class as well by serializing the Python code objects as well. However, this is not ideal for model packaging. It is specific to a particular version of Python, because it stores bytecodes which are not stable. More importantly, it does not produce a human-readable form of the code being exported, making attempts to edit or debug that code difficult.
Our packaging format takes an approach based on extracting entire source files instead. In \ic{save_pickle}, we scan the produced pickle bytecodes for references to modules. For each module we resolve it to its source file and include that source file in the archive. This approach produces human-readable source code. By keeping the structure of the code the same as when it was written, users can easily debug packaged models. Users can also fix bugs in the exported source or perform transfer learning with the model by unzipping the archive and editing the code.
Some modules are implemented using C extensions rather than Python code. Others, such as \ic{torch}, are very large and would typically be included in the inference engine for use by all modules. These modules can be marked \emph{extern} in the exporter API to tell the package not to try to include their source, and inform the importer it is allowed to import them from the system library. Python standard library modules and \ic{torch} are treated as extern by default.
Python source files are not self-contained and almost all files include import statements that reference other modules. Our packaging format takes a semi-automated approach to discover these dependencies. Each source file is scanned for \ic{import} statements, we resolve these import statements to actual modules, and then export the code for that module as well, recursively scanning for modules in that code as well.
Because Python is a dynamically typed and very flexible language, it is not possible to guarantee the accuracy of dependency scanning. If code imports modules using the \ic{importlib} module, it can import arbitrary modules that cannot be detected. Code can also be loaded independently of the module system entirely. We provide a way to explicitly include modules that are missed by scanning. In practice, this is rare and none of our example models required this.
A much more common issue with dependency scanning is the inclusion of false dependencies. The object being serialized may only contain one class from a file, but since we work at the level of entire files, the exporter may include much more code than is required for the pickled object. Poorly organized code, such as a \ic{utils.py} file that aggregates a bunch of unrelated functionality can cause large amounts of code to be included that the model does not actually need. Furthermore even within a class such as a particular type of \ic{torch.nn.Module}, there might be code related to initializing the object or performing training that is not actually needed for inference. We have seen examples where this code then relies on modules unrelated to inference such as data loading or check-pointing code.
It is always possible to mitigate the discovery of false dependencies by refactoring files like \ic{utils.py} into smaller independent components and to move functionality for data loading and check-pointing out of the \ic{torch.nn.Module} classes in the Model. However, this refactoring might take significant effort, or these classes might exist in a library that the person packaging cannot easily edit. For these reasons, we provide the concept of a \emph{mocked} module in the packaging API. Before exporting a package, certain modules can be marked as mocked, which will replace their implementation in the package with a stub. Importing the module will succeed, but any attributes accessed (e.g. \ic{my_module.my_method}) will return dummy \ic{MockObjects} that will throw an exception when used. This allows statements like \ic{from my_module import my_method} to succeed even though the module code is not present. Mocks can be used to manually eliminate the false dependencies for a model. Our packager provides a verbose interface to help visualize and debug the export process to make it clear what modules should be mocked. Our evaluation section describes our experience packaging models using mocking in our benchmark suite.
\begin{figure*}
\caption{Example models used in our benchmarks. Each model was packaged from popular Github repositories of PyTorch models and is based on a published model design. Packaging notes describe any modifications we made to get the models exported in our package format.}
\label{models}
\end{figure*}
\subsubsection{Importing models}
The API for importing modules is shown in Listing~\ref{importing_api} and mirrors the export API. Models for similar domains will often have types of the same name. For instance, two models that contain a ResNet trunk may both contain classes called \ic{models.Resnet} with different implementations. \emph{Hermetic} model loading ensures that both of these models can be loaded into the same Python interpreter without their classes interfering with each other.
We achieve this with customization to the Python unpickler used when loading pickled data, and a custom implementation of the import infrastructure that knows how to resolve modules to the contents of a package.
In Python, the table \ic{sys.modules} holds a global view of the loaded Python modules in the system. For package code, we instead have a package-specific view of modules stored in \ic{the_importer.modules} that manages the code for package objects.
When pickled data is loaded from the archive, we use an unpickler with a modified way of resolving global references that uses \ic{the_importer.import_module} rather than the global \ic{importlib} to resolve references. If a module is in the \ic{extern_modules} file, then the package importer uses the system importer to resolve the module. Otherwise it is loaded from the package.
When code is loaded from the package, we install a custom \ic{__import__} method in the builtins table of the module. This change causes all \ic{import} statements internal to the package to use the package-specific import process rather than the global one.
Once an object is loaded, users can interact with it as if it were imported normally. The only difference is that the qualified name of the class's module will not match what is in \ic{sys.modules}. In most circumstances this does not affect functionality. The one place where it does is when trying to re-export objects imported from a package. The Pickler normally checks that the module of the class matches the global one to ensure an object will unpickle correctly. To overcome this limitation, our \emph{exporter} optionally takes an ordered list of importers (including the system importer) that it searches to find the implementation of any classes it pickles.
The \ic{torch::deploy::Package} object in our C++ API serves as a wrapper around this Python import infrastructure.
\section{Evaluation}
\begin{figure*}
\caption{Performance comparison of our approach (one Python interpreter per thread) against TorchScript, and a global Python interpreter, which is the way Python is typically used.}
\label{results}
\end{figure*}
By eliminating the need to extract models from Python, our system should make it faster to deploy models. However, this faster deployment is not practical if the performance of the resulting models is poor compared to the traditional approach of exporting the model. To evaluate the experience of using Python-based model inference, we assembled a suite of models, packaged them with \ic{torch.package} and compared their performance to the same models converted to TorchScript when possible. Experiments were run on a 40 core machine (2 sockets, each with 20 core Intel Xeon E5-2698V4 processors) and 2 GP100 NVIDIA GPUs using Python 3.8 and a nightly build of PyTorch from August 2020.
\subsection{The models}
Figure~\ref{models} provides a description of the models we use for our evaluation. These models are part of PyTorch's model-level benchmark suite~\footnote{https://github.com/pytorch/benchmark} and were adapted from popular Github repositories (by star count) containing PyTorch code. Rather than include popular ``trunk'' networks like ResNet in our evaluation we chose to focus on end-to-end models that include these trunks as components because we believe it provides a more accurate picture of how models will be used in practice. The benchmarks contain a number of image processing examples in addition to several models from other domains including language (BERT), audio (Demucs), speech synthesis (Tacotron 2), structure prediction (Struct), and video (Super SloMo). When preparing the models for use in our evaluation, we modified their organization so that they could build within the same Python environment and provided a consistent API for loading the model, but we avoided making changes to how the code was organized that would change their performance or the ease with which they were packaged.
\subsection{Packaging pre-existing model code.}
We evaluated the usability of the \ic{torch.package} system by writing export code for each model. Export code appears similar to the code shown for the Tacotron 2 model in Figure~\ref{example_export}. By default we marked \ic{torch} as extern, and mocked out \ic{numpy} and \ic{scipy} since these were commonly included for debugging but unused in inference. Despite being real repositories of model code used in research by a variety of authors, the models were easy to package. Seven of the fifteen models required no additional annotations. The remaining models required a small number of mocked out libraries as described in Figure~\ref{models}. The most complicated model to package was MaskRCNN, which required 4 mocks and a stub replacement for the \ic{sys} module because code in the library was examining version information provided by the module. It also required additional kernels for regions of interest that are typically part of \ic{torchvision} but not the main PyTorch codebase. Section~\ref{conclusion} discusses how we can make the process of including additional per-model kernels easier. Once exported with correct mock annotations, each model was tested for correctness by comparing its results to the non-packaged version of the model for consistency.
As part of the effort of assembling the benchmarks, we had PyTorch developers add annotations to some of the model to make them amenable to TorchScript. This process was by far the most time consuming part of preparing the benchmark, with individual developers often spending on the order of several hours to make models able to be exported from Python using TorchScript. Several of our more complicated models (BERT, Yolo v3, DLRM, Mask R-CNN, and Tacotron 2) do not have TorchScript versions because of the complexity of porting them. Part of the difficulty is that TorchScript enforces static typing of the program to be able to perform more aggressive optimizations, but these changes require refactoring models to fit the type system. The process of packaging the models using \ic{torch.package} was qualitatively simpler than preparing models for TorchScript since it mostly involved breaking false dependencies using mocking.
\subsection{Performance of deployed Python models}
To measure the performance of Python-deployed models, we created a benchmark using our \ic{torch::deploy} API that simulates the model inference process. A model is loaded from the package and a number of requester threads are created that make requests to the model using example inputs. We tested both CPU and GPU inference. Since there are only 2 GPUs, when the number of threads exceeded 2, we multiplexed the use of the GPUs across threads. For CPU inference, we instructed PyTorch to disable intra-op parallelism with \ic{OMP_NUM_THREADS=1} as is recommended for inference settings when multiple requests will provide parallelism. In this setup, ideal scaling would be linear for CPU inference up to 40 threads. On the GPU, ideal scaling would be linear up to 2 threads, and then level off as additional threads multiplex the GPUs.
Figure~\ref{results} presents the results of the benchmark in three configurations. \emph{One Python interpreter per thread} shows the performance of our approach, using the ability to load multiple python interpreters described in Section~\ref{multi_python}. To simulate what would happen without customizing Python, the \emph{Global Python Interpreter} approach limits the total number of interpreters to 1, similar to how Python is normally used. Finally as a comparison to how the models would perform when extracted from Python entirely we measured the performance of the \emph{TorchScript}-converted models where possible. BERT, DLRM, and MoCo could not be converted to TorchScript, so we used \ic{torch.jit.trace} to get a trace of computation for a particular example input. Each of these models includes some control-flow that is not represented in the trace, so these numbers serve as an \emph{upper bound} on the throughput as TorchScript and may perform slower in TorchScript when fully ported. Some models did not include CPU-only versions, so we only include CUDA throughput.
In each group, models are ordered by throughput, with slower (bigger) models first. Performance results for these bigger models (e.g., Super SloMo, Attention is All You Need, Star GAN, Demucs, Yolo v3) are almost the same across all three configurations. This reinforces our intuition that large Tensor operations will make Python runtime only a small component of overall time. For these models, extracting from Python via TorchScript provides little benefit as does using multiple Python interpreters. However, even for these examples having a Python packaging system to make hermetic versions of the model, and a consistent API for running the models from C++ is beneficial.
Medium-sized models show different performance characteristics. For instance, MobileNet v3 on the CPU shows good scaling for both TorchScript and multiple Python interpreters, but the single Python interpreter barely scales, showing how the global interpreter lock prevents decent inference performance. On the GPU, TorchScript performs up to 1.9x faster than multiple Python interpreters, but only 1.14x better than multiple Python interpreters when using 8 requester threads. This result indicates that there is Python overhead in running this benchmark. However, by using additional CPU cores to parallelize this overhead, it is possible to reduce the overall throughput loss using GPUs while still keeping the model in Python. This approach is wasteful of CPU cores, but might be a decent tradeoff when prototyping the deployment of a model if it avoids significant engineering effort to port the model to TorchScript.
Finally, the smallest models such as DLRM show clear Python overheads with TorchScript performing more than 2x faster. Examining a profile of the DLRM example, we see that half the time is clearly spent in the Python interpreter. In these cases it would make sense to use TorchScript for deployment if possible. Nevertheless, the multiple Python approach, while slower, still scales with the number of threads, and hence offers a scalable option for deployment before putting effort into a faster TorchScript version.
\section{Conclusion}
\label{conclusion}
Our evaluation showed that performing inference in Python using multiple interpreters is a practical way of deploying many models. For models that spend significant time in the Python interpreter, the use of multiple interpreters enables scaling when the GIL would otherwise create contention. Furthermore, for GPU inference, the ability to scale the number of Python interpreters allows the Python overhead to be amortized across multiple request threads.
Our approach to Python inference still has some limitations that can be overcome with future work. Loading of third-party C++ extensions including Python bindings, such as those in Mask RCNN is difficult because CPython bindings directly refer to global symbols in the dynamic linker table. In our approach these symbols are hidden from other extensions. We work around this by recompiling our shared interpreter library with the additional extensions included, but this is more complicated than simply including the extension library with the model package. Furthermore, our approach requires copying and loading the shared interpreter library for each interpreter, duplicating code in memory. This library is 34MB large in release mode, which is acceptable for server applications where memory is plentiful, but it grows to 274MB when debug symbols are enabled.
Both the extensions and code size issues can be resolved by writing custom dynamic library loading code rather than relying on OS primitives like \ic{dlopen}. A custom loader could map the code sections of the file into different places in virtual memory while ensuring that only one real copy of the code exists. Furthermore, since the loader is responsible for resolving external symbols, custom code could resolve Python C API references to their local copies when loading extension modules. We did not pursue this approach in our experiments because it requires significantly more complicated code to parse ELF shared library files.
Finally, even for models whose performance is lower when using Python, Python inference gives model authors flexibility to quickly prototype and deploy models and then focus on the performance of the models when necessary rather than having to invest in upfront effort to extract the model. Because Python does not have to be entirely eliminated, it also offers a more piecemeal approach to performance. For instance, one can use Python-based libraries like Halide~\cite{halide} or TVM~\cite{tvm} to accelerate the model while still packaging it as a Python program. Using Python as the packaging format opens up the possibility of employing bespoke compilers and optimizers without the burden of creating an entire packaging and deployment environment for each technology.
\appendix
\end{document} |
\begin{document}
\newtheorem{theorem}{Theorem}
\newtheorem{proposition}{Proposition}
\newtheorem{lemma}{Lemma}
\newtheorem{corollary}{Corollary}
\newtheorem{definition}{Definition}
\newtheorem{remark}{Remark}
\newtheorem{remarks}{Remarks}
\newcommand{\ts}{\textstyle}
\numberwithin{equation}{section} \numberwithin{theorem}{section}
\numberwithin{proposition}{section} \numberwithin{lemma}{section}
\numberwithin{corollary}{section}
\numberwithin{definition}{section} \numberwithin{remark}{section}
% NOTE(review): the macro definitions below were corrupted by a global
% find-and-replace that expanded each shorthand inside its own definition
% (e.g. \newcommand became \nablaewcommand via \n -> \nabla). The macro BODIES
% are recovered from the corrupted text; the short NAMES are best-guess
% reconstructions and should be verified against the authors' original source.
\newcommand{\ren}{\mathbb{R}^N}
\newcommand{\re}{\mathbb{R}}
\newcommand{\ds}{\displaystyle}
\newcommand{\n}{\nabla}
\newcommand{\p}{\partial}
\newcommand{\iy}{\infty}
\newcommand{\pa}{\partial}
\newcommand{\noin}{\noindent}
\newcommand{\vsk}{%
\vskip-.1cm}
\newcommand{\nl}{%
}
\newcommand{\A}{{\bf A}}
\newcommand{\B}{{\bf B}}
\newcommand{\C}{{\bf C}}
\newcommand{\D}{{\bf D}}
\newcommand{\E}{{\bf E}}
\newcommand{\F}{{\bf F}}
\newcommand{\G}{{\bf G}}
\newcommand{\om}{{\mathbf \omega}}
\newcommand{\Am}{{\bf A}_{2m}}
\newcommand{\CC}{{\mathbf C}}
\newcommand{\IM}{{\mathrm{Im}}\,}
\newcommand{\RE}{{\mathrm{Re}}\,}
\newcommand{\eu}{{\mathrm e}}
\newcommand{\cN}{{\mathcal{N}}}
\newcommand{\Lr}{L^2_\rho(\ren)}
\newcommand{\Lrs}{L^2_{\rho^*}(\ren)}
\renewcommand{\a}{\alpha}
\renewcommand{\b}{\beta}
\newcommand{\g}{\gamma}
\newcommand{\Ga}{\Gamma}
\renewcommand{\d}{\delta}
\newcommand{\De}{\Delta}
\newcommand{\e}{\varepsilon}
\newcommand{\vp}{\varphi}
\renewcommand{\l}{\lambda}
\renewcommand{\o}{\omega}
\renewcommand{\O}{\Omega}
\newcommand{\s}{\sigma}
\renewcommand{\t}{\tau}
\renewcommand{\th}{\theta}
\newcommand{\z}{\zeta}
\newcommand{\wx}{\widetilde x}
\newcommand{\wt}{\widetilde t}
\newcommand{\noi}{\noindent}
\newcommand{\bu}{{\bf u}}
\newcommand{\bx}{{\bf x}}
\newcommand{\by}{{\bf y}}
\newcommand{\zz}{{\bf z}}
\newcommand{\ba}{{\bf a}}
\newcommand{\bc}{{\bf c}}
\newcommand{\bj}{{\bf j}}
\newcommand{\bU}{{\bf U}}
\newcommand{\bY}{{\bf Y}}
\newcommand{\bH}{{\bf H}}
\newcommand{\GG}{{\bf G}}
\newcommand{\bV}{{\bf V}}
\newcommand{\bw}{{\bf w}}
\newcommand{\bv}{{\bf v}}
\newcommand{\bh}{{\bf h}}
\newcommand{\di}{{\rm div}\,}
\newcommand{\ri}{{\rm i}\,}
\def\Id{{\rm Id}}
\newcommand{\inrnp}{\quad \mbox{in} \quad \ren \times \re_+}
\newcommand{\inq}{\quad \mbox{in} \quad}
\newcommand{\inrp}{\quad \mbox{in} \quad \re \times \re_+}
\newcommand{\inr}{\quad \mbox{in} \quad \re}
\newcommand{\forq}{\quad \mbox{for} \quad}
\newcommand{\whereq}{,\quad \mbox{where} \quad}
\newcommand{\asq}{\quad \mbox{as} \quad}
\newcommand{\andq}{\quad \mbox{and} \quad}
\newcommand{\withq}{,\quad \mbox{with} \quad}
\newcommand{\orq}{,\quad \mbox{or} \quad}
\newcommand{\atq}{\quad \mbox{at} \quad}
\newcommand{\onq}{\quad \mbox{on} \quad}
\newcommand{\ef}{\eqref}
\newcommand{\mc}{\mathcal}
\newcommand{\mf}{\mathfrak}
\newcommand{\Ge}{\Gamma_\e}
\newcommand{\Hone}{ H^{1}(\Rn)}
\newcommand{\Wone}{W^{1,2}(\Rn)}
\newcommand{\Wan}{W^{\frac{\a}{2},2}(\Rn)}
\newcommand{\Wa}{W^{\frac{\a}{2},2}(\R)}
\newcommand{\irn}{\int_{\Rn}}
\newcommand{\ir}{\int_\R}
\newcommand{\Ie}{I_\e}
\newcommand{\nie}{\n I_\e}
\newcommand{\gie}{I_\e'}
\newcommand{\Ies}{I_\e''}
\newcommand{\Izs}{I_0''}
\newcommand{\Izp}{I'_0}
\newcommand{\zex}{z_{\e,\rho}}
\newcommand{\wex}{w_{\e,\xi}}
\newcommand{\zer}{z_{\e,\rho}}
\newcommand{\wer}{w_{\e,\rho}}
\newcommand{\dzer}{{\dot{z}}_{\e,\rho}}
\newcommand{\bE}{{\bf E}}
\newcommand{\vu}{{\bf u}}
\newcommand{\vv}{{\bf v}}
\newcommand{\vz}{{\bf z}}
\newcommand{\vw}{{\bf w}}
\newcommand{\vzero}{{\bf 0}}
\newcommand{\bphi}{{\bf \phi}}
\newcommand{\uphi}{\underline{\phi}}
\newcommand{\vh}{{\bf h}}
\newcommand{\EE}{\mathbb{E}}
\newcommand{\XX}{\mathbb{X}}
\newcommand{\FF}{\mathbb{F}}
\newcommand{\YY}{\mathbb{Y}}
\newcommand{\MM}{\mathbb{M}}
\newcommand{\h}{\mathbb{H}}
\newcommand{\ssk}{\smallskip}
\newcommand{\Lra}{\quad \Longrightarrow \quad}
\def\com#1{\fbox{\parbox{6in}{\texttt{#1}}}}
\def\N{{\mathbb N}}
\def\cA{{\cal A}}
\newcommand{\de}{\,d}
\newcommand{\eps}{\e}
\newcommand{\be}{\begin{equation}}
\newcommand{\ee}{\end{equation}}
\newcommand{\spt}{{\mbox spt}}
\newcommand{\ind}{{\mbox ind}}
\newcommand{\supp}{{\mbox supp}}
\newcommand{\dip}{\displaystyle}
\newcommand{\prt}{\partial}
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\renewcommand{\baselinestretch}{1.1}
\newcommand{\Dm}{(-\Delta)^m}
\newenvironment{pf}{\noindent{\it
Proof}.\enspace}{\rule{2mm}{2mm}
}
\newcommand{\Da}{(-\Delta)^{\a/2}}
\title
{\bf Positive solutions for semilinear fractional elliptic problems involving an inverse fractional operator}
\author{P.~\'Alvarez-Caudevilla, E.~Colorado and Alejandro Ortega}
\address{Departamento de Matem\'aticas, Universidad Carlos III de Madrid,
Av. Universidad 30,
28911 Legan\'es (Madrid), Spain}
\email{pacaudev@math.uc3m.es}
\address{Departamento de Matem\'aticas, Universidad Carlos III de Madrid,
Av. Universidad 30,
28911 Legan\'es (Madrid), Spain}
\email{ecolorad@math.uc3m.es}
\address{Departamento de Matem\'aticas, Universidad Carlos III de Madrid,
Av. Universidad 30,
28911 Legan\'es (Madrid), Spain}
\email{alortega@math.uc3m.es}
\thanks{This paper has been partially supported by the Ministry of Economy and Competitiveness of
Spain and FEDER, under research project MTM2016-80618-P}
\thanks{The first author was also partially supported by the Ministry of Economy and Competitiveness of
Spain under research project RYC-2014-15284}
\date{\today}
\begin{abstract}
This paper is devoted to the study of the existence of positive solutions for a problem related to a higher order fractional differential equation involving a nonlinear term depending on a fractional differential operator,
\begin{equation*}
\left\{
\begin{tabular}{lcl}
$(-\Delta)^{\alpha} u=\lambda u+ (-\Delta)^{\beta}|u|^{p-1}u$ & &in $\Omega$, \\
$\mkern+3mu(-\Delta)^{j}u=0$ & &on $\partial\Omega$, for $j\in\mathbb{Z}$, $0\leq j< [\alpha]$,
\end{tabular}
\right.
\end{equation*}
where $\Omega$ is a bounded domain in $\mathbb{R}^{N}$, $0<\beta<1$,
$\beta<\alpha<\beta+1$ and $\lambda>0$. In particular, we study the fractional elliptic problem,
\begin{equation*}
\left\{
\begin{array}{ll}
(-\Delta)^{\alpha-\beta} u= \lambda(-\Delta)^{-\beta}u+ |u|^{p-1}u & \hbox{in} \quad \Omega, \\
\mkern+72.2mu u=0 & \hbox{on} \quad \partial\Omega,
\end{array}
\right.
\end{equation*}
and we prove existence or nonexistence of positive solutions depending on the parameter $\lambda>0$,
up to the critical value of the exponent $p$, i.e., for $1<p\leq 2_{\mu}^*-1$ where $\mu:=\alpha-\beta$ and $2_{\mu}^*=\frac{2N}{N-2\mu}$ is the critical
exponent of the Sobolev embedding.
\end{abstract}
\maketitle
\noindent {\it \footnotesize 2010 Mathematics Subject Classification}. {\scriptsize 35A15, 35G20, 35J61, 49J35.}\\
{\it \footnotesize Key words}. {\scriptsize Fractional Laplacian, Critical Problem, Concentration-Compactness Principle, Mountain Pass Theorem}
\section{Introduction}\label{sec:intro}
\noindent Let $\Omega$ be a smooth bounded domain of $\mathbb{R}^N$ with $N>2\mu$ and
$$\mu:=\alpha-\beta\quad \hbox{with} \quad 0<\beta<1\quad \hbox{and}\quad \beta<\alpha<\beta+1.$$
We analyze the existence of positive solutions for the following fractional elliptic problem,
\begin{equation}\label{ecuacion}
\left\{
\begin{array}{ll}
(-\Delta)^{\alpha-\beta} u= \gamma(-\Delta)^{-\beta}u+ |u|^{p-1}u & \hbox{in} \quad \Omega, \\
u= 0 & \hbox{on} \quad \partial\Omega,
\end{array}
\right.
\tag{$P_\gamma$}
\end{equation}
depending on the real parameter $\gamma>0$. To this end, we consider,
\begin{equation*}
1<p\leq2_{\mu}^*-1=\frac{N+2\mu}{N-2\mu},
\end{equation*}
where $2_{\mu}^*=\frac{2N}{N-2\mu}$ is the critical exponent of the Sobolev embedding. Associated with \eqref{ecuacion} we have the following Euler--Lagrange functional:
\begin{equation}
\label{funcional_ecuacion}
\mathcal{F}_\gamma(u)=\frac{1}{2}\int_\Omega|(-\Delta)^{\frac{\mu}{2}} u|^2dx-\frac{\gamma}{2} \int_\Omega |(-\Delta)^{-\frac{\beta}{2}}u|^2\, dx-\frac{1}{p+1} \int_\Omega |u|^{p+1}dx,
\end{equation}
such that the solutions of \eqref{ecuacion} correspond to critical points of the $C^1$ functional \eqref{funcional_ecuacion} and vice versa.
Note that $(-\Delta)^{-\beta}$ is a positive linear integral compact operator from $L^2(\Omega)$ into itself and it is well defined thanks to the Spectral Theorem.
The definition of the fractional powers of the positive Laplace operator $(-\Delta)$, in a bounded domain $\Omega$ with
homogeneous Dirichlet boundary data, can be carried out through the spectral decomposition using the powers of the eigenvalues of $(-\Delta)$
with the same boundary conditions. Indeed, let $(\varphi_i,\lambda_i)$ be the eigenfunctions (normalized with respect to the $L^2(\Omega)$-norm) and eigenvalues of $(-\Delta)$ under homogeneous
Dirichlet boundary data. Then, $(\varphi_i,\lambda_i^{\mu})$ stand for the eigenpairs of $(-\Delta)^{\mu}$ under homogeneous
Dirichlet boundary conditions as well. Thus, the fractional operator $(-\Delta)^{\mu}$ is well defined in the space of functions that vanish on the boundary,
\begin{equation*}
H_0^{\mu}(\Omega)=\left\{u=\sum_{j=1}^{\infty} a_j\varphi_j\in L^2(\Omega):\ \|u\|_{H_0^{\mu}(\Omega)}=\left(\sum_{j=1}^{\infty} a_j^2\lambda_j^{\mu} \right)^{\frac{1}{2}}<\infty\right\}.
\end{equation*}
As a result of this definition it follows that,
\begin{equation}\label{eqnorma}
\|u\|_{H_0^{\mu}(\Omega)}=\|(-\Delta)^{\frac{\mu}{2}}u\|_{L^2(\Omega)}.
\end{equation}
In particular,
\begin{equation*}
(-\Delta)^{-\beta}u=\sum_{j=1}^{\infty} a_j\lambda_j^{-\beta}\varphi_j.
\end{equation*}
Since the above definition allows us to integrate by parts, we say that $u\in H_0^{\mu}(\Omega)$ is an energy or weak solution for problem \eqref{ecuacion} if,
\begin{equation*}
\int_{\Omega}(-\Delta)^{\frac{\mu}{2}}u(-\Delta)^{\frac{\mu}{2}}\phi dx=\gamma\int_{\Omega}(-\Delta)^{-\frac{\beta}{2}}u(-\Delta)^{-\frac{\beta}{2}}\phi dx+\int_{\Omega}|u|^{p-1}u\phi dx,\quad \forall\phi\in H_0^{\mu}(\Omega).
\end{equation*}
In other words, $u\in H_0^{\mu}(\Omega)$ is a critical point of the functional defined by \eqref{funcional_ecuacion}. We also observe that
the functional embedding features for the equation in \eqref{ecuacion} are governed by the Sobolev embedding theorem. Let us recall the compact inclusion,
\begin{equation}
\label{compact_emb}
H_0^{\mu}(\Omega) \hookrightarrow L^{p+1}(\Omega),\quad 2\leq p+1<2_{\mu}^*,
\end{equation}
being a continuous inclusion up to the critical exponent $p=2_{\mu}^*-1$.\newline
To define non-integer higher-order powers of the Laplace operator, let us recall that the homogeneous Navier boundary conditions are defined as
\begin{equation*}
u=\Delta u=\Delta^2 u=\ldots=\Delta^{k-1} u=0,\quad\mbox{on }\partial\Omega.
\end{equation*}
Given $\alpha>1$, the $\alpha$-th power of the classical Dirichlet Laplacian in the sense of the spectral theory can be defined as the operator
whose action on a smooth function $u$ satisfying the homogeneous Navier boundary conditions for $0\leq k<[\alpha]$ (where $[\cdot]$ means the integer part), is given by
\begin{equation*}
\langle (-\Delta)^{\alpha} u, u \rangle=\sum_{j\ge 1}
\lambda_j^{\alpha}|\langle u_1,\varphi_j\rangle|^2.
\end{equation*}
We refer to \cite{MusNa2,MusNa3} for a study of this higher-order fractional Laplace operator, referred to as the Navier fractional Laplacian, as well as useful properties of the fractional Sobolev space $H_0^{\alpha}(\Omega)$.\newline
On the other hand, we have a connection between problem \eqref{ecuacion} and a fractional order
elliptic system which turns out to be very useful in the sequel. In particular, taking $\textstyle{\psi:=(-\Delta)^{-\beta}u}$, problem \eqref{ecuacion} provides us with the fractional elliptic cooperative system,
\begin{equation}
\label{cosys}
\left\{\begin{array}{l}
(-\Delta)^{\mu}u = \gamma \psi+|u|^{p-1}u,\\
(-\Delta)^{\beta}\psi=u,
\end{array}\right.\quad \hbox{in}\quad \Omega,\quad (u,\psi)=(0,0)\quad\hbox{on}\quad \partial\Omega.
\end{equation}
Nevertheless, system \eqref{cosys} is not a variational system. In order to obtain a variational system from problem \eqref{ecuacion} we follow a similar idea to the one performed above,
distinguishing whether $\alpha=2\beta$ or $\alpha\neq2\beta$. In the first case we take $\textstyle{v:=\sqrt{\gamma}\psi}$ and, recalling that $\mu:=\alpha-\beta$, we obtain the following fractional elliptic cooperative system,
\begin{equation}
\label{sistemabb}
\left\{\begin{array}{l}
(-\Delta)^{\beta}u=\sqrt{\gamma}v+|u|^{p-1}u,\\
(-\Delta)^{\beta}v=\sqrt{\gamma}u,
\end{array}
\right.
\tag{$S_{\gamma}^{\beta}$}
\quad \hbox{in}\quad \Omega,\quad (u,v)=(0,0)\quad\hbox{on}\quad \partial\Omega,
\end{equation}
whose associated energy functional is
\begin{equation*}
\mathcal{J}_{\gamma}^{\beta}(u,v)=\frac{1}{2} \int_\Omega |(-\Delta)^{\frac{\beta}{2}} u|^2dx + \frac{1}{2} \int_\Omega |(-\Delta)^{\frac{\beta}{2}} v|^2dx -\sqrt{\gamma}\int_\Omega uvdx -\frac{1}{p+1} \int_\Omega |u|^{p+1}dx.
\end{equation*}
In the second case, $\alpha\neq2\beta$, taking $v=\gamma^{\beta/\alpha}\psi$ we obtain the system,
\begin{equation*}
\left\{\begin{array}{rl}
(-\Delta)^{\mu}u=&\!\!\!\gamma^{1-\beta/\alpha}v+|u|^{p-1}u,\\
(-\Delta)^{\beta}v=&\!\!\!\gamma^{\beta/\alpha}u,
\end{array}
\right.
\quad \hbox{in}\quad \Omega,\quad (u,v)=(0,0)\quad\hbox{on}\quad \partial\Omega.
\end{equation*}
Since the former system is still not variational, we transform it into the following variational system,
\begin{equation}
\label{sistemaab}
\left\{\begin{array}{rl}
\frac{1}{\gamma^{1-\beta/\alpha}}(-\Delta)^{\mu} u =&\!\!\! v+\frac{1}{\gamma^{1-\beta/\alpha}}|u|^{p-1}u,\\
\frac{1}{\gamma^{\beta/\alpha}}(-\Delta)^{\beta}v=&\!\!\! u,
\end{array}
\right.
\tag{$S_{\gamma}^{\alpha,\beta}$}
\quad \hbox{in}\quad \Omega,\quad (u,v)=(0,0)\quad\hbox{on}\quad \partial\Omega,
\end{equation}
whose associated functional is
\begin{align*}
\mathcal{J}_{\gamma}^{\alpha,\beta}(u,v)=&\frac{1}{2\gamma^{1-\beta/\alpha}}\int_\Omega|(-\Delta)^{\frac{\mu}{2}}u|^2dx +\frac{1}{2\gamma^{\beta/\alpha}}\int_\Omega|(-\Delta)^{\frac{\beta}{2}}v|^2dx-\int_\Omega uv dx\\
&-\frac{1}{(p+1)\gamma^{1-\beta/\alpha}} \int_\Omega |u|^{p+1}dx.
\end{align*}
We will use the equivalence between problem \eqref{ecuacion} and systems
\eqref{sistemabb} and \eqref{sistemaab} to surpass the difficulties that arise while working with the inverse fractional Laplace operator $(-\Delta)^{-\beta}$. In particular, this approach will help us to avoid ascertaining explicit
estimations for this inverse term. On the other hand, to overcome the usual difficulties that appear when dealing with fractional
Laplace operators we will use the ideas of Caffarelli and Silvestre \cite{CS}, together
with those performed in \cite{BrCdPS}, giving an equivalent definition of the fractional operator $(-\Delta)^{\mu}$
in a bounded domain $\Omega$ by means of an auxiliary problem that we will introduce below.
Associated with the domain $\Omega$ let us consider the cylinder
$\mathcal{C}_{\Omega}=\Omega\times(0,\infty)\subset\mathbb{R}_+^{N+1}$ called extension cylinder.
Moreover, we denote by $(x,y)$ the points belonging to $\mathcal{C}_{\Omega}$ and with
$\partial_L\mathcal{C}_{\Omega}=\partial\Omega\times(0,\infty)$ the lateral boundary
of the extension cylinder. Thus, given a function $u\in H_{0}^{\mu}(\Omega)$, define the
$\mu$-harmonic extension function $w$, denoted by $w:=E_{\mu}[u]$, as the solution to problem,
\begin{equation*}
\left\{
\begin{array}{ll}
-{\rm div}(y^{1-2\mu}\nabla w)=0 & \hbox{in} \quad \mathcal{C}_{\Omega}, \\
w=0 & \hbox{on}\quad \partial_L\mathcal{C}_{\Omega}, \\
w(x,0)=u(x) & \hbox{in} \quad \Omega\times\{y=0\}.
\end{array}
\right.
\end{equation*}
This extension function $w$ belongs to the space
\begin{equation*}
\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})=\overline{\mathcal{C}_0^{\infty}(\Omega\times[0,\infty))}^{\|\cdot\|_{\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})}},\ \text{with}\ \|w\|_{\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})}^2=\kappa_{\mu}\int_{\mathcal{C}_{\Omega}}y^{1-2\mu}|\nabla w(x,y)|^2dxdy.
\end{equation*}
With that constant $\kappa_{\mu}$, whose precise value can be seen in \cite{BrCdPS}, the extension operator is an isometry between $H_0^{\mu}(\Omega)$ and
$\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})$ in the sense
\begin{equation}\label{isometry}
\|E_{\mu}[\varphi]\|_{\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})}=\|\varphi\|_{H_0^{\mu}(\Omega)},\ \text{for all}\ \varphi\in H_0^{\mu}(\Omega).
\end{equation}
The relevance of the extension function $w$ is that it is related to the fractional
Laplacian of the original function through the formula
\begin{equation*}
\frac{\partial w}{\partial \nu^{\mu}}:= -\kappa_{\mu} \lim_{y\to
0^+} y^{1-2\mu}\frac{\partial w}{\partial y}=(-\Delta)^{\mu}u(x).
\end{equation*}
In the case $\Omega=\mathbb{R}^N$ this formulation provides us with explicit expressions for both the fractional Laplacian and the $\mu$-extension in terms of the Riesz and the Poisson kernels respectively. Precisely,
\begin{equation*}
\begin{split}
& (-\Delta)^{\mu}u(x)=\ d_{N,\mu}P.V.\int_{\mathbb{R}^N}\frac{u(x)-u(y)}{|x-y|^{N+2\mu}}dy\\
& w(x,y)=\ P_y^{\mu}\ast u(x)=c_{N,\mu}y^{2\mu}\int_{\mathbb{R}^N}\frac{u(z)}{(|x-z|^2+y^2)^{\frac{N+2\mu}{2}}}dz.
\end{split}
\end{equation*}
For exact values of the constants $c_{N,\mu}$ and $d_{N,\mu}$ we refer to \cite{BrCdPS}.
Thanks to the arguments shown above, we can reformulate problem \eqref{ecuacion} in terms of the extension problem as follows,
\begin{equation}\label{extension_problem}
\left\{
\begin{array}{ll}
-{\rm div}(y^{1-2\mu}\nabla w)=0 & \hbox{in}\quad \mathcal{C}_{\Omega}, \\
w=0 & \hbox{on}\quad \partial_L\mathcal{C}_{\Omega}, \\
\frac{\partial w}{\partial \nu^\mu}=\gamma (-\Delta)^{-\beta}w+|w|^{p-1}w & \hbox{in}\quad \Omega\times\{y=0\}.
\end{array}
\right.
\tag{$\tilde{P}_{\gamma}$}
\end{equation}
Therefore, an energy or weak solution of this problem is a function $w\in \mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})$ satisfying
\begin{equation*}
\kappa_{\mu}\int_{\mathcal{C}_{\Omega}} y^{1-2\mu}\langle\nabla w,\nabla\varphi \rangle dxdy=\int_{\Omega} \left(\gamma (-\Delta)^{-\beta}w+|w|^{p-1}w\right)\varphi(x,0)dx,\quad \forall\varphi\in\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega}).
\end{equation*}
For any energy solution $w\in \mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})$
to problem \eqref{extension_problem}, the corresponding trace function $u=Tr[w]=w(\cdot,0)$ belongs to the space $H_0^{\mu}(\Omega)$ and is an energy solution for the problem
\eqref{ecuacion} and vice versa. If $u\in H_0^{\mu}(\Omega)$ is an energy solution of \eqref{ecuacion}, then $w:=E_\mu[u]\in \mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})$ is
an energy solution for \eqref{extension_problem} and, as a consequence, both formulations are equivalent. Finally, the energy functional associated with problem \eqref{extension_problem} is
\begin{equation*}
\widetilde{\mathcal{F}}_{\gamma}(w)=\frac{\kappa_{\mu}}{2}\int_{\mathcal{C}_{\Omega}}y^{1-2\mu}|\nabla w|^2dxdy-\frac{\gamma}{2}\int_{\Omega}|(-\Delta)^{-\frac{\beta}{2}}w|^2dx-\frac{1}{p+1}\int_{\Omega}|w|^{p+1}dx.
\end{equation*}
Since the extension operator is an isometry, critical points of $\widetilde{\mathcal{F}}_{\gamma}$ in $\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})$ correspond to critical
points of the functional $\mathcal{F}_{\gamma}$ in $H_0^{\mu}(\Omega)$. Indeed, arguing as in \cite[Proposition 3.1]{BCdPS},
the minima of $\widetilde{\mathcal{F}}_{\gamma}$ also correspond to the minima of the functional $\mathcal{F}_{\gamma}$.
Another useful tool to be applied throughout this work will be the following trace inequality,
\begin{equation}\label{sobext}
\int_{\mathcal{C}_{\Omega}}y^{1-2\mu}|\nabla \phi(x,y)|^2dxdy\geq C\left(\int_{\Omega}|\phi(x,0)|^rdx\right)^{\frac{2}{r}},\quad\forall\phi\in \mathcal{X}_{0}^{\mu}(\mathcal{C}_{\Omega}),
\end{equation}
with $1\leq r\leq\frac{2N}{N-2\mu},\ N>2\mu$. Let us notice that, since the extension operator is an isometry, inequality \eqref{sobext} is equivalent to the fractional Sobolev inequality,
\begin{equation}\label{sobolev}
\int_{\Omega}|(-\Delta)^{\mu/2}\varphi|^2dx\geq C\left(\int_{\Omega}|\varphi|^rdx\right)^{\frac{2}{r}},\quad\forall\varphi\in H_{0}^{\mu}(\Omega),
\end{equation}
with $1\leq r\leq\frac{2N}{N-2\mu}$, $N>2\mu$.
\begin{remark} When $r=2_{\mu}^*$, the best constant in \eqref{sobext} will be denoted by $S(\mu,N)$. This constant is explicit and independent of the domain $\Omega$. Indeed,
its exact value is given by the expression
\begin{equation*}
S(\mu,N)=\frac{2\pi^\mu\Gamma(1-\mu)\Gamma(\frac{N+2\mu}{2})(\Gamma(\frac{N}{2}))^{\frac{2\mu}{N}}}{\Gamma(\mu)\Gamma(\frac{N-2\mu}{2})(\Gamma(N))^\mu},
\end{equation*}
and it is never achieved when $\Omega$ is a bounded domain. Thus, we have,
\begin{equation*}
\int_{\mathbb{R}_{+}^{N+1}}\!\!y^{1-2\mu}|\nabla \phi(x,y)|^2dxdy\geq S(\mu,N)\left(\int_{\mathbb{R}^{N}}|\phi(x,0)|^{\frac{2N}{N-2\mu}}dx\right)^{\frac{N-2\mu}{N}}\ \forall \phi\in \mathcal{X}_0^\mu(\mathbb{R}_{+}^{N+1}).
\end{equation*}
If $\Omega=\mathbb{R}^N$, the constant $S(\mu,N)$ is achieved for the family of extremal functions $w_{\varepsilon}^{\mu}= E_\mu[v_{\varepsilon}^{\mu}]$ with
\begin{equation}\label{u_eps}
v_{\varepsilon}^{\mu}(x)=\frac{\varepsilon^{\frac{N-2\mu}{2}}}{(\varepsilon^2+|x|^2)^{\frac{N-2\mu}{2}}},
\end{equation}
for arbitrary $\varepsilon>0$; see \cite{BrCdPS} for further details. Finally, combining the previous comments, the
best constant in \eqref{sobolev} with $\Omega=\mathbb{R}^N$ is given then by $\kappa_\mu S(\mu,N)$.
\end{remark}
Although systems \varphiepsilonqref{sistemabb} and \varphiepsilonqref{sistemaab} no longer contain an inverse term as $(-\Delta)^{-\betaegin{equation}ta}$ they still are non-local systems, with all the complications that this entails.
However, we use the extension technique shown above to reformulate the non-local systems \varphiepsilonqref{sistemabb} and \varphiepsilonqref{sistemaab} in terms of the following local systems.
Taking $w:=E_{\mu}[u]$ and $z:=E_{\betaegin{equation}ta}[v]$, the extension system corresponding to \varphiepsilonqref{sistemabb} reads as
\betaegin{equation}gin{equation}\lambdaabel{extension_systembb}
\lambdaeft\{
\betaegin{equation}gin{array}{ll}
-{\rm div}(y^{1-2\betaegin{equation}ta}\nablaabla w)= 0 & \mathbb{H}box{in}\quad \mathcal{C}_{\Omegamega}, \\
-{\rm div}(y^{1-2\betaegin{equation}ta}\nablaabla z)=0 & \mathbb{H}box{in}\quad \mathcal{C}_{\Omegamega}, \\
{\rm div}\,splaystyle\frac{\partialrtialartial w}{\partialrtialartial \nablau^{\betaegin{equation}ta}}= \sigmaqrt{\gammaamma} z+|w|^{p-1}w & \mathbb{H}box{in}\quad \Omegamega\tauimes\{y=0\},\\
{\rm div}\,splaystyle\frac{\partialrtialartial z}{\partialrtialartial \nablau^{\betaegin{equation}ta}}= \sigmaqrt{\gammaamma} w & \mathbb{H}box{in}\quad \Omegamega\tauimes\{y=0\},\\
w=z= 0 & \mathbb{H}box{on}\quad \partialrtialartial_L\mathcal{C}_{\Omegamega},
\varphiepsilonnd{array}
\right.
\tauag{$\widetilde{S}_{\gammaamma}^{\betaegin{equation}ta}$}
\varphiepsilonnd{equation}
whose associated functional is
\betaegin{equation}gin{equation*}
\betaegin{equation}gin{split}
\Phi_{\gamma}^{\betaegin{equation}ta}(w,z)&=\frac{\kappa_{\betaegin{equation}ta}}{2}\int_{\mathcal{C}_{\Omegamega}}y^{1-2\betaegin{equation}ta}|\nablaabla w|^2dxdy+\frac{\kappa_{\betaegin{equation}ta}}{2}\int_{\mathcal{C}_{\Omegamega}}y^{1-2\betaegin{equation}ta}|\nablaabla z|^2dxdy-\sigmaqrt{\gammaamma}\int_{\Omegamega}w(x,0)z(x,0)dx\\&-\frac{1}{p+1}\int_{\Omegamega}|w(x,0)|^{p+1}dx.
\varphiepsilonnd{split}
\varphiepsilonnd{equation*}
Since the extension function is an isometry, critical points for the functional $\Phi_{\gamma}^{\betaegin{equation}ta}$
in $\mathcal{X}_0^{\betaegin{equation}ta}(\mathcal{C}_{\Omegamega})\tauimes\mathcal{X}_0^{\betaegin{equation}ta}(\mathcal{C}_{\Omegamega})$ correspond to critical points of $\mathcal{J}_{\gamma}^{\betaegin{equation}ta}$
in $H_0^{\betaegin{equation}ta}(\Omegamega)\tauimes H_0^{\betaegin{equation}ta}(\Omegamega)$. Moreover, arguing as in \cite[Proposition 3.1]{BCdPS}, the minima of $\Phi_{\gamma}^{\betaegin{equation}ta}$
also correspond to the minima of $\mathcal{J}_{\gamma}^{\betaegin{equation}ta}$. Similarly, the extension system of system \varphiepsilonqref{sistemaab} reads as
\begin{equation}\label{extension_systemab}
\left\{
\begin{array}{ll}
-{\rm div}(y^{1-2\mu}\nabla w)= 0 & \hbox{in}\quad \mathcal{C}_{\Omega}, \\
-{\rm div}(y^{1-2\beta}\nabla z)= 0 & \hbox{in}\quad \mathcal{C}_{\Omega}, \\
\displaystyle\frac{1}{\gamma^{1-\beta/\alpha}}\frac{\partial w}{\partial \nu^{\mu}}= z+\frac{1}{\gamma^{1-\beta/\alpha}}|w|^{p-1}w & \hbox{in}\quad \Omega\times\{y=0\},\\
\displaystyle\frac{1}{\gamma^{\beta/\alpha}}\frac{\partial z}{\partial \nu^{\beta}}=w & \hbox{in}\quad \Omega\times\{y=0\},\\
w=z=0 & \hbox{on}\quad \partial_L\mathcal{C}_{\Omega},
\end{array}
\right.
\tag{$\widetilde{S}_{\gamma}^{\alpha,\beta}$}
\end{equation}
whose associated functional is
\begin{equation*}
\begin{split}
\Phi_{\gamma}^{\alpha,\beta}(w,z)=&\frac{\kappa_{\mu}}{2\gamma^{1-\beta/\alpha}}\int_{\mathcal{C}_{\Omega}}y^{1-2\mu}|\nabla w|^2dxdy+\frac{\kappa_{\beta}}{2\gamma^{\beta/\alpha}}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}|\nabla z|^2dxdy-\int_{\Omega}w(x,0)z(x,0)dx\\&-\frac{1}{(p+1)\gamma^{1-\beta/\alpha}}\int_{\Omega}w(x,0)^{p+1}dx.
\end{split}
\end{equation*}
Once again, since the extension function is an isometry, critical points of $\Phi_{\gamma}^{\alpha,\beta}$ in
$\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})\times\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})$ correspond to critical points of $\mathcal{J}_{\gamma}^{\alpha,\beta}$
in $H_0^{\mu}(\Omega)\times H_0^{\beta}(\Omega)$, and also, minima of $\Phi_{\gamma}^{\alpha,\beta}$ correspond to minima of $\mathcal{J}_{\gamma}^{\alpha,\beta}$.
Before finishing this introductory section, let us observe that problem \eqref{ecuacion} can be seen as a linear perturbation of the critical problem,
\begin{equation} \label{crBC}
\left\{
\begin{array}{ll}
(-\Delta)^{\mu}u=|u|^{2_{\mu}^*-2}u & \hbox{in} \quad\Omega, \\
u=0 & \hbox{on}\quad \partial\Omega,
\end{array}
\right.
\end{equation}
for which, after applying a Pohozaev-type result \cite[Proposition 5.5]{BrCdPS}, one can prove the non-existence of positive solutions under the star-shapedness assumption on the domain $\Omega$.
Moreover, the limit case $\beta\to0$ in problem \eqref{ecuacion} corresponds to
\begin{equation}\label{bezero}
\left\{
\begin{tabular}{rll}
$(-\Delta)^{\alpha}u=$ &\!\!\!$\gamma u+|u|^{2_{\alpha}^*-2}u$ &in $\Omega$, \\
$u=$ &\!\!\!$0$ &on $\partial\Omega$,
\end{tabular}
\right.\quad\hbox{with}\quad 0<\alpha<1,
\end{equation}
which was studied in \cite{BCdPS}, where the existence of positive solutions is proved for $N\geq4\alpha$ if and only if $0<\gamma<\lambda_1^*$, with
$\lambda_1^*$ being the first eigenvalue of the $(-\Delta)^{\alpha}$ operator under homogeneous Dirichlet boundary conditions.
Note that in our situation the non-local term $\gamma(-\Delta)^{-\beta}u$ actually plays the role of $\gamma u$ in \cite{BCdPS}.
\underline{\bf Main results.}
We ascertain the existence of positive solutions for the problem \eqref{ecuacion} depending on the positive real parameter $\gamma$.
To do so, we will first show the interval of the parameter $\gamma$ for which there is the possibility of having positive solutions.
Then, we use the equivalence between \eqref{ecuacion} and the systems \eqref{sistemabb} and \eqref{sistemaab} together with the extension technique to prove the main results of this work. Indeed, using the well-known Mountain Pass Theorem (MPT) \cite{AR}, we will prove that there exists a positive solution for \eqref{ecuacion} for any
$$0<\gamma<\lambda_1^*,$$
where $\lambda_1^*$ is the first eigenvalue of the operator $(-\Delta)^{\alpha}$ under homogeneous Dirichlet boundary conditions. If $1<p+1<2_{\mu}^*$ one might apply the MPT directly since, as we will show,
our problem possesses the mountain pass geometry and thanks to the compact
embedding \eqref{compact_emb} the Palais-Smale condition is satisfied for the functionals
$\mathcal{F}_\gamma$, $\mathcal{J}_{\gamma}^{\beta}$ and $\mathcal{J}_{\gamma}^{\alpha,\beta}$ (see details below in Section \ref{Sec:ProofTh0}). However, at the critical exponent $p=2_{\mu}^*-1$, the compactness of the
Sobolev embedding is lost and the problem becomes very delicate. To overcome this lack of compactness we apply a concentration-compactness argument relying on \cite[Theorem 5.1]{BCdPS}, which is an adaptation to the fractional setting of the classical result of P.-L. Lions, \cite{Lions}. Then we are capable of proving that, under certain conditions, the Palais-Smale condition is satisfied for the functionals $\Phi_{\gamma}^{\beta}$ and $\Phi_{\gamma}^{\alpha,\beta}$. Thus, by the arguments above, the result will also follow for the functionals $\mathcal{F}_\gamma$, $\mathcal{J}_{\gamma}^{\beta}$
and $\mathcal{J}_{\gamma}^{\alpha,\beta}$. Consequently, we state now the main results of this paper.
\begin{theorem}
\label{Th0}
Assume $1<p<2_{\mu}^*-1$. Then, for every $\gamma\in (0,\lambda_1^*)$, where $\lambda_1^*$ is the first eigenvalue of $(-\Delta)^{\alpha}$ under homogeneous Dirichlet boundary conditions,
there exists a positive solution for the problem \eqref{ecuacion}.
\end{theorem}
\begin{theorem}
\label{Th1}
Assume $p=2_{\mu}^*-1$. Then, for every $\gamma\in (0,\lambda_1^*)$, where $\lambda_1^*$ is the first eigenvalue of $(-\Delta)^{\alpha}$ under homogeneous Dirichlet boundary conditions, there exists a positive solution for
the problem \eqref{ecuacion} provided that $N>4\alpha-2\beta$.
\end{theorem}
Let us observe that, even though problem \eqref{ecuacion} is a
non-local but also a linear perturbation of the critical problem
\eqref{crBC}, Theorem \ref{Th1} addresses dimensions
$N>4\alpha-2\beta$, in contrast to the existence result
\cite[Theorem 1.2]{BCdPS} about the linear perturbation
\eqref{bezero}, that covers the range $N\geq4\alpha$. In other
words, the non-local term $(-\Delta)^{-\beta}u$, despite being
just a linear perturbation, has an important effect on the
dimensions for which the classical Brezis--Nirenberg technique (see
\cite{BN}) based on the minimizers of the Sobolev constant still
works. See details in Section \ref{Subsec:concentracion_compacidad}.
\section{Sub-critical case. Proof of Theorem \ref{Th0}}\label{Sec:ProofTh0}
\noindent
In this section we carry out the proof of Theorem \ref{Th0}. This is done through the equivalence between problem \eqref{ecuacion} and systems \eqref{sistemabb} and \eqref{sistemaab}. We note
that the results proved in the sequel for the functionals $\mathcal{F}_\gamma$, $\mathcal{J}_{\gamma}^{\beta}$ and $\mathcal{J}_{\gamma}^{\alpha,\beta}$
translate immediately into analogous results for the functionals $\Phi_{\gamma}^{\beta}$ and $\Phi_{\gamma}^{\alpha,\beta}$.
First, we characterize the existence of positive solutions for problem \eqref{ecuacion} in terms of the parameter $\gamma$. Moreover, for such a
characterization
the following eigenvalue problem will be considered:
\begin{equation}\label{eiglin1}
\left\{
\begin{array}{ll}
(-\Delta)^{\mu} u = \lambda (-\Delta)^{-\beta} u& \hbox{in} \quad\Omega, \\
u=0 & \hbox{on}\quad \partial\Omega.
\end{array}
\right.
\end{equation}
Then, for the first eigenfunction $\phi_1$ of \eqref{eiglin1}, associated with the first eigenvalue $\lambda_1^*$, we find
$$\int_\Omega |(-\Delta)^{\frac{\mu}{2}} \phi_1|^2dx =\lambda_1^* \int_\Omega |(-\Delta)^{-\frac{\beta}{2}} \phi_1|^2dx,$$
and, therefore,
\begin{equation}\label{bieigen}
\lambda_1^*=\inf_{u\in H_0^{\mu}(\Omega)} \frac{\int_\Omega |(-\Delta)^{\frac{\mu}{2}} u|^2dx}{ \int_\Omega |(-\Delta)^{-\frac{\beta}{2}} u|^2dx}.
\end{equation}
On the other hand, thanks to the definition of the fractional operator $(-\Delta)^{\mu}$, we have that $\phi_1\equiv\varphi_1$,
with $\varphi_1$ as the first eigenfunction of the Laplace operator under homogeneous Dirichlet boundary conditions. Then,
$$(-\Delta)^{\mu}\phi_1=(-\Delta)^{\mu}\varphi_1=\lambda_1^{\mu}\varphi_1 \quad\hbox{and}\quad (-\Delta)^{-\beta}\phi_1=(-\Delta)^{-\beta}\varphi_1=\lambda_1^{-\beta}\varphi_1,$$
with $\lambda_1$ as the first eigenvalue of the Laplace operator
under homogeneous Dirichlet boundary conditions. Hence, due to
\eqref{eiglin1}, we conclude that
$\lambda_1^*=\lambda_1^{\mu+\beta}=\lambda_1^{\alpha}$. Thus, $\lambda_1^*$
coincides with the first eigenvalue of the operator
$(-\Delta)^{\alpha}$ under homogeneous Dirichlet or Navier boundary
conditions, depending on whether $\alpha\leq1$ or $1<\alpha<\beta+1$
respectively. As a consequence, we have the following.
\begin{lemma}\label{cota}
Problem \eqref{ecuacion} does not possess a positive solution when
$$\gamma \geq \lambda_1^*.$$
\end{lemma}
\begin{proof}
Assume that $u$ is a positive solution of \eqref{ecuacion} and let $\varphi_1$ be a positive first eigenfunction of the Laplace operator in $\Omega$ under homogeneous Dirichlet boundary conditions.
Taking $\varphi_1$ as a test function for equation \eqref{ecuacion} we obtain
\begin{align*}
\lambda_1^{\mu}\int_{\Omega}u\varphi_1dx=\int_{\Omega}\varphi_1(-\Delta)^{\mu}udx &=\gamma\int_{\Omega}\varphi_1(-\Delta)^{-\beta}udx+\int_{\Omega}|u|^{p-1}u\varphi_1dx\\
&> \gamma\int_{\Omega}\varphi_1(-\Delta)^{-\beta}udx=\gamma\int_{\Omega}u(-\Delta)^{-\beta}\varphi_1dx\\
&=\frac{\gamma}{\lambda_1^{\beta}}\int_{\Omega}u\varphi_1dx.
\end{align*}
Hence, $\lambda_1^{\mu}>\frac{\gamma}{\lambda_1^{\beta}}$, and we conclude that $\gamma<\lambda_1^{\mu+\beta}=\lambda_1^{\alpha}=\lambda_1^*$, proving the lemma.
\end{proof}
Next we check that $\mathcal{F}_\gamma$, as well as $\mathcal{J}_{\gamma}^{\beta}$ and $\mathcal{J}_{\gamma}^{\alpha,\beta}$, satisfies the MP geometry.
\begin{lemma}
\label{lezero}
The functionals $\mathcal{F}_\gamma$, $\mathcal{J}_{\gamma}^{\beta}$ and $\mathcal{J}_{\gamma}^{\alpha,\beta}$ have the MP geometry.
\end{lemma}
\begin{proof}
For short, we prove the result for $\mathcal{F}_\gamma$; for the remaining functionals the result follows in a similar way. Without loss of generality,
we consider a function $g\in H_0^{\mu}(\Omega)$ such that $\|g\|_{p+1}=1$. Because of \eqref{bieigen}, the fractional Sobolev inequality \eqref{sobolev} and
\eqref{eqnorma}, we find that for $t>0$,
\begin{align*}
\mathcal{F}_\gamma(tg)&=\frac{t^2}{2}\int_{\Omega}|(-\Delta)^{\frac{\mu}{2}}g|^2dx-\frac{\gamma t^2}{2}\int_{\Omega}|(-\Delta)^{-\frac{\beta}{2}}g|^2dx-\frac{t^{p+1}}{p+1}\\
&\geq\frac{t^2}{2}\int_{\Omega}|(-\Delta)^{\frac{\mu}{2}}g|^2dx-\frac{\gamma t^2}{2\lambda_1^{*}}\int_{\Omega}|(-\Delta)^{\frac{\mu}{2}}g|^2dx-\frac{t^{p+1}}{p+1}\\
&\geq\frac{t^2}{2}\left(1-\frac{\gamma}{\lambda_1^*}\right)\int_{\Omega}|(-\Delta)^{\frac{\mu}{2}}g|^2dx-\frac{t^{p+1}}{C(p+1)}\int_{\Omega}|(-\Delta)^{\frac{\mu}{2}}g|^2dx\\
&=\|g\|_{H_0^{\mu}(\Omega)}^2\left(\frac{1}{2}\left(1-\frac{\gamma}{\lambda_1^*}\right)t^2-\frac{1}{C(p+1)}t^{p+1}\right)>0,
\end{align*}
for $t>0$ sufficiently small, where $C>0$ is a constant coming from inequality \eqref{sobolev}, that is,
$$0<t^{p-1}<\frac{C(p+1)}{2}\left(1-\frac{\gamma}{\lambda_1^*}\right).$$
Thus, the functional $\mathcal{F}_\gamma$ has a local minimum at $u=0$, i.e., $\mathcal{F}_\gamma(tg)>\mathcal{F}_\gamma(0)=0$ for any $g\in H_0^{\mu}(\Omega)$ provided $t>0$ is small enough. Furthermore, it is clear that
\begin{align*}
\mathcal{F}_\gamma(tg)&=\frac{t^2}{2} \int_\Omega |(-\Delta)^{\frac{\mu}{2}} g|^2dx - \frac{\gamma t^2}{2} \int_\Omega |(-\Delta)^{-\frac{\beta}{2}} g|^2dx-\frac{t^{p+1}}{p+1}\\
&\leq \frac{t^2}{2}\|g\|_{H_0^{\mu}(\Omega)}^2-\frac{t^{p+1}}{p+1}.
\end{align*}
Then, $\mathcal{F}_\gamma(tg) \rightarrow -\infty$ as $t\to \infty$ and, thus, there exists $\hat u \in H_0^{\mu}(\Omega)$ such that $\mathcal{F}_\gamma(\hat u)<0$. Hence, the functional $\mathcal{F}_\gamma$ has the mountain pass geometry.
\end{proof}
Similarly, we have the MP geometry for the extended functionals.
\begin{lemma}\label{lezeroextension}
The functionals $\Phi_{\gamma}^{\beta}$ and $\Phi_{\gamma}^{\alpha,\beta}$ have the MP geometry.
\end{lemma}
\begin{proof}
The proof is similar to the proof of Lemma \ref{lezero}; we only need to note that, thanks to the isometry \eqref{isometry} and the trace inequality
\eqref{sobext}, the extension function minimizes the norm $\|\cdot\|_{\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})}$ among all the functions with the same trace on $\{y=0\}$, i.e.,
$$\|E_{\mu}[\varphi(\cdot,0)]\|_{\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})}\leq\|\varphi\|_{\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})}\quad \hbox{for all}\quad \varphi\in\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega}).$$
Therefore,
\begin{equation}\label{min_eig}
\lambda_1^{\mu}=\inf_{\substack{u\in H_0^{\mu}(\Omega)\\ u\not\equiv0}}
\frac{\|u\|_{H_0^{\mu}(\Omega)}^2}{\|u\|_{L^{2}(\Omega)}^2}
=\inf_{\substack{w\in \mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})\\
w\not\equiv0}}\frac{\|w\|_{\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})}^2}{\|w(\cdot,0)\|_{L^{2}(\Omega)}^2}.
\end{equation}
Thus, following the arguments in the proof of Lemma \ref{lezero}, the result follows.
\end{proof}
\begin{definition}\label{def_PS}
Let $V$ be a Banach space. We say that $\{u_n\} \subset V$ is a Palais-Smale (PS) sequence for a functional $\mathfrak{F}$ if
\begin{equation}\label{convergencia}
\mathfrak{F}(u_n)\quad\hbox{is bounded and}\quad \mathfrak{F}'(u_n) \to 0\quad\mbox{in}\ V'\quad \hbox{as}\quad n\to \infty,
\end{equation}
where $V'$ is the dual space of $V$. Moreover, we say that $\{u_n\}$ satisfies the PS condition if
\begin{equation}\label{conPS}
\{u_n\}\quad \mbox{has a strongly convergent subsequence.}
\end{equation}
\end{definition}
In particular, we say that the functional $\mathfrak{F}$ satisfies the PS condition at level $c$ if every PS sequence at level $c$ for $\mathfrak{F}$
satisfies the PS condition.
In the subcritical range, $1\le p<2_\mu^*-1$, the PS condition is satisfied at any level $c$ due to the compact embedding \eqref{compact_emb}.
However, at the critical exponent $2_{\mu}^*$ the compactness of the Sobolev embedding is lost and, as we will see, the PS condition will be satisfied
only for levels below a certain critical level $c^*$.
\begin{lemma}\label{acotacion_ecuacion}
Let $\{u_n\}\subset H_0^\mu(\Omega)$ be a PS sequence at level $c$ for the functional $\mathcal{F}_\gamma$, i.e.
$$\mathcal{F}_\gamma(u_n) \rightarrow c,\quad \mathcal{F}_\gamma'(u_n) \rightarrow 0,\quad \hbox{as}\quad n\to \infty.$$
Then, $\{u_n\}$ is bounded in $H_0^{\mu}(\Omega)$.
\end{lemma}
\begin{proof}
Since $\mathcal{F}_\gamma'(u_n) \rightarrow 0$ in $\left(H_0^{\mu}(\Omega)\right)'$ and
$\mathcal{F}_\gamma(u_n) \to c$, we find that
$$\mathcal{F}_\gamma(u_n)-\frac{1}{p+1} \langle \mathcal{F}_\gamma'(u_n)|u_n\rangle=c+o(1)\cdot\|u_n\|_{H_0^{\mu}(\Omega)}.$$
That is,
\begin{align*}
\left(\frac{1}{2}-\frac{1}{p+1}\right)\!\int_\Omega |(-\Delta)^{\frac{\mu}{2}} u_n|^2dx-\left(\frac{1}{2}-\frac{1}{p+1}\right)\!\int_\Omega
|(-\Delta)^{-\frac{\beta}{2}}u_n|^2dx =c+o(1)\cdot\|u_n\|_{H_0^{\mu}(\Omega)}.
\end{align*}
Therefore, by \eqref{bieigen},
since $\gamma<\lambda_1^*$, using \eqref{eqnorma} we conclude that
$$0<\left(\frac{1}{2}-\frac{1}{p+1}\right)\left(1-\frac{\gamma}{\lambda_1^*}\right)\|u_n\|_{H_0^{\mu}(\Omega)}^2\leq c+o(1)\cdot\|u_n\|_{H_0^{\mu}(\Omega)}.$$
Thus, the sequence $\{u_n\}$ is bounded in $H_0^{\mu}(\Omega)$.
\end{proof}
Following similar ideas as in the above proof, we obtain the following two results.
\begin{lemma}\label{acotacion_sistemabb}
Let $\{(u_n,v_n)\}$ be a PS sequence at level $c$ for the functional $\mathcal{J}_{\gamma}^{\beta}$, i.e.
$$\mathcal{J}_{\gamma}^{\beta}(u_n,v_n) \rightarrow c,\quad \left(\mathcal{J}_{\gamma}^{\beta}\right)'(u_n,v_n) \rightarrow 0,\quad \hbox{as}\quad n\to \infty.$$
Then, $\{(u_n,v_n)\}$ is bounded in $H_0^{\beta}(\Omega)\times H_0^{\beta}(\Omega)$.
\end{lemma}
\begin{lemma}\label{rem}
Let $\{(w_n,z_n)\}$ be a PS sequence at level $c$ for the functional $\Phi_{\gamma}^{\beta}\ ($resp. for the functional $\Phi_{\gamma}^{\alpha,\beta})$. Then, $\{(w_n,z_n)\}$ is bounded in $\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\times\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\ ($resp. in $\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})\times\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega}))$.
\end{lemma}
Now, we are able to prove one of the main results of this paper.
\begin{proof}[Proof of Theorem \ref{Th0}.]\hfill\break
Since we are dealing with the subcritical case $1<p<2_{\mu}^*-1$, given a PS sequence $\{u_n\}\subset H_0^{\mu}(\Omega)$ for the functional $\mathcal{F}_\gamma$, thanks to
Lemma \ref{acotacion_ecuacion} and the compact inclusion \eqref{compact_emb}, the PS condition is satisfied. Moreover, by Lemma \ref{lezero},
the functional $\mathcal{F}_\gamma$ satisfies the MP geometry. Then, due to the MPT \cite{AR} and the PS condition,
the functional $\mathcal{F}_\gamma$ possesses a critical point $u\in H_0^{\mu}(\Omega)$.
Moreover, if we define the set of paths
between the origin and $\hat u$,
$$\Gamma:=\{g\in C([0,1],H_0^{\mu}(\Omega))\,;\, g(0)=0,\; g(1)=\hat u\},$$
with $\hat u$ given as in Lemma \ref{lezero}, i.e. $\mathcal{F}_\gamma(\hat u)<0$, then,
$$\mathcal{F}_\gamma(u)=\inf_{g\in\Gamma} \max_{\theta \in [0,1]} \mathcal{F}_\gamma(g(\theta))= c.$$
To show that $u>0$, let us consider the functional
\begin{equation*}
\mathcal{F}_\gamma^+(u)=\mathcal{F}_\gamma(u^+),
\end{equation*}
where $u^+=\max\{u,0\}$. Repeating with minor changes the arguments carried out above, one readily shows that what was proved for the functional $\mathcal{F}_\gamma$
still holds for the functional $\mathcal{F}_\gamma^+$. Hence, it follows that $u\geq 0$ and, by the Maximum Principle (see \cite{CaSi}), $u>0$.
\end{proof}
\begin{remark}
Once we have proved the existence of a positive solution to problem \eqref{ecuacion}, due to the equivalence between \eqref{ecuacion} and systems
\eqref{sistemabb} and \eqref{sistemaab}, we have the existence of a positive solution to both systems too.
\end{remark}
\section{Concentration-compactness at the critical exponent}\label{Subsec:concentracion_compacidad}
In this section we focus on the critical exponent case, $p=2_{\mu}^*-1$, proving Theorem~\ref{Th1}.
Our aim is to prove the PS condition for the functional $\mathcal{F}_{\gamma}$ since the rest of the proof will be similar to what we performed in the previous
section for the subcritical case.
First, by means of a
concentration-compactness argument, we will prove that the PS
condition is satisfied at levels below a certain critical level $c^*$
(to be determined). Next, we construct an appropriate path whose energy is
below that critical level $c^*$ and finally we will find a corresponding sequence satisfying the PS condition. Both steps are strongly based on
the use of particular test functions. Hence, throughout this section
we will focus on working with the extended
functionals $\Phi_{\gamma}^{\beta}$ and $\Phi_{\gamma}^{\alpha,\beta}$. Once
we have completed this task, since the $\beta$-harmonic extension is
an isometry, given a PS sequence
$\{(w_n,z_n)\}\subset\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\times\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})$
at level $c$ for the functional $\Phi_{\gamma}^{\beta}$, satisfying the
PS condition, it is clear that the trace sequence
$\{(u_n,v_n)\}=\{Tr[w_n],Tr[z_n]\}$ belongs to
$H_0^{\beta}(\Omega)\times H_0^{\beta}(\Omega)$ and is a PS sequence at
the same level $c$ below certain $c^*$ for the functional
$\mathcal{J}_{\gamma}^{\beta}$, satisfying the PS condition. Thus, the
functional $\mathcal{J}_{\gamma}^{\beta}$ satisfies the PS condition at every
level $c$ below the critical level $c^*$. In a similar way we can
infer that the functional $\mathcal{J}_{\gamma}^{\alpha,\beta}$ satisfies the corresponding
PS condition.
More specifically, by means of a concentration-compactness argument we first prove that the
PS condition is satisfied for any level $c$ with
\begin{equation}\label{levelbeta}
c<\left(\frac{1}{2}-\frac{1}{2_{\beta}^*}\right)\left(\kappa_{\beta}S(\beta,N)\right)^{\frac{2_{\beta}^*}{2_{\beta}^*-2}}=\frac{\beta}{N} \left(\kappa_{\beta}S(\beta,N)\right)^{\frac{N}{2\beta}},
\tag{$c_{\beta}^*$}
\end{equation}
when dealing with the functional $\Phi_{\gamma}^{\beta}$, and for any level
\begin{equation}\label{levelmu}
c<\frac{1}{\gamma^{1-\beta/\alpha}}\left(\frac{1}{2}-\frac{1}{2_{\mu}^*}\right)\left(\kappa_{\mu}S(\mu,N)\right)^{\frac{2_{\mu}^*}{2_{\mu}^*-2}}=\frac{1}{\gamma^{1-\beta/\alpha}}\frac{\mu}{N} \left(\kappa_{\mu}S(\mu,N)\right)^{\frac{N}{2\mu}},
\tag{$c_{\mu}^*$}
\end{equation}
when dealing with the functional $\Phi_{\gamma}^{\alpha,\beta}$. Next, using an appropriate cut-off version of the extremal functions \eqref{u_eps}
we will obtain a path below the critical levels $c_{\beta}^*$ and $c_{\mu}^*$.
\subsection{PS condition under a critical level}
To accomplish the first step, let us start by recalling the following.
\begin{definition}
We say that a sequence $\{y^{1-2\mu}|\nabla w_n|^2\}_{n\in\mathbb{N}}$ is tight if for any $\eta>0$ there exists $\rho_0>0$ such that
\begin{equation*}
\int_{\{y>\rho_0\}}\int_{\Omega}y^{1-2\mu}|\nabla w_n|^2dxdy\leq\eta,\quad\forall n\in\mathbb{N}.
\end{equation*}
\end{definition}
\noindent In particular, since we are dealing with a system, we say that the sequence
$$\{(y^{1-2\mu}|\nabla w_n|^2,y^{1-2\beta}|\nabla z_n|^2)\}_{n\in\mathbb{N}}$$ is tight if for any $\eta>0$ there exists $\rho_0>0$ such that
\begin{equation*}
\int_{\{y>\rho_0\}}\int_{\Omega}y^{1-2\mu}|\nabla w_n|^2dxdy+\int_{\{y>\rho_0\}}\int_{\Omega}y^{1-2\beta}|\nabla z_n|^2dxdy\leq\eta,\quad\forall n\in\mathbb{N}.
\end{equation*}
Now we state the Concentration-Compactness Theorem \cite[Theorem 5.1]{BCdPS} that will be useful in the proof of the PS condition.
\begin{theorem}\label{th:concentracion}
Let $\{w_n\}$ be a weakly convergent sequence to $w$ in $\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})$ such that the sequence $\{y^{1-2\mu}|\nabla w_n|^2\}_{n\in\mathbb{N}}$ is tight. Let $u_n=w_n(x,0)$ and $u=w(x,0)$. Let $\nu,\ \zeta$ be two nonnegative measures such that
\begin{equation*}
y^{1-2\mu}|\nabla w_n|^2\to\zeta\quad\mbox{and}\quad|u_n|^{2_{\mu}^*}\to\nu,\quad\mbox{as}\ n\to\infty
\end{equation*}
in the sense of measures. Then there exist an index set $I$, at most countable, points $\{x_i\}_{i\in I}\subset\Omega$ and positive numbers $\nu_i$, $\zeta_i$, with $i\in I$, such that
\begin{itemize}
\item $\nu=|u|^{2_{\mu}^*}+\sum\limits_{i\in I}\nu_i\delta_{x_i},\ \nu_i>0,$
\item $\zeta=y^{1-2\mu}|\nabla w|^2+\sum\limits_{i\in I}\zeta_i\delta_{x_i},\ \zeta_i>0,$
\end{itemize}
where $\delta_{x_{j}}$ stands for the Dirac delta centered at $x_j$, satisfying the condition
\begin{equation*}
\zeta_i\geq S(\mu,N)\nu_i^{2/2_{\mu}^*}.
\end{equation*}
\end{theorem}
With respect to the PS condition we have the following.
\begin{lemma}\label{PScondition_extensionsistemabb}
If $p=2_{\beta}^*-1$, the functional $\Phi_{\gamma}^{\beta}$ satisfies the PS condition for any level $c$ below the critical level defined by \eqref{levelbeta}.
\end{lemma}
\begin{proof}\renewcommand{\qedsymbol}{}
Let $\{(w_n,z_n)\}_{n\in\mathbb{N}}\subset \mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\times \mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})$ be a PS sequence at level $c$ for the functional $\Phi_{\gamma}^{\beta}$, i.e.
\begin{equation}\label{critic}
\Phi_{\gamma}^{\beta}(w_n,z_n)\to c<c_{\beta}^*\quad\mbox{and}\quad \left(\Phi_{\gamma}^{\beta}\right)'(w_n,z_n)\to 0.
\end{equation}
From \eqref{critic} and Lemma \ref{rem} we get that the sequence $\{(w_n,z_n)\}_{n\in\mathbb{N}}$ is uniformly bounded in $\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\times \mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})$; in other words, there exists a finite $M>0$ such that
\begin{equation}
\label{Mfrac}
\|w_n\|_{\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})}^2+\|z_n\|_{\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})}^2\leq M,
\end{equation}
and, as a consequence, we can assume that, up to a subsequence,
\begin{align}\label{conver}
& w_n\rightharpoonup w\quad\mbox{weakly in}\ \mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega}),\nonumber\\
& w_n(x,0) \rightarrow w(x,0)\quad\mbox{strongly in}\ L^r(\Omega),\ \mbox{with}\ 1\leq r<2_{\beta}^*,\nonumber\\
& w_n(x,0) \rightarrow w(x,0)\quad\mbox{a.e. in}\ \Omega,
\end{align}
and
\begin{align}\label{conver2}
& z_n\rightharpoonup z\quad\mbox{weakly in}\ \mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega}),\nonumber\\
& z_n(x,0) \rightarrow z(x,0)\quad\mbox{strongly in}\ L^r(\Omega),\ 1\leq r<2_{\beta}^*,\nonumber\\
& z_n(x,0) \rightarrow z(x,0)\quad\mbox{a.e. in}\ \Omega.
\end{align}
Before applying Theorem \ref{th:concentracion}, first we need to check that the PS sequence $\{(w_n,z_n)\}_{n\in\mathbb{N}}$ is tight. To avoid any unnecessary technical details,
and since the functional $\Phi_{\gamma}^{\beta}$ is obtained as a particular case (up to a multiplication by $\sqrt{\gamma}$) of the functional
$\Phi_{\gamma}^{\alpha,\beta}$ when $\alpha=2\beta$, we prove the following.
\end{proof}
\begin{lemma}
A PS sequence $\{(w_n,z_n)\}_{n\in\mathbb{N}}\subset \mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})\times \mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})$ at level $c$
for the functional $\Phi_{\gamma}^{\alpha,\beta}$ is tight.
\end{lemma}
\begin{proof}
The proof is similar to the proof of Lemma 3.6 in \cite{BCdPS}, which follows some arguments contained in \cite{AAP}, and we include it for the reader's convenience. By contradiction, suppose that there exist $\eta_0>0$ and $n_0\in\mathbb{N}$ such that for any $\rho>0$ we have, up to a subsequence,
\begin{equation}\label{contradiction}
\int_{\{y>\rho\}}\int_{\Omega}y^{1-2\mu}|\nabla w_n|^2dxdy+\int_{\{y>\rho\}}\int_{\Omega}y^{1-2\beta}|\nabla z_n|^2dxdy>\eta_0,\quad\forall n\geq n_0.
\end{equation}
Let $\varepsilon>0$ be fixed (to be determined later), and let $\rho_0>0$ be such that
\begin{equation*}
\int_{\{y>\rho_0\}}\int_{\Omega}y^{1-2\mu}|\nabla w|^2dxdy+\int_{\{y>\rho_0\}}\int_{\Omega}y^{1-2\beta}|\nabla z|^2dxdy<\varepsilon.
\end{equation*}
Let $j=\left[\frac{M}{\varepsilon\kappa}\right]$ be the integer part, with
$\kappa=\min\left\{\frac{\kappa_{\mu}}{\gamma^{1-\beta/\alpha}},\frac{\kappa_{\beta}}{\gamma^{\beta/\alpha}}\right\}$ and
$I_k=\{y\in\mathbb{R}^+:\rho_0+k\leq y\leq \rho_0+k+1\}$, $k=0,1,\ldots,j$. Then, using \eqref{Mfrac},
\begin{align*}
\sum_{k=0}^{j}\int_{I_k}\int_{\Omega}y^{1-2\mu}|\nabla w_n|^2dxdy&+\int_{I_k}\int_{\Omega}y^{1-2\beta}|\nabla z_n|^2dxdy\\
&\leq\int_{\mathcal{C}_{\Omega}}y^{1-2\mu}|\nabla w_n|^2dxdy+\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}|\nabla z_n|^2dxdy\\
&\leq\frac{M}{\kappa}<\varepsilon(j+1).
\end{align*}
Hence, there exists $k_0\in\{0,1,\ldots,j\}$ such that
\begin{equation}\label{menor}
\int_{I_{k_0}}\int_{\Omega}y^{1-2\mu}|\nabla w_n|^2dxdy+\int_{I_{k_0}}\int_{\Omega}y^{1-2\beta}|\nabla z_n|^2dxdy\leq\varepsilon.
\end{equation}
Take now a smooth cut-off function
\betaegin{equation}gin{equation*}
X(y)=\lambdaeft\{
\betaegin{equation}gin{tabular}{lcl}
$0$&&if $y\lambdaeq r+k_0$,\\
$1$&&if $y\gammaeq r+k_0+1$,
\varphiepsilonnd{tabular}
\right.
\varphiepsilonnd{equation*}
and define $(t_n,s_n)=(X(y)w_n,X(y)z_n)$. Then
\betaegin{equation}gin{align*}
&\lambdaeft|\lambdaeft\lambdaangle \lambdaeft(\Phi_{\gamma}^{\alphalpha,\betaegin{equation}ta}\right)'(w_n,z_n)-\lambdaeft(\Phi_{\gamma}^{\alphalpha,\betaegin{equation}ta}\right)'(t_n,s_n)\Big|(t_n,s_n)\right\rangle\right|\\
&=\frac{\kappa_{\mu}}{\gammaamma^{1-\betaegin{equation}ta/\alphalpha}}\int_{\mathcal{C}_{\Omegamega}}y^{1-2\mu}\lambdaangle\nablaabla(w_n-t_n),\nablaabla t_n\rangle dxdy+\frac{\kappa_{\betaegin{equation}ta}}{\gammaamma^{\betaegin{equation}ta/\alphalpha}}\int_{\mathcal{C}_{\Omegamega}}y^{1-2\betaegin{equation}ta}\lambdaangle\nablaabla(z_n-s_n),\nablaabla s_n\rangle dxdy\\
&=\frac{\kappa_{\mu}}{\gammaamma^{1-\betaegin{equation}ta/\alphalpha}}\int_{I_{k_0}}\int_{\Omegamega}y^{1-2\mu}\lambdaangle\nablaabla(w_n-t_n),\nablaabla t_n\rangle dxdy+\frac{\kappa_{\betaegin{equation}ta}}{\gammaamma^{\betaegin{equation}ta/\alphalpha}}\int_{I_{k_0}}\int_{\Omegamega}y^{1-2\betaegin{equation}ta}\lambdaangle\nablaabla(z_n-s_n),\nablaabla s_n\rangle dxdy.
\varphiepsilonnd{align*}
Now, because of the Cauchy-Schwarz inequality, inequality \eqref{menor} and the compact inclusion\footnote{Let us recall that $\beta\in(0,1)$ and $\mu:=\alpha-\beta\in(0,1)$ thus, the weights $w_1(x,y)=y^{1-2\mu}$ and $w_2(x,y)=y^{1-2\beta}$ belong to the Muckenhoupt class $A_2$. We refer to \cite{FKS} for the precise definition as well as some useful properties of the weights belonging to the Muckenhoupt classes $A_p$.},
\small{\begin{equation*}
H^1(I_{k_0}\times\Omega,y^{1-2\mu}dxdy)\times H^1(I_{k_0}\times\Omega,y^{1-2\beta}dxdy)\hookrightarrow L^2(I_{k_0}\times\Omega,y^{1-2\mu}dxdy)\times L^2(I_{k_0}\times\Omega,y^{1-2\beta}dxdy),
\end{equation*}}
it follows that,
\begin{align*}
&\left|\left\langle \left(\Phi_{\gamma}^{\alpha,\beta}\right)'(w_n,z_n)-\left(\Phi_{\gamma}^{\alpha,\beta}\right)'(t_n,s_n)\Big|(t_n,s_n) \right\rangle\right|\\
&\leq\frac{\kappa_{\mu}}{\gamma^{1-\beta/\alpha}}\left(\int_{I_{k_0}}\int_{\Omega}y^{1-2\mu}|\nabla(w_n-t_n)|^2dxdy\right)^{1/2}\left(\int_{I_{k_0}}\int_{\Omega}y^{1-2\mu}|\nabla t_n|^2dxdy\right)^{1/2}\\
&+\frac{\kappa_{\beta}}{\gamma^{\beta/\alpha}}\left(\int_{I_{k_0}}\int_{\Omega}y^{1-2\beta}|\nabla(z_n-s_n)|^2dxdy\right)^{1/2}\left(\int_{I_{k_0}}\int_{\Omega}y^{1-2\beta}|\nabla s_n|^2dxdy\right)^{1/2}\\
&\leq\max\left\{\frac{\kappa_{\mu}}{\gamma^{1-\beta/\alpha}},\frac{\kappa_{\beta}}{\gamma^{\beta/\alpha}}\right\}c\varepsilon \leq C\varepsilon,
\end{align*}
where $C:=c\max\left\{\frac{\kappa_{\mu}}{\gamma^{1-\beta/\alpha}},\frac{\kappa_{\beta}}{\gamma^{\beta/\alpha}}\right\}>0$. On the other hand, by \eqref{critic},
\begin{equation*}
\left|\left\langle
\left(\Phi_{\gamma}^{\alpha,\beta}\right)'(t_n,s_n)\Big|(t_n,s_n)
\right\rangle\right|\leq c_1\varepsilon+o(1),
\end{equation*}
with $c_1$ a positive constant. Thus, we conclude
\begin{align*}
\int_{\{y>\rho_0+k_0+1\}}\int_{\Omega}y^{1-2\mu}|\nabla w_n|^2dxdy&+\int_{\{y>\rho_0+k_0+1\}}\int_{\Omega}y^{1-2\beta}|\nabla z_n|^2dxdy\\
&\leq\int_{\mathcal{C}_{\Omega}}y^{1-2\mu}|\nabla t_n|^2dxdy+\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}|\nabla s_n|^2dxdy\\
&\leq\frac{1}{\kappa}\left\langle
\left(\Phi_{\gamma}^{\alpha,\beta}\right)'(t_n,s_n)\Big|(t_n,s_n)
\right\rangle \leq C \varepsilon,
\end{align*}
in contradiction with \eqref{contradiction}. Hence, the sequence is tight.
\end{proof}
\begin{proof}[Continuation of the proof of Lemma \ref{PScondition_extensionsistemabb}]
Once we have proved that the PS sequence
$$\{(w_n,z_n)\}_{n\in\mathbb{N}}\subset \mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\times \mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega}),$$
is tight, we can apply Theorem \ref{th:concentracion}. Consequently, up to a subsequence, there exist an at most countable set $I$,
a sequence of points $\{x_i\}_{i\in I}\subset\Omega$ and non-negative real numbers $\nu_i$ and $\zeta_i$ such that
\begin{itemize}
\item $|u_n|^{2_{\beta}^*}\to \nu=|u|^{2_{\beta}^*}+\sum\limits_{i\in I}\nu_i\delta_{x_i},$
\item $y^{1-2\beta}|\nabla w_n|^2\to\zeta=y^{1-2\beta}|\nabla w|^2+\sum\limits_{i\in I}\zeta_i\delta_{x_i},$
\item $y^{1-2\beta}|\nabla z_n|^2\to\widetilde{\zeta}=y^{1-2\beta}|\nabla z|^2+\sum\limits_{i\in I}\widetilde{\zeta}_i\delta_{x_i},$
\end{itemize}
where $\delta_{x_i}$ is the Dirac's delta centered at $x_i$ and satisfying,
\begin{equation}\label{in:concentracion}
\zeta_i\geq S(\mu,N)\nu_i^{2/2_{\mu}^*}.
\end{equation}
We fix $j\in I$ and we let $\phi\in\mathcal{C}_0^{\infty}(\mathbb{R}_+^{N+1})$ be a non-increasing smooth cut-off function verifying $\phi=1$ in $B_1^+(x_{j})$, $\phi=0$ in $B_2^+(x_{j})^c$, with $B_r^+(x_j)\subset\mathbb{R}^{N}\times\{y\geq0\}$ the $(N+1)$-dimensional semi-ball of radius $r>0$ centered at $x_j$. Let now $\phi_{\varepsilon}(x,y)=\phi(x/\varepsilon,y/\varepsilon)$, such that $|\nabla\phi_{\varepsilon}|\leq\frac{C}{\varepsilon}$ and denote $\Gamma_{2\varepsilon}=B_{2\varepsilon}^+(x_{j})\cap\{y=0\}$. Therefore, since by \eqref{critic}
\begin{equation}\label{tocero}
\left(\Phi_{\gamma}^{\beta}\right)'(w_n,z_n)\to 0\quad \hbox{in the dual space}\quad \left(\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\times \mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\right)',
\end{equation}
taking the dual product in \eqref{tocero} with $(\phi_{\varepsilon}w_n,\phi_{\varepsilon}z_n)$, we obtain
\begin{equation*}
\begin{split}
\lim_{n\to\infty}&\left(\kappa_{\beta}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}\nabla w_n\nabla(\phi_{\varepsilon}w_n)dxdy+\kappa_{\beta}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}\nabla z_n\nabla(\phi_{\varepsilon}z_n)dxdy\right.\\
&\ \ \left.-2\sqrt{\gamma}\int_{\Gamma_{2\varepsilon}}\phi_{\varepsilon}w_n(x,0)z_n(x,0)dx-\int_{\Gamma_{2\varepsilon}}\phi_{\varepsilon}|w_n|^{2_{\beta}^*}(x,0)dx\right)=0.
\end{split}
\end{equation*}
Hence,
\begin{equation*}
\begin{split}
&\lim_{n\to\infty}\left(\kappa_{\beta}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}\langle\nabla w_n,\nabla\phi_{\varepsilon} \rangle w_ndxdy+\kappa_{\beta}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}\langle\nabla z_n,\nabla\phi_{\varepsilon} \rangle z_ndxdy\right)\\
&=\lim_{n\to\infty}\left(2\sqrt{\gamma}\int_{\Gamma_{2\varepsilon}}\phi_{\varepsilon}w_n(x,0)z_n(x,0)dx+\int_{\Gamma_{2\varepsilon}}\phi_{\varepsilon}|w_n|^{2_{\beta}^*}(x,0)dx\right.\\
&\ \ \ \ \ \ \ \ \ \ \ \ \left.-\kappa_{\beta}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}\phi_{\varepsilon}|\nabla w_n|^2dxdy-\kappa_{\beta}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}\phi_{\varepsilon}|\nabla z_n|^2dxdy\right).
\end{split}
\end{equation*}
Moreover, thanks to \eqref{conver}, \eqref{conver2} and Theorem \ref{th:concentracion}, we find,
\begin{equation}\label{eq:tozero}
\begin{split}
&\lim_{n\to\infty}\left(\kappa_{\beta}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}\langle\nabla w_n,\nabla\phi_{\varepsilon} \rangle w_ndxdy+\kappa_{\beta}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}\langle\nabla z_n,\nabla\phi_{\varepsilon} \rangle z_ndxdy\right)\\
&\ \ \ \ \ \ \ =2\sqrt{\gamma}\int_{\Gamma_{2\varepsilon}}\phi_{\varepsilon}w(x,0)z(x,0)dx+\int_{\Gamma_{2\varepsilon}}\phi_{\varepsilon}d\nu-\kappa_{\beta}\int_{B_{2\varepsilon}^+(x_{j})}\phi_{\varepsilon}d\zeta-\kappa_{\beta}\int_{B_{2\varepsilon}^+(x_{j})}\phi_{\varepsilon}d\widetilde{\zeta}.
\end{split}
\end{equation}
Assume for the moment that the left hand side of \eqref{eq:tozero} vanishes as $\varepsilon\to0$. Then, it follows that,
\begin{equation*}
\begin{split}
0&=\lim_{\varepsilon\to0}2\sqrt{\gamma}\int_{\Gamma_{2\varepsilon}}\phi_{\varepsilon}w(x,0)z(x,0)dx+\int_{\Gamma_{2\varepsilon}}\phi_{\varepsilon}d\nu-\kappa_{\beta}\int_{B_{2\varepsilon}^+(x_{j})}\phi_{\varepsilon}d\zeta-\kappa_{\beta}\int_{B_{2\varepsilon}^+(x_{j})}\phi_{\varepsilon}d\widetilde{\zeta}\\
&=\nu_{j}-\kappa_{\beta}\zeta_{j}-\kappa_{\beta}\widetilde{\zeta}_{j},
\end{split}
\end{equation*}
and we conclude,
\begin{equation}\label{eq:compacidad}
\nu_{j}=\kappa_{\beta}\left(\zeta_{j}+\widetilde{\zeta}_{j}\right).
\end{equation}
Finally, we have two options, either the compactness of the PS sequence or concentration around those points $x_j$. In other words, either $\nu_{j}=0$, so that $\zeta_{j}=\widetilde{\zeta}_{j}=0$ or, thanks to \eqref{eq:compacidad} and \eqref{in:concentracion}, $\nu_{j}\geq\left(\kappa_{\beta}S(\beta,N)\right)^{\frac{2_{\beta}^*}{2_{\beta}^*-2}}$. In case of having concentration, we find,
\begin{equation*}
\begin{split}
c=&\lim_{n\to\infty}\Phi_{\gamma}^{\beta}(w_n,z_n)=\lim_{n\to\infty}\Phi_{\gamma}^{\beta}(w_n,z_n)-\frac{1}{2}\left\langle\left(\Phi_{\gamma}^{\beta}\right)'(w_n,z_n)\Big|(w_n,z_n)\right\rangle\\
=& \left(\frac{1}{2}-\frac{1}{2_{\beta}^*}\right)\int_{\Omega}|w(x,0)|^{2_{\beta}^*}dx+\left(\frac{1}{2}-\frac{1}{2_{\beta}^*}\right)\nu_{j}\\
\geq&\left(\frac{1}{2}-\frac{1}{2_{\beta}^*}\right)\left(\kappa_{\beta}S(\beta,N)\right)^{\frac{2_{\beta}^*}{2_{\beta}^*-2}}=c_{\beta}^*,
\end{split}
\end{equation*}
in contradiction with the hypothesis $c<c_{\beta}^*$. It only remains to prove that the left hand side of \eqref{eq:tozero} vanishes as $\varepsilon\to0$. Due to \eqref{critic} and Lemma \ref{rem},
the PS sequence $\{(w_n,z_n)\}_{n\in\mathbb{N}}$ is bounded in $\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\times\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})$, so that, up to a subsequence,
\begin{equation*}
\begin{split}
(w_n,z_n)&\rightharpoonup(w,z)\in \mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\times\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega}),\\
(w_n,z_n)&\rightarrow(w,z)\quad\mbox{a.e. in}\quad \mathcal{C}_{\Omega}.
\end{split}
\end{equation*}
Moreover, for $r<2^*=\frac{2(N+1)}{N-1}$ we have the compact inclusion,
\begin{equation*}
\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\times\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})\hookrightarrow L^{r}(\mathcal{C}_{\Omega},y^{1-2\beta}dxdy)\times L^{r}(\mathcal{C}_{\Omega},y^{1-2\beta}dxdy).
\end{equation*}
Applying H\"older's inequality with $p=\frac{N+1}{N-1}$ and $q=\frac{N+1}{2}$, we find,
\begin{equation*}
\begin{split}
\int_{B_{2\varepsilon}^+(x_{j})}& y^{1-2\beta}|\nabla\phi_{\varepsilon}|^2|w_n|^2dxdy\\
\leq& \left(\int_{B_{2\varepsilon}^+(x_{j})}\!\!\!\!\!\!\!\!\! y^{1-2\beta}|\nabla\phi_{\varepsilon}|^{N+1}dxdy\right)^{\frac{2}{N+1}}\left(\int_{B_{2\varepsilon}^+(x_{j})}\!\!\!\!\!\!\!\!\! y^{1-2\beta}|w_n|^{2\frac{N+1}{N-1}}dxdy\right)^{\frac{N-1}{(N+1)}}\\
\leq&\frac{1}{\varepsilon^2}\left(\int_{B_{2\varepsilon}(x_{j})}\int_0^\varepsilon y^{1-2\beta}dxdy\right)^{\frac{2}{N+1}}\left(\int_{B_{2\varepsilon}^+(x_{j})}\!\!\!\!\!\!\!\!\! y^{1-2\beta}|w_n|^{2\frac{N+1}{N-1}}dxdy\right)^{\frac{N-1}{(N+1)}}\\
\leq& c_0\varepsilon^{\frac{2(1-2\beta)}{N+1}}\left(\int_{B_{2\varepsilon}^+(x_{j})}\!\!\!\!\!\!\!\!\! y^{1-2\beta}|w_n|^{2\frac{N+1}{N-1}}dxdy\right)^{\frac{N-1}{(N+1)}}\\
\leq& c_0 \varepsilon^{\frac{2(1-2\beta)}{N+1}}\varepsilon^{\frac{(2+N-2\beta)(N-1)}{(N+1)}}\left(\int_{B_{2}^+(x_{j})}y^{1-2\beta}|w_n(\varepsilon x,\varepsilon y)|^{2\frac{N+1}{N-1}}dxdy\right)^{\frac{N-1}{(N+1)}}\\
\leq& c_1 \varepsilon^{N-2\beta},
\end{split}
\end{equation*}
for appropriate positive constants $c_0$ and $c_1$. In a similar way,
\begin{equation*}
\int_{B_{2\varepsilon}^+(x_{j})}\!\!\!\!\!\!\!\!\! y^{1-2\beta}|\nabla\phi_{\varepsilon}|^2|z_n|^2dxdy\leq c_2 \varepsilon^{N-2\beta}.
\end{equation*}
Thus, we find that,
\begin{equation*}
\begin{split}
0\leq&\lim_{n\to\infty}\left|\kappa_{\beta}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}\langle\nabla w_n,\nabla\phi_{\varepsilon} \rangle w_ndxdy+\kappa_{\beta}\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}\langle\nabla z_n,\nabla\phi_{\varepsilon} \rangle z_ndxdy\right|\\
\leq&\kappa_{\beta}\lim_{n\to\infty}\left(\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}|\nabla w_n|^2dxdy\right)^{1/2}\left(\int_{B_{2\varepsilon}^+(x_{j})}y^{1-2\beta}|\nabla\phi_{\varepsilon}|^2|w_n|^2dxdy\right)^{1/2}\\
+&\kappa_{\beta}\lim_{n\to\infty}\left(\int_{\mathcal{C}_{\Omega}}y^{1-2\beta}|\nabla z_n|^2dxdy\right)^{1/2}\left(\int_{B_{2\varepsilon}^+(x_{j})}y^{1-2\beta}|\nabla\phi_{\varepsilon}|^2|z_n|^2dxdy\right)^{1/2}\\
\leq&C \varepsilon^{\frac{N-2\beta}{2}}\to0,
\end{split}
\end{equation*}
as $\varepsilon\to 0$ and the proof of Lemma \ref{PScondition_extensionsistemabb} is complete.
\end{proof}
Next we show the corresponding result for the functional $\Phi_{\gamma}^{\alpha,\beta}$.
\begin{lemma}\label{PScondition_extensionsistemaab}
If $p=2_{\mu}^*-1$ the functional $\Phi_{\gamma}^{\alpha,\beta}$
satisfies the PS condition for any level $c$ below the critical
level defined by \eqref{levelmu}.
\end{lemma}
The proof of this result is similar to the one of Lemma \ref{PScondition_extensionsistemabb}, so we omit the details for brevity.
\subsection{PS sequences under a critical level}
At this point, it remains to show that we can obtain
PS sequences for the functionals $\Phi_{\gamma}^{\beta}$ and $\Phi_{\gamma}^{\alpha,\beta}$ under the critical levels defined by \eqref{levelbeta} and \eqref{levelmu} respectively. To do so, we consider the extremal functions of the fractional Sobolev inequality \eqref{sobolev}, namely, given $\theta\in(0,1)$, we set
\begin{equation*}
u_{\varepsilon}^{\theta}(x)=\frac{\varepsilon^{\frac{N-2\theta}{2}}}{(\varepsilon^2+|x|^2)^{\frac{N-2\theta}{2}}},
\end{equation*}
and $w_{\varepsilon}^{\theta}=E_{\theta}[u_{\varepsilon}^{\theta}]$ its $\theta$-harmonic extension.
Then, since $w_{\varepsilon}^{\theta}$ is a minimizer of the Sobolev inequality, it holds
\begin{equation*}
S(\theta,N)=\frac{\displaystyle\int_{\mathbb{R}_+^{N+1}}y^{1-2\theta}|\nabla w_{\varepsilon}^{\theta} |^2dxdy}{\displaystyle\|u_{\varepsilon}^{\theta}\|_{L^{2_{\theta}^*}(\mathbb{R}^N)}^2}.
\end{equation*}
We take a non-increasing smooth cut-off function $\phi_0(t)\in\mathcal{C}_0^{\infty}(\mathbb{R}_+)$ such that
$$\phi_0(t)=1\quad \hbox{if}\quad 0\leq t\leq1/2\quad \hbox{and}\quad \phi_0(t)=0\quad \hbox{if}\quad t\geq 1.$$
Assume without loss of generality that $0\in\Omega$, $r>0$ small enough such that $\overline{B}_r^+\subseteq\overline{\mathcal{C}}_{\Omega}$, and define the function $\phi_r(x,y)=\phi_0(\frac{r_{x,y}}{r})$ where $r_{x,y}=|(x,y)|=\left(|x|^2+y^2\right)^{1/2}$. Note that $\phi_r w_{\varepsilon}^{\theta}\in\mathcal{X}_0^{\theta}(\mathcal{C}_{\Omega})$. We recall now the following lemma proved in \cite{BCdPS}.
\begin{lemma}\label{estcol}
The family $\{\phi_r w_{\varepsilon}^{\theta}\}$ and its trace on $\{y=0\}$, denoted by $\{\phi_r u_{\varepsilon}^{\theta}\}$, satisfy
\begin{equation*}
\begin{split}
\|\phi_r w_{\varepsilon}^{\theta}\|_{\mathcal{X}_0^{\theta}(\mathcal{C}_{\Omega})}^{2}&=\|w_{\varepsilon}^{\theta}\|_{\mathcal{X}_0^{\theta}(\mathcal{C}_{\Omega})}^{2}+O(\varepsilon^{N-2\theta}),\\
\|\phi_r u_{\varepsilon}^{\theta}\|_{L^2(\Omega)}^{2}&=
\left\{
\begin{tabular}{lc}
$C \varepsilon^{2\theta}+O(\varepsilon^{N-2\theta})$ & if $N>4\theta$, \\
$C \varepsilon^{2\theta}|\log(\varepsilon)|$ & if $N=4\theta$.
\end{tabular}
\right.
\end{split}
\end{equation*}
\end{lemma}
\begin{remark}
Since $\|u_{\varepsilon}^{\theta}\|_{L^{2_{\theta}^*}(\mathbb{R}^N)}\sim C$ does not depend on $\varepsilon$ it follows that
\begin{equation*}
\|\phi_r u_{\varepsilon}^{\theta}\|_{L^{2_{\theta}^*}(\Omega)}=\|u_{\varepsilon}^{\theta}\|_{L^{2_{\theta}^*}(\mathbb{R}^N)}+O(\varepsilon^N)=C+O(\varepsilon^N).
\end{equation*}
\end{remark}
\noindent Next, we define the normalized functions,
\begin{equation*}
\eta_{\varepsilon}^{\theta}=\frac{\phi_r w_{\varepsilon}^{\theta}}{\|\phi_r u_{\varepsilon}^{\theta}\|_{2_{\theta}^*}}\quad \mbox{and} \quad \sigma_{\varepsilon}^{\theta}=\frac{\phi_r u_{\varepsilon}^{\theta}}{\|\phi_r u_{\varepsilon}^{\theta}\|_{2_{\theta}^*}},
\end{equation*}
then, because of Lemma \ref{estcol} the following estimates hold,
\begin{equation}\label{estimaciones}
\begin{split}
\|\eta_{\varepsilon}^{\theta}\|_{\mathcal{X}_0^{\theta}(\mathcal{C}_{\Omega})}^{2}&=S(\theta,N)+O(\varepsilon^{N-2\theta}),\\
\|\sigma_{\varepsilon}^{\theta}\|_{L^2(\Omega)}^{2}&=
\left\{
\begin{tabular}{lc}
$C \varepsilon^{2\theta}+O(\varepsilon^{N-2\theta})$ & if $N>4\theta$, \\
$C \varepsilon^{2\theta}|\log(\varepsilon)|$ & if $N=4\theta$,
\end{tabular}
\right.\\
\|\sigma_{\varepsilon}^{\theta}\|_{L^{2_{\theta}^*}(\Omega)}&=1.
\end{split}
\end{equation}
To continue, we consider
\begin{equation}\label{test}
(\overline{w}_{\varepsilon}^{\beta},\overline{z}_{\varepsilon}^{\beta})=(M\eta_{\varepsilon}^{\beta},M\rho\eta_{\varepsilon}^{\beta}),
\end{equation}
with $\rho>0$ to be determined and $M\gg 1$ a constant such that $\Phi_{\gamma}^{\beta}(\overline{w}_{\varepsilon}^{\beta},\overline{z}_{\varepsilon}^{\beta})<0$.
Then, under this construction, we define the set of paths
$$\Gamma_\varepsilon:=\{g\in C([0,1],\mathcal{X}_{0}^{\beta}(\mathcal{C}_{\Omega})\times \mathcal{X}_{0}^{\beta}(\mathcal{C}_{\Omega}))\,;\, g(0)=(0,0),\ g(1)=(\overline{w}_{\varepsilon}^{\beta},\overline{z}_{\varepsilon}^{\beta})\},$$
and we consider the minimax values
\begin{equation*}
c_\varepsilon=\inf_{g\in\Gamma_\varepsilon} \max_{t \in [0,1]} \Phi_{\gamma}^{\beta}(g(t)).
\end{equation*}
Next we prove that, in fact, $c_{\varepsilon}<c_{\beta}^*$ for $\varepsilon$ small enough.
\begin{lemma}\label{levelb}
Assume $p=2_{\beta}^*-1$. Then, there exists $\varepsilon>0$ small enough such that,
\begin{equation}\label{cotfunctional}
\sup_{t\geq0}\Phi_{\gamma}^{\beta}(t\overline{w}_{\varepsilon}^{\beta},t\overline{z}_{\varepsilon}^{\beta})<c_{\beta}^*,
\end{equation}
provided that $N>6\beta$.
\end{lemma}
\begin{proof}
Because of \eqref{estimaciones} with $\theta=\beta$, it follows that
\begin{align*}
g(t):=&\Phi_{\gamma}^{\beta}(t\overline{w}_{\varepsilon}^{\beta},t\overline{z}_{\varepsilon}^{\beta})\\
=&\frac{M^2t^2}{2}\left(\kappa_{\beta}\|\eta_{\varepsilon}^{\beta}\|_{\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})}^{2}
+\rho^2 \kappa_{\beta}\|\eta_{\varepsilon}^{\beta}\|_{\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})}^{2}
-2\sqrt{\gamma}\rho\|\sigma_{\varepsilon}^{\beta}\|_{L^2(\Omega)}^{2}\right)-\frac{M^{2_{\beta}^*}t^{2_{\beta}^*}}{2_{\beta}^*}\\
=&\frac{M^2t^2}{2}\left([\kappa_{\beta}S(\beta,N)+O(\varepsilon^{N-2\beta})]+\rho^2[\kappa_{\beta}S(\beta,N)+O(\varepsilon^{N-2\beta})]
-2\sqrt{\gamma}\rho\|\sigma_{\varepsilon}^{\beta}\|_{L^2(\Omega)}^{2}\right)\\ & -\frac{M^{2_{\beta}^*}t^{2_{\beta}^*}}{2_{\beta}^*}.
\end{align*}
It is clear that $\displaystyle \lim_{t\to \infty} g(t)=-\infty$, therefore, the function $g(t)$ possesses a maximum value at the point
\begin{equation*}
t_{\gamma,\varepsilon}:=\left(\frac{M^2\left([\kappa_{\beta}S(\beta,N)+O(\varepsilon^{N-2\beta})]+\rho^2[\kappa_{\beta}S(\beta,N)+O(\varepsilon^{N-2\beta})]-2\sqrt{\gamma}\rho\|\sigma_{\varepsilon}^{\beta}\|_{L^2(\Omega)}^{2}\right)}{M^{2_{\beta}^*}}\right)^{\frac{1}{2_{\beta}^*-2}}.
\end{equation*}
Moreover, at this point $t_{\gamma,\varepsilon}$,
\begin{equation*}
\begin{array}{rl}
g(t_{\gamma,\varepsilon})=\left(\frac{1}{2}-\frac{1}{2_{\beta}^*}\right)\Big(&\!\!\!\![\kappa_{\beta}S(\beta,N)+O(\varepsilon^{N-2\beta})]+
\rho^2[\kappa_{\beta}S(\beta,N)+O(\varepsilon^{N-2\beta})]\\
&\!\!\!\!-2\sqrt{\gamma}\rho\|\sigma_{\varepsilon}^{\beta}\|_{L^2(\Omega)}^{2}\Big)^{\frac{2_{\beta}^*}{2_{\beta}^*-2}}.
\end{array}\end{equation*}
To finish it is enough to show that
\begin{equation}\label{ppte}
g(t_{\gamma,\varepsilon})<\left(\frac{1}{2}-\frac{1}{2_{\beta}^*}\right)\left(\kappa_{\beta}S(\beta,N)\right)^{\frac{2_{\beta}^*}{2_{\beta}^*-2}}=c_{\beta}^*,
\end{equation}
holds true for $\varepsilon$ sufficiently small and making the appropriate choice of $\rho>0$. Thus, simplifying \eqref{ppte}, we are left to choose $\rho>0$ such that
\begin{equation*}
O(\varepsilon^{N-2\beta})+\kappa_{\beta}S(\beta,N)\rho^2+O(\varepsilon^{N-2\beta})\rho^2<2\sqrt{\gamma}\rho \|\sigma_{\varepsilon}^{\beta}\|_{L^2(\Omega)}^{2},
\end{equation*}
holds true provided $\varepsilon$ is small enough. To this end, take $\rho=\varepsilon^{\delta}$ with $\delta>0$ to be determined, then, since
\begin{equation*}
O(\varepsilon^{N-2\beta})+\kappa_{\beta}S(\beta,N)\varepsilon^{2\delta}+O(\varepsilon^{N-2\beta+2\delta})=O(\varepsilon^{\tau}),
\end{equation*}
with $\tau=\min\{N-2\beta, 2\delta, N-2\beta+2\delta\}=\min\{N-2\beta, 2\delta\}$, the proof will be finished once $\delta>0$ has been chosen such that the inequality
\begin{equation}\label{toprove}
O(\varepsilon^{\tau})<2\sqrt{\gamma}\rho\|\sigma_{\varepsilon}^{\beta}\|_{L^2(\Omega)}^{2},
\end{equation}
holds true for $\varepsilon$ small enough. Now we use the estimates \eqref{estimaciones}. Then, if $N=4\beta$ inequality \eqref{toprove} reads
\begin{equation}\label{i.1}
O(\varepsilon^{\tau})<2C\sqrt{\gamma}\varepsilon^{2\beta+\delta}|\log(\varepsilon)|.
\end{equation}
Since $0<\varepsilon\ll 1$, inequality \eqref{i.1} holds for $\tau=\min\{2\beta,2\delta\}>2\beta+\delta$, that is impossible and, thus, inequality \eqref{toprove} cannot hold for $N=4\beta$. On the other hand, if $N>4\beta$ inequality \eqref{toprove} has the form,
\begin{equation}\label{i.2}
O(\varepsilon^{\tau})<2C\sqrt{\gamma}\varepsilon^{2\beta+\delta}.
\end{equation}
Since $\varepsilon\ll 1$, inequality \eqref{i.2} holds for $\tau=\min\{N-2\beta,2\delta\}>2\beta+\delta$. Using the identity $\displaystyle\min\{a,b\}=\frac{1}{2}(a+b-|a-b|)$,
we arrive at the condition
\begin{equation}\label{i.3}
N-2\beta-|N-2\beta-2\delta|>4\beta.
\end{equation}
Finally, we have two options,
\begin{enumerate}
\item $N-2\beta>2\delta$ combined with \eqref{i.3} provides us with the range,
\begin{equation}\label{i.4}
N-2\beta>2\delta>4\beta.
\end{equation}
Then $N>6\beta$ necessarily, so that we can choose a positive $\delta$ satisfying \eqref{i.4} and, hence, inequality \eqref{toprove} holds for $\varepsilon$ small enough.
\item $N-2\beta<2\delta$ combined with \eqref{i.3} implies that $2(N-2\beta)-4\beta>2\delta$, and hence,
\begin{equation}\label{i.5}
2(N-2\beta)-4\beta>2\delta>N-2\beta.
\end{equation}
Once again $N>6\beta$ necessarily, so that we can choose a positive $\delta$ satisfying \eqref{i.5} and, hence, inequality \eqref{toprove} holds for $\varepsilon$ small enough.
\end{enumerate}
Thus, if $N>6\beta$ we can choose $\rho>0$ and $\varepsilon>0$ small enough such that \eqref{cotfunctional} is achieved.
\end{proof}
Now, we are in the position to conclude the proof of the second main result of the paper. First we will focus on the particular case when $\alphalpha=2\betaegin{equation}ta$. Later on we will follow a similar
argument to prove the results when $\alphalpha\nablaeq 2\betaegin{equation}ta$.
\betaegin{equation}gin{proof}[Proof of Theorem \mathbb{R}f{Th1}. Case $\alphalpha=2\betaegin{equation}ta$.]\mathbb{H}fill\betareak
By Lemma \mathbb{R}f{lezeroextension}, the functional $\Phi_{\gamma}^{\betaegin{equation}ta}$ satisfies the MP geometry. Because of MPT we have a PS sequence which by Lemma
\mathbb{R}f{levelb}, satisfies that the corresponding energy level is bellow the critical one.
Taking into account Lemma \mathbb{R}f{PScondition_extensionsistemabb}, this PS sequence satisfies the PS condition, hence we obtain a
critical point $(w,z)\in\mathcal{X}_0^{\betaegin{equation}ta}(\mathcal{C}_{\Omega})\tauimes\mathcal{X}_0^{\betaegin{equation}ta}(\mathcal{C}_{\Omega})$ for the functional $\Phi_{\gamma}^{\betaegin{equation}ta}$.
The rest of the proof follows as in the subcritical case.
\varphiepsilonnd{proof}
Now, we focus on the functional $\Phi_{\gamma}^{\alpha,\beta}$. For this case, we consider
\begin{equation}\label{test2}
(\overline{w}_{\varepsilon}^{\mu},\overline{z}_{\varepsilon}^{\beta})=(M\eta_{\varepsilon}^{\mu},M\rho\eta_{\varepsilon}^{\beta}),
\end{equation}
with $\rho>0$ to be determined and a constant $M\gg 1$ such that $\Phi_{\gamma}^{\alpha,\beta}(\overline{w}_{\varepsilon}^{\mu},\overline{z}_{\varepsilon}^{\beta})<0$. Let us notice that, by definition,
\begin{equation*}
\sigma_{\varepsilon}^{\mu}\sigma_{\varepsilon}^{\beta}
=\frac{\phi_r u_{\varepsilon}^{\mu}\phi_r u_{\varepsilon}^{\beta}}{\|\phi_r u_{\varepsilon}^{\mu}\|_{2_{\mu}^*}
\|\phi_r u_{\varepsilon}^{\beta}\|_{2_{\beta}^*}},
\end{equation*}
and, since $\mu:=\alpha-\beta$, we find
\begin{equation*}
u_{\varepsilon}^{\mu}u_{\varepsilon}^{\beta}=\frac{\varepsilon^{\frac{N-2\mu}{2}}}{(\varepsilon^2+|x|^2)^{\frac{N-2\mu}{2}}}\,
\frac{\varepsilon^{\frac{N-2\beta}{2}}}{(\varepsilon^2+|x|^2)^{\frac{N-2\beta}{2}}}=\frac{\varepsilon^{N-\alpha}}{(\varepsilon^2+|x|^2)^{N-\alpha}}
=\left(\frac{\varepsilon^{\frac{N-2(\alpha/2)}{2}}}{(\varepsilon^2+|x|^2)^{\frac{N-2(\alpha/2)}{2}}}\right)^2 \!=\!\left(u_{\varepsilon}^{\alpha/2}\right)^{2}.
\end{equation*}
Thus, applying \eqref{estimaciones} with $\theta=\frac{\alpha}{2}$, we conclude
\begin{equation}\label{estab}
\int_{\Omega}\sigma_{\varepsilon}^{\mu}\sigma_{\varepsilon}^{\beta}\,dx=C\|\sigma_{\varepsilon}^{\alpha/2}\|_{L^2(\Omega)}^{2}=\left\{
\begin{array}{ll}
C\varepsilon^{\alpha}+O(\varepsilon^{N-\alpha}) & \text{if } N>2\alpha, \\
C\varepsilon^{\alpha}|\log(\varepsilon)| & \text{if } N=2\alpha.
\end{array}
\right.
\end{equation}
Following the steps performed for the case $\alpha=2\beta$, we define the set of paths
\[\Gamma_\varepsilon:=\{g\in C([0,1],\mathcal{X}_{0}^{\mu}(\mathcal{C}_{\Omega})\times \mathcal{X}_{0}^{\beta}(\mathcal{C}_{\Omega}))\,;\, g(0)=(0,0),\; g(1)=(M\eta_{\varepsilon}^{\mu},M\rho\eta_{\varepsilon}^{\beta})\},\]
and we consider the minimax values
\[c_\varepsilon=\inf_{g\in\Gamma_\varepsilon} \max_{t \in [0,1]} \Phi_{\gamma}^{\alpha,\beta}(g(t)).\]
The final step of our scheme will be completed once we have shown that $c_{\varepsilon}<c_{\mu}^*$ for $\varepsilon$ small enough.
\begin{lemma}\label{levelab}
Assume $p=2_{\beta}^*-1$. Then, there exists $\varepsilon>0$ small enough such that
\begin{equation}\label{cotfunctionalab}
\sup_{t\geq0}\Phi_{\gamma}^{\alpha,\beta}(t\overline{w}_{\varepsilon}^{\mu},t\overline{z}_{\varepsilon}^{\beta})<c_{\mu}^*,
\end{equation}
provided that $N>4\alpha-2\beta$.
\end{lemma}
The proof is similar to the one performed for Lemma \ref{levelb}, but we include it for the reader's convenience.
\begin{proof}
Because of \eqref{estimaciones}, it follows that
\begin{align*}
g(t):=&\,\Phi_{\gamma}^{\alpha,\beta}(t\overline{w}_{\varepsilon}^{\mu},t\overline{z}_{\varepsilon}^{\beta})\\
=&\frac{M^2t^2}{2}\left(\frac{\kappa_{\mu}}{\gamma^{1-\beta/\alpha}}\|\eta_{\varepsilon}^{\mu}\|_{\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})}^{2}+\frac{\rho^2\kappa_{\beta}}{\gamma^{\beta/\alpha}}\|\eta_{\varepsilon}^{\beta}\|_{\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})}^{2}-2\|\sigma_{\varepsilon}^{\alpha/2}\|_{L^2(\Omega)}^{2}\right)-\frac{M^{2_{\mu}^*}t^{2_{\mu}^*}}{2_{\mu}^*\gamma^{1-\beta/\alpha}}\\
=&\frac{M^2t^2}{2}\left(\frac{1}{\gamma^{1-\beta/\alpha}}[\kappa_{\mu}S(\mu,N)+O(\varepsilon^{N-2\mu})]+\frac{\rho^2}{\gamma^{\beta/\alpha}}[\kappa_{\beta}S(\beta,N)+O(\varepsilon^{N-2\beta})]-2\|\sigma_{\varepsilon}^{\alpha/2}\|_{L^2(\Omega)}^{2}\right)\\
&-\frac{M^{2_{\mu}^*}t^{2_{\mu}^*}}{2_{\mu}^*\gamma^{1-\beta/\alpha}}.
\end{align*}
It is clear that $\displaystyle \lim_{t\to \infty} g(t)=-\infty$;
therefore, the function $g(t)$ possesses a maximum value at the
point
\[
\begin{array}{rl}
t_{\gamma,\varepsilon}\!=\Big(\frac{\gamma^{1-\beta/\alpha}}{M^{2_{\mu}^*-2}}\Big(
& \!\!\!\!\frac{1}{\gamma^{1-\beta/\alpha}}[\kappa_{\mu}S(\mu,N)\!+\!O(\varepsilon^{N-2\mu})]\\
& \!\!\!\! +
\frac{\rho^2}{\gamma^{\beta/\alpha}}[\kappa_{\beta}S(\beta,N)+O(\varepsilon^{N-2\beta})]-2\|
\sigma_{\varepsilon}^{\alpha/2}\|_{L^2(\Omega)}^{2}\Big)\Big)^{\frac{1}{2_{\mu}^*-2}}.
\end{array}
\]
Moreover, at this point $t_{\gamma,\varepsilon}$,
\[\begin{array}{rl}
h(t_{\gamma,\varepsilon})=\left(\frac{1}{2}-\frac{1}{2_{\mu}^*}\right)\Big(\Big( \gamma^{1-\beta/\alpha}\Big)^{\frac{2}{2_{\mu}^*}}
\Big(&\!\!\!\!\frac{1}{\gamma^{1-\beta/\alpha}}[\kappa_{\mu}S(\mu,N)+O(\varepsilon^{N-2\mu})]\\
& \!\!\!\! +\frac{\rho^2}{\gamma^{\beta/\alpha}}[\kappa_{\beta}S(\beta,N)+O(\varepsilon^{N-2\beta})]-
2\|\sigma_{\varepsilon}^{\alpha/2}\|_{L^2(\Omega)}^{2}\Big)\Big)^{\frac{2_{\mu}^*}{2_{\mu}^*-2}}.
\end{array}\]
To complete the proof we must show that the inequality
\begin{equation}\label{lls}
h(t_{\gamma,\varepsilon})<c_{\mu}^*:=\frac{1}{\gamma^{1-\beta/\alpha}}\left(\frac{1}{2}-\frac{1}{2_{\mu}^*}\right)\left(\kappa_{\mu}S(\mu,N)\right)^{\frac{2_{\mu}^*}{2_{\mu}^*-2}}
\end{equation}
holds true for $\varepsilon$ small enough. Thus, simplifying \eqref{lls}, we are left to choose $\rho>0$ such that the inequality
\begin{equation*}
O(\varepsilon^{N-2\mu})+\rho^2[\kappa_{\beta}S(\beta,N)+O(\varepsilon^{N-2\beta})]<2\gamma^{\beta/\alpha}\|\sigma_{\varepsilon}^{\alpha/2}\|_{L^2(\Omega)}^{2}
\end{equation*}
holds true provided $\varepsilon$ is small enough. To this end, take $\rho=\varepsilon^{\delta}$ with $\delta>0$ to be determined; therefore, since
\begin{equation*}
O(\varepsilon^{N-2\mu})+\kappa_{\beta}S(\beta,N)\varepsilon^{2\delta}+O(\varepsilon^{N-2\beta+2\delta})=O(\varepsilon^{\tau}),
\end{equation*}
with $\tau=\min\{N-2\mu,2\delta,N-2\beta+2\delta\}=\min\{N-2\mu,2\delta\}$, the proof will be completed once we choose $\delta>0$ such that the inequality
\begin{equation}\label{toproveab}
O(\varepsilon^{\tau})<2\gamma^{\beta/\alpha}\|\sigma_{\varepsilon}^{\alpha/2}\|_{L^2(\Omega)}^{2}
\end{equation}
holds true for $\varepsilon$ small enough. We use once again the estimates \eqref{estimaciones}. If $N=2\alpha$, because of \eqref{estab}, inequality \eqref{toproveab} reads
\begin{equation}\label{h.1}
O(\varepsilon^{\tau})<2\gamma^{\beta/\alpha}\varepsilon^{\alpha+\delta}|\log(\varepsilon)|.
\end{equation}
Since $\varepsilon\ll 1$, inequality \eqref{h.1} holds for $\tau=\min\{2\alpha-2\mu,2\delta\}=\min\{2\beta,2\delta\}>\alpha+\delta$. Using the identity $\displaystyle\min\{a,b\}=\frac{1}{2}(a+b-|a-b|)$, we find that $\tau>\alpha+\delta$ implies $\beta+\delta-|\beta-\delta|>\alpha+\delta$, which is impossible because $\alpha>\beta$. Therefore, \eqref{toproveab} cannot hold if $N=2\alpha$. On the other hand, if $N>2\alpha$, inequality \eqref{toproveab} has the form
\begin{equation}\label{h.2}
O(\varepsilon^{\tau})<2\gamma^{\beta/\alpha}\varepsilon^{\alpha+\delta}.
\end{equation}
Since $\varepsilon\ll 1$, inequality \eqref{h.2} holds if and only if
$\tau=\min\{N-2\mu,2\delta\}>\alpha+\delta$. Keeping in mind the identity $\displaystyle\min\{a,b\}=\frac{1}{2}(a+b-|a-b|)$, if $\tau>\alpha+\delta$ we arrive at the condition
\begin{equation}\label{h.3}
N-2\mu-|N-2\mu-2\delta|>2\alpha.
\end{equation}
Consequently, we have two options:
\begin{enumerate}
\item $N-2\mu>2\delta$ combined with \eqref{h.3} provides us with the range
\begin{equation}\label{h.4}
N-2\mu>2\delta>2\alpha.
\end{equation}
Then $N>4\alpha-2\beta$ necessarily, so that we can choose a positive $\delta$ satisfying \eqref{h.4} and, hence, inequality \eqref{toproveab} holds for $\varepsilon$ small enough.
\item $N-2\mu<2\delta$ combined with \eqref{h.3} implies that $2(N-2\mu)-2\alpha>2\delta$, and hence,
\begin{equation}\label{h.5}
2(N-2\mu)-2\alpha>2\delta>N-2\mu.
\end{equation}
Once again $N>4\alpha-2\beta$ necessarily, so that we can choose a positive $\delta$ satisfying \eqref{h.5} and, hence, inequality \eqref{toproveab} holds for $\varepsilon$ small enough.
\end{enumerate}
\end{proof}
To conclude, we complete the proof of Theorem \ref{Th1} by dealing with the remaining case $\alpha\neq2\beta$.
\begin{proof}[Proof of Theorem \ref{Th1}. Case $\alpha\neq2\beta$.]\hfill\break
By Lemma \ref{lezeroextension}, the functional $\Phi_{\gamma}^{\alpha, \beta}$ satisfies the MP geometry. Because of the MPT we have a PS sequence which, by Lemma
\ref{levelab}, satisfies that the corresponding energy level is below the critical one.
Taking into account Lemma \ref{PScondition_extensionsistemaab}, this PS sequence satisfies the PS condition, hence we obtain a
critical point $(w,z)\in\mathcal{X}_0^{\mu}(\mathcal{C}_{\Omega})\times\mathcal{X}_0^{\beta}(\mathcal{C}_{\Omega})$ for the functional $\Phi_{\gamma}^{\alpha,\beta}$.
The rest of the proof follows as in the subcritical case.
\end{proof}
\begin{thebibliography}{9}
\bibitem{AAP} A. Ambrosetti, J. Garcia Azorero, I. Peral, \textit{Elliptic variational problems in $\mathbb{R}^N$ with critical growth.} Special issue in celebration of Jack K. Hale's 70th birthday, Part 1 (Atlanta, GA/Lisbon, 1998). J. Differential Equations {\bf 168} (2000), no. 1, 10--32.
\bibitem{AR} A. Ambrosetti, P.H. Rabinowitz, {\em Dual variational methods in critical point theory and applications.}
J. Funct. Anal., {\bf 14} (1973), 349--381.
\bibitem{BCdPS} B. Barrios, E. Colorado, A. de Pablo, U. S\'anchez, \textit{On some critical problems for the fractional Laplacian
operator}. J. Differential Equations {\bf 252} (2012), no. 11, 6133--6162.
\bibitem{BrCdPS} C. Br\"andle, E. Colorado, A. de Pablo, U. S\'anchez, \textit{A concave-convex elliptic problem involving the fractional Laplacian.}
Proc. Roy. Soc. Edinburgh Sect. A {\bf 143} (2013), no. 1, 39--71.
\bibitem{BN} H. Brezis, L. Nirenberg, \textit{Positive solutions of nonlinear elliptic equations involving critical Sobolev exponents}.
Comm. Pure Appl. Math. \textbf{36} (1983), no. 4, 437--477.
\bibitem{CaSi} X. Cabr\'e, Y. Sire, \emph{Nonlinear equations for fractional Laplacians, I:
Regularity, maximum principles, and Hamiltonian estimates.} Ann.
Inst. H. Poincar\'e Anal. Non Lin\'eaire {\bf 31} (2014), no. 1, 23--53.
\bibitem{CS} L. Caffarelli, L. Silvestre, \textit{An extension problem related to the fractional Laplacian}. Comm. Partial Differential Equations
\textbf{32} (2007), no. 7-9, 1245--1260.
\bibitem{EV} L.C. Evans, {\em Partial Differential Equations.} Graduate Studies in Mathematics {\bf 19}. American Mathematical Society,
Providence, RI, 1998. xviii+662 pp. ISBN: 0-8218-0772-2.
\bibitem{FKS} E.B. Fabes, C.E. Kenig, R.P. Serapioni, \textit{The local regularity of solutions of degenerate elliptic equations}. Comm. Partial
Differential Equations {\bf 7} (1982), no. 1, 77--116.
\bibitem{Lions} P.-L. Lions, \textit{The concentration-compactness principle in the calculus of variations. The limit case. II.} Rev. Mat.
Iberoamericana {\bf 1} (1985), no. 2, 45--121.
\bibitem{MusNa2} R. Musina, A.I. Nazarov, \textit{On fractional Laplacians--2}. Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire {\bf 33}
(2016), no. 6, 1667--1673.
\bibitem{MusNa3} R. Musina, A.I. Nazarov, \textit{On fractional Laplacians--3.} ESAIM Control Optim. Calc. Var. {\bf 22} (2016), no. 3, 832--841.
\end{thebibliography}
\end{document}
\begin{document}
\title[Gorenstein cohomological dimension and stable categories]{Gorenstein cohomological dimension and stable categories for groups}
\author[Wei Ren] {Wei Ren}
\thanks{}
\subjclass[2010]{18G20, 18E30, 20C07, 55U35}
\date{\today}
\thanks{E-mail: wren$\symbol{64}$cqnu.edu.cn}
\keywords{Gorenstein cohomological dimension, Benson's cofibrant module, stable category, model category.}
\maketitle
\dedicatory{}
\commby{}
\begin{abstract}
First we study the Gorenstein cohomological dimension ${\rm Gcd}_RG$ of groups $G$ over coefficient rings $R$, under changes of groups and rings; a characterization for finiteness of ${\rm Gcd}_RG$ is given; a Gorenstein version of Serre's theorem is proved, i.e. ${\rm Gcd}_RH = {\rm Gcd}_RG$ for any subgroup $H$ of $G$ with finite index; characterizations for finite groups are given. These generalize the results in literatures, which were obtained over $\mathbb{Z}$ or rings of finite global dimension, to more general rings. Moreover, we establish a model structure on the weakly idempotent complete exact category $\mathcal{F}ib$ consisting of fibrant $RG$-modules, and show that the homotopy category $\mathrm{Ho}(\mathcal{F}ib)$ is triangle equivalent to the stable category $\underline{\mathcal{C}of}(RG)$ of Benson's cofibrant modules, as well as the stable module category ${\rm StMod}(RG)$. For any commutative ring $R$ of finite global dimension, if either $G$ is of type $\Phi_R$ or is in Kropholler's large class such that ${\rm Gcd}_RG$ is finite, then
${\rm Ho}(\mathcal{F}ib)$ is equivalent to the stable category of Gorenstein projective $RG$-modules, the singularity category, and the homotopy category of totally acyclic complexes of projective $RG$-modules.
\end{abstract}
\section{Introduction}
The origin of Gorenstein projective dimension may date back to the study of $G$-dimension by Auslander and Bridger \cite{AB69} in 1960s. The Gorenstein projective dimension generalizes the projective dimension, in the sense that if $M$ is an $R$-module of finite projective dimension, then ${\rm Gpd}_RM = {\rm pd}_RM$.
Let $G$ be any group. Recall that the \emph{$($Gorenstein$)$ cohomological dimension} of $G$ over a ring of coefficients $R$, denoted by ${\rm cd}_{R}G$ (resp. ${\rm Gcd}_{R}G$), is defined as the (Gorenstein) projective dimension of the trivial $RG$-module $R$. Studying groups through their cohomological dimensions arose from both topological and algebraic sources. For example, it follows from \cite{Sta68, Swan69} that a non-trivial group $G$ is free if and only if ${\rm cd}_{\mathbb{Z}}G = 1$. It is conjectured in \cite{BDT09} that a group $G$ admits a finite dimensional classifying space $\underline{E}G$ for proper actions if and only if ${\rm Gcd}_\mathbb{Z}G$ is finite; in this case, there is a finite dimensional contractible $G$-CW-complex with finite stabilizers.
The Gorenstein cohomological dimension of groups was extensively studied under an assumption that the coefficient ring is either the ring of integers $\mathbb{Z}$ (see e.g. \cite{ABS09, BDT09, DT08, DT10, Tal14}), or commutative ring of finite (weak) global dimension (see e.g. \cite{Bis21+, Bis21, ET14, ET18}). The finiteness of (weak) global dimension of the coefficient ring is used to force every Gorenstein projective module to be projective, since there is an observation \cite[Remark 2.15]{Nee08} that any acyclic complex of projective modules is contractible if all of its syzygies are flat modules.
However, as stated in \cite[Remark 1.4(i)]{ET22}, in dealing with Gorenstein cohomological dimension of groups, the assumption that the coefficient ring has finite (weak) global dimension is perhaps unnatural. In fact, non-projective Gorenstein projective module fails to exist over rings of finite global dimension; moreover, many (Iwanaga)-Gorenstein rings, for instance $\mathbb{Z}/4\mathbb{Z}$ and the ring of dual numbers $k[x]/(x^2)$, are not of finite (weak) global dimension. This stimulates us to consider Gorenstein cohomological dimension of groups over more general rings. Specifically, we generalize some important results in the literatures which were obtained over $\mathbb{Z}$ or (noetherian) rings of finite global dimension; see for example Theorem \ref{thm:fGcd}, Theorem \ref{thm:H2Gequ}, Proposition \ref{prop:gd-bound}, Corollary \ref{cor:Gcd=pdB1} and Theorem \ref{thm:triequ}.
In Section 2, we intend to study the finiteness of Gorenstein cohomological dimension of groups. Recall that a ring is \emph{Gorenstein regular} \cite{Bel00, EEGR} if it has finite global Gorenstein projective dimension. Let $G$ be a group, $R$ be a Gorenstein regular ring.
We show in Lemma \ref{lem:SplitMonic} that if ${\rm Gcd}_{R}G$ is finite, then there exists an $R$-split $RG$-exact sequence $0\rightarrow R\rightarrow \Lambda$, where $\Lambda$ is an $R$-projective $RG$-module with ${\rm pd}_{RG}\Lambda = {\rm Gcd}_RG$; if $R$ is commutative, then the converse holds by Lemma \ref{lem:RG-Gp}. Moreover, a characterization of finiteness of ${\rm Gcd}_RG$ is given (see Theorem \ref{thm:fGcd}). Note that the class of Gorenstein regular rings contains strictly the rings of finite global dimension, as well as Iwanaga-Gorenstein rings (e.g. $\mathbb{Z}/4\mathbb{Z}$ and $k[x]/(x^2)$). In particular, the most interesting examples of coefficient rings in dealing with applications of group rings in geometry and representation theory, such as the ring of integers $\mathbb{Z}$, the field of rationals $\mathbb{Q}$ and finite fields, are all Gorenstein regular. Hence, we generalize the results in \cite[Theorem 6.4]{ET14} and \cite[Theorem 1.7]{ET18} for coefficient rings of finite global dimension to Gorenstein regular rings. Also, we rediscover \cite[Theorem 2.7]{BDT09} by letting $R = \mathbb{Z}$.
The ``Gcd'' can be considered as an assignment of invariants for the pairs of groups and coefficient rings $(G, R)$. In Section 3 and 4, we will study the assignment Gcd under changes of groups and coefficient rings, respectively. We define an order for such pairs; see Definition \ref{def:order}. Using Lemma \ref{lem:SplitMonic} and \ref{lem:RG-Gp}, we show in Proposition \ref{prop:GroupOrd} that if $R$ is a commutative Gorenstein regular ring and $(H, R)\leq (G, R)$, then $\mathrm{Gcd}_{R}H\leq \mathrm{Gcd}_{R}G$; by specifically taking $R = \mathbb{Z}$ we reobtain \cite[Theorem 2.8 (1)]{BDT09}. If $R$ is commutative Gorenstein regular and $(G, S) \leq (G, R)$, then $\mathrm{Gcd}_{S}G\leq \mathrm{Gcd}_{R}G$; see Proposition \ref{prop:RingOrd}. We apply this to recover \cite[Propostion 2.1]{ET18} and \cite[Theorem 3.2]{Tal14}, that is, $\mathrm{Gcd}_{R}G\leq \mathrm{Gcd}_{\mathbb{Z}}G$ for any commutative ring $R$, and particularly $\mathrm{Gcd}_{\mathbb{Q}}G \leq \mathrm{Gcd}_{\mathbb{Z}}G$. Consequently, ``Gcd'' preserves the order of pairs of groups and commutative Gorenstein regular rings, that is, $\mathrm{Gcd}_{S}H\leq \mathrm{Gcd}_{R}G$ provided that $(H, S) \leq (G, R)$; see Corollary \ref{cor:KpOrd}.
By Serre's Theorem, there is an equality between cohomological dimensions of a torsion-free group $G$ and a subgroup $H$ of finite index over the ring of integers $\mathbb{Z}$; see e.g. \cite[Theorem VIII 3.1]{Bro82}. We have a Gorenstein cohomological version of Serre's Theorem over any coefficient ring $R$ (not necessarily commutative Gorenstein regular), that is, ${\rm Gcd}_RH = {\rm Gcd}_RG$; see Theorem \ref{thm:H2Gequ}. We rediscover \cite[Proposition 2.18]{ABS09} by specifying the ring to be $\mathbb{Z}$, while our proof is straightforward, and quite different from that of \cite{ABS09}. Moreover, we note that the equality was stated in
\cite[Corollary 2.10]{ET18} under additional assumptions that the coefficient ring is of finite weak global dimension and $H$ is a normal subgroup of $G$.
There are some applications. We give a homological characterization for finite groups, which strengthens and extends \cite[Proposition 2.19]{ABS09}, the main theorem of \cite{DT08}, and \cite[Corollary 2.3]{ET18}; see Theorem \ref{thm:fgroup}. An upper bound for Gorenstein projective dimensions of modules over group rings is given by using Gorenstein cohomological dimension of the group (see Proposition \ref{prop:gd-bound}), which improves
the one in \cite[Corollary 1.6]{ET18}. Consequently, if we assume $R$ is a commutative 1-Gorenstein ring (i.e. a ring such that every module has Gorenstein projective dimension at most 1), then for any $RG$-module $M$, we have ${\rm Gpd}_{RG}M\leq 1$ when $G$ is a finite group, and ${\rm Gpd}_{RG}M\leq 2$ when $G$ is a group which acts on a tree with finite vertex stabilizers (e.g. any non-trivial free group); see Corollary \ref{cor:gd<1} and \ref{cor:gd<2}.
We remark that it is reasonable to study Gorenstein homological theory over Gorenstein regular rings, as illustrated by the following facts: every module has a Gorenstein projective approximation (\cite[Theorem 2.10]{Hol04}); many homological invariants are equal (\cite[Theorem 4.1]{Emm12}); the stable category of Gorenstein projective modules is triangle equivalent to the singularity category (see e.g. \cite[Section 6]{Bel00}).
The second part of the paper concerns Benson's cofibrant modules and stable categories over group rings. Let $B(G, R)$ be an $RG$-module given by functions from $G$ to $R$ which take finitely many values \cite{CK97}. For any $RG$-module $M$, $M\otimes_{R}B(G, R)$ is an $RG$-module via diagonal action of $G$. Recall that $M$ is {\em Benson's cofibrant} if $M\otimes_{R}B(G, R)$ is a projective $RG$-module; see \cite[Definition 4.1]{Ben97}.
Inspired by \cite{CK97, DT10}, in Section 5, we study the relation between (Benson's) cofibrant modules and Gorenstein projective modules over group rings; see Proposition \ref{prop:Cof-GP}, \ref{prop:GP-Cof1} and \ref{prop:GP-Cof2}. Consequently, if $G$ is a group of type $\Phi_{R}$ \cite{Tal07, MS19} or is in Kropholler's large class ${\rm LH}\mathfrak{F}$ \cite{Kro93}, and $R$ is a commutative ring of finite global dimension, then cofibrant modules and Gorenstein projective modules coincide; see Corollary \ref{cor:GP=Cof}. The corollary recovers \cite[Theorem 3.11]{Bis21+} and \cite[Theorem 3.4]{Bis21}, and provides a partial answer to \cite[Conjecture A]{DT10} by taking $R = \mathbb{Z}$. Moreover, by applying Lemma \ref{lem:SplitMonic} we have the equality ${\rm Gcd}_{R}G = {\rm pd}_{RG}B(G, R)$ for any commutative Gorenstein regular ring $R$ if the later is finite; see Corollary \ref{cor:Gcd=pdB1}. This generalizes \cite[Theorem 1.18]{Bis21} since $R$ was assumed to be of finite global dimension therein, while our argument is quite straightforward and easy.
In Section 6, we are devoted to model category and stable categories with respect to cofibrant modules. Quillen \cite{Qui67} introduced the notion of a model category as an axiomatization of homotopy theory. Let $\mathcal{F}ib$ consist of $RG$-modules $M$ such that ${\rm pd}_{RG}M\otimes_{R}B(G, R) < \infty$, named {\em fibrant modules}. By the very definition, $\mathcal{F}ib$ is a weakly idempotent complete exact category; see Lemma \ref{lem:F}. We establish a model structure on $\mathcal{F}ib$, where the cofibrations are precisely the monomorphisms whose cokernel are cofibrant $RG$-modules; see Theorem \ref{thm:model} and compare \cite[Section 10]{Ben97}.
For model category $\mathcal{F}ib$, the associated homotopy category $\mathrm{Ho}(\mathcal{F}ib)$ is obtained by formally inverting weak equivalences. Moreover, $\mathrm{Ho}(\mathcal{F}ib)$ is triangle equivalent to the stable category $\underline{\mathcal{C}of}(RG)$; see Corollary \ref{cor:equ1}. Note that the subcategory $\mathcal{C}of(RG)$ formed by cofibrant $RG$-modules is a Frobenius category; see Lemma \ref{lem:C}.
Let ${\rm StMod}(RG)$ be the {\em stable module category} (Definition \ref{def:stab}) with all fibrant $RG$-modules as objects, and morphisms for any objects $M$ and $N$ are given by
$${\rm Hom}_{{\rm StMod}(RG)}(M, N) = \widehat{{\rm Ext}}_{RG}^0(M, N) := \mathop{\underrightarrow{\mathrm{lim}}}\limits_i\underline{{\rm Hom}}_{RG}(\Omega^i(M), \Omega^i(N)).$$
The definition extends that of \cite[Section 8]{Ben97} by removing the countably presented assumption for modules therein. Noting that the map in ${\rm Hom}_{{\rm StMod}(RG)}(M, N)$ might not correspond to any map in ${\rm Hom}_{RG}(M, N)$, the stable module category can be difficult to work with.
We prove that ${\rm Ho}(\mathcal{F}ib)$ is equivalent to ${\rm StMod}(RG)$; see Theorem \ref{thm:stable}. If $R$ is a commutative ring with finite global dimension, and $G$ is either a group of type $\Phi_R$ or an ${\rm LH}\mathfrak{F}$-group, then ${\rm Ho}(\mathcal{F}ib)$ is equivalent to the stable category of Gorenstein projective $RG$-modules $\underline{\mathcal{GP}}(RG)$; moreover, if ${\rm Gcd}_RG$ is finite, then the following categories are equivalent:
\[\begin{aligned}
{\rm Ho}(\mathcal{F}ib) &\simeq {\rm StMod}(RG) \simeq \underline{\mathcal{C}of}(RG) = \underline{\mathcal{GP}}(RG) \\
&\simeq {\rm D}_{sg}(RG) \simeq {\rm K}_{tac}(RG\text{-}{\rm Proj}) = {\rm K}_{ac}(RG\text{-}{\rm Proj}),
\end{aligned}\]
where ${\rm D}_{sg}(RG)$ is the singularity category \cite{Buc87, Or04}, ${\rm K}_{tac}(RG\text{-}{\rm Proj})$ and ${\rm K}_{ac}(RG\text{-}{\rm Proj})$ are respectively the homotopy category of totally acyclic complexes and acyclic complexes of projective $RG$-modules; see Theorem \ref{thm:triequ}. We generalize \cite[Theorem 3.10]{MS19} by removing the noetherian assumption, and extend the equivalences
${\rm StMod}(RG) \simeq \underline{\mathcal{GP}}(RG) \simeq {\rm D}_{sg}(RG) \simeq {\rm K}_{tac}(RG\text{-}{\rm Proj})$ therein.
For example, if $G$ is a group which admits a finite dimensional contractible $G$-CW-complex with finite stabilizers, then it follows from Proposition \ref{prop:H1Fgroup} that
${\rm Gcd}_RG$ is finite, and furthermore, all of the above equivalences of categories hold.
\section{Finiteness of Gorenstein cohomological dimension of groups}
Throughout, all rings are assumed to be associative with unit, and all modules will be left modules unless otherwise specified.
Let $A$ be a ring. An acyclic complex of projective $A$-modules
$$\mathbf{P} = \cdots\longrightarrow P_{n+1}\longrightarrow P_{n}\longrightarrow P_{n-1}\longrightarrow\cdots$$
is said to be \emph{totally acyclic}, if it remains acyclic after applying $\mathrm{Hom}_{A}(-, P)$ for any projective $A$-module $P$. A module is \emph{Gorenstein projective} \cite{EJ00} if it is isomorphic to a syzygy of such a totally acyclic complex. For finitely generated Gorenstein projective modules, there are different terminologies in the literature, such as modules of $G$-dimension zero, maximal Cohen-Macaulay modules and totally reflexive modules; see for example \cite{AB69, Buc87, Chr00, EJ00}.
For any module $M$, the \emph{Gorenstein projective dimension} is defined in the standard way by using resolutions by Gorenstein projective modules, that is,
$\mathrm{Gpd}_AM\leq n$ if and only if there is an exact sequence $0\rightarrow M_{n}\rightarrow M_{n-1} \rightarrow \cdots \rightarrow M_{0}\rightarrow M\rightarrow 0$ with each $M_{i}$ being Gorenstein projective.
For any pair $(G, R)$ of a group $G$ and a coefficient ring $R$, the \emph{(left) Gorenstein cohomological dimension} of $G$ over $R$, denoted by $\mathrm{Gcd}_{R}G$, is defined to be the Gorenstein projective dimension of the module $R$ over the group ring $RG$ with trivial group action.
Let $A$ be a ring. The {\em (left) Gorenstein global dimension} of $A$ is defined as $${\rm G.gldim}(A)={\rm sup}\{{\rm Gpd}_A(M)\; |\; M\in {\rm Mod}(A)\}.$$
It might also be called the (left) global Gorenstein projective dimension. The preference of the terminology is justified by \cite[Theorem 1.1]{BM10}, which shows that ${\rm G.gldim}(A)$ equals the supremum of the Gorenstein injective dimensions of all $A$-modules.
According to a classical result established by Serre, and by Auslander and Buchsbaum, a commutative noetherian local ring $A$ is {\em regular} if and only if its global dimension ${\rm gldim}(A)$ is finite. Analogously, a ring with finite left Gorenstein global dimension is said to be {\em left Gorenstein regular}. It is also called {\em left Gorenstein} in \cite[Section 6]{Bel00}, which is equivalent to the fact that the category of left modules of the ring is a {\em Gorenstein category} in the sense of \cite[Definition 2.18]{EEGR} or \cite[Section 4]{Bel00}.
For any ring $A$, it is clear that ${\rm G.gldim}(A)\leq {\rm gldim}(A)$. Then the rings of finite global dimension are Gorenstein regular. The converse is not true in general. For example, both
$\mathbb{Z}_4 = \mathbb{Z}/4\mathbb{Z}$ and the truncated polynomial ring $k[x]/(x^n)$ ($n\geq 2$) over a field $k$ are rings with Gorenstein global dimension zero, while their global dimensions are infinite. Recall that a ring $A$ is Iwanaga-Gorenstein, provided it is noetherian and has finite self-injective dimension on both sides. It follows from \cite[Theorem 10.2.14]{EJ00} that every Iwanaga-Gorenstein ring is Gorenstein regular. But, there are many non-trivial examples of non-noetherian rings which are Gorenstein regular (by a trivial example we mean a ring with finite global dimension); see for example \cite{EEI08}.
In this section, we intend to study the finiteness of Gorenstein cohomological dimension of groups. We begin with the following observation.
\begin{lemma}\label{lem:Gp}
Let $R$ be a left Gorenstein regular ring and $G$ a group. Then any Gorenstein projective $RG$-module is also a Gorenstein projective $R$-module.
\end{lemma}
\begin{proof}
Let $M$ be a Gorenstein projective $RG$-module. Then there is a totally acyclic complex of projective $RG$-modules $\cdots\rightarrow P_{1}\rightarrow P_{0}\rightarrow P_{-1}\rightarrow\cdots$ such that
$M\cong \mathrm{Ker}(P_0\rightarrow P_{-1})$. Since any projective $RG$-module is also $R$-projective, by restricting this totally acyclic complex, we get an acyclic complex of projective $R$-modules. Noting that $R$ is left Gorenstein regular, every projective $R$-module has finite injective dimension, and consequently, every acyclic complex of projective $R$-modules is totally acyclic. Hence, $M$ is also a Gorenstein projective $R$-module.
\end{proof}
\begin{lemma}\label{lem:Gp+1}
Let $G$ be a group, $R$ a commutative Gorenstein regular ring, and $M$ be an $RG$-module. If $M$ is Gorenstein projective as an $R$-module, then for any projective $RG$-module $P$, the induced $RG$-module $P\otimes_{R}M$ is Gorenstein projective.
\end{lemma}
\begin{proof}
For any subgroup $H$ of $G$, recall that there exist simultaneously an induction functor $\mathrm{Ind}_H^G = RG\otimes_{RH}-$ and a coinduction functor $\mathrm{Coind}_H^G = \mathrm{Hom}_{RH}(RG, -)$ from the category of $RH$-modules to the category of $RG$-modules. In particular, we consider the subgroup $H=\{1\}$ formed by the identity element of $G$. For any projective $RG$-module $P$, there is an isomorphism of $RG$-modules
$$P\otimes_R{\rm Ind}_H^GM \cong {\rm Ind}_H^G(P\otimes_RM),$$
where $G$ acts diagonally on the left tensor product; see for example \cite[Section III 5]{Bro82}. Note that the diagonal $RG$-module structure of $P\otimes_R{\rm Ind}_H^GM$ coincides with the one induced by $_{RG}P\otimes_{R}-$.
Since $R$ is Gorenstein regular and $M$ is a Gorenstein projective $R$-module, we infer that $P\otimes_{R}M$ is still a Gorenstein projective $R$-module. Hence, it follows from \cite[Lemma 2.6]{ET18} that
${\rm Ind}_H^G(P\otimes_RM)$ is a Gorenstein projective $RG$-module, and moreover, we infer that $P\otimes_R{\rm Ind}_H^GM$ is a Gorenstein projective $RG$-module. As $R$-modules, $M$ is a direct summand of ${\rm Ind}_H^GM$. The induced $RG$-module $P\otimes_RM$ is then a direct summand of the induced $RG$-module $P\otimes_R{\rm Ind}_H^GM$. Hence, $P\otimes_{R}M$ is a Gorenstein projective $RG$-module, as expected.
\end{proof}
\begin{lemma}\label{lem:SplitMonic}
Let $G$ be a group, $R$ be a left Gorenstein regular ring. If ${\rm Gcd}_{R}G$ is finite, then there exists an $R$-split $RG$-exact sequence $0\rightarrow R\rightarrow \Lambda$, where $\Lambda$ is an $R$-projective $RG$-module such that ${\rm Gcd}_RG = {\rm pd}_{RG}\Lambda$.
\end{lemma}
\begin{proof}
Let ${\rm Gcd}_{R}G = {\rm Gpd}_{RG}R = n$. It follows from \cite[Theorem 2.10]{Hol04} that there exists an exact sequence $0\rightarrow K\rightarrow M\rightarrow R\rightarrow 0$, where $M$ is a Gorenstein projective $RG$-module, and ${\rm pd}_{RG}K = n-1$. For $M$, there is an exact sequence of $RG$-modules
$0\rightarrow M\rightarrow P\rightarrow L\rightarrow 0$, where $L$ is Gorenstein projective and $P$ is projective. We consider the following pushout of $M\rightarrow R$ and $M\rightarrow P$:
$$\xymatrix@C=20pt@R=20pt{ & & 0\ar[d] & 0\ar[d] \\
0 \ar[r] &K \ar@{=}[d] \ar[r] & M \ar[d]\ar[r] &R \ar[d]\ar[r] &0 \\
0 \ar[r] &K \ar[r] & P \ar[r] \ar[d] &\Lambda \ar[r]\ar[d] & 0\\
& & L \ar[d] \ar@{=}[r] & L\ar[d]\\
& & 0 & 0
}$$
From the middle row we infer that ${\rm pd}_{RG}\Lambda = {\rm pd}_{RG}K + 1 = n$. It follows from Lemma \ref{lem:Gp} that $L$ is also a Gorenstein projective $R$-module, and then the sequence
$0\rightarrow R\rightarrow \Lambda\rightarrow L\rightarrow 0$ is $R$-split. Moreover, as an $R$-module, $\Lambda\cong L\oplus R$ is Gorenstein projective. By \cite[Proposition 10.2.3]{EJ00}, which says that the projective dimension of any Gorenstein projective module is either zero or infinity, we infer from ${\rm pd}_{R}\Lambda \leq {\rm pd}_{RG}\Lambda = n$ that $\Lambda$ is a projective $R$-module. This completes the proof.
\end{proof}
The above $R$-projective $RG$-module $\Lambda$ is called a {\em characteristic module} for $G$ over $R$; see \cite[Definition 1.1]{ET22}. This notion provides a useful tool for characterizing the finiteness of ${\rm Gcd}_RG$. Note that the above lemma is analogous to \cite[Corollary 1.3]{ET18}; however, the coefficient ring in \cite{ET18} is assumed to be of finite weak global dimension. It is well known that for any noetherian ring, its weak global dimension agrees with the global dimension. In this sense, we may consider Gorenstein regular rings as a generalization of rings of finite weak global dimension.
\begin{lemma}\label{lem:RG-Gp}
Let $G$ be a group, $R$ be a commutative Gorenstein regular ring. If there exists an $R$-split monomorphism of $RG$-modules $\iota: R\rightarrow \Lambda$, where $\Lambda$ is $R$-projective with $\mathrm{pd}_{RG}\Lambda < \infty$, then for any $RG$-module $M$, one has
$$\mathrm{Gpd}_{RG}M \leq \mathrm{pd}_{RG}\Lambda + {\rm Gpd}_RM.$$
\end{lemma}
\begin{proof}
Let $\mathrm{pd}_{RG}\Lambda = n$. There exists an exact sequence
$$0\longrightarrow P_n\longrightarrow \cdots \longrightarrow P_1\longrightarrow P_0\longrightarrow \Lambda\longrightarrow 0,$$ where each $P_i$ is a projective $RG$-module. Since $\Lambda$ is a projective $R$-module and each $P_i$ restricts to be a projective $R$-module, the sequence is $R$-split.
Since the inequality to be proved is clear if ${\rm Gpd}_RM$ is infinity, it suffices to assume that ${\rm Gpd}_RM = m$ is finite. In this case, we may prove the result by induction on $m$. Let $m=0$, that is, $M$ is a Gorenstein projective $R$-module. By applying $-\otimes_{R}M$ to the above $R$-split sequence, we get an exact sequence of $RG$-modules
$$0\longrightarrow P_n\otimes_{R}M\longrightarrow \cdots\longrightarrow P_1\otimes_{R}M \longrightarrow P_0\otimes_{R}M \longrightarrow \Lambda\otimes_{R}M\longrightarrow 0,$$
where the $G$-actions are induced by $_{RG}P_i\otimes_R-$ and $_{RG}\Lambda\otimes_R-$.
By Lemma \ref{lem:Gp+1} we infer that the induced $RG$-modules $P_{i}\otimes_RM$ are Gorenstein projective, and then the above sequence yields that ${\rm Gpd}_{RG}(\Lambda\otimes_{R}M)\leq n$.
Since $R$ is assumed to be commutative, by using the anti-automorphism $g\rightarrow g^{-1}$ of $G$, we can regard any left $RG$-module $X$ as a right $RG$-module by setting $gx = xg^{-1}$ for any $g\in G$ and $x\in X$. Note that for any $g\in G$ and any $\lambda\otimes m\in \Lambda\otimes_{R}M$, one has $g\lambda\otimes m = g(\lambda\otimes m)=(\lambda\otimes m)g^{-1} = \lambda\otimes gm$. Similarly, for any $m\otimes\lambda\in M\otimes_R \Lambda$, $gm\otimes \lambda= m\otimes g\lambda$. It is direct to check that the map $\varphi: M\otimes_R\Lambda \rightarrow \Lambda\otimes_R M$ given by $m\otimes\lambda\mapsto \lambda\otimes m$ is an isomorphism of the induced left $RG$-modules. Hence,
${\rm Gpd}_{RG}(M\otimes_{R}\Lambda) = {\rm Gpd}_{RG}(\Lambda\otimes_{R}M)\leq n$.
Since the monomorphism $\iota: R\rightarrow \Lambda$ is $R$-split, for the $RG$-module $M$, by applying $M\otimes_{R}-$ we infer that $M = M\otimes_{R}R$ is a direct summand of the induced $RG$-module $M\otimes_R\Lambda$. Hence, we have ${\rm Gpd}_{RG}M\leq n$.
We now assume that $m > 0$ and consider a short exact sequence of $RG$-modules $0\rightarrow K\rightarrow P\rightarrow M\rightarrow 0$, where $P$ is projective. Since $P$ is restricted to be a projective $R$-module, as an $R$-module we have ${\rm Gpd}_{R}K = m-1$. Invoking the induction hypothesis, we may conclude that ${\rm Gpd}_{RG}K \leq n + (m-1)$, and hence ${\rm Gpd}_{RG}M\leq n + m$.
\end{proof}
The following generalizes \cite[Theorem 6.4]{ET14} and \cite[Theorem 1.7]{ET18}, where the coefficient ring is assumed to be a commutative (noetherian) ring with finite global dimension.
Also, we recover \cite[Theorem 2.7]{BDT09} by specifying $R = \mathbb{Z}$.
\begin{theorem}\label{thm:fGcd}
Let $G$ be a group, $R$ a commutative Gorenstein regular ring. The following are equivalent:
\begin{enumerate}
\item ${\rm Gcd}_{R}G$ is finite.
\item There exists an $R$-split $RG$-exact sequence $0\rightarrow R\rightarrow \Lambda$, where $\Lambda$ is an $R$-projective $RG$-module of finite projective dimension.
\item Any $RG$-module has finite Gorenstein projective dimension.
\item $RG$ is a Gorenstein regular ring.
\end{enumerate}
\end{theorem}
\begin{proof}
The implication (1)$\Rightarrow$(2) follows from Lemma \ref{lem:SplitMonic}, and the implication (2)$\Rightarrow$(3) follows from Lemma \ref{lem:RG-Gp}. The implications (3)$\Rightarrow$(4) and (4)$\Rightarrow$(1) are obvious.
\end{proof}
Furthermore, for any commutative coefficient ring which is not necessarily Gorenstein regular, we have finiteness of ${\rm Gcd}_RG$ for some groups.
\begin{proposition}\label{prop:H1Fgroup}
Let $G$ be a group which admits a finite dimensional contractible $G$-CW-complex with finite stabilizers. Then, for any commutative ring $R$, we have ${\rm Gcd}_RG < \infty$.
\end{proposition}
\begin{proof}
For the group $G$ mentioned above, there is an exact sequence of $\mathbb{Z}G$-modules
$$0\longrightarrow C_n\longrightarrow \cdots\longrightarrow C_1\longrightarrow C_0\longrightarrow \mathbb{Z}\longrightarrow 0,$$
where each $C_i$ is a direct sum of permutation modules of the form $\mathbb{Z}[G/H]$ with $H$ a finite subgroup of $G$. By $G/H$, we denote the collection of cosets $gH$, where $G$ acts on $G/H$ by left translation.
It follows from \cite[Lemma 2.21]{ABS09} that $C_i$ are Gorenstein projective $\mathbb{Z}G$-modules. Then ${\rm Gcd}_\mathbb{Z}G = {\rm Gpd}_{\mathbb{Z}G}\mathbb{Z}$ is finite. Hence, it follows from \cite[Proposition 2.1]{ET18} (see also Corollary \ref{cor:GcdZG}) that
${\rm Gcd}_RG \leq {\rm Gcd}_\mathbb{Z}G < \infty$.
\end{proof}
\begin{example}\label{eg:fGcd}
It is well known that for any finite group $G$ and any coefficient ring $R$, ${\rm Gcd}_{R}G = 0$ is finite; see \cite[Proposition 2.19]{ABS09} for $R = \mathbb{Z}$, \cite[Corollary 2.3]{ET18} for any commutative ring $R$, and Theorem \ref{thm:fgroup} below for any associative ring $R$.
It follows from \cite{Sta68, Swan69} that $G$ is a non-trivial free group if and only if ${\rm Gcd}_{\mathbb{Z}}G = {\rm cd}_{\mathbb{Z}}G = 1$. In this case, we have immediately from \cite[Theorem 3.6]{BDT09} that $G$ acts on a tree with finite vertex stabilizers.
It is a known fact that all groups $G$ of finite virtual cohomological dimension, including all polycyclic-by-finite and all arithmetic groups, admit a finite dimensional contractible $G$-CW-complex with finite stabilizers;
see \cite{CK96}. For these groups, the corresponding Gorenstein cohomological dimensions are finite over any commutative ring of coefficients.
\end{example}
\section{Gorenstein cohomological dimension under changes of groups}
In this section, we intend to compare the Gorenstein cohomological dimensions of different groups.
Let $\mathcal{S}$ be a collection of pairs $(G, R)$ of groups and coefficient rings. We define an order on $\mathcal{S}$ as follows. One might consider ``Gcd'' as an assignment of invariants for such pairs.
\begin{definition}\label{def:order}
Let $G$ and $H$ be groups, $R$ and $S$ be coefficient rings. For the pairs of groups and rings, we have $(H, S)\leq (G, R)$ if and only if $H\leq G$ and $S\geq R$, that is, $H$ is a subgroup of $G$ and $S$ is an extension ring of $R$.
\end{definition}
Formally, if we compare the pairs with the irreducible fractions, and denote $(G, R)$ by $\dfrac{G}{R}$, then $\dfrac{H}{S} \leq \dfrac{G}{R}$ seems to be reasonable when $H\leq G$ and $S\geq R$.
The following implies that the assignment ``Gcd'' preserves the order for pairs of groups and commutative Gorenstein regular rings, if the coefficient ring is fixed. The result recovers \cite[Theorem 2.8 (1)]{BDT09} by specifically taking $R = \mathbb{Z}$. We include a proof for convenience, which is similar to that of \cite[Proposition 2.4]{ET18}, while the ring is assumed to be of finite weak global dimension therein.
\begin{proposition}\label{prop:GroupOrd}
Let $R$ be a commutative Gorenstein regular ring, $G$ be a group. For any subgroup $H$ of $G$ $($i.e. $(H, R)\leq (G, R))$, we have $\mathrm{Gcd}_{R}H\leq \mathrm{Gcd}_{R}G$.
\end{proposition}
\begin{proof}
The inequality is obvious if $\mathrm{Gcd}_{R}G$ is infinity, so it suffices to assume that $\mathrm{Gcd}_{R}G = n$ is finite. In this case, it follows from Lemma \ref{lem:SplitMonic} that there exists an $R$-split monomorphism of $RG$-modules $\iota: R\rightarrow \Lambda$, where $\Lambda$ is $R$-projective and $\mathrm{pd}_{RG}\Lambda = n$. For any subgroup $H$ of $G$, there is an extension of group rings $RH\rightarrow RG$, which makes every $RG$-module to be an $RH$-module. Then, $\iota: R\rightarrow \Lambda$ can also be considered as an $R$-split monomorphism of $RH$-modules. Moreover, every projective $RG$-module is also projective as an $RH$-module, then $\mathrm{pd}_{RH}\Lambda \leq \mathrm{pd}_{RG}\Lambda = n$. For trivial $RH$-module $R$, we infer from Lemma \ref{lem:RG-Gp} that
${\rm Gcd}_{R}H = \mathrm{Gpd}_{RH}R \leq \mathrm{pd}_{RH}\Lambda \leq n$. This completes the proof.
\end{proof}
By Serre's Theorem, there is an equality between cohomological dimensions of a torsion-free group and subgroups of finite index; see details in \cite[Theorem 9.2]{Swan69} or \cite[Theorem VIII 3.1]{Bro82}. In this sense, the following result might be regarded as a Gorenstein version of Serre's Theorem. We remark that by specifying the ring to be $\mathbb{Z}$, the result recovers \cite[Proposition 2.18]{ABS09}, while our proof is straightforward and quite different from that of \cite{ABS09}. Note that the following equality was also proved in \cite[Corollary 2.10]{ET18} under additional assumptions that the coefficient ring is of finite weak global dimension and $H$ is a normal subgroup of $G$.
\begin{theorem}\label{thm:H2Gequ}
Let $G$ be a group, $R$ be a ring of coefficients. For any subgroup $H$ of $G$ with finite index, there is an equality $\mathrm{Gcd}_{R}H = \mathrm{Gcd}_{R}G$.
\end{theorem}
\begin{proof}
By a standard argument, we infer that any Gorenstein projective $RG$-module $M$ can be restricted to be a Gorenstein projective $RH$-module. Let $\mathbf{P} = \cdots\rightarrow P_1\rightarrow P_0\rightarrow P_{-1}\rightarrow\cdots$ be a totally acyclic complex of projective $RG$-modules such that $M \cong \mathrm{Ker}(P_0\rightarrow P_{-1})$. Then, $\mathbf{P}$ is still an acyclic complex of projective $RH$-modules. Since the index $|G:H|$ is finite, there is an equivalence of functors $\mathrm{Ind}_H^G
\simeq \mathrm{Coind}_H^G$; see for example \cite[Proposition III 5.9]{Bro82}. For any projective $RH$-module $Q$, there is an isomorphism ${\rm Hom}_{RH}(\mathbf{P}, Q)\cong {\rm Hom}_{RG}(\mathbf{P}, \mathrm{Ind}_H^GQ)$, and then we infer that $\mathbf{P}$ remains acyclic after applying ${\rm Hom}_{RH}(-, Q)$. Consequently, $M$ is restricted to be a Gorenstein projective $RH$-module. This fact leads to the inequality ${\rm Gpd}_{RH}N\leq {\rm Gpd}_{RG}N$ for any $RG$-module $N$. In particular, for the trivial $RG$-module $R$, we have $\mathrm{Gcd}_{R}H \leq \mathrm{Gcd}_{R}G$. Since the coefficient ring $R$ is not necessarily commutative Gorenstein regular, we cannot prove the existence of the $R$-split monomorphism of $RG$-modules $\iota: R\rightarrow \Lambda$ in this case, and then the inequality is not from Proposition \ref{prop:GroupOrd}.
It remains to prove ${\rm Gcd}_{R}G\leq {\rm Gcd}_{R}H$. Since the inequality obviously holds if ${\rm Gcd}_{R}H = \infty$, it suffices to assume ${\rm Gcd}_{R}H = n$ is finite. Take an exact sequence $0\rightarrow K\rightarrow P_{n-1}\rightarrow \cdots\rightarrow P_0\rightarrow R\rightarrow 0$ of $RG$-modules with each $P_i$ projective.
Since $P_i$ are restricted to be projective $RH$-modules, it follows from ${\rm Gcd}_{R}H = n$ that as an $RH$-module, $K$ is Gorenstein projective. For the required inequality, it suffices to show that $K$ is a Gorenstein projective $RG$-module.
Let $P$ be any projective $RG$-module, which is also restricted to be a projective $RH$-module. Since $P$ is a direct summand of ${\rm Ind}_H^GP$, for any $i>0$, we infer from the isomorphism
$${\rm Ext}^i_{RG}(K, {\rm Ind}_H^GP)\cong {\rm Ext}^i_{RH}(K, P) = 0$$
that ${\rm Ext}^i_{RG}(K, P) = 0$.
Let $\alpha: K\rightarrow {\rm Ind}_H^GK$ be the composition of the $RG$-map $K\rightarrow {\rm Coind}_H^GK = {\rm Hom}_{RH}(RG, K)$ given by $k\rightarrow (rg\rightarrow rgk)$, followed by the isomorphism ${\rm Coind}_H^GK\rightarrow {\rm Ind}_H^GK$. Then $\alpha$ is an $RG$-monic and is split as an $RH$-map. Let $\beta:{\rm Ind}_H^GK\rightarrow K $ be the $RH$-map such that $\beta\alpha = {\rm id}_K$.
It follows from \cite[Lemma 2.6]{ET18} that for the Gorenstein projective $RH$-module $K$, the $RG$-module ${\rm Ind}_H^GK\cong {\rm Coind}_H^GK$ is Gorenstein projective. Then, there is an exact sequence of $RG$-modules $0\rightarrow{\rm Ind}_H^GK
\stackrel{f}{\rightarrow} P_0\rightarrow L\rightarrow 0$, where $P_0$ is projective and $L$ is Gorenstein projective. Hence, we obtain an exact sequence of $RG$-modules $0\rightarrow K
\stackrel{d}{\rightarrow} P_0\rightarrow L_0\rightarrow 0$, where $d = f\alpha$ and $L_0 = {\rm Coker}d$.
Let $Q$ be any projective $RH$-module, and $\gamma: K\rightarrow Q$ be any $RH$-map. Consider the following diagram
$$\xymatrix@C=20pt{
0 \ar[r] &K \ar[dd]_{\gamma} \ar[rd]^{\alpha} \ar[rr]^{d} & &P_0 \ar@{-->}@/^2pc/[lldd]^{\exists \delta} \ar[r] & L_0 \ar[r] &0\\
& & {\rm Ind}_H^G K \ar[ru]^{f} \ar[ld]_{\gamma\beta}\\
&Q
}$$
We have ${\rm Ext}^1_{RH}(L, Q)=0$ since $L = {\rm Coker}f$ is a Gorenstein projective $RH$-module, and then for the $RH$-map $\gamma\beta$, there exists a map $\delta:P_0\rightarrow Q$ such that $\gamma\beta = \delta f$. Moreover, $\delta d = \delta f\alpha = \gamma\beta\alpha = \gamma$. This implies that $d^*: \mathrm{Hom}_{RH}(P_{0},Q)\rightarrow\mathrm{Hom}_{RH}(K, Q)$ is epic. Hence, we infer from the exact sequence $$\mathrm{Hom}_{RH}(P_{0},Q)\longrightarrow\mathrm{Hom}_{RH}(K, Q)\longrightarrow\mathrm{Ext}_{RH}^{1}(L_{0},Q)\longrightarrow 0$$
that $\mathrm{Ext}_{RH}^{1}(L_{0},Q)=0$. Consider $0\rightarrow K
\stackrel{d}{\rightarrow} P_0\rightarrow L_0\rightarrow 0$ as an exact sequence of $RH$-modules, where $K$ is Gorenstein projective and $P_{0}$ is projective. It follows immediately from
\cite[Corollary 2.11]{Hol04} that $L_{0}$ is a Gorenstein projective $RH$-module. Hence, for any projective $RG$-module $P$, we infer that ${\rm Ext}^1_{RG}(L_0, P) = 0$ from ${\rm Ext}^1_{RG}(L_0, {\rm Ind}_H^GP)\cong {\rm Ext}_{RH}^1(L_0, P) = 0$. This implies that the sequence $0\rightarrow K\rightarrow P_0\rightarrow L_0\rightarrow 0$ remains exact after applying ${\rm Hom}_{RG}(-, P)$.
Then, repeating the above argument for $L_0$, we obtain inductively an acyclic complex
$0\rightarrow K\rightarrow P_0\rightarrow P_{-1}\rightarrow P_{-2}\rightarrow \cdots$
with each $P_i$ a projective $RG$-module, which remains acyclic after applying ${\rm Hom}_{RG}(-, P)$ for any projective $RG$-module $P$. Recall that ${\rm Ext}^i_{RG}(K, P) = 0$. Consequently, we infer from \cite[Proposition 2.3]{Hol04} that $K$ is a Gorenstein projective $RG$-module, as desired.
\end{proof}
\begin{remark}\label{rem:FroF}
If $H$ is a subgroup of $G$ with finite index, then both $({\rm Ind}_H^G, {\rm Res}_H^G)$ and $({\rm Res}_H^G, {\rm Ind}_H^G)$ are adjoint pairs of functors, where ${\rm Res}_H^G: {\rm Mod}(RG)\rightarrow {\rm Mod}(RH)$ is the restriction functor. In this case, $RH\rightarrow RG$ is a Frobenius extension of rings, and $({\rm Ind}_H^G, {\rm Res}_H^G)$ is called a strongly adjoint pair by Morita \cite{Mor65}, or a Frobenius pair by \cite[Definition 1.1]{CIGTN99}. The Gorenstein homological properties under Frobenius extension of rings and Frobenius pairs of functors were studied in \cite{CR21, Ren18, Zhao19}.
\end{remark}
It is well known that for any finite group $G$, the integral group ring $\mathbb{Z}G$ is Gorenstein regular; precisely, it is a ring with Gorenstein global dimension 1. We have the following known result by a slightly different argument; compare with \cite[Theorem 3.3]{Ren18} and \cite[Example 3.8]{CR21}.
\begin{corollary}\label{cor:fgroup}
Let $G$ be a finite group. For any ring $R$, we have ${\rm G.gldim}(RG) = {\rm G.gldim}(R)$. Consequently, the group ring $RG$ is left Gorenstein regular if and only if so is $R$.
\end{corollary}
\begin{proof}
We use the argument of the above theorem in the special case $H = \{1\}$. For any $RG$-module $M$, we have ${\rm Gpd}_{RG}M = {\rm Gpd}_RM$, and then we infer that ${\rm G.gldim}(RG) \leq {\rm G.gldim}(R)$.
For any $R$-module $N$, we have ${\rm Gpd}_{R}{\rm Ind}_H^GN = {\rm Gpd}_{RG}{\rm Ind}_H^GN$. As $R$-modules, $N$ is a direct summand of ${\rm Ind}_H^GN$, and then ${\rm Gpd}_RN \leq {\rm Gpd}_{R}{\rm Ind}_H^GN$. This implies ${\rm Gpd}_RN \leq {\rm Gpd}_{RG}{\rm Ind}_H^GN$, and then ${\rm G.gldim}(R) \leq {\rm G.gldim}(RG)$.
Consequently, we get the desired equality ${\rm G.gldim}(RG) = {\rm G.gldim}(R)$, and we are done with the proof.
\end{proof}
We can give a homological characterization for finite groups as follows, which strengthens and extends \cite[Proposition 2.19]{ABS09}, the main theorem of \cite{DT08}, as well as \cite[Corollary 2.3]{ET18}.
Let $A$ be a ring. The supremum of the projective lengths (dimensions) of injective $A$-modules $\mathrm{spli}(A)$, and the supremum of the injective lengths (dimensions) of projective $A$-modules $\mathrm{silp}(A)$, are two invariants introduced by Gedrich and Gruenberg \cite{GG87} in connection with the existence of complete cohomological functors in the category of $A$-modules. The finitistic dimension of $A$, denoted by $\mathrm{fin.dim}(A)$, is defined as the supremum of the projective dimensions of those modules that have finite projective dimension.
\begin{theorem}\label{thm:fgroup}
Let $G$ be a group. Then the following are equivalent:
\begin{enumerate}
\item $G$ is a finite group.
\item For any coefficient ring $R$, ${\rm Gcd}_RG = 0$.
\item ${\rm Gcd}_{\mathbb{Z}}G = 0$.
\item For any left Gorenstein regular ring $R$, we have
$${\rm spli}(RG) = {\rm silp}(RG) = {\rm silp}(R) = {\rm spli}(R) = {\rm fin.dim}(R).$$
\item For any left hereditary ring $R$, ${\rm spli}(RG) = {\rm silp}(RG) = 1$.
\item ${\rm spli}(\mathbb{Z}G) = {\rm silp}(\mathbb{Z}G) = 1$.
\end{enumerate}
\end{theorem}
\begin{proof}
(1)$\Longrightarrow$(2) We consider the subgroup $H = \{1\}$ of $G$. Then ${\rm Gcd}_{R}G = {\rm Gcd}_{R}H = 0$ follows immediately from Theorem \ref{thm:H2Gequ}.
(2)$\Longrightarrow$(3) is trivial.
(3)$\Longrightarrow$(1) Consider the subgroup $H = \{1\}$ of $G$. Conversely, let us assume that the group $G$ is not finite. We claim that for any $\mathbb{Z}$-module $M$, the $G$-invariant submodule $(\mathrm{Ind}_H^GM)^G$ of the $\mathbb{Z}G$-module $\mathrm{Ind}_H^GM = \mathbb{Z}G\otimes_{\mathbb{Z}} M$ equals to 0. By \cite[Proposition III 5.1]{Bro82}, $\mathrm{Ind}_H^GM$ is the direct sum of the transform $gM$, where $g$ is an element of $G$ other than the identity. Consider an arbitrary element $x =
\sum g_{i}m_{i}$ with only finitely many $m_i\neq 0$. Without loss of generality, let $m_1\neq 0$. We may take the summand entry $g_1m_1$ of $x$ and a summand $g'M$ with $g'\neq g_i$ for those $g_im_i\neq 0$. Then, there exists $g''\in G$ such that $g''(g_1m_1)=g'm_1$ because the $G$-action is transitive on the summands. Thus $x$ is not fixed by $G$, and so $(\mathrm{Ind}_H^GM)^G = 0$, as claimed.
Since ${\rm Gcd}_{\mathbb{Z}}G = 0$, it follows from Lemma \ref{lem:SplitMonic} that there exists a $\mathbb{Z}$-split $\mathbb{Z}G$-monomorphism $\mathbb{Z}\rightarrow \Lambda$, where $\Lambda$ is $\mathbb{Z}$-free and $\mathbb{Z}G$-projective. Then, we get $(\mathrm{Ind}_H^G\Lambda)^G = \mathrm{Hom}_{\mathbb{Z}G}(\mathbb{Z}, \mathbb{Z}G\otimes_{\mathbb{Z}} \Lambda) \neq 0$, which conflicts with the above assertion $(\mathrm{Ind}_H^GM)^G = 0$. Hence, the contradiction forces $G$ to be a finite group.
(1)$\Longrightarrow$(4) By Corollary \ref{cor:fgroup}, we have ${\rm G.gldim}(RG) = {\rm G.gldim}(R)$. Moreover, for left Gorenstein regular rings $R$ and $RG$, the desired equalities hold immediately by
\cite[Theorem 4.1]{Emm12}.
(4)$\Longrightarrow$(5) It is well known that ${\rm spli}(R) = {\rm silp}(R) = {\rm gldim}(R) = 1$ for any left hereditary ring $R$. The assertion is clear.
(5)$\Longrightarrow$(6) is trivial, and (6)$\Longrightarrow$(1) follows by \cite[Theorem 3]{DT08}.
\end{proof}
Let $G$ be a group, $H$ be a finite subgroup of $G$, and denote by $N_{G}(H)$ the normalizer of $H$ in $G$. The Weyl group of $H$ is $W = N_{G}(H)/H$. Note that modules over the Weyl group $W = N_{G}(H)/H$ are exactly the ones over the group $G$ on which the subgroup $H$ acts trivially. That is, for any commutative coefficient ring $R$, the class of $RW$-modules contains precisely all those $RG$-modules $M$ with $M^H = M$.
We conclude this section by stating a result similar to \cite[Proposition 2.5]{ET18}. Due to Lemma \ref{lem:SplitMonic}, we can replace the rings of finite weak global dimension therein, to be Gorenstein regular rings. The argument follows verbatim from that of \cite{ET18}, so it is omitted. Note that we reobtain \cite[Theorem 2.8 (3)]{BDT09} by specifying $R$ to be the ring of integers $\mathbb{Z}$.
\begin{proposition}\label{prop:Weyl}
Let $R$ be a commutative Gorenstein regular ring, $G$ be a group. For any finite subgroup $H$, we have ${\rm Gcd}_{R}(N_{G}(H)/H)\leq {\rm Gcd}_{R}G$.
\end{proposition}
\section{Extension of coefficient rings}
First, we will show that the assignment ``Gcd'', on the other hand, preserves the order of pairs in Definition \ref{def:order} when the group is fixed and the rings are changed. The following result strengthens and extends \cite[Proposition 2.1]{ET18} and \cite[Theorem 3.2]{Tal14}.
\begin{proposition}\label{prop:RingOrd}
Let $R$ and $S$ be commutative rings, $G$ be any group. If $R$ is Gorenstein regular and $(G, S) \leq (G, R)$ (i.e. $S$ is an extension ring of $R$), then
$\mathrm{Gcd}_{S}G\leq \mathrm{Gcd}_{R}G$.
\end{proposition}
\begin{proof}
Since the inequality obviously holds if $\mathrm{Gcd}_{R}G = \infty$, it only suffices to consider the case where $\mathrm{Gcd}_{R}G = n$ is finite. In this case, it follows immediately from Lemma \ref{lem:SplitMonic} that there exists an $R$-split monomorphism of $RG$-modules $\iota: R\rightarrow \Lambda$, where $\Lambda$ is $R$-projective and $\mathrm{pd}_{RG}\Lambda = n$. Note that $S$ is an extension ring of $R$; then $S$ is both a trivial $SG$-module and an $R$-module. By applying $S\otimes_{R}-$, we have an $S$-split monomorphism of $SG$-modules $S\otimes_{R}\iota: S\rightarrow S\otimes_{R}\Lambda$, where $S\otimes_{R}\Lambda$ is $S$-projective since $\Lambda$ is $R$-projective.
Now we assume that $0\rightarrow P_n\rightarrow \cdots\rightarrow P_1\rightarrow P_0\rightarrow \Lambda\rightarrow 0$ is an $RG$-projective resolution of $\Lambda$. Note that the sequence is $R$-split. Then, by applying $S\otimes_{R}-$, we have an exact sequence of $SG$-modules
$$0\longrightarrow S\otimes_{R}P_n\longrightarrow \cdots\longrightarrow S\otimes_{R}P_1\longrightarrow S\otimes_{R}P_0\longrightarrow S\otimes_{R}\Lambda\longrightarrow 0.$$
Since $S\otimes_{R}P_i$ are projective $SG$-modules, we have $\mathrm{pd}_{SG}(S\otimes_{R}\Lambda)\leq n$. For trivial $SG$-module $S$, it follows immediately from \cite[Proposition 1.4]{ET18} that
$\mathrm{Gpd}_{SG}S \leq \mathrm{pd}_{SG}(S\otimes_{R}\Lambda)$. Then, $\mathrm{Gcd}_{S}G \leq \mathrm{Gcd}_{R}G$ holds as expected.
\end{proof}
Specifically, considering the pairs $(G, R) \leq (G, \mathbb{Z})$ and $(G, \mathbb{Q}) \leq (G, \mathbb{Z})$, we reobtain
\cite[Proposition 2.1]{ET18} and \cite[Theorem 3.2]{Tal14} respectively.
\begin{corollary}\label{cor:GcdZG}
Let $G$ be any group, $R$ be any commutative ring. Then $\mathrm{Gcd}_{R}G\leq \mathrm{Gcd}_{\mathbb{Z}}G$; in particular, $\mathrm{Gcd}_{\mathbb{Q}}G \leq \mathrm{Gcd}_{\mathbb{Z}}G$.
\end{corollary}
We are now in a position to show that ``Gcd'' preserves the order of pairs of groups and rings.
\begin{corollary}\label{cor:KpOrd}
Let $G$ and $H$ be groups, $R$ and $S$ be commutative Gorenstein regular rings. If $(H, S) \leq (G, R)$, then we have
$\mathrm{Gcd}_{S}H\leq \mathrm{Gcd}_{R}G$.
\end{corollary}
\begin{proof}
If $(H, S) \leq (G, R)$, then we have both $(H, S) \leq (G, S)$ and $(G, S) \leq (G, R)$. By Proposition \ref{prop:GroupOrd}, it follows that
${\rm Gcd}_{S}H \leq {\rm Gcd}_{S}G$. By Proposition \ref{prop:RingOrd}, $\mathrm{Gcd}_{S}G\leq \mathrm{Gcd}_{R}G$ holds. Then, we deduce the desired inequality
$\mathrm{Gcd}_{S}H\leq \mathrm{Gcd}_{R}G$.
\end{proof}
Next, we give an upper bound for Gorenstein projective dimension of modules over group rings, by using Gorenstein cohomological dimension of the group.
\begin{proposition}\label{prop:gd-bound}
Let $G$ be a group, $R$ be a commutative Gorenstein regular ring. Then
$${\rm G.gldim}(RG) \leq \mathrm{Gcd}_{R}G + {\rm G.gldim}(R) \leq \mathrm{Gcd}_{\mathbb{Z}}G + {\rm G.gldim}(R).$$
\end{proposition}
\begin{proof}
We start by observing that the second inequality follows from Corollary \ref{cor:GcdZG}. The first inequality obviously holds if ${\rm Gcd}_{R}G $ is infinite. Furthermore, if we assume the finiteness of ${\rm Gcd}_{R}G$, then the desired inequality follows immediately from Lemma \ref{lem:SplitMonic} and \ref{lem:RG-Gp}.
\end{proof}
\begin{remark}
For any $R$, it is well known that ${\rm G.gldim}(R)\leq {\rm gldim}(R)$, and the equality holds if ${\rm gldim}(R)$ is finite. By the above result, we recover \cite[Corollary 1.6]{ET18} immediately: if $R$ has finite global dimension and $G$ is a group with ${\rm Gcd}_RG < \infty$, then all $RG$-modules $M$ have finite Gorenstein projective dimension and ${\rm Gpd}_{RG}M \leq \mathrm{Gcd}_{R}G + {\rm gldim}(R)$.
\end{remark}
Analogous to Definition 9.1.9 and Theorem 12.3.1 of \cite{EJ00}, a ring is said to be {\em 1-Gorenstein} if every module has Gorenstein projective dimension at most 1. By combining the above proposition with Theorem \ref{thm:fgroup} and Corollary \ref{cor:fgroup}, we have the following immediately.
\begin{corollary}\label{cor:gd<1}
Let $R$ be a commutative 1-Gorenstein ring, and $G$ be a finite group. Then the Gorenstein projective dimension of any $RG$-module is less than or equal to $1$.
\end{corollary}
It follows from \cite[Theorem 3.6]{BDT09} that the group $G$ acts on a tree with finite vertex stabilizers if and only if ${\rm Gcd}_{\mathbb{Z}}G \leq 1$. For example, any non-trivial free group $G$ satisfies ${\rm Gcd}_{\mathbb{Z}}G = {\rm cd}_{\mathbb{Z}}G = 1$. We infer the following.
\begin{corollary}\label{cor:gd<2}
Let $R$ be a commutative 1-Gorenstein ring, and $G$ be a group which acts on a tree with finite vertex stabilizers. Then the Gorenstein projective dimension of any $RG$-module is less than or equal to $2$.
\end{corollary}
\section{Benson's cofibrant and Gorenstein projective modules}
Inspired by \cite{CK97, DT10}, in this section we intend to show the relation between Benson's cofibrant modules and Gorenstein projective modules over group rings.
Let $G$ be a group. The ring of bounded functions from $G$ to $\mathbb{Z}$ is denoted by $B(G, \mathbb{Z})$. For any commutative ring $R$, $B(G, R):= B(G, \mathbb{Z})\otimes_{\mathbb{Z}} R$ is the module of functions from $G$ to $R$ which take finitely many values; see \cite[Definition 3.1]{CK97}.
The $RG$-module structure on $B(G, R)$ is given by $(g\alpha)(g') = \alpha(g^{-1}g')$ for any $\alpha\in B(G, R)$ and any $g, g'\in G$. It follows from \cite[Lemma 3.4]{Ben97} and \cite[Lemma 3.2]{CK97} that $B(G, R)$ is free as an $R$-module, and restricts to a free $RF$-module for any finite subgroup $F$ of $G$. Let $M$ be an $RG$-module. Then $M\otimes_{R}B(G, R)$ is an $RG$-module, where $G$ acts diagonally on the tensor product.
The following notion is due to Benson; see \cite[Definition 4.1]{Ben97}.
\begin{definition}\label{def:Cof}
Let $M$ be an $RG$-module. Then $M$ is {\em cofibrant} if $M\otimes_{R}B(G, R)$ is a projective $RG$-module.
\end{definition}
For the sake of clarity and completeness, we reformulate \cite[Theorem 3.5]{CK97} by modifying the argument therein; see Proposition \ref{prop:Cof-GP}.
The following characterization for Gorenstein projective modules is immediate from \cite[Proposition~2.3]{Hol04}. For any ring $A$, we denote by $\mathcal{P}(A)$ the class of all projective $A$-modules. The left orthogonal of $\mathcal{P}(A)$ is defined as
$$^{\perp}\mathcal{P}(A) = \{M\in {\rm Mod}(A)~~|~~~~ {\rm Ext}^i_A(M, P) = 0, \text{ for any } P\in \mathcal{P}(A) \text{ and } i\geq 1 \}.$$
\begin{lemma}\label{lem:Holm}
Let $A$ be a ring. An $A$-module $M$ is Gorenstein projective if and only if $M\in {^\perp \mathcal{P}(A)}$ and there is an exact sequence $0\rightarrow M \rightarrow P_0\rightarrow P_{-1} \rightarrow \cdots$ with each $P_i\in \mathcal{P}(A)$ and each cocycle in ${^\perp\mathcal{P}(A)}$.
\end{lemma}
Let $G$ be a group and $R$ a commutative ring. Recall that for modules $M$ and $N$ over the group ring $RG$, $M\otimes_{R}N$ and ${\rm Hom}_R(M, N)$ are $RG$-modules via the ``diagonal'' action of $G$ given by $g(m\otimes n) = gm\otimes gn$, and respectively, $(g\alpha)(m) = g\alpha(g^{-1}m)$, where $g\in G$, $m\in M$, $n\in N$ and $\alpha\in {\rm Hom}_R(M, N)$. Moreover, we have ${\rm Hom}_{RG}(M, N) = {\rm Hom}_R(M, N)^G$ and $M\otimes_{RG}N = (M\otimes_{R}N)_G$.
The following is standard; see \cite[Chapter III]{Bro82}.
\begin{lemma}\label{lem:RG-tensor-hom}
Let $M$, $N$ and $L$ be modules over the group ring $RG$.
\begin{enumerate}
\item There is a natural isomorphism $${\rm Hom}_{RG}(M\otimes_{R}N, L)\cong {\rm Hom}_{RG}(M, {\rm Hom}_{R}(N, L)).$$
\item If the $RG$-module $N$ is $R$-projective, then for any projective $RG$-module $M$, the diagonal $RG$-module $M\otimes_{R}N$ is also projective.
\end{enumerate}
\end{lemma}
\begin{proposition}\label{prop:Cof-GP}
Let $G$ be a group, $R$ a commutative ring of coefficients. Then any cofibrant $RG$-module is Gorenstein projective. Moreover, for any $RG$-module $M$, we have $\mathrm{Gpd}_{RG}M\leq \mathrm{pd}_{RG}M\otimes_{R}B(G, R)$.
\end{proposition}
\begin{proof}
Assume $M$ is a cofibrant module, i.e. $M\otimes_{R}B(G, R)$ is a projective $RG$-module. To simplify the notation, we put $B= B(G, R)$. Let $\mathbf{P}^{\bullet}\rightarrow M$ be an $RG$-projective resolution of $M$.
Let $Q$ be any projective $RG$-module. Then, there is an isomorphism of complexes $${\rm Hom}_{RG}(\mathbf{P}^{\bullet}\otimes_{R}B, Q)\cong {\rm Hom}_{RG}(\mathbf{P}^{\bullet}, {\rm Hom}_{R}(B, Q)).$$
Since $M\otimes_RB$ is projective, the complex $\mathbf{P}^{\bullet}\otimes_RB$ is split degreewise. Hence, we infer from the above isomorphism that for any $i>0$,
$\mathrm{Ext}_{RG}^{i}(M, \mathrm{Hom}_{R}(B, Q)) = 0$. By \cite[Lemma 3.4]{CK97}, as $RG$-modules $Q$ is a direct summand of $\mathrm{Hom}_{R}(B, Q)$. This yields $\mathrm{Ext}_{RG}^{i}(M, Q) = 0$, i.e. $M\in {^\perp\mathcal{P}(RG)}$.
It follows from \cite[Lemma 3.3]{CK97} that there exists an $R$-split exact sequence of $RG$-modules $0\rightarrow R\rightarrow B\rightarrow D\rightarrow 0$, where $D$ is free as an $R$-module. By applying
$D^{\otimes i}\otimes_{R}-$ to this exact sequence, we have a series of exact sequences
$$0\longrightarrow D^{\otimes i}\longrightarrow D^{\otimes i}\otimes_{R}B\longrightarrow D^{\otimes i+1}\rightarrow 0,$$
where $i\geq 0$ are integers with the convention that $D^{\otimes 0} = R$. Denote by $V^i= D^{\otimes i}\otimes_{R}B$. We splice these sequences together to obtain a long exact sequence of $RG$-modules
$0\longrightarrow R\longrightarrow V^0\longrightarrow V^1\longrightarrow\cdots$.
Since $M\otimes_{R}B$ is projective, it follows that $M\otimes_{R}B\otimes_{R}V^i$ are also projective $RG$-modules. Moreover, since $B\cong R\oplus D$ as $R$-modules, we get that $M\otimes_{R}V^i$ are projective $RG$-modules. The above long exact sequence is $R$-split because $D^{\otimes i}$ is $R$-projective for every $i\geq 0$, and so there is a long exact sequence of $RG$-modules:
$$0\longrightarrow M\longrightarrow M\otimes_{R}V^0
\stackrel{\delta^1}{\longrightarrow} M\otimes_{R}V^1
\stackrel{\delta^2}{\longrightarrow} M\otimes_{R}V^2\longrightarrow\cdots \eqno{(\flat)}$$
in which all the modules, other than $M$, are projective.
It is clear that ${\rm Im}\delta^i\cong M\otimes_{R}D^{\otimes i}$. Let $Q$ be any projective $RG$-module. Since $M\otimes_{R}B$ is projective, all sequences
$$0\longrightarrow M\otimes_{R}B\otimes_{R}D^{\otimes i} \longrightarrow M\otimes_{R}B\otimes_{R}V^{i} \longrightarrow M\otimes_{R}B\otimes_{R}D^{\otimes i+1} \longrightarrow 0$$
are split, and moreover, they remain exact after applying $\mathrm{Hom}_{RG}(-, Q)$. Together with the natural isomorphisms $$\mathrm{Hom}_{RG}(-, \mathrm{Hom}_{R}(B, Q))\cong \mathrm{Hom}_{RG}(-\otimes_{R}B, Q),$$
we deduce that the sequence $(\flat)$ remains exact after applying $\mathrm{Hom}_{RG}(-, \mathrm{Hom}_{R}(B, Q))$. Hence, $(\flat)$ remains exact after applying $\mathrm{Hom}_{RG}(-, Q)$ for any projective module $Q$. Then, we infer that ${\rm Im}\delta^i\in {^\perp\mathcal{P}(RG)}$. Consequently, by Lemma \ref{lem:Holm} it follows that $M$ is a Gorenstein projective $RG$-module.
Now, we can prove the inequality $\mathrm{Gpd}_{RG}N\leq \mathrm{pd}_{RG}N\otimes_{R}B$ for any $RG$-module $N$. There is nothing to prove unless $\mathrm{pd}_{RG}N\otimes_{R}B = n$ is finite, so we assume this. Consider a projective resolution $\cdots\rightarrow P_1\rightarrow P_0\rightarrow N\rightarrow 0$ of $N$, and let $K_n = \mathrm{Ker}(P_{n-1}\rightarrow P_{n-2})$. Since $P_i\otimes_{R}B$ is projective for every $i\geq 0$, it follows from $\mathrm{pd}_{RG}N\otimes_{R}B = n$ that $K_n\otimes_{R}B$ is a projective $RG$-module, i.e. $K_n$ is cofibrant. Then $K_n$ is a Gorenstein projective $RG$-module, and
$\mathrm{Gpd}_{RG}N\leq n$ holds. This completes the proof.
\end{proof}
\begin{lemma}\label{lem:cof}
Let $M$ be a Gorenstein projective $RG$-module. Then $M$ is cofibrant $($i.e. ${\rm pd}_{RG}M\otimes_{R}B(G, R) = 0)$ if and only if ${\rm pd}_{RG}M\otimes_{R}B(G, R) < \infty$.
\end{lemma}
\begin{proof}
The ``only if'' part is trivial. Let $M$ be an $RG$-module such that ${\rm pd}_{RG}M\otimes_{R}B(G, R)$ is finite. It follows from
\cite[Lemma 4.5(ii)]{Ben97} that if every extension of $M$ by an $RG$-module of finite projective dimension splits, then $M$ is cofibrant. It is a well-known fact that for any Gorenstein projective module $M$ and any $RG$-module $W$ of finite projective dimension, one has ${\rm Ext}^i_{RG}(M, W) = 0$ for any $i\geq 1$. Then, the ``if'' part holds.
\end{proof}
We now have some basic properties of cofibrant modules.
\begin{proposition}\label{prop:cof}
Let $G$ be a group and $R$ be a commutative ring of coefficients. We use $\mathcal{C}of(RG)$ to denote the class of cofibrant $RG$-modules. Then $\mathcal{C}of(RG)$ contains all projective $RG$-modules. For any exact sequence $0\rightarrow L\rightarrow M\rightarrow N\rightarrow 0$ of $RG$-modules, if $N$ is cofibrant, then $M$ is cofibrant if and only if so is $L$. If $L$ and $M$ are cofibrant and ${\rm Ext}^1_{RG}(N, Q) = 0$ for any $RG$-module $Q$ of finite projective dimension, then $N$ is cofibrant as well.
\end{proposition}
\begin{proof}
It is sufficient to prove the last assertion, since the others follow by the very definition. Assume that there is an exact sequence $0\rightarrow L\rightarrow M\rightarrow N\rightarrow 0$, where $L$ and $M$ are cofibrant. By applying the functor $-\otimes_{R}B(G, R)$ to the sequence, we infer that ${\rm pd}_{RG}N\otimes_{R}B(G, R) \leq 1$. Furthermore, if
${\rm Ext}^1_{RG}(N, Q) = 0$ for any $RG$-module $Q$ with ${\rm pd}_{RG}Q < \infty$, then it follows immediately from \cite[Lemma 4.5(ii)]{Ben97} that $N$ is cofibrant.
\end{proof}
Specifying the $RG$-module $M$ in Proposition \ref{prop:Cof-GP} to be the trivial $RG$-module $R$, we have $\mathrm{Gcd}_{R}G\leq \mathrm{pd}_{RG}B(G, R)$ immediately. Moreover, we have the following equality, which generalizes \cite[Theorem 1.18]{Bis21} from rings of finite global dimension to Gorenstein regular rings, with a more concise argument.
\begin{corollary}\label{cor:Gcd=pdB1}
Let $R$ be a commutative Gorenstein regular ring, $G$ be any group. Then ${\rm Gcd}_{R}G = {\rm pd}_{RG}B(G, R)$ if the latter is finite.
\end{corollary}
\begin{proof}
By \cite[Lemma 3.3]{CK97}, there exists an $R$-split exact sequence of $RG$-modules
$0\rightarrow R\rightarrow B(G, R)$, where $B(G, R)$ is free as an $R$-module. If ${\rm pd}_{RG}B(G, R)$ is finite, we infer from Lemmas \ref{lem:SplitMonic} and \ref{lem:RG-Gp} that
${\rm pd}_{RG}B(G, R)= {\rm Gcd}_{R}G$ holds.
\end{proof}
The notion of the {\em group of type $\Phi$} is due to Talelli; see \cite{Tal07}. Recall that a group $G$ is of type $\Phi_R$, if it has the property that an $RG$-module is of finite projective dimension if and only if its restriction to any finite subgroup is of finite projective dimension; see \cite[Definition 2.1]{MS19}. It was conjectured that a group $G$ is of type $\Phi_\mathbb{Z}$ if and only if $G$ admits a finite dimensional model for $\underline{E}G$, the classifying space for the family of the finite subgroups of $G$; the sufficient condition was proved to be true. We refer to \cite{Tal07} for the details.
\begin{proposition}\label{prop:GP-Cof1}
Let $R$ be a commutative ring, and $G$ be a group of type $\Phi_R$. For any Gorenstein projective $RG$-module $M$, if ${\rm pd}_RM < \infty$, then $M$ is cofibrant.
\end{proposition}
\begin{proof}
It follows from \cite[Lemma 3.4(ii)]{Ben97} that for any finite subgroup $H$ of $G$, $B(G, R)$ restricts to a free $RH$-module. Then, for any $RG$-module $M$, if ${\rm pd}_RM$ is finite, then ${\rm pd}_{RH}M\otimes_{R}B(G, R)$ is also finite. Since $G$ is of type $\Phi_R$, we infer that ${\rm pd}_{RG}M\otimes_{R}B(G, R)$ is finite. Moreover, if $M$ is Gorenstein projective, then it follows directly from Lemma \ref{lem:cof} that $M$ is cofibrant.
\end{proof}
We briefly recall Kropholler's class of $\mathrm{LH}\mathfrak{F}$-groups \cite{Kro93} for convenience. Let $\mathfrak{F}$ be the class of finite groups. A class of groups
${\rm H}_{\alpha}\mathfrak{F}$ for each ordinal $\alpha$ is defined as follows. Let ${\rm H}_0\mathfrak{F} = \mathfrak{F}$, and for $\alpha > 0$ we define a group $G$ to be in
${\rm H}_{\alpha}\mathfrak{F}$ if $G$ acts cellularly on a finite dimensional contractible CW-complex $X$, in such a way that the setwise stabilizer of each cell is equal to the pointwise stabilizer, and is in ${\rm H}_{\beta}\mathfrak{F}$ for some $\beta < \alpha$. The class ${\rm H}\mathfrak{F}$ is then the union of all the ${\rm H}_{\alpha}\mathfrak{F}$. Here, the letter ``H'' stands for ``hierarchy''. Finally, a group $G$ is said to be an ${\rm LH}\mathfrak{F}$-group if every finitely generated subgroup of $G$ is an
${\rm H}\mathfrak{F}$-group.
Inspired by \cite[Theorem B]{DT10}, we have the following.
\begin{proposition}\label{prop:GP-Cof2}
Let $G$ be an $\mathrm{LH}\mathfrak{F}$-group, $R$ a commutative ring. For any Gorenstein projective $RG$-module $M$, if ${\rm pd}_RM < \infty$, then $M$ is cofibrant.
\end{proposition}
\begin{proof}
We first prove the assertion for $\mathrm{H}\mathfrak{F}$-groups, using transfinite induction on the ordinal number $\alpha$ such that $G$ belongs to $\mathrm{H}_{\alpha}\mathfrak{F}$. For $\alpha = 0$, i.e. $G$ is a finite group, $B(G, R)$ is a free $RG$-module, and then the finiteness of ${\rm pd}_{RG}M\otimes_{R}B(G, R)$ follows from the hypothesis ${\rm pd}_RM < \infty$; furthermore, the $RG$-module $M\otimes_{R}B(G, R)$ is projective. Then, let us assume that the result is true for any $H\in \mathrm{H}_{\beta}\mathfrak{F}$ and all $\beta < \alpha$, i.e. $M\otimes_{R}B(G, R)$ is a projective $RH$-module. A crucial algebraic consequence of the definition of $\mathrm{H}\mathfrak{F}$-groups is that there exists an augmented cellular chain complex of a finite dimensional contractible CW-complex $X$, that is, we have an exact sequence of $\mathbb{Z}G$-modules
$$0\longrightarrow C_n\longrightarrow \cdots\longrightarrow C_1\longrightarrow C_0\longrightarrow \mathbb{Z}\longrightarrow 0,$$
where each $C_i$ is a direct sum of permutation modules of the form $\mathbb{Z}[G/H]$ with $H$ a subgroup of $G$ satisfying $H\in \mathrm{H}_{\beta}\mathfrak{F}$ for $\beta < \alpha$.
For any subgroup $H$ of $G$, $RG$ is a free right $RH$-module since the right translation action of $H$ on $G$ is free, and we can take any set of representatives for the left cosets $gH$ as a basis. Then, for any projective $RH$-module $P$, it follows from the adjunction ${\rm Hom}_{RG}(RG\otimes_{RH}P, -)\cong {\rm Hom}_{RH}(P, {\rm Hom}_{RG}(RG, -))$ that the module ${\rm Ind}_H^GP = RG\otimes_{RH}P$ is projective over $RG$. For each $\mathbb{Z}[G/H] = \mathrm{Ind}_{H}^{G}\mathbb{Z}$, there is an isomorphism (see \cite[Proposition III 5.6]{Bro82})
$${\rm Ind}_H^G\mathbb{Z}\otimes_{\mathbb{Z}}(M\otimes_{R}B(G, R)) \cong {\rm Ind}_H^G(M\otimes_{R}B(G, R)).$$
Invoking the induction hypothesis that $M\otimes_{R}B(G, R)$ is a projective $RH$-module, it follows that $\mathbb{Z}[G/H]\otimes_{\mathbb{Z}}(M\otimes_{R}B(G, R))$ is projective over $RG$ and thus each $C_i\otimes_{\mathbb{Z}}(M\otimes_{R}B(G, R))$ is projective over $RG$. Tensoring the above sequence with $M\otimes_{R}B(G, R)$ gives us an exact sequence of $RG$-modules
$$0\rightarrow C_n\otimes_{\mathbb{Z}}(M\otimes_{R}B(G, R))\rightarrow \cdots\rightarrow C_0\otimes_{\mathbb{Z}}(M\otimes_{R}B(G, R))\rightarrow M\otimes_{R}B(G, R)\rightarrow 0,$$
which implies that ${\rm pd}_{RG}M\otimes_{R}B(G, R) < \infty$ for any Gorenstein projective $RG$-module $M$. Hence, it yields by
Lemma \ref{lem:cof} that $M$ is cofibrant, i.e. $M\otimes_{R}B(G, R)$ is a projective $RG$-module.
Next, we prove the result for $\mathrm{LH}\mathfrak{F}$-groups by induction on the cardinality of the group. If $G$ is a countable group, then it belongs to $\mathrm{H}\mathfrak{F}$, so the assertion holds by the above argument. Assume that $G$ is uncountable and the result is true for any groups with cardinality strictly smaller than that of $G$. Then $G$ can be expressed as the union of an ascending chain of subgroups $G_{\alpha}$, where $\alpha < \gamma$ for some ordinal number $\gamma$, such that each $G_{\alpha}$ has cardinality strictly smaller than $|G|$. By the induction hypothesis, $M\otimes_{R}B(G, R)$ is projective over each $RG_{\alpha}$. Then, it follows directly from \cite[Lemma 5.6]{Ben97} that ${\rm pd}_{RG}M\otimes_{R}B(G, R) \leq 1$. By Lemma \ref{lem:cof}, we infer that the Gorenstein projective $RG$-module $M$ is cofibrant. This completes the proof.
\end{proof}
We may use Propositions \ref{prop:GP-Cof1} and \ref{prop:GP-Cof2} to recover \cite[Theorem 3.11]{Bis21+} and \cite[Theorem 3.4]{Bis21} respectively; see the next corollary. By specifying $R = \mathbb{Z}$, we also reobtain \cite[Corollary C]{DT10}, which gives a partial answer to \cite[Conjecture A]{DT10}: over the integral group ring $\mathbb{Z}G$, Gorenstein projective modules coincide with Benson's cofibrant modules.
\begin{corollary}\label{cor:GP=Cof}
Let $R$ be a commutative ring of finite global dimension. If $G$ is either a group of type $\Phi_R$, or an $\mathrm{LH}\mathfrak{F}$-group, then Gorenstein projective $RG$-modules coincide with cofibrant modules. Moreover, for any $RG$-module $M$, we have $\mathrm{pd}_{RG}M\otimes_{R}B(G, R) = \mathrm{Gpd}_{RG}M$.
\end{corollary}
In particular, for the trivial $RG$-module $R$, we have the following; compare with Corollary \ref{cor:Gcd=pdB1}. Note that the case for $\mathrm{LH}\mathfrak{F}$-groups recovers \cite[Theorem A.1]{ET22}.
\begin{corollary}\label{cor:Gcd=pdB2}
Let $R$ be a commutative ring of finite global dimension. If $G$ is either a group of type $\Phi_R$, or an $\mathrm{LH}\mathfrak{F}$-group, then
$\mathrm{Gcd}_RG = \mathrm{pd}_{RG}B(G, R)$.
\end{corollary}
We conclude this section with a remark that the class of $\mathrm{LH}\mathfrak{F}$-groups is in fact very much larger than $\mathfrak{F}$. For example, soluble-by-finite groups, linear groups, groups of finite cohomological dimension over $\mathbb{Z}$, and groups with a faithful representation as endomorphisms of a noetherian module over a commutative ring, are all contained in $\mathrm{LH}\mathfrak{F}$; see for example \cite{Kro93, BDT09}.
\section{Model structure and stable categories over group rings}
A model category \cite{Qui67} refers to a category with three specified classes of morphisms, called fibrations, cofibrations and weak equivalences, which satisfy a few axioms that are deliberately reminiscent of properties of topological spaces. The homotopy category associated to a model category is obtained by formally inverting the weak equivalences, while the objects are the same. We refer to \cite{Qui67, Hov99, DS95} for basic definitions and facts on model categories.
\begin{lemma}\label{lem:C}
Let $G$ be a group and $R$ be a commutative ring of coefficients. Then the subcategory of cofibrant $RG$-modules $\mathcal{C}of(RG)$ is a Frobenius category, whose projective and injective objects coincide and are precisely the projective $RG$-modules.
\end{lemma}
\begin{proof}
By Proposition \ref{prop:Cof-GP}, for any projective $RG$-module $P$ and any cofibrant module $M$, we have ${\rm Ext}_{RG}^i(M, P) = 0$, and then projective $RG$-modules are the projective-injective objects with respect to $\mathcal{C}of(RG)$. Consequently, by combining with the properties of cofibrant modules in Proposition \ref{prop:cof}, we get that $\mathcal{C}of(RG)$ is a Frobenius category.
\end{proof}
Let $A$ be a ring. For any $A$-modules $M$ and $N$, we define $\underline{{\rm Hom}}_A(M, N)$ to be the quotient of ${\rm Hom}_A(M, N)$ by the additive subgroup consisting of homomorphisms which factor through a projective module. For the Frobenius category $\mathcal{C}of(RG)$ of cofibrant $RG$-modules, the stable category
$\underline{\mathcal{C}of}(RG)$ is a triangulated category with objects being cofibrant modules but morphisms being $\underline{{\rm Hom}}_{RG}(M, N)$. We refer to \cite{Hap88} for details of Frobenius categories and stable categories.
We refer to the exposition \cite{Bun10} for the notion of exact category, which is commonly attributed to Quillen. By \cite[Definition 2.2]{Gil11}, an additive category is weakly idempotent complete if every split monomorphism has a cokernel and every split epimorphism has a kernel. The following is immediate by the very definition.
\begin{lemma}\label{lem:F}
Let $G$ be a group and $R$ be a commutative ring of coefficients. Let $\mathcal{F}ib(RG)$ be the subcategory of $RG$-modules $M$ such that ${\rm pd}_{RG}M\otimes_{R} B(G, R)< \infty$. Then $\mathcal{F}ib(RG)$ is a weakly idempotent complete exact category. The modules in $\mathcal{F}ib(RG)$ are called fibrant $RG$-modules.
\end{lemma}
Let $\mathcal{A}$ be an exact category with enough projectives and enough injectives. The Yoneda Ext bifunctor is denoted by $\mathrm{Ext}_{\mathcal{A}}(-, -)$. A pair of classes $(\mathcal{X}, \mathcal{Y})$ in $\mathcal{A}$ is a \emph{cotorsion pair} provided that $\mathcal{X} = {^\perp}\mathcal{Y}$ and $\mathcal{Y} = \mathcal{X}^{\perp}$, where the left orthogonal class $^{\perp}\mathcal{Y}$ consists of $X$ such that $\mathrm{Ext}^{\geq 1}_{\mathcal{A}}(X, Y) = 0$ for all $Y\in \mathcal{Y}$, and the right orthogonal class $\mathcal{X}^{\perp}$ is defined similarly. The cotorsion pair $(\mathcal{X}, \mathcal{Y})$ is said to be \emph{complete} if for any object $M\in \mathcal{A}$, there exist short exact sequences $0\rightarrow Y\rightarrow X \rightarrow M \rightarrow 0$ and $0\rightarrow M\rightarrow Y' \rightarrow X' \rightarrow 0$ with $X, X'\in \mathcal{X}$ and $Y, Y'\in \mathcal{Y}$.
It is not usually trivial to check that a category has a model category structure, see for example \cite[Section 10]{Ben97}. However, thanks to \cite[Theorem 2.2]{Hov02} and \cite[Theorem 3.3]{Gil11}, we have a correspondence between complete cotorsion pairs and the model structure. This makes it convenient to obtain the following; compare \cite[Section 10]{Ben97}.
\begin{theorem}\label{thm:model}
Let $G$ be a group and $R$ be a commutative ring of coefficients. Let $\mathcal{W}$ be the subcategory formed by $RG$-modules of finite projective dimension. Then there are complete cotorsion pairs
$(\mathcal{C}of\cap \mathcal{W}, \mathcal{F}ib)$ and $(\mathcal{C}of, \mathcal{W}\cap \mathcal{F}ib)$ in the weakly idempotent complete exact category $\mathcal{F}ib(RG)$.
Consequently, there is a model structure on the category $\mathcal{F}ib(RG):$
\begin{itemize}
\item the cofibrations (trivial cofibrations) are monomorphisms whose cokernel are cofibrant $RG$-modules (projective $RG$-modules).
\item the fibrations (trivial fibrations) are epimorphisms (with kernel being of finite projective dimension).
\item the weak equivalences are morphisms which factor as a trivial cofibration followed by a trivial fibration.
\end{itemize}
\end{theorem}
\begin{proof}
It is clear that $\mathcal{P}\subseteq \mathcal{C}of\cap \mathcal{W}$, that is, all projective $RG$-modules are included in $\mathcal{C}of\cap \mathcal{W}$. We infer that $\mathcal{C}of\cap \mathcal{W}\subseteq \mathcal{P}$, since any cofibrant module is Gorenstein projective, and the projective dimension of any Gorenstein projective module is either zero or infinity. Hence, $\mathcal{C}of\cap \mathcal{W} = \mathcal{P}$.
For any $P\in \mathcal{P}$ and any $RG$-module $M$, it is clear that ${\rm Ext}^{\geq 1}_{RG}(P, M) = 0$, and furthermore, we have $\mathcal{P}^{\perp} = \mathcal{F}ib$ and $\mathcal{P}\subseteq {^{\perp}\mathcal{F}ib}$ by noting that ``$\perp$'' is only calculated inside $\mathcal{F}ib$. Let $M\in {^{\perp}\mathcal{F}ib}$. There is an exact sequence $0\rightarrow K\rightarrow P\rightarrow M\rightarrow 0$ in $\mathcal{F}ib$, where $P$ is a projective $RG$-module. Noting that
${\rm Ext}^1_{RG}(M, K) = 0$, we deduce that the sequence is split, and hence, as a direct summand of $P$, $M$ is projective. This implies the inclusion ${^{\perp}\mathcal{F}ib}\subseteq \mathcal{P}$, and consequently, we obtain a cotorsion pair $(\mathcal{C}of \cap \mathcal{W}, \mathcal{F}ib) = (\mathcal{P}, \mathcal{F}ib)$. The completeness of this cotorsion pair is easy to see.
Next, we show that $(\mathcal{C}of, \mathcal{W}\cap \mathcal{F}ib) = (\mathcal{C}of, \mathcal{W})$ is a cotorsion pair. Since every cofibrant module is Gorenstein projective, $\mathcal{C}of\subseteq {^{\perp}\mathcal{W}}$ and $\mathcal{W}\subseteq \mathcal{C}of^{\perp}$ hold immediately. For any $M\in {^{\perp}\mathcal{W}}$, we have $M\in \mathcal{C}of$ by Lemma \ref{lem:cof}, since we only consider objects in $\mathcal{F}ib$. Hence, ${^{\perp}\mathcal{W}}\subseteq \mathcal{C}of$, and then
$\mathcal{C}of = {^{\perp}\mathcal{W}}$. Let $M$ be any object in $\mathcal{C}of^{\perp}$. Since we also have $M\in \mathcal{F}ib$, it follows from
Proposition \ref{prop:Cof-GP} that ${\rm Gpd}_{RG}M \leq {\rm pd}_{RG}M\otimes_{R}B(G, R)$ is finite. Assume ${\rm Gpd}_{RG}M = n$. By an argument analogous to that of Lemma \ref{lem:SplitMonic}, we have an exact sequence $0\rightarrow M\rightarrow N\rightarrow L\rightarrow 0$ from a pushout diagram, where $L$ is Gorenstein projective and
${\rm pd}_{RG}N = n$. Then, we infer from Lemma \ref{lem:cof} that $L$ is cofibrant by noting $L\in \mathcal{F}ib$. Hence, ${\rm Ext}_{RG}^1(L, M) = 0$ for $M\in \mathcal{C}of^{\perp}$. Then, the above sequence is split, and ${\rm pd}_{RG}M\leq {\rm pd}_{RG}N = n$. This implies that $\mathcal{C}of^{\perp}\subseteq \mathcal{W}$, and finally, $(\mathcal{C}of, \mathcal{W})$ is a cotorsion pair.
For any $M\in \mathcal{F}ib$, we have an exact sequence $0\rightarrow M\rightarrow N\rightarrow L\rightarrow 0$ with $N\in \mathcal{W}$ and $L\in \mathcal{C}of$. Moreover, for $N\in \mathcal{W}$, there is an exact sequence $0\rightarrow K\rightarrow P\rightarrow N\rightarrow 0$, where $P$ is projective and $K\in \mathcal{W}$. Now we consider the pullback of $M\rightarrow N$ and $P\rightarrow N$, and obtain the following commutative diagram.
$$\xymatrix@C=20pt@R=20pt{
& 0\ar[d] & 0\ar[d] \\
& K \ar@{=}[r]\ar[d] &K\ar[d]\\
0 \ar[r] &X \ar[d] \ar[r] & P \ar[d]\ar[r] &L \ar@{=}[d]\ar[r] &0 \\
0 \ar[r] &M \ar[r]\ar[d] & N \ar[r] \ar[d] &L \ar[r] & 0\\
& 0 & 0
}$$
We infer $X\in \mathcal{C}of$ from the middle row, where $L\in \mathcal{C}of$ and $P\in \mathcal{P}$. Then, by the left column and the lower row, we infer that the cotorsion pair $(\mathcal{C}of, \mathcal{W})$ is complete.
Consequently, by using \cite[Theorem 3.3]{Gil11} we have a model structure on $\mathcal{F}ib$ as stated above, which corresponds to the triple of classes of $RG$-modules $(\mathcal{C}of, \mathcal{W}, \mathcal{F}ib)$. The triple is usually referred to as a Hovey triple, since such a correspondence was obtained by Hovey in \cite[Theorem 2.2]{Hov02}.
\end{proof}
Let $M$ be an object in a model category. Recall that $M$ is called {\em cofibrant} if $0\rightarrow M$ is a cofibration, and it is called {\em fibrant} if $M\rightarrow 0$ is a fibration.
For the model category $\mathcal{F}ib$, there is a stable category of the Frobenius category $\mathcal{C}of$, whose objects are both cofibrant and fibrant. Moreover, there is an associated homotopy category $\mathrm{Ho}(\mathcal{F}ib)$, which is obtained by formally inverting weak equivalences, that is, the localization of $\mathcal{F}ib$ with respect to the class of weak equivalences.
The following is immediate from a fundamental result about model categories, see for example \cite[Theorem 1.2.10]{Hov99}.
\begin{corollary}\label{cor:equ1}
Let $G$ be a group and $R$ be a commutative ring of coefficients. The homotopy category ${\rm Ho}(\mathcal{F}ib)$ is triangle equivalent to the stable category $\underline{\mathcal{C}of}(RG)$.
\end{corollary}
Note that weak equivalences are crucial in the model category. We have the following characterization, which might be of independent interests.
\begin{proposition}\label{prop:WEqu}
Let $G$ be a group and $R$ a commutative ring of coefficients. For any cofibrant $RG$-modules $M$ and $N$, the following are equivalent:
\begin{enumerate}
\item There is a weak equivalence between $M$ and $N$;
\item $M$ and $N$ are isomorphic in the stable category $\underline{\mathcal{C}of}(RG)$;
\item There is an isomorphism $M\oplus P\cong N\oplus Q$ in the category ${\rm Mod}(RG)$ of $RG$-modules, for some projective modules $P$ and $Q$.
\end{enumerate}
\end{proposition}
\begin{proof}
$(1)\Longrightarrow (2)$ Since the homotopy category ${\rm Ho}(\mathcal{F}ib)$ is triangle equivalent to the stable category $\underline{\mathcal{C}of}(RG)$, it is clear.
$(2)\Longrightarrow (3)$ It is well known that in general, the assertion holds for any stable category of a Frobenius category; see for example \cite[Lemma 1.1]{CZ07}.
$(3)\Longrightarrow (1)$ Note that the isomorphism $M\oplus P\rightarrow N\oplus Q$ is a weak equivalence in the model category $\mathcal{F}ib$. Moreover, the injection $M\rightarrow M\oplus P$ is a trivial cofibration, and the projection $N\oplus Q\rightarrow N$ is a trivial fibration. By the concatenation of these maps, we get a weak equivalence between $M$ and $N$.
\end{proof}
For any module $M$, we define $\Omega(M)$ to be the kernel of a surjective homomorphism from a projective module onto $M$. For any $RG$-modules $M$ and $N$, there is a natural homomorphism $\underline{{\rm Hom}}_{RG}(M, N) \rightarrow \underline{{\rm Hom}}_{RG}(\Omega(M), \Omega(N))$. The complete cohomology is defined by
$\widehat{{\rm Ext}}_{RG}^n(M, N) = \mathop{\underrightarrow{\mathrm{lim}}}\limits_i\underline{{\rm Hom}}_{RG}(\Omega^{n+i}(M), \Omega^i(N))$.
We have the following definition, which generalizes that of \cite[Section 8]{Ben97} by removing the countably presented condition for modules therein.
\begin{definition}\label{def:stab}
Let ${\rm StMod}(RG)$ be the stable module category with all fibrant $RG$-modules as objects, and morphisms for any objects $M$ and $N$ are given by complete cohomology of degree zero, that is,
$${\rm Hom}_{{\rm StMod}(RG)}(M, N) = \mathop{\underrightarrow{\mathrm{lim}}}\limits_i\underline{{\rm Hom}}_{RG}(\Omega^i(M), \Omega^i(N)).$$
\end{definition}
Note that a map in ${\rm Hom}_{{\rm StMod}(RG)}(M, N)$ might not correspond to any map in ${\rm Hom}_{RG}(M, N)$, and hence, this definition can be difficult to work with. However, for cofibrant modules, we have the following.
\begin{lemma}\label{lem:hom}
Let $M$ and $N$ be cofibrant $RG$-modules. There is an isomorphism $$\underline{{\rm Hom}}_{RG}(M, N) \cong \underline{{\rm Hom}}_{RG}(\Omega(M), \Omega(N)).$$
\end{lemma}
\begin{proof}
There exist exact sequences $0\rightarrow \Omega(M)\rightarrow P\rightarrow M\rightarrow 0$ and $0\rightarrow \Omega(N)\rightarrow Q\rightarrow N\rightarrow 0$, where $P$ and $Q$ are projective $RG$-modules. Since ${\rm Ext}_{RG}^1(M, Q) = 0$, any map from $\Omega(M)$ to $Q$ can be extended to a map $P\rightarrow Q$, and then we have the following commutative diagram
$$\xymatrix@C=20pt@R=20pt{
0\ar[r] & \Omega(M)\ar[d]_{\Omega(f)}\ar[r] & P\ar[r]\ar[d]^{g} &M\ar[r]\ar[d]^{f} & 0 \\
0\ar[r] & \Omega(N) \ar[r] &Q \ar[r] &N\ar[r] & 0
}$$
Hence, the natural homomorphism $\underline{{\rm Hom}}_{RG}(M, N) \rightarrow \underline{{\rm Hom}}_{RG}(\Omega(M), \Omega(N))$ is surjective.
Now assume that $0 = \Omega(f)\in \underline{{\rm Hom}}_{RG}(\Omega(M), \Omega(N))$. Since ${\rm Ext}_{RG}^1(M, -)$ vanishes for any projective $RG$-module, if $\Omega(f)\colon \Omega(M)\rightarrow \Omega(N)$ factors through a projective module, then it must factor through $P$. That is, for the inclusion $\alpha\colon \Omega(M)\rightarrow P$, there exists a map $s\colon P\rightarrow \Omega(N)$ such that $\Omega(f) = s\alpha$. Denote by $\gamma\colon \Omega(N)\rightarrow Q$ and $\delta\colon Q\rightarrow N$ the canonical maps. It is standard to show that $g - \gamma s\colon P\rightarrow Q$ factors through $\beta\colon P\rightarrow M$, that is, there exists a map $t\colon M\rightarrow Q$ such that $g - \gamma s = t\beta$. Since
$f\beta = \delta g = \delta t \beta$ and $\beta$ is epic, it yields that $f = \delta t$. Hence, the natural homomorphism $\underline{{\rm Hom}}_{RG}(M, N) \rightarrow \underline{{\rm Hom}}_{RG}(\Omega(M), \Omega(N))$ is also injective. This completes the proof.
\end{proof}
\begin{theorem}\label{thm:stable}
Let $G$ be a group and $R$ be a commutative ring of coefficients. Then ${\rm Ho}(\mathcal{F}ib)$ is equivalent to ${\rm StMod}(RG)$.
\end{theorem}
\begin{proof}
First, we note that objects of ${\rm Ho}(\mathcal{F}ib)$ and ${\rm StMod}(RG)$ coincide. It suffices to prove that the natural functor from ${\rm Ho}(\mathcal{F}ib)$ to
${\rm StMod}(RG)$ is fully faithful.
Let $M$ and $N$ be any fibrant $RG$-modules. By the completeness of the cotorsion pair $(\mathcal{C}of, \mathcal{W})$, there exists an exact sequence $0\rightarrow K_M\rightarrow Q(M)\rightarrow M\rightarrow 0$,
where $Q(M)$ is cofibrant and $K_M\in \mathcal{W}$. Hence, the cofibrant approximation $Q(M)\rightarrow M$ is also a trivial fibration, and we refer it (or simply, $Q(M)$) to be a cofibrant replacement of $M$. Then, we have the following isomorphisms
$${\rm Hom}_{{\rm Ho}(\mathcal{F}ib)}(M, N)\cong \underline{{\rm Hom}}_{RG}(Q(M), Q(N)) \cong {\rm Hom}_{{\rm StMod}(RG)}(Q(M), Q(N)),$$
where the first one follows by \cite[Theorem 1.2.10(ii)]{Hov99} and Proposition \ref{prop:WEqu}, and the second one holds by Lemma \ref{lem:hom}.
By basic properties of cofibrant modules (see Proposition \ref{prop:cof}), for fibrant $RG$-modules $M$ and $N$, there exists an integer $r \gg 0$, such that both $\Omega^r(M)$ and $\Omega^r(N)$ are cofibrant modules, and moreover, the projective dimensions of $K_M$ and $K_N$ are not more than $r - 1$. For $M$, we have exact sequences
$$0\longrightarrow \Omega^r(M)\longrightarrow P_{r-1}\longrightarrow \cdots \longrightarrow P_1\longrightarrow P_0\longrightarrow M\longrightarrow 0,$$
$$0\longrightarrow P'_{r}\longrightarrow P'_{r-1}\longrightarrow \cdots \longrightarrow P'_1\longrightarrow Q(M)\longrightarrow M\longrightarrow 0,$$
where $P_i$ and $P'_i$ are all projective $RG$-modules; similarly, we obtain such exact sequences for $N$. Moreover, we get the following commutative diagram:
$$\xymatrix@C=20pt@R=20pt{
0\ar[r] & \Omega^r(M)\ar[d]_{\Omega^r(f)}\ar[r] & P_{r-1}\oplus P'_{r}\ar[r]\ar[d] &\cdots \ar[r] &P_0\oplus P'_1 \ar[r]\ar[d] & Q(M)\ar[r]\ar[d]^{f} & 0 \\
0\ar[r] & \Omega^r(N)\ar[r] & Q_{r-1}\oplus Q'_{r}\ar[r] &\cdots \ar[r] &Q_0\oplus Q'_1 \ar[r] & Q(N)\ar[r] & 0
}$$
Analogous to Lemma \ref{lem:hom}, we can prove that there is an isomorphism
$$\underline{{\rm Hom}}_{RG}(Q(M), Q(N)) \cong \underline{{\rm Hom}}_{RG}(\Omega^r(M), \Omega^r(N)).$$
Moreover, it follows from Lemma \ref{lem:hom} that for all $j > 0$, we have isomorphisms
$$\underline{{\rm Hom}}_{RG}(\Omega^r(M), \Omega^r(N)) \cong \underline{{\rm Hom}}_{RG}(\Omega^{r+j}(M), \Omega^{r+j}(N)),$$
and consequently,
$${\rm Hom}_{{\rm StMod}(RG)}(M, N) = \mathop{\underrightarrow{\mathrm{lim}}}\limits_i\underline{{\rm Hom}}_{RG}(\Omega^i(M), \Omega^i(N)) = \underline{{\rm Hom}}_{RG}(\Omega^r(M), \Omega^r(N)).$$
Hence, we get the desired isomorphism ${\rm Hom}_{{\rm Ho}(\mathcal{F}ib)}(M, N)\cong {\rm Hom}_{{\rm StMod}(RG)}(M, N)$. We are done with the proof.
\end{proof}
It is well known that the category of Gorenstein projective modules $\mathcal{GP}(A)$ over any ring $A$ is a Frobenius category, and hence the corresponding stable category $\underline{\mathcal{GP}}(A)$ is a triangulated category.
We denote by ${\rm D}^b(RG)$ the bounded derived category of $RG$, and let ${\rm K}^b(RG)$ and ${\rm K}^b(RG\text{-}{\rm Proj})$ stand for the homotopy categories of bounded $RG$-complexes and bounded complexes of projective $RG$-modules, respectively. Note that the composite of natural functors $${\rm K}^b(RG\text{-}{\rm Proj})\rightarrow {\rm K}^b(RG)\rightarrow {\rm D}^b(RG)$$ is fully faithful, and this allows us to view ${\rm K}^b(RG\text{-}{\rm Proj})$
as a (thick) triangulated subcategory of ${\rm D}^b(RG)$. The singularity category of $RG$, denoted by ${\rm D}_{sg}(RG)$, is defined to be the Verdier quotient
${\rm D}^b(RG)/ {\rm K}^b(RG\text{-}{\rm Proj})$; see \cite{Buc87, Or04} or \cite[Section 6]{Bel00}. Note that ${\rm D}_{sg}(RG)$ vanishes if and only if every module has finite projective dimension, that is, $RG$ is a ring of finite global dimension.
Consider the following composition of functors
$$F: \mathcal{GP}(RG)\hookrightarrow {\rm Mod}(RG)
\stackrel{\iota}{\longrightarrow} {\rm D}^b(RG)
\stackrel{\pi}{\longrightarrow} {\rm D}_{sg}(RG),$$
where the first functor is the inclusion, the second functor $\iota$ is the full embedding which sends every $RG$-module to the corresponding stalk complex concentrated in degree zero, and the last one $\pi$ is the standard quotient functor. Note that $F$ induces a unique functor from the stable category $\underline{\mathcal{GP}}(RG)$ to ${\rm D}_{sg}(RG)$.
Let ${\rm K}_{ac}(RG\text{-}{\rm Proj})$ and ${\rm K}_{tac}(RG\text{-}{\rm Proj})$ denote the homotopy categories of acyclic complexes and totally acyclic complexes of projective $RG$-modules, respectively. There is a functor
$\Omega: {\rm K}_{ac}(RG\text{-}{\rm Proj})\rightarrow \underline{\mathcal{GP}}(RG)$ given by taking the kernel of the boundary map in degree zero.
We summarize some equivalences of triangulated categories as follows, which generalizes \cite[Theorem 3.10]{MS19} and extends the equivalences
$${\rm StMod}(RG)
\simeq \underline{\mathcal{GP}}(RG)
\simeq {\rm D}_{sg}(RG)
\simeq {\rm K}_{tac}(RG\text{-}{\rm Proj}),$$
which are proved in \cite{MS19} in the case that $R$ is a commutative noetherian ring with finite global dimension and $G$ is a group of type $\Phi_R$. We remove the noetherian assumption therein.
\begin{theorem}\label{thm:triequ}
Let $G$ be a group and $R$ be a commutative ring of coefficients. Then we have equivalences of triangulated categories
$${\rm Ho}(\mathcal{F}ib)
\simeq {\rm StMod}(RG)
\simeq \underline{\mathcal{C}of}(RG).$$
If $R$ is a commutative ring with finite global dimension, and $G$ is either a group of type $\Phi_R$ or an ${\rm LH}\mathfrak{F}$-group, then ${\rm Ho}(\mathcal{F}ib)
\simeq \underline{\mathcal{GP}}(RG)$. Moreover, if ${\rm Gcd}_RG$ is finite, then the following categories are equivalent:
$$\begin{aligned} {\rm Ho}(\mathcal{F}ib) &
\simeq {\rm StMod}(RG)
\simeq \underline{\mathcal{C}of}(RG) = \underline{\mathcal{GP}}(RG) \\
&
\simeq {\rm D}_{sg}(RG)
\simeq {\rm K}_{tac}(RG\text{-}{\rm Proj}) = {\rm K}_{ac}(RG\text{-}{\rm Proj}).
\end{aligned}$$
\end{theorem}
\begin{proof}
If $R$ is a commutative ring with finite global dimension, and $G$ is either a group of type $\Phi_R$ or an ${\rm LH}\mathfrak{F}$-group, then Corollary \ref{cor:GP=Cof} implies that $\underline{\mathcal{C}of}(RG)$ and $\underline{\mathcal{GP}}(RG)$ coincide. Furthermore, it follows from Theorem \ref{thm:fGcd} that if ${\rm Gcd}_RG$ is finite, then $RG$ is a Gorenstein regular ring. In this case, every acyclic complex of projective $RG$-modules is totally acyclic. Hence, ${\rm K}_{ac}(RG\text{-}{\rm Proj})$ agrees with ${\rm K}_{tac}(RG\text{-}{\rm Proj})$. The equivalence $\Omega: {\rm K}_{tac}(RG\text{-}{\rm Proj})\rightarrow \underline{\mathcal{GP}}(RG)$ follows from \cite[Theorem 4.16]{Bel00}. We infer from \cite[Theorem 6.9]{Bel00} or \cite[Theorem 3.3]{Chen11} that the natural functor $F: \underline{\mathcal{GP}}(RG)\rightarrow {\rm D}_{sg}(RG)$ is an equivalence. This completes the proof.
\end{proof}
\begin{remark}
The singularity category was first studied by Buchweitz in his unpublished note \cite{Buc87} under the name of ``stable derived category''. In order to distinguish it from the singularity category for finitely generated modules, if the modules are not necessarily finitely generated, the category is sometimes called a \emph{big singularity category}; see e.g. \cite[p.~205]{Chen11}.
\end{remark}
\vskip 10pt
\noindent {\bf Acknowledgements.}\quad
The author is grateful to Professors X.-W. Chen, I. Emmanouil, D.-S. Li, W.-H. Qian and an anonymous reviewer for their helpful suggestions. This work is supported by the National Natural Science Foundation of China (No. 11871125), and Natural Science Foundation of Chongqing, China (No. cstc2018jcyjAX0541).
\vskip 10pt
{\footnotesize \noindent Wei Ren\\
School of Mathematical Sciences, Chongqing Normal University, Chongqing 401331, PR China\\
}
\end{document} |
\begin{document}
\title{Smooth Conjugacy classes of circle diffeomorphisms with irrational rotation number}
\renewcommand{\thefootnote}{}
\footnote{2010 \emph{Mathematics Subject Classification}: 37E10; 37C15, 37E15.}
\footnote{\emph{Key words and phrases}: Circle diffeomorphisms, $C^1$-conjugacy class, Rotation number.}
\renewcommand{\thefootnote}{\arabic{footnote}}
\setcounter{footnote}{0}
\begin{abstract} In this paper we prove the $C^1$-density of every $C^r$-conjugacy class in the closed subset of diffeomorphisms of the circle with a given irrational rotation number.
\end{abstract}
\section{Introduction}
One knows from H. Poincar\'e that the dynamics of a homeomorphism $f$ of the circle depends strongly on the rotation number $\rho(f)$: the existence of periodic orbits is equivalent to the rationality of $\rho(f)$. If, on the contrary, the rotation number is irrational then $f$ is semi-conjugated to the corresponding irrational rotation. The non-injectivity of the semiconjugacy consists in collapsing each wandering interval to a point. In the thirties, A. Denjoy exhibited examples of $C^1$-diffeomorphisms with irrational rotation number but having wandering intervals. He also proved that such a phenomenon cannot appear if $f$ is assumed to be $C^2$: every $C^2$-diffeomorphism with irrational rotation number is topologically conjugated to the corresponding irrational rotation. Note that the conjugating homeomorphism (or the semiconjugacy) is unique up to composition by a rotation.
However, even for $C^2$ or $C^\infty$ or even analytic diffeomorphisms with irrational rotation number, the conjugating homeomorphism is \emph{in general} not differentiable. The expression \emph{in general} here leads to important and deep works, in particular by V. Arnold \cite{Ar}, M. Herman \cite{He} and J.C. Yoccoz \cite{Yo}. Indeed, for rotation numbers satisfying a diophantine condition, every smooth diffeomorphism is smoothly conjugated to a rotation.
Later, different proofs and some generalizations were given by K. Khanin, Y. Sinai \cite{KhaSi1}, \cite{KhaSi2} and Y. Katznelson, D. Ornstein \cite{KaOr1},\cite{KaOr2}.
We consider here $C^1$-diffeomorphisms. In this class of regularity, no arithmetic condition may ensure a regularity on the conjugacy homeomorphism. Even if we did not find references for this precise statement, there is little doubt that every irrational rotation number corresponds to infinitely many $C^1$-conjugacy classes. Let us illustrate these different conjugacy classes by distinct behaviors:
\begin{itemize}
\item the $C^1$-centralizer of a diffeomorphism $f$ is the group of diffeomorphisms commuting with $f$. Any diffeomorphism $g$ $C^1$-conjugated to $f$ has a $C^1$-centralizer conjugated to the one of $f$ (by the same diffeomorphism). Therefore, the isomorphism class of the centralizer is a $C^1$-invariant for a $C^1$-conjugacy class: in particular, if $f$ is $C^1$-conjugated to a rotation then its $C^1$-centralizer is isomorphic to $S^1$. There are examples of diffeomorphisms for which the centralizer is trivial, or some dense subgroup of $\RR$, or much larger than $\RR$ if $f$ admits wandering intervals.
\item the asymptotic behavior of the iterates $f^n$ is also an invariant of the $C^1$ conjugacy class: if a $C^1$-diffeomorphism is $C^1$ conjugated to a rotation its derivatives $df^n$ are uniformly bounded for $n\in \ZZ$. However \cite[Theorem B]{BoCrWi} implies that any rotation number contains a $C^1$-diffeomorphism for which the sequence $\sup\{df^n(x),df^{-n}(x)\}$, $n\in\ZZ$, is unbounded on every orbit.
\end{itemize}
All these kinds of properties are invariant under $C^1$-conjugacy, and they show the great variety of $C^1$-behaviors of $C^1$-diffeomorphisms with the same irrational rotation number.
In this paper we consider the space of diffeomorphisms having a given irrational rotation number $\alpha\in(\RR\setminus {\mathbb Q})/\ZZ$. In his thesis Herman denotes by $F^r_\alpha\subset Diff^r(S^1)$ the closed subset of $C^r$-diffeomorphisms whose rotation number is $\alpha$. He proved several results on $F^r_\alpha$: it is connected and $F^s_\alpha$, for $s>r$, is dense in $F^r_\alpha$ for the $C^r$-topology. As we said in our comments above, $F^1_\alpha$ always contains many different $C^1$-behaviors. The aim of this paper is to show that these behaviors are indeed equidistributed in $F^1_\alpha$, giving some homogeneity of this space. More precisely:
Given any diffeomorphism $f\in Diff^1(S^1)$ and $r\in\NN$ we denote by ${\cal C}^r(f)$ its $C^r$-conjugacy class $\{hfh^{-1}, h\in Diff^r(S^1)\}$; notice that elements in the class ${\cal C}^1(f)$ share the same $C^1$-properties as $f$ (same $C^1$-centralizer, same distorsion properties, etc.). We prove:
\begin{theo}\label{t.conjugacy} Given any $\alpha\in (\RR\setminus {\mathbb Q})/\ZZ$ and any $f\in F^1_\alpha$, the $C^1$-conjugacy class ${\cal C}^1(f)$ of $f$ is dense in $F^1_\alpha$ for the $C^1$-topology.
\end{theo}
Approaching the conjugation diffeomorphism $h$ by some smooth diffeomorphism, one gets that ${\cal C}^r(f)$ is also $C^1$-dense, for every $r\in\NN$.
The same kind of question for the rational rotation number case can also be considered. That case is very related to the question of conjugacy classes for diffeomorphisms on the segment $[0,1]$: this problem is solved in \cite{Fa} which gives complete (and different) answers to two natural questions:
\begin{itemize}
\item Under what conditions does the $C^1$-conjugacy class of a diffeomorphism $f$ of $[0,1]$ contain $g$ in its closure?
\item Under what conditions does it exist a path $h_t$, $t\in[0,1)$ of diffeomorphisms so that $ h_0= Id$ and $h_tfh_t^{-1}$ tends to $g$ for $t\to 1$?
\end{itemize}
This approach suggests a natural question in our setting:
\begin{ques} Given $f,g\in F^1_\alpha$ does it exist a path $h_t$, $t\in[0,1)$
of diffeomorphisms of $S^1$ so that $ h_0= Id$ and $h_tfh_t^{-1}$ tends to $g$ for $t\to 1$?
\end{ques}
After announcing our results and the ones of \cite{Fa}, A. Navas \cite{Na1}
found a very simple, elementary and clever argument that partially answers this question.
He showed that
\begin{theo}[Navas] Given any irrational $\alpha$ and $f\in F^1_\alpha$, there is a path $h_t$, $t\in[0,1)$
of diffeomorphisms of $S^1$ so that $ h_0= Id$ and $h_tfh_t^{-1}$ tends to $R_\alpha$ for $t\to 1$.
\end{theo}
Navas' argument consists in building the derivatives of the conjugacy $h_t$ as an approximate solution of a cohomological equation, the rotation $R_\alpha$ being characterized in $F^1_\alpha$ by the vanishing of the logarithm of its derivative. This argument does not seem to be adaptable for going from $f$ to $g$, when $g$ is not smoothly conjugated to the rotation.
Notice that a similar result had been proved by Herman in \cite{He} for $C^2$-diffeomorphisms: he proved in that setting that $f$ can be conjugated arbitrarily close to the rotation in the $C^{1 +}$ bounded variations topology.
Given two diffeomorphisms $(f_0, g_0) \in Diff^1(S^1) \times Diff^1(S^1)$ and $r\in\NN$ we denote by ${\cal C}^r(f_0,g_0)$ its $C^r$-conjugacy class $\{(f,g) ,\ f=hf_0h^{-1},\ g=hg_0h^{-1},\ h\in Diff^r(S^1)\}$.
One of our motivation for this paper is the same question for commuting diffeomorphisms
\begin{ques}\label{q.commuting} Given two irrational rotation numbers $\alpha, \beta$, we consider the space of $C^1$ commuting diffeomorphisms $f, g$ with respective rotation numbers $\alpha$ and $\beta$, endowed with the $C^1$ topology.
Are all the $C^1$- conjugacy classes dense in this space?
\end{ques}
This problem is very related to a famous old question posed by Rosenberg: does there exist a pair $(f,g)$ such that the $\ZZ^2$ action is $C^r$-structurally stable?
A positive answer to Question~\ref{q.commuting} would answer negatively to Rosenberg question for $r=1$.
In that direction, Navas \cite{Na1} proved recently that every conjugacy class contains the pair of rotations $(R_\alpha,R_\beta)$ in its closure.
Notice that, in higher differentiability, \cite{KlNa} \cite{DeKlNa} provide a generalization of Denjoy theorem for $\ZZ^n$ actions on the circle by $C^{1+\theta}$-diffeomorphisms, where the number $\theta\in(0,1)$ depends on $n$.
For smooth action J. Moser \cite{Mo} posed the problem of smooth linearization of commuting circle diffeomorphisms. In this direction Fayad and Khanin \cite{FaKha} proved that a finite number of commuting $C^{\infty}$ diffeomorphisms, with simultaneously Diophantine rotation numbers are smoothly conjugated to rotations.
\subsection{Idea of the proof and organization of the paper}
The idea of the proof is very simple. Given $f$ and $g$ with the same irrational rotation number, we want to build a conjugate $hfh^{-1}$ of $f$ arbitrarily $C^1$-close to $g$.
For that, we consider long orbit segments $x\dots, f^n(x)$ and $y\dots,g^n(y)$ of the same length. They are ordered in the same way on the circle.
Therefore one may consider a homeomorphism $H$ of the circle so that $H(f^i(x))=g^i(y)$ for $0\leq i\leq n$ and we can choose $H$ being affine on each connected component of the complement of the orbit segment. If $n$ is large enough and if $f$ and $g$ have dense orbits, the connected component of the complement of these orbit segments are arbitrarily small so that $f$ and $g$ are almost affine on each component, and the derivative on each component is almost the ratio between the component and its image. The same happens for the piecewise $C^1$ homeomorphism $HfH^{-1}$. Furthermore, up to the components starting at $y$ or at $g^n(y)$ (i.e. the extremities of the orbit segment) the image of a component by $g$ and by $HfH^{-1}$ are the same: as a direct consequence their derivatives are almost equal.
For the derivatives of $HfH^{-1}$ and $g$ being everywhere (that is, even on the components adjacent to $y$ and $g^n(y)$) almost equal, we show that it is sufficient that the ratios between the lengths of the components adjacent to the extremal points $x$, $f^n(x)$ and $y$, $g^n(y)$ are the same for $f$ and for $g$. These ratios of the lengths of the components adjacent to the initial and end point of the orbit segment are called \emph{the initial and final ratios} of $f$ and $g$.
Then, the announced diffeomorphism $h$ is a smoothing of $H$. This is not so easy because the derivative of $H$ can be very different at the right and the left of a singular point, but Proposition~\ref{p.smoothing} solves this difficulty.
Another difficulty comes from the fact that $f$ or $g$ may not have dense orbits, when we deal with $C^1$-diffeomorphisms. The argument can be adapted in that case, once one notices that one may perform a $C^1$ conjugacy so that the distorsion on the wandering interval is arbitrarily small (see Proposition~\ref{p.denjoy}): thus the diffeomorphism is still almost affine on the complement of long orbit segments.
For concluding the proof it remains to show that one can perform small perturbations of $g$ so that its initial and final ratios will be equal to the ones of $f$. For performing such a perturbation, we would like the components adjacent to the extremal points to be disjoint from their iterates during a long time, allowing us to change their ratio slowly. This is not always the case. For that, we need to choose carefully the length $n$ of the orbit segments. We build a sequence of times $k_i$ called \emph{characteristic times} satisfying the property of having a long wandering time.
Lemma~\ref{l.subexp} gives a bound on the ratio. This bound allows us to show in Proposition~\ref{p.caracteristic} that a small perturbation of $g$ at the characteristic times enables us to get every possible initial and final ratio of $f$, ending the proof.
\section{Geometry of orbit segments}\label{s.geometry}
In this section we define the fundamental tools of the proof: for every diffeomorphism $f$ with irrational rotation number $\alpha$ we consider orbit segments $x,\dots f^n(x)$, forbidding some exceptional relative position of the first and end point; we call them \emph{adapted segment}. For this adapted segments we define the initial and final ratio which are the ratio of the lengths of the components adjacent to $x$ and to $f^n(x)$.
We consider diffeomorphisms $f$ and $g$ with the same irrational rotation number and admitting adapted segments $\{f^i(x)\},\{g^i(y)\}$, $0\leq i\leq n$ of the same lengths $n$ and with the same initial and final ratio. Then we consider the piecewise affine homeomorphism $H$ sending $f^i(x)$ to $g^i(y)$, $0\leq i\leq n$, which is affine on each component of $S^1\setminus \{f^i(x)\}$. We show that, if the distorsion of $f$ and $g$ on the components of the complement of the orbits segments is small then the conjugate of $f$ by $H$ is a piecewise $C^1$ homeomorphism whose derivative at each point is close to the one of $g$.
We notice that this is the case when $f$ and $g$ have dense orbits. If $f$ has a wandering interval, we solve the difficulty in Section~\ref{s.denjoy} by conjugating $f$ to a diffeomorphism with small distorsion on wandering intervals.
Then we show that we can get a smooth conjugacy of $f$ close to $g$ by smoothing the homeomorphisms $H$ in section~\ref{s.smoothing}.
\subsection{Adapted segments, initial and final ratio, and conjugacy}
Two sequences $x_1,\dots,x_n$ and $y_1,\dots, y_n$ of points of $S^1$ are \emph{similarly ordered} on the circle if there is an orientation preserving homeomorphism $\varphi\colon S^1\to S^1$ with $\varphi(x_i)=y_i$ for $i\in\{1,\dots,n\}$.
An \emph{orbit segment of length $n$}, of a diffeomorphism $f$ is a sequence $\{x,f(x),\dots,f^n(x)\}$.
Given an orbit segment $\{x,f(x),\dots,f^n(x)\}$; $n\geq 2$, we call \emph{initial and final basic intervals} of the orbit segment the interval $[a,b]$ and $[c,d]$, respectively, such that:
\begin{itemize}
\item $a,b,c,d\in \{x,f(x),\dots,f^n(x)\}$
\item $x\in(a,b)$ and $\{x\}=(a,b)\cap \{x,f(x),\dots,f^n(x)\}$
\item $f^n(x)\in(c,d)$ and $\{f^n(x)\}= (c,d)\cap\{x,f(x),\dots,f^n(x)\}$
\end{itemize}
So there is $i,j\in\{1,\dots,n-1\}$ such that $c=f^i(x)$ and $d=f^j(x)$. Notice that $a=f^{n-j}(x)$ and $b=f^{n-i}(x)$.
An orbit segment $\{x,\dots,f^n(x)\}$ is \emph{adapted} if $i\neq 0$, $j\neq 0$ and $i+j\neq n-1$.
\begin{lemm} If $\{x,\dots,f^n(x)\}$ is an adapted orbit segment with initial and final basic intervals $[a,b]$ and $[c,d]$ then the open intervals $(f(c),f(d))$ and $(f^{-1}(a),f^{-1}(b))$ are disjoint from the orbit segment $\{x,\dots,f^n(x)\}$.
\end{lemm}
\begin{demo} As $i,j$ are different from $n$ by definition, $f(c)$ and $f(d)$ are points of the orbit segments $\{x,\dots,f^n(x)\}$. Assume (arguing by absurd) $(f(c),f(d))\cap \{x,\dots,f^n(x)\}\neq \emptyset$. That is, there is $k\in\{0,\dots n\}$ with $f^k(x)\in(f(c),f(d))$; then
$f^{k-1}(x)\in(c,d)$. If $k\neq 0$ this contradicts the definition of $(c,d)$. Therefore $(f(c),f(d))\cap \{x,\dots,f^n(x)\}= \{x\}$. This means that $i+1=n-j$, that is $i+j=n-1$, contradicting the definition of adapted segment.
This contradiction proves that $(f(c),f(d))\cap \{x,\dots,f^n(x)\}= \emptyset$. The proof of $(f^{-1}(a),f^{-1}(b))\cap \{x,\dots,f^n(x)\}= \emptyset$ is analogous.
\end{demo}
\begin{rema} Given an irrational rotation number $\alpha$, the fact that an orbit segment $\{x,\dots,f^n(x)\}$ is an adapted orbit segment only depends on the length $n\geq 0$: more precisely, if $f,g\in F^0_\alpha$ and if $\{x,\dots,f^n(x)\}$ is an adapted orbit segment for $f$, then for every $y\in S^1$, $\{y,\dots,g^n(y)\}$ is an adapted orbit segment.
\end{rema}
Given an adapted orbit segment $\{x,\dots,f^n(x)\}$ we call \emph{initial and final ratios} the quotients
$$R_0= \frac{ \ell([a,x])}{\ell([x,b])}\mbox{ and } R_n=\frac{ \ell([c,f^n(x)])}{\ell([f^n(x),d])}.$$
\begin{theo}\label{t.geometry} Let $f$ and $g$ be two diffeomorphisms with irrational rotation number $\alpha$, both $f$ and $g$ with dense orbits. Assume that, for any
$\varepsilon>0$ there is $\tilde g$ $\varepsilon$-$C^1$-close to $g$, $n\in\NN$ and adapted orbit segments $\{x,\dots, f^n(x)\}$ and $\{y,\dots,\tilde g^n(y)\}$ similarly ordered and having the same initial and final ratios.
Then there are diffeomorphisms $h_n$ such that $h_n f h_n^{-1}$ tends to $g$ in the $C^1$-topology.
\end{theo}
The diffeomorphism $h_n$ will be obtained as a smoothing of the piecewise affine homeomorphism $H_n$ defined by $H_n(f^k(x))=\tilde g^k(y)$, $k\in\{0,\dots n\}$ and $H_n$ affine in the connected components of the complement of the orbit segment.
\begin{defi} The distorsion $\Delta(g,I)$ of a diffeomorphism $g$ on some compact interval $I$ is the maximum of the logarithm of the quotient of the derivative of $g$ at two point of $I$:
$$\Delta(g,I)=\max_{x,y\in I} \log (\frac{dg(x)}{dg(y)}).$$
\end{defi}
\begin{lemm}\label{l.affine} Consider diffeomorphisms $f$, $g$ and a positive number $\varepsilon>0$. Assume that $f$ and $g$ admit adapted orbit segments
$x\dots,f^n(x)$ and $y,\dots, g^n(y)$ similarly ordered and with the same initial and final ratios. Assume furthermore that the distorsion of $f$ and $g$ on each connected component of the complement of the orbit segment is bounded by $\varepsilon$.
Consider the piecewise affine homeomorphism $H$ defined as $H(f^i(x))=g^i(y)$ and $H$ is affine on the connected components of the complement of the orbit segment.
Then:
\begin{itemize}
\item $HfH^{-1}$ is a piecewise $C^1$-diffeomorphism whose derivatives (at the right and at the left) at every point is close to the derivative of $g$ at the same point. More precisely, $\exp(-2\varepsilon)\leq \frac{d(HfH^{-1})}{d(g)}\leq \exp(2\varepsilon).$
\item $H$ is differentiable at $x$ and $f^n(x)$
\item $HfH^{-1}$ is $C^1$ up to the segment $\{y,\dots,g^n(y)\}$
\end{itemize}
\end{lemm}
\begin{demo}
$HfH^{-1}$ is a piecewise $C^1$-diffeomorphism as it is a composition of a piecewise $C^1$-diffeomorphisms.
Notice that $H$ is affine from $[a(f),x]$ to $[a(g),y]$ and from $[x,b(f)]$ to $[y,b(g)]$ and the ratios $\frac{ \ell([a(f),x])}{\ell([x,b(f)])}$ and $\frac{ \ell([a(g),y])}{\ell([y,b(g)])}$ are equal, then $\frac{ \ell([a(f),x])}{\ell([a(g),y])}=\frac{ \ell([x,b(f)])}{\ell([y,b(g)])}$. This implies that $H$ has the same derivatives at the right and the left sides of $x$, hence is affine in $[a,b]$ (and so smooth at $x$). The proof that $H$ is affine on $[c,d]$ (and so differentiable at $f^n(x)$) is analogous using the final ratios of $f$ and $g$.
Thus $H^{-1}$ is differentiable out of $\{g(y),\dots g^{n-1}(y)\}$.
Furthermore a point $z$ is not singular for $HfH^{-1}$ if $z\notin \{g(y),\dots,g^{n-1}(y)\}$ and $f(H^{-1}(z))\notin \{f(x),\dots, f^{n-1}(x)\}$ that is if $z\notin \{y,\dots,g^{n-1}(y)\}$.
It remains to compare the derivative of $HfH^{-1}$ with the derivative of $g$. For that, notice that on each connected component $C$ of the complement of $\{y,\dots,g^n(y)\}$ the map is the composition of affine maps with the restriction of $f$ to a connected component of the complement of $\{x,\dots, f^n(x)\}$. Composing with affine maps does not modify the distorsion. Therefore the distorsion of $HfH^{-1}$ on $C$ is bounded by $ \varepsilon$.
The distorsion of $g$ on $C$ is also bounded by $\varepsilon$. Furthermore, by construction, $HfH^{-1}(C)=g(C)$. This implies that there is at least a point in $C$ where the derivative of $HfH^{-1}$ and $g$ coincide. As a consequence
$$\exp (-2\varepsilon)\leq \frac{d(HfH^{-1})}{d(g)}\leq \exp(2\varepsilon).$$
\end{demo}
\begin{rema}\label{r.affine} Notice that the derivatives (at the right and at the left) of $HfH^{-1}$ at every point is $\varepsilon_0$-close to the derivative of $g$, where $\varepsilon_0=(\exp (2\varepsilon)-1) M< 3 \varepsilon M$ and $M= \sup_ {x \in S^1} |dg(x)|$.
\end{rema}
\begin{prop}\label{p.smoothing} Let $f$ be a $C^1$ diffeomorphism of the circle, $\varepsilon>0$ and $\{x,\dots, f^n(x)\}$ be an adapted orbit segment. Let $H$ be a piecewise affine homeomorphism, smooth out of $\{f(x),\dots, f^{n-1}(x)\}$ such that the right and left derivatives of $HfH^{-1}$ are $\varepsilon$ close at each point $y\in S^1$.
Then there is a smooth diffeomorphism $h$ arbitrarily $C^0$-close to $H$ and such that the derivative of $hfh^{-1}$ is $2\varepsilon$-close to the right and left derivative of $HfH^{-1}$ at every point.
\end{prop}
We postpone the proof of Proposition~\ref{p.smoothing} to the next section.
\begin{demo}[Proof of Theorem~\ref{t.geometry}] Consider $f$, $g$, and $\varepsilon$. As the orbits of $f$ and $g$ are assumed to be dense, we can choose $n$ such that $x,\dots f^n(x)$ and $y,\dots, g^n(y)$ are adapted segments such that the distorsion of $f$ and $g$ on the connected components of the complements of the respective orbit segments is bounded by $\varepsilon/100$. Choosing $n$ large enough we may approach $g$ by $\tilde g$ such that the corresponding orbit segment of $\tilde g$ is still similarly ordered, has the same initial and final ratios as the orbit segment of $f$, and the distorsion of $\tilde g$ on the complement of its orbit segment is bounded by $\varepsilon$.
Then Lemma~\ref{l.affine} builds a piecewise $C^1$ conjugate of $f$ which is $\varepsilon_0$-$C^1$-close to $\tilde g$, and Proposition~\ref{p.smoothing} allows us to smooth this piecewise conjugacy while keeping the $C^1$-proximity to $\tilde g$, hence to $g$.
\end{demo}
\subsection{Smoothing a piecewise linear conjugacy: proof of Proposition~\ref{p.smoothing}}\label{s.smoothing}
We start by linearizing the diffeomorphism $f$ in a neighborhood of the adapted orbit segment:
\begin{lemm} \label{l.linearising} Let $f$ be a diffeomorphism of $S^1$ and $x,\dots,f^n(x)$ be an orbit segment of a non periodic point.
There is a family of diffeomorphisms $\varphi_t\colon S^1\to S^1, t\in (0,t_0]$ with the following properties
\begin{itemize}
\item $\varphi_t(f^i(x))=f^i(x)$ for every $t$ and $i\in\{0,\dots,n+1\}$,
\item $\varphi_t$ tends to $id|_{S^1}$ for the $C^1$-topology, for $t\to 0$.
\item the derivative of $\varphi_t$ at $f^i(x)$, $i\in\{0,\dots,n+1\}$ is equal to $1$,
\item The restriction of $\varphi_t^{-1} f\varphi_t$ to each segment $[f^i(x)-t,f^i(x)+t]$ is the affine map onto $[f^{i+1}(x)- t\cdot df(f^i(x)), f^{i+1}(x)+ t \cdot df(f^i(x))]$.
\end{itemize}
\end{lemm}
(The proof of this lemma is easy. It is just a change of coordinates, once one notes that all the $f^i(x)$ are distinct.)
For every positive $\alpha,\beta$, we denote by $h_{\alpha,\beta}$ the map defined by
\begin{itemize}
\item $x\mapsto \alpha x$ for $x<-1$
\item $x\mapsto \frac{\beta-\alpha}4 x^2+\frac{\beta+\alpha}2 x+\frac{\beta-\alpha}4$ for $x\in[-1,1]$,
\item $x\mapsto \beta x$ for $x>1$
\end{itemize}
An elementary calculation shows that
\begin{lemm} $h_{\alpha,\beta}$ is a $C^1$ diffeomorphism of $\RR$ whose derivative is everywhere contained in $[\alpha,\beta]$.
\end{lemm}
\begin{lemm} \label{l.smoothing} Given positive $\alpha,\beta,\gamma,\delta$ and a point $x\in \RR$ one has
$$\min\{\frac{\alpha}{\gamma},\frac{\beta}{\delta}\} \leq \frac{dh_{\alpha,\beta}(x)}{dh_{\gamma,\delta}(x)}\leq \max\{\frac{\alpha}{\gamma},\frac{\beta}{\delta}\}$$
\end{lemm}
\begin{demo}The proof is straightforward if $x\notin [-1,1]$, since the maps $h_{\alpha,\beta}$ and $h_{\gamma,\delta}$ are linear with slope $\alpha$ and $\gamma$ (if $x<-1$) or $\beta$ and $\delta$ (if $x>1$).
For $x\in [-1,1]$ one has: $dh_{\alpha,\beta}(x)=\frac{\beta-\alpha}2 x+\frac{\beta+\alpha}2= \frac{1-x}2 \alpha +\frac {1+x}2 \beta$ and $h_{\gamma,\delta}=\frac{1-x}2 \gamma +\frac {1+x}2 \delta$, so that
$$\frac{dh_{\alpha,\beta}(x)}{dh_{\gamma,\delta}(x)}=\frac{\frac{1-x}2 \alpha +\frac {1+x}2 \beta}{\frac{1-x}2 \gamma +\frac {1+x}2 \delta}$$
The announced inequality now follows immediately from the following (classical) claim:
\begin{clai}Let $a,b,c,d$ be positive numbers. Then
$$\inf\left\{\frac ac,\frac bd\right\}\leq \frac{a+b}{c+d}\leq \max\left\{\frac ac,\frac bd\right\}.$$
\end{clai}
\begin{demo}[Proof of the claim] Assume $\frac ac\leq \frac bd$ (the converse case is similar). then $a\leq \frac{cb}d$. Therefore $\frac{a+b}{c+d}\leq \frac{\frac{cb}d+b}{c+d}= \frac{\frac{(c+d)b}d}{c+d}=\frac bd$
This inequality applied now to $\frac{c+d}{a+b}$ gives now $\frac{c+d}{a+b}\leq\frac ca$ that is
$$\frac ac \leq\frac{a+b}{c+d}\leq \frac bd$$
which is the announced inequality in that case.
\end{demo}
\end{demo}
If $H$ is a piecewise affine diffeomorphism with a singular point at $x$ and $\alpha,\beta$ are the derivatives at the right and the left of $x$, and $\eta>0$ is small enough so that $H$ is affine on $[x-\eta,x]$ and on $[x,x+\eta]$, we denote by $h_{\alpha,\beta,x,\eta}$ the diffeomorphism defined from $[x-\eta,x+\eta]$ to $H([x-\eta,x+\eta])$ which is $B^{-1}\circ h_{\alpha,\beta}\circ A$, where $A$ is the orientation preserving affine diffeomorphism that sends $[x-\eta,x+\eta]$ onto $[-1,1]$ and $B$ is the orientation preserving affine diffeomorphism sending $H([x-\eta,x+\eta])=[H(x)-\alpha\eta,H(x)+\beta\eta]$ onto $[-\alpha, \beta]$. Notice that
\begin{enumerate}
\item the linear part of $A$ and $B$ coincide, therefore the derivative $d h_{\alpha,\beta,x,\eta}(z)$ is $d h_{\alpha,\beta}(A(z))$.
\item the derivative of $h_{\alpha,\beta,x,\eta}$ and of $H$ coincide on $x-\eta$ and $x+\eta$.
\item if $H$ is smooth at $x$, that is $\alpha=\beta$ then $h_{\alpha,\beta,x,\eta}$ coincides with $H$.
\end{enumerate}
\vskip 2mm
We are now ready for proving Proposition~\ref{p.smoothing}:
\vskip 2mm
\begin{demo}[Proof of Proposition~\ref{p.smoothing}]
Let $f$ be a diffeomorphism and $\{x,\dots, f^n(x)\}$ an adapted orbit segment. Let $H$ be a piecewise affine homeomorphism, smooth out of $\{f(x),\dots, f^{n-1}(x)\}$ such that the right and left derivatives of $HfH^{-1}$ are $\varepsilon$ close at each point $y\in S^1$.
Up to replacing $f$ by a conjugate $\varphi_t^{-1}f\varphi_t$ given by lemma~\ref{l.linearising}, one may assume that there is $t>0$ such that $f$ is affine in restriction to each interval $[f^i(x)-t, f^i(x)+t]$ for $i\in\{0,\dots,n\}$.
Notice that, for any $\eta>0$ small enough, and $i\in\{0,\dots, n+1\}$, the interval $f^i([x-\eta,x+\eta])$ is contained in $[f^i(x)-t,f^i(x)+t]$ where $f$ is affine and $f^i([x-\eta,x+\eta])= [f^i(x)- df^i(x)\cdot \eta, f^i(x)+ df^i(x)\cdot \eta]$.
Let us denote for simplicity:
\begin{itemize}
\item $\eta_i=df^i(x)\cdot \eta$.
\item $\alpha_i, \beta_i$ are the left and right derivative of $H$ at $f^i(x)$.
\item $A_i\colon [f^i(x)-\eta_i,f^i(x)+\eta_i]\to [-1,1]$ and $B_i\colon [H(f^ix)-\alpha_i\eta_i,H(f^i(x))+\beta_i\eta_i] \to [-\alpha_i, \beta_i]$ are the orientation preserving affine maps.
\end{itemize}
We denote by $h_\eta$ the diffeomorphism of $S^1$ defined as follows:
\begin{itemize}
\item $h_\eta$ coincide with $H$ out of $\bigcup_{i=1}^{n-1} [f^i(x)-\eta_i, f^i(x)+\eta_i]$
\item $h_\eta= h_{\alpha_i,\beta_i,f^i(x),\eta_i}$ on $[f^i(x)-\eta_i, f^i(x)+\eta_i]$
\end{itemize}
Consider $h_\eta f h_\eta^{-1}$. For $y \notin H([f^i(x)-\eta_i, f^i(x)+\eta_i])$, $i\in\{0,\dots,n-1\}$, one has $h_\eta f h_\eta^{-1}(y) =H f H^{-1}(y)$ so that there is nothing to prove.
Consider $y\in H([f^i(x)-\eta_i, f^i(x)+\eta_i])$. Then
$$h_\eta f h_\eta^{-1}(y)=h_{\alpha_{i+1},\beta_{i+1},f^{i+1}(x),\eta_{i+1}}\circ f\circ h_{\alpha_i,\beta_i,f^i(x),\eta_i}^{-1}(y)$$
Thus, setting $z=h_{\alpha_i,\beta_i,f^i(x),\eta_i}^{-1}(y)$, the derivative is
$$\begin{array}{ccc}
d h_\eta f h_\eta^{-1}(y)&=&df(z)\cdot \frac{dh_{\alpha_{i+1},\beta_{i+1},f^{i+1}(x),\eta_{i+1}}(f(z))}{dh_{\alpha_i,\beta_i,f^i(x),\eta_i}(z)}\\
&=& df(z)\cdot \frac{dh_{\alpha_{i+1},\beta_{i+1}}(A_{i+1}(f(z)))}{dh_{\alpha_i,\beta_i}(A_i(z))}
\end{array}
$$
From the fact that $f$ is affine and from the definition of $A_i$ and $A_{i+1}$ one easily checks that $A_{i+1} f= A_i$. This implies
$$d h_\eta f h_\eta^{-1}(y)= df(z)\cdot \frac{dh_{\alpha_{i+1},\beta_{i+1}}(A_i(z))}{dh_{\alpha_i,\beta_i}(A_i(z))}.$$
Since $z \in [f^i(x)-\eta_i,f^i(x)+\eta_i]$, one has that
$$d h_\eta f h_\eta^{-1}(y)= df(f^i(x))\cdot \frac{dh_{\alpha_{i+1},\beta_{i+1}}(A_i(z))}{dh_{\alpha_i,\beta_i}(A_i(z))}.$$
According to Lemma~\ref{l.smoothing} one deduces
$$df(f^i(x))\inf\left\{\frac{\alpha_{i+1}}{\alpha_i}, \frac{\beta_{i+1}}{\beta_i} \right\}\leq d h_\eta f h_\eta^{-1}(y) \leq {df(f^i(x))}\max\left\{\frac{\alpha_{i+1}}{\alpha_i}, \frac{\beta_{i+1}}{\beta_i}\right\}$$
Recall that the derivative of $HfH^{-1}$ is $\frac{\alpha_{i+1}}{\alpha_i}df(f^i(x))$ on $H([f^i(x)-\eta_i,f^i(x)])$ and is $\frac{\beta_{i+1}}{\beta_i}df(f^i(x))$ on $H([f^i(x), f^i(x)+\eta_i])$.
Therefore, the hypothesis on $H$ is that $\left | \frac{\alpha_{i+1}}{\alpha_i}df(f^i(x))-\frac{\beta_{i+1}}{\beta_i}df(f^i(x))\right | <\varepsilon$.
One deduces that $\left | dHfH^{-1}(y)-d h_\eta f h_\eta^{-1}(y)\right | <2\varepsilon$, as announced.
\end{demo}
\subsection{Distorsion in wandering intervals for Denjoy counter examples}\label{s.denjoy}
The aim of this section is to prove the following proposition which allows to generalize Theorem~\ref{t.geometry} removing the assumption of dense orbits.
If $f$ is a $C^1$-diffeomorphism with irrational rotation number, we call \emph{(maximal) wandering interval} the closure of each connected component of the complement of the unique minimal set of $f$.
\begin{prop}\label{p.denjoy} Given any diffeomorphism $f\colon S^1\to S^1$ with irrational rotation number $\alpha$. Then for any $\varepsilon>0$ there is a diffeomorphism $h$ such that the distorsion of $g=hfh^{-1}$ on each wandering interval $I$ is bounded by $\varepsilon$ .
\end{prop}
\begin{rema}\label{r.denjoy} \begin{enumerate}
\item Let $f$ be a diffeomorphism with an irrational rotation number and $\eta>0$. Then for any point $x$ belonging to the minimal set (that is, $x$ does not belong to any wandering interval) there is $n_1>0$ such that for every $n>n_1$, the closure $I$ of every connected component of the complement of orbit segment $x,\dots, f^n(x)$ satisfies one of the following possibilities:
\begin{itemize}
\item either the length of $I$ is smaller than $\eta$
\item or there is one wandering interval $J$ contained in $I$ such that the sum of the length of the two components of $I\setminus J$ is smaller than $\eta$.
\end{itemize}
\item Assume now that the distorsion of $f$ is smaller than $\varepsilon/2$ on each wandering interval. Notice that, due to the uniform continuity of the derivative of $f$, its distorsion on small intervals is very small. Then there is $n_2>0$ such that for any $x$ in the minimal set and any $n>n_2$, the distorsion of $f$ on each connected component of the complement of the orbit segment $x,\dots, f^{n}(x)$ is bounded by $\varepsilon$.
\end{enumerate}
\end{rema}
The proof of Proposition~\ref{p.denjoy} is divided in two main parts. We first perturb the derivative by conjugacy inside the orbits of wandering intervals in order to get small distorsion. Then we will extend the conjugacy on the circle without changing the distorsion inside the wandering intervals.
\begin{lemm}\label{l.wandering} Let $f$ be a $C^1$-diffeomorphism of $S^1$ with an irrational rotation number, and $\varepsilon>0$. Let $[a,b]$ be a maximal wandering interval. Then there is a family of diffeomorphisms $h_i\colon f^i([a,b])\to f^i([a,b])$, $i\in\ZZ$ such that:
\begin{itemize}
\item there is $n_0$ such that $h_i=id|_{f^i([a,b])}$ for $|i|\geq n_0$;
\item the distorsion of $h_{i+1}\circ f\circ h_i^{-1}\colon f^i([a,b])\to f^{i+1}([a,b])$ is bounded by $\varepsilon$
\end{itemize}
\end{lemm}
For proving Lemma~\ref{l.wandering} we will use the following lemma:
\begin{lemm} \label{l.fragmentation}Let $\{f_i\}_{i\in\ZZ}$ be a sequence of diffeomorphisms of $[0,1]$ such that $f_n\to id$ for the $C^1$-topology as $n\to\pm\infty$.
Then, there is $\{g_i\}_{i\in\ZZ}$ , arbitrarily $C^1$ close to the identity map, and $n_0$ such that:
\begin{itemize}
\item for $|i|\geq n_0$ one has $g_i=f_i$
\item $g_{n_0}\circ g_{n_0-1} \circ\cdots \circ g_{-n_0+1}\circ g_{-n_0} =f_{n_0}\circ f_{n_0-1} \circ\cdots \circ f_{-n_0+1}\circ f_{-n_0}$
\end{itemize}
\end{lemm}
\begin{demo} Let $n_1>1$ be such that $f_n$ and $f_{n+1} f_n$ are $\varepsilon/2$ close to identity for $|n|\geq n_1$. We fix $g_i=f_i$ for $i<-n_1$. Consider $F=f_{n_1}\circ \cdots\circ f_{-n_1}$. A classical elementary result asserts that any orientation preserving diffeomorphism of $[0,1]$ is the product of finitely many diffeomorphisms arbitrarily close to identity. Therefore we can write
$F= g_{m_1} \circ \cdots\circ g_{-n_1}$ with every $g_i$ $\varepsilon$-$C^1$ close to identity. Up to adding several $g_i$ equal to identity, one may assume without loss of generality that $m_1=n_1+k_1$ with $k_1>0$.
Then we write:
\begin{itemize}
\item $g_{m_1+i}=f_{n_1+2i}f_{n_1+2i-1}$ for $i=1, \dots k_1$.
\item $g_i=f_i$ for $i>m_1+k_1=n_1+2k_1$.
\item $n_0>n_1+2k_1$
\end{itemize}
Thus $g_i$ is $\varepsilon$-close to identity for every $i$ and
$$g_{m_1+k_1}\circ \dots\circ g_{m_1}\circ\dots\circ g_{-n_1}= f_{n_1+2k_1}\circ \dots \circ f_{n_1+1}\circ F= f_{n_1+2k_1}\circ\dots \circ f_{-n_1}.$$
As a direct consequence
$g_{n_0}\circ g_{n_0-1} \circ\cdots \circ g_{-n_0+1}\circ g_{-n_0} =f_{n_0}\circ f_{n_0-1} \circ\cdots \circ f_{-n_0+1}\circ f_{-n_0}$
concluding the proof of the lemma.
\end{demo}
\begin{demo}[Proof of Lemma~\ref{l.wandering}]Let $\varphi_i\colon f^i([a,b])\to [0,1]$ be the orientation preserving affine diffeomorphism. Then we write $f_i=\varphi_{i+1} f\varphi_i^{-1}$. Notice that for $i$ big, the length of $f^i([a,b])$ is small, hence the distorsion of $f$ on $f^i([a,b])$ controls the $C^1$ distance of $f_i$ to the identity.
Therefore, the sequence $f_i$ satisfies the hypothesis of Lemma~\ref{l.fragmentation}. Consider $n_0>0$ and $g_i$ the sequence of diffeomorphisms given by lemma~\ref{l.fragmentation} such that $g_i$ is $\frac{\varepsilon}{4}$ $C^1$ close to identity. In particular $f_i=g_i$ for $i<-n_0$.
One set
\begin{itemize}
\item $h_i=id$ for $i<-n_0-1$.
\item $h_i=\varphi_i^{-1}\circ g_{i-1}\circ\cdots\circ g_{-n_0-1}\circ f_{-n_0-1}^{-1} \circ\cdots\circ f_{i-1}^{-1}\circ \varphi_i$
\end{itemize}
By definition of the $g_i$ one can check that $h_i=id$ for $i>n_0$.
Furthermore
$$\begin{array}{ccl}
h_{i+1} f h_i^{-1}&=&\varphi_{i+1}^{-1}\circ \\
&& g_{i}\circ\cdots\circ g_{-n_0-1}\circ f_{-n_0-1}^{-1} \circ\cdots\circ f_{i}^{-1}\circ \\
&& \varphi_{i+1}\circ f\circ\varphi_i^{-1}\circ\\
&& f_{i-1}\circ\cdots\circ f_{-n_0-1}\circ g_{-n_0-1}^{-1} \circ\cdots\circ g_{i-1}^{-1}\\
&&\circ \varphi_i \\
&=&\varphi_{i+1}^{-1}\circ \\
&& g_{i}\circ\cdots\circ g_{-n_0-1}\circ f_{-n_0-1}^{-1} \circ\cdots\circ f_{i}^{-1}\circ \\
&& f_i\circ\\
&& f_{i-1}\circ\cdots\circ f_{-n_0-1}\circ g_{-n_0-1}^{-1} \circ\cdots\circ g_{i-1}^{-1}\\
&&\circ \varphi_i \\
&=&\varphi_{i+1}^{-1}\circ g_{i}\circ \varphi_i \\
\end{array}$$
Note that as $g_i$ is $\frac{\varepsilon}{4}$-$C^1$ close to the identity map one has
$\left|\frac{dg_i(x)}{dg_i(y)}-1\right|<\frac{2\varepsilon}{3}$.
As $\varphi_i$ and $\varphi_{i+1}$ are affine one gets that the distorsion of $h_{i+1} f h_i^{-1}$ on $f^i([a,b])$ is bounded by $ \log (1+\frac{2\varepsilon}{3})$, therefore it is smaller than $\varepsilon$.
\end{demo}
Next lemma ensures that one can extend the conjugacy, defined inside the wandering interval by lemma~\ref{l.wandering}, on the whole circle without changing the distorsion.
First notice that due to the uniform continuity of the derivative of $f$, its distorsion is smaller than $\varepsilon/2$ on every small enough interval. Therefore, we can choose $N, k$ and finitely many orbit segments of (maximal) wandering intervals $[f^{-N}(a_i), f^{-N}(b_i)],\dots, [a_i,b_i],\dots ,[f^{N}(a_i), f^{N}(b_i)]$, $i\in\{1,\dots k\}$, such that
\begin{itemize}
\item The orbits of the $[a_i,b_i]$ are pairwise distinct
\item for any $n$ with $|n|>N$ the distorsion of $f$ on $f^n([a_i,b_i])$ is smaller than $\frac \varepsilon 2$
\item for any wandering interval $[a,b]$ whose orbit is distinct from the $[a_i,b_i]$ the distorsion is bounded by $\varepsilon/2$ on each $f^i([a,b])$, $i\in\ZZ$.
\end{itemize}
For every $t\in\ZZ$ we choose a diffeomorphism $h_{i,t}\colon [f^t(a_i),f^t(b_i)]\to[f^t(a_i),f^t(b_i)]$ given by lemma~\ref{l.wandering}, so that
\begin{itemize}
\item there is $n_0$ such that $h_{i,t}=id|_{f^t([a_i,b_i])}$ for $|t|\geq n_0$ and any $i\in\{1,\dots, k\}$;
\item the distorsion of $h_{i,t+1}\circ f\circ h_{i,t}^{-1}\colon f^t([a_i,b_i])\to f^{t+1}([a_i,b_i])$ is bounded by $\varepsilon$
\end{itemize}
Notice that the segments $[f^t(a_i),f^t(b_i)]$, $i\in\{1,\dots, k\}$, $t\in\{-n_0,\dots, n_0\}$ are finitely many compact disjoint segments.
\begin{lemm}\label{l.denjoy} There is a diffeomorphism $h$ of $S^1$, such that $h$ coincides with the $h_{i,t}$ for $i\in\{1,\dots, k\}$, $t\in\{-n_0,\dots, n_0\}$, and the derivative of $h$ is constant in every wandering interval distinct from the $[f^t(a_i),f^t(b_i)]$, $i\in\{1,\dots, k\}$, $t\in\{-n_0,\dots, n_0\}$.
\end{lemm}
\begin{demo} Consider the closure $I=[f^{t_1}(b_i),f^{t_2}(a_j)]$ of a connected component of
$S^1\setminus \bigcup_{i\in\{1,\dots, k\}, t\in\{-n_0,\dots, n_0\}}[f^t(a_i),f^t(b_i)]$.
We build a continuous map $\varphi_I\colon I\to (0,+\infty) $ such that
\begin{itemize}
\item $\varphi_I$ constant on each wandering interval in the interior of $I$
\item $\varphi_I(f^{t_1}(b_i))=d h_{i,t_1}(f^{t_1}(b_i))$ and $\varphi_I(f^{t_2}(a_j))= d h_{j,t_2}(f^{t_2}(a_j))$
\item $\int_I \varphi_I=\ell(I)=|f^{t_1}(b_i)-f^{t_2}(a_j)|$
\end{itemize}
The existence of such a function follows from the usual construction of a devil's staircase.
Then we define $h|_I$ by $h(x)=f^{t_1}(b_i)+ \int_{f^{t_1}(b_i)}^x \varphi_I$. It is a diffeomorphism of $I$ whose derivative coincides with the one of $h_{i,t_1}$ at $f^{t_1}(b_i)$ and with the one of $h_{j,t_2}$ at $f^{t_2}(a_j)$.
The concatenation of these diffeomorphisms is the announced diffeomorphism $h$.
\end{demo}
\begin{demo}[Proof of Proposition~\ref{p.denjoy}] Consider the diffeomorphism $h$ given by Lemma~\ref{l.denjoy}. Then $hfh^{-1}$ coincides with $h_{i,t+1}fh_{i,t}^{-1}$ on the $f^t([a_i,b_i])$ for $|t|<n_0$; therefore, the distorsion is bounded by $\varepsilon$. On the other wandering intervals, $h$ is affine so that the conjugacy does not affect the distorsion, which was bounded by $\frac{\varepsilon}{2}$ by definition of the $[a_i,b_i]$ and $n_0$.
\end{demo}
\section{Proof of the main result}
The aim of this section is to prove Theorem~\ref{t.conjugacy}
assuming Theorem~\ref{t.caracteristic} which explains that one can change the initial and final ratio by arbitrarily small perturbations if one chooses adapted segments of a specific length.
\subsection{Perturbing the initial and final ratio at \emph{characteristic times}}
Our main technical result is
\begin{theo}\label{t.caracteristic} Given any $\alpha\in\RR\setminus {\mathbb Q}$ there is a (strictly increasing) sequence $\{k_i\}\subset \NN$ with the following properties:
\begin{itemize}
\item Given any $C^1$-diffeomorphisms $f$, $g$ with rotation number $\alpha$,
\item given any points $x,y\in S^1$
\item given any $\varepsilon>0$
\end{itemize}
Then,
\begin{itemize}
\item for any $i$ the orbits segments $\{x,\dots, f^{k_i}(x)\}$ and $\{y,\dots, g^{k_i}(y)\}$ are adapted segments,
\item there is $i_0$ such that for every $i\geq i_0$ there is a $C^1$-diffeomorphism $g_i$ such that
\begin{itemize}
\item $g_i$ is $\varepsilon$ $C^1$-close to $g$
\item $\{y,\dots, g_i^{k_i}(y)\}$ is an adapted segment of $g_i$ ordered on $S^1$ as $\{x,\dots, f^{k_i}(x)\}$ and $\{y,\dots, g^{k_i}(y)\}$;
\item The initial and final ratio of $g_i$ on the adapted segment $\{y,\dots, g_i^{k_i}(y)\}$ are the same as the ones of $f$ on the adapted segment $\{x,\dots, f^{k_i}(x)\}$.
\end{itemize}
\end{itemize}
\end{theo}
In section~\ref{s.characteristic} we will build the sequence $\{k_i\}$ called \emph{characteristic times}, and Section~\ref{s.perturbation} will be dedicated to the proof of Theorem~\ref{t.caracteristic}.
The aim of this section is to show that Theorem~\ref{t.caracteristic} together with Proposition~\ref{p.denjoy}, Lemma~\ref{l.affine} and Proposition~\ref{p.smoothing} imply Theorem~ \ref{t.conjugacy}.
\subsection{Proof of Theorem~\ref{t.conjugacy}}
Let $f, g\in F^1_\alpha$, and $\varepsilon>0$. We have to prove that there is a diffeomorphism $h$ of $S^1$ such that $hfh^{-1}$ is $\varepsilon$-close to $g$.
According to \cite[Proposition 4.4.2]{He} the set $F^r_\alpha$ of $C^r$-diffeomorphisms with rotation number $\alpha$ is $C^s$-dense in $F^s_\alpha$ for any $s\leq r$. In particular, $F^2_\alpha$ is $C^1$-dense in $F^1_\alpha$. Thus there is a $C^2$-diffeomorphism $g_0$ with rotation number $\alpha$ and $\varepsilon/2$-close to $g$. In other words, up to changing $\varepsilon$ into $\varepsilon/2$ and $g$ into $g_0$, we may assume without loss of generality that $g$ is $C^2$.
According to Proposition~\ref{p.denjoy} $f$ is $C^1$ conjugated to $f_0=h_0fh_0^{-1}$ such that the distorsion of $f_0$ on each wandering interval is bounded by $\frac\varepsilon{48M}$, where $M$ is an upper bound for $dg$. Therefore, according to Remark~\ref{r.denjoy}, for any sufficiently large orbit segment associated to a point $x$ in the minimal set of $f_0$,
the distorsion of $f_0$ on each connected component of the complement of the orbit segment will be bounded by $\frac\varepsilon{24M}$.
Thus, we choose $x$ in the minimal set of $f$ and $x_0=h_0(x)$ is on the minimal set of $f_0$. We choose a sufficiently large characteristic time $k_i$ so that, according to Theorem~\ref{t.caracteristic}, $g$ admits an $\frac {\varepsilon} {200}$ $C^1$ perturbation $g_1$ for which
\begin{itemize}
\item the orbit segment $\{0,\dots g_1^{k_i}(0)\}$ is adapted and is ordered as $\{0,\dots g^{k_i}(0)\}$
\item the initial and final ratio associated to the orbit segment $\{0,\dots g_1^{k_i}(0)\}$ are the same as the ones of $f_0$ on $\{x_0,\dots f_0^{k_i}(x_0)\}$.
\item the distorsion of $g_1$ on each connected component of the complement of the orbit segment $\{0,\dots, g_1^{k_i}(0)\}$ will be bounded by $\frac\varepsilon{24M}$, since
$g_1$ was chosen $C^1$- close to $g$.
\end{itemize}
Now Lemma~\ref{l.affine} builds a piecewise linear conjugacy $H$ so that $Hf_0H^{-1}$ satisfies (see Remark~\ref{r.affine}) that $|dHf_0H^{-1}-dg|<\frac{\varepsilon}{8}$.
Finally Proposition~\ref{p.smoothing} ensures the existence of a diffeomorphism $h$ for which $|dHf_0H^{-1}-dhf_0h^{-1}|<\frac\varepsilon 2$.
One gets that $hh_0 f(hh_0)^{-1}$ is $\varepsilon$ $C^1$-close to $g$, concluding the proof.
$\Box$
It remains now to prove Theorem~\ref{t.caracteristic} for concluding the proof.
\section{Characteristic times}\label{s.caracteristic}
Section~\ref{s.geometry} shows that the main point for conjugating a diffeomorphism $f$ in order to be $C^1$ close to $g$ is to control the initial and final ratio of adapted segments.
In this section, we will choose specific adapted segments that we will call \emph{characteristic segments}. These orbit segments will be chosen for the rotation $R_\alpha$, $\alpha\in\RR\setminus{\mathbb Q}$, and we will then control the ratio of characteristic segments for the diffeomorphisms $f,g$ in $F^i_\alpha$, $i=1,2$, and for their $C^1$ perturbations.
The idea is very simple: consider the closest return close to $0$ of the $n.\alpha$, and consider the time $n-1$ just before it; then the ratio of the corresponding segments for the rotation $R_\alpha$ is uniformly bounded, between $1/2$ and $2$. Now we will extract a subsequence (called \emph{characteristic time}) for which the union of the two segments adjacent to $0$ will have a large number of disjoint successive iterates.
This long wandering time will allow us to modify as we want this ratio by a $C^1$-perturbation.
As we need to control the complete geometry of the orbit segment until the closest return, we will first reconstruct the sequence of this closest return times, paying attention to the wandering time of union of the segments adjacent to $0$.
\subsection{Ordering the orbit segments of rotations}
\label{s.characteristic}
Let $\alpha\in\RR\setminus {\mathbb Q}$ be an irrational number and $R_\alpha$ the rotation $x\mapsto x+\alpha$ on $S^1=\RR/\ZZ$.
Every orbit segment $\{x,\dots, R_\alpha^n(x)\}$ of the rotation with length $n$ is the image by the isometry $R_x$ of the corresponding orbit segment starting at $0$. We consider therefore the orbit segments $\{0,\alpha,\dots, n\alpha\}$.
We consider the points $-\frac12<-a_n<0<b_n<\frac12$ which are adjacent to $0$ in this orbit segment. We define $r_n,s_n\in\{1,\dots, n\}$ by $-a_n= r_n\alpha$ and $b_n=s_n\alpha$. Notice that $(n-s_n)\alpha<n\alpha<(n-r_n)\alpha $ are the adjacent points to $n\alpha$.
Note that as $a_n \rightarrow 0$, and
$b_n \rightarrow 0$ when $n \rightarrow \infty$ then $r_n \rightarrow \infty$ and $s_n \rightarrow \infty$.
The following lemma is very elementary and classical.
\begin{lemm}\label{l.classic} \begin{enumerate}
\item The length of each connected component of the complement of the orbit segment belong to $\{a_n, b_n,a_n+b_n\}$.
\item $$r_n+s_n\neq n+1\Longleftrightarrow R_\alpha(n\alpha)= (n+1) \alpha \notin [-a_n,b_n].$$ In that case:
\begin{itemize}
\item $(s_n-1)\alpha,(r_n-1)\alpha$ are adjacent and the length of the component $((s_n-1)\alpha,(r_n-1)\alpha)$ is $a_n+b_n$.
\item $a_{n+1}=a_n$ and $b_{n+1}=b_n$, $r_{n+1}=r_n$, $s_{n+1}=s_n$.
\end{itemize}
\item If $r_n+s_n = n+1$ then the image by $R_\alpha$ of the segment $[(n-s_n)\alpha,(n-r_n)\alpha] $ is the segment $[r_n\alpha, s_n\alpha]=[-a_n,b_n]$. One deduces that the length of each connected component of the complement of the orbit segment belong to $\{a_n, b_n\}$. Furthermore:
\begin{enumerate}
\item Assume $a_n>b_n$ then $(n+1)\alpha\in(-a_n,0)$ and
$$\left\{\begin{array}{ccl}
a_{n+1}&=&a_n-b_n\\
b_{n+1}&=&b_n\\
r_{n+1}&=&n+1=r_n + s_n\\
s_{n+1}&=&s_n\\
\end{array}\right.
$$
\item Assume $b_n>a_n$ then $(n+1)\alpha\in(0,b_n)$ and
$$\left\{\begin{array}{ccl}
a_{n+1}&=&a_n\\
b_{n+1}&=&b_n-a_n\\
r_{n+1}&=&r_n\\
s_{n+1}&=&n+1= r_n + s_n\\
\end{array}\right.
$$
\end{enumerate}
\end{enumerate}
\end{lemm}
Let $n_i$ denote the sequence of numbers such that $(n_i+1)\alpha \in[-a_{n_i},b_{n_i}]$.
Notice that:
\begin{itemize}
\item if $a_{n_i}>b_{n_i}$ then $a_{n_{i+1}}=a_{n_i+1}=a_{n_i}-b_{n_i}$ and $b_{n_{i+1}}=b_{n_i+1}=b_{n_i}$.
\item if $a_{n_i}<b_{n_i}$ then $a_{n_{i+1}}=a_{n_i+1}=a_{n_i}$ and $b_{n_{i+1}}=b_{n_i+1}=b_{n_i}-a_{n_i}$.
\end{itemize}
One deduces:
\begin{lemm} There is a subsequence $n_{i_j}$ of $n_i$ such that $\frac{a_{n_{i_j}}}{b_{n_{i_j}}}\in[\frac12,2]$.
\end{lemm}
\begin{demo}Assume $a_{n_i}>b_{n_i}$. Therefore $a_{n_{i+1}}=a_{n_i}-b_{n_i}$ and $b_{n_{i+1}} = b_{n_i}$.
If $a_{n_{i+1}}<b_{n_{i+1}}$ this means $b_{n_i}<a_{n_i}<2b_{n_i}$ so that $n_i$ belongs to the announced sequence.
Otherwise, $a_{n_{i+1}}>b_{n_{i+1}}$ and $a_{n_{i+2}}=a_{n_{i+1}}-b_{n_{i+1}}<a_{n_i}$;
if $a_{n_{i+1}}-b_{n_{i+1}}<b_{n_i}=b_{n_i+1}=b_{n_i+2}$ we are done; otherwise we continue the process so that there is $k$ such that
$a_{n_{i+k}}>b_{n_{i+k}}=b_{n_i}$ but $a_{n_{i+k+1}}<b_{n_{i+k+1}}$: then $n_{i+k}$ belongs to the announced sequence.
The case $a_{n_i}<b_{n_i}$ is analogous. Thus we have shown that the announced sequence contains numbers greater than any of the $n_i$, allowing to define the $n_{i_j}$ by induction.
\end{demo}
\begin{rema} The points $(n_{i_j}+1)\alpha$ are the sequence of \emph{closest return} to $0$.
\end{rema}
\subsubsection{Wandering time}
Consider $n>0$, the orbit segment $0,\dots, n\alpha$ and the numbers $r_n\alpha, s_n\alpha$, so that $0$ is the unique point of the segment in the open interval $I_n=(r_n\alpha, s_n\alpha)$ and $n$ is the unique point of the segment in the open interval $J_n=((n-s_n)\alpha, (n-r_n)\alpha)$.
We call \emph{wandering time $w(n)$} the largest integer $w$ such that the intervals $I_n, R_\alpha(I_n),\dots, R_\alpha^w(I_n),$ $ R_\alpha^{-w} J_n,\dots,J_n$ are pairwise disjoint.
\begin{lemm} The wandering time $w(n)$ is
$$w(n)=\inf\left\{\left[\frac{n-r_n-1}2\right],\left[\frac{n-s_n-1}2\right]\right\}$$
where $[. ]$ denotes the integer part.
\end{lemm}
\begin{demo} The intervals $(r_n\alpha,0),\dots,(n\alpha,(n-r_n)\alpha), (0,s_n\alpha),\dots, ((n-s_n)\alpha,n\alpha)$ are pairwise disjoint.
We just break this family in two families of equal length.
\end{demo}
Recall that, the times $n_{i_j}$ (before the closest return) are characterized by $r_{n}+s_{n}= n+1$ and that the sequences $r_n$ and $s_n$ go to infinity. Furthermore
$$w(n_{i_j})=\inf \left\{\left[\frac{s(n_{i_j})}2\right]-1,\left[\frac{r(n_{i_j})}2\right]-1\right\}.$$
Lemma~\ref{l.timeratio} and Corollary~\ref{c.timeratio} below give a lower bound for the wandering time:
\begin{lemm} \label{l.timeratio} There exists a strictly increasing sequence $N_i$ of integers such that $N_i+1$ is a closest return time (that is, $N_i$ is a subsequence of the $n_{i_j}$) and
\begin{itemize}
\item either $a_{N_i}<b_{N_i}$ and $ r({N_i})\leq 2 s({N_i})$
\item or $a_{N_i}>b_{N_i}$ and $s({N_i})\leq 2 r({N_i})$.
\end{itemize}
\end{lemm}
\begin{coro}\label{c.timeratio} With the notations of Lemma~\ref{l.timeratio}, for every $i$
\begin{itemize}
\item either $a_{N_i}<b_{N_i}$ and $ \left[\frac{r(N_i)}4\right] -1 \leq w(N_i)$
\item or $a_{N_i} > b_{N_i}$ and $ \left[\frac{s(N_i)}4\right] -1 \leq w(N_i)$.
\end{itemize}
\end{coro}
\begin{demo} Denote by $\cN(\alpha)$ the subset of $\{n_{i_j}\}_{j\in\NN}$ satisfying either $a_{n_{i_j}}<b_{n_{i_j}}$ and $ r_{n_{i_j}}\leq 2 s_{n_{i_j}}$ or $a_{n_{i_j}}>b_{n_{i_j}}$ and $s_{n_{i_j}}\leq 2 r_{n_{i_j}}$. We have to prove that $\cN(\alpha)$ is infinite, for every $\alpha\in\RR\setminus {\mathbb Q}$.
Most of the $\alpha$ are solved by the following claim:
\begin{clai} If $i_{j}-i_{j-1}\geq 2$ then
\begin{itemize}
\item either $a(n_{i_j})<b(n_{i_j})$ and $r(n_{i_j})<s(n_{i_j})$
\item or $a(n_{i_j})>b(n_{i_j})$ and $r(n_{i_j})>s(n_{i_j})$
\end{itemize}
so that in both cases $i_j\in\cN(\alpha)$
\end{clai}
\begin{demo} Assume for instance $a(n_{i_j})<b(n_{i_j})$, the other case is identical.
By the choice of the $n_{i_j}$, one has $a(n)<b(n)$ for every $n_{i_{j-1}}<n\leq n_{i_j}$.
Furthermore, by lemma~\ref{l.classic}
$$\begin{array}{lcl}
r(n_{i_{j-1}}+1)&= &r(n_{i_{j-1}})+s(n_{i_{j-1}})\\
s(n_{i_{j-1}}+1)&= &s(n_{i_{j-1}})
\end{array}
$$
Furthermore, $r(n_{i_{j-1}+1})=r(n_{i_{j-1}}+1)$ and $s(n_{i_{j-1}+1})=s(n_{i_{j-1}}+1)$
Then by lemma~\ref{l.classic}, for every $0\leq k \leq i_j-i_{j-1}$ one has
$$
\begin{array}{lcl}
r(n_{i_{j-1}+k})&= &r(n_{i_{j-1}}+1)\\
s(n_{i_{j-1}+k})&=&s(n_{i_{j-1}}+1) + (k-1)r(n_{i_{j-1}}+1)\\
\end{array}
$$
In particular,
$$
\begin{array}{lcl}
r(n_{i_{j}})&= &r(n_{i_{j-1}}+1)\\
s(n_{i_{j}})&=&s(n_{i_{j-1}}+1) + (i_j-i_{j-1}-1)r(n_{i_{j-1}}+1)\\
\end{array}
$$
so that $r(n_{i_j})<s(n_{i_j})$, hence $r(n_{i_j})\leq 2s(n_{i_j})$ as announced.
\end{demo}
Consider now $\alpha$ such that there is $j_0$ so that every $j\geq j_0$ does not satisfy the conclusion of the claim.
This implies that $i_{j_0+k}= i_{j_0}+k$ for every positive $k$.
Assume for instance $a(n_{i_{j_0}})<b(n_{i_{j_0}})$.
Therefore, for every $k> 0$ one has
$$\begin{array}{lcl}
r(n_{i_{j_0}}+2k)&= &r(n_{i_{j_0}}+2k-1)+ s(n_{i_{j_0}}+2k-1)\\
s(n_{i_{j_0}}+2k)&= &s(n_{i_{j_0}}+2k-1)\\
\end{array}
$$
$$\begin{array}{lcl}
r(n_{i_{j_0}}+2k+1)&= &r(n_{i_{j_0}}+2k)\\
s(n_{i_{j_0}}+2k+1)&= &r(n_{i_{j_0}}+2k)+ s(n_{i_{j_0}}+2k)\\
\end{array}
$$
In particular $r(n_{i_{j_0+2}})=r(n_{i_{j_0+1}})+s(n_{i_{j_0+1}})=2r(n_{i_{j_0}})+s(n_{i_{j_0}})$ and $s(n_{i_{j_0+2}})=s(n_{i_{j_0+1}})=r(n_{i_{j_0}})+s(n_{i_{j_0}})$ so that
$$r(n_{i_{j_0+2}})<2s(n_{i_{j_0+2}})$$
This proves that $n_{i_{j_0+2}}\in \cN(\alpha)$, and ends the proof of the lemma.
\end{demo}
The $N_i$ are almost the announced characteristic times. The unique defect is that the orbit segments $x,\dots, R_\alpha^{N_i}(x)$ are not adapted. We define $k_i=N_i-1$ and we call them the characteristic times. We denote $w_i=w(k_i)$, the wandering time of the characteristic segment.
Then
\begin{itemize}
\item The orbit segments $x,\dots, R_\alpha^{k_i}(x)$ are adapted segments,
\item The initial and final ratio of the rotation $R_\alpha$ belong to $[\frac 12,2)$
\item
\begin{itemize}
\item either $a_{k_i}<b_{k_i}$ and $ \left[\frac{r(k_i)}4\right] -1 \leq w(k_i)$
\item or $a_{k_i} > b_{k_i}$ and $ \left[\frac{s(k_i)}4\right] -1 \leq w(k_i)$.
\end{itemize}
\end{itemize}
\subsection{Geometry of the characteristic segment for $C^1$ diffeomorphisms $f\in F^1_{\alpha}$}
Let $f$ be a $C^1$-diffeomorphism with an irrational rotation number $\alpha$. Classical results assert that $f$ is uniquely ergodic, that is, it admits a unique invariant measure. The Lyapunov exponent of this measure is zero. This implies:
\begin{lemm} For any $\lambda>1$ there is $n_{\lambda}>0$ such that for any $n> n_\lambda$ and any $x\in S^1$ one has
$$df^n(x)\in[\lambda^{-n},\lambda^n].$$
\end{lemm}
One checks easily:
\begin{coro}\label{c.bounds} Let $x,y\in S^1$ be such that there is $n$ with $|n|>n_\lambda$ and $x<f^n(x)<f^{2n}(x)<y<f^{3n}(x)$. Then
$$\frac{|x-f^n(x)|}{\lambda^{|n|}}<|f^n(x)-y|<(\lambda^{|n|}+\lambda^{|2n|})|x-f^n(x)|$$
\end{coro}
\begin{lemm}\label{l.subexp} Let $f\in F^1_\alpha$ be a $C^1$-diffeomorphism with irrational rotation number $\alpha$, and let $k_i$ be the sequence of numbers associated to $\alpha$ defined after Lemma~\ref{l.timeratio}.
Then, given any $\lambda>1$ there is $i(\lambda)$ so that for every $i\geq i(\lambda)$ and any $x\in S^1$ the initial and final ratio of the characteristic segments $x,\dots, f^{k_i}(x)$ belong to $[\lambda^{-w_i},\lambda^{w_i}]$.
\end{lemm}
\begin{demo} Consider the corresponding orbit segment $0,\dots k_i\alpha$ for the rotation $R_\alpha$ and assume, for instance that $a(k_i)<b(k_i)$. By the definition of the characteristic times, we also have $b(k_i)<2a(k_i)$.
This means $R_\alpha^{r(k_i)}(0)<0<R_\alpha^{-r(k_i)}(0)<R_\alpha^{s(k_i)}(0)< R_\alpha^{-2r(k_i)}(0)$
As the order of the orbits just depends of the rotation number, one gets
$f^{r(k_i)}(x)<x<f^{-r(k_i)}(x)<f^{s(k_i)}(x)< f^{-2r(k_i)}(x)$.
Given any $\lambda_1>1$ and $i$ so that $r(k_i)>n_{\lambda_1}$, one deduces the following bounds from Corollary~\ref{c.bounds} applied to $f^{r(k_i)}(x)$ and $f^{-r(k_i)}$:
$$\frac{|x-f^{r(k_i)}(x)|}{\lambda_1^{|r(k_i)|}}<|f^{s(k_i)}(x)-x|<(\lambda_1^{|r(k_i)|}+\lambda_1^{|2r(k_i)|})|x-f^{r(k_i)}(x)|$$
Thus the initial ratio belongs to $[\lambda_1^{-r(k_i)}, \lambda_1^{|r(k_i)|}+\lambda_1^{|2r(k_i)|}]$.
Recall that, for characteristic time $k_i$ for which $a(k_i)<b(k_i)$ one has $ \left[\frac{r(k_i)}4\right] -1 \leq w(k_i)$. Thus, for concluding the proof of the lemma, it is enough to choose $\lambda_1$ so that $\lambda_1^{|r(k_i)|}+\lambda_1^{|2r(k_i)|}<\lambda^{w(k_i)}$ for every $k_i>n_{\lambda_1}$.
This gives us the announced bounds for the initial ratio; the final ratio is obtained similarly.
\end{demo}
We can restate Lemma~\ref{l.subexp} as follows:
\begin{rema} \label{r.subexp}
There is a sequence $\lambda_i>1$ tending to $1$ as $i\to\infty$, so that for every $i$ and any $x\in S^1$ the initial and final ratio of the characteristic segments $x,\dots, f^{k_i}(x)$ belong to $[\lambda_i^{-w_i},\lambda_i^{w_i}]$.
\end{rema}
\section{Perturbations}
\label{s.perturbation}
The aim of this section is to prove Theorem~\ref{t.caracteristic} using the characteristic time $\{ k_i\}$ as the announced sequence. Let $w_i$ be the corresponding wandering times.
In the statement of the theorem, the diffeomorphism $f$ appears only through its initial and final ratio. Let us recall that, according to Lemma~\ref{l.subexp} and Remark~\ref{r.subexp}, these ratios are bounded in an interval $[\lambda_i^{-w_i},\lambda_i^{w_i}]$ where the sequence $\lambda_i>1$ tends to $1$; this sequence depends on $f$. Finally, the theorem announces an $\varepsilon$ perturbation of $g$. Therefore, up to shrinking $\varepsilon$ if necessary, one may assume that $g$ is a $C^2$ diffeomorphism.
Let us restate the theorem:
\begin{prop}\label{p.caracteristic}
\begin{itemize}
\item Given any $\alpha\in\RR\setminus {\mathbb Q}$, $\{k_i\}\subset \NN$ its characteristic times, and $w_i$ the corresponding wandering times.
\item Given any sequence $\lambda_i>1$ tending to $1$,
\item Given any sequences $\rho^-_i,\rho^+_i\in [\lambda_i^{-w_i},\lambda_i^{w_i}]$
\item Given any $C^2$-diffeomorphism $g$ with rotation number $\alpha$,
\item given any point $y\in S^1$
\item given any $\varepsilon>0$
\end{itemize}
Then, there is $i_0$ such that for every $i\geq i_0$ there is a $C^1$-diffeomorphism $g_i$ such that
\begin{itemize}
\item $g_i$ is $\varepsilon$ $C^1$-close to $g$
\item $\{y,\dots, g_i^{k_i}(y)\}$ is an adapted segment of $g_i$ ordered on $S^1$ as $\{y,\dots, g^{k_i}(y)\}$;
\item The initial and final ratios of $g_i$ on the adapted segment $\{y,\dots, g_i^{k_i}(y)\}$ are $\rho^-_i$ and $\rho^+_i$, respectively.
\end{itemize}
\end{prop}
\subsection{Rescaling the statement of Proposition~\ref{p.caracteristic} on the segment $[0,1]$}
Let $I_i=I_i(g)$ and $J_i=J_i(g)$ denote the intervals $(g^{r(k_i)}(y),g^{s(k_i)}(y))$ and $(g^{k_i-s(k_i)}(y),g^{k_i-r(k_i)}(y))$, containing $y$ and $g^{k_i}(y)$, respectively.
By definition of the wandering times, the intervals \\
$I_i(g)$, $g(I_i),\dots$,$ g^{w_i}(I_i)$, $ g^{-w_i} (J_i),\dots,J_i$ are pairwise disjoint.
We will build the final ratio equal to $\rho^+_i$ by performing a perturbation of $g$ with support in $ g^{-w_i} (J_i),\dots,J_i$, and the initial ratio equal to $\rho^-_i$ by a perturbation of $g$ with support in $I_i, g(I_i),\dots, g^{w_i}(I_i)$. These supports are disjoint so that the constructions can be performed independently. Furthermore, they are analogous. We will present only the construction of $\rho^-_i$.
\vskip 2mm
For any $j$ the restriction $g|_{g^j(I_i)}$ maps $g^j(I_i)$ to $g^{j+1}(I_i)$. It will be more comfortable to deal with diffeomorphisms of the same interval. For that we will rescale the intervals $g^j(I_i)$ by affine maps onto $[0,1]$. As this rescaling is affine it will not affect the distortion of $g|_{g^j(I_i)}$, and small $C^1$-perturbations of the rescaled map will induce $C^1$-perturbations of $g$ of proportional $C^1$-size. More precisely:
\vskip 2mm
Let $\varphi_{i,j}\colon g^j(I_i(g))\to [0,1] $, $j\in\{0,\dots,w_i\}$ be the affine orientation preserving maps. We denote by $G_{i,j}\colon [0,1]\to [0,1]$, $j\in\{0,\dots,w_i-1\}$ the diffeomorphism $\varphi_{i,j+1} \circ g|_{g^j(I_i)}\circ \varphi_{i,j}^{-1}$.
As $g$ is $C^2$ the orbits are all dense so that the length of the interval ${g^j(I_i)}$ tends uniformly to $0$ when $i$ tends to infinity. As a consequence the distortion of $g$ tends to $0$ on ${g^j(I_i)}$. As a direct consequence the diffeomorphisms $G_{i,j}$ tend uniformly to the identity map in the $C^1$ topology.
Notice that, according to Remark~\ref{r.subexp}, there are $\tilde\lambda_i>1$ tending to $1$ so that the initial and final ratio of $g$ belong to $[\tilde\lambda_i^{-w_i},\tilde\lambda_i^{w_i}]$.
Furthermore, as $g$ is assumed here to be $C^2$ and as the intervals $g^j(I_i)$ are pairwise disjoint in $S^1$, so that the sum of their lengths is bounded by $1$, a very classical distortion control argument implies that
\begin{lemm} There is a constant $C>1$ such that for every $i$, for any $ j\in \{0,1,..., w_i\}$ the distortion of $g^{j}$ on $I_i$ is bounded by $\log C$.
\end{lemm}
As a consequence one gets that for every $i$ one has $$\frac{|g^{r(k_i)+j}(y)-g^j(y)|}{|g^{s(k_i)+j}(y)-g^j(y)|}\in [C^{-2}\tilde\lambda_i^{-w_i},C^2 \tilde\lambda_i^{w_i}]$$
Up to replacing $\lambda_i$ and $\tilde \lambda_i$ by $\mu_i=\sup \{\lambda_i, C^{\frac 2{w_i}}\tilde\lambda_i\}$ one has that $ \mu_i \to 1$ and the numbers
$$\rho^-_i, \frac{|g^{r(k_i)+j}(y)-g^j(y)|}{|g^{s(k_i)+j}(y)-g^j(y)|}\in [\mu_i^{-w_i},\mu_i^{w_i}].$$
Our main lemma is
\begin{lemm}\label{l.perturbation} Let $w_i$ be a sequence tending to infinity. Let $G_{i,j}$, $j\in\{0, \dots,w_i-1\}$ be families of diffeomorphisms of $[0,1]$ tending uniformly to identity in the $C^1$ topology with $i$.
Then given any points $t_{i,1}$, $t_{i,2}$ satisfying $\frac{t_{i,\eta}}{1-t_{i,\eta}}\in [\mu_i^{-w_i},\mu_i^{w_i}]$, $\eta\in\{1,2\}$, and given any $\varepsilon>0$. Then there is $i_0$ so that for any $i\geq i_0$, there are families $H_{i,j}$ so that:
\begin{itemize}
\item $H_{i,j}$ are $\varepsilon$ $C^1$-close to $G_{i,j}$
\item $H_{i,j}$ coincides with $G_{i,j}$ in neighborhoods of $0$ and $1$
\item $H_{i,w_i-1}\circ \cdots\circ H_{i,0}(t_{i,1})=t_{i,2}$.
\end{itemize}
\end{lemm}
\begin{demo}[Proof of Proposition~\ref{p.caracteristic}] Consider $g_1$ defined as $g_1=g$ outside the union of the intervals $g^j(I_i)$, $j\in\{0,\dots,w_i-1\}$ and equal to $\varphi_{i,j+1}^{-1} \circ H_{i,j}\circ \varphi_{i,j}$ on $g^j(I_i)$, where $H_{i,j}$ is given by Lemma~\ref{l.perturbation} for the constants:
\begin{itemize}
\item $\frac \varepsilon M$ where $M$ is a bound for $dg$.
\item $t_2$ is $\varphi_{i,w_i}(g^{w_i}(y))$
\item $t_1$ is the point such that $\frac{t_1}{1-t_1}$ is the initial ratio $\rho^-_i$.
\end{itemize}
Using the fact that $g=\varphi_{i,j+1}^{-1} \circ G_{i,j}\circ \varphi_{i,j}$ on $g^j(I_i)$ and that $H_{i,j}=G_{i,j}$ in a neighborhood of $0$ and $1$, one easily checks that $g_1$ is a diffeomorphism. Furthermore, the fact that $H_{i,j}$ is $\varepsilon/M$ close to $G_{i,j}$ implies that $g_1$ is $\varepsilon$ close to $g$ in the $C^1$ topology. Finally the orbit segment of length $k_i$ through the point $y_1=\varphi_{i,0}^{-1}(t_1)$ satisfies
\begin{itemize}
\item $g_1^{w_i}(y_1)=g^{w_i}(y)$ and therefore, for any $j\in\{w_i,\dots,k_i\}$ $g_1^j(y_1)=g^j(y)$
\item $\{y_1,\dots, g_1^{k_i}(y_1)\}$ is an adapted segment for $g_1$ ordered as the adapted segment $\{y,\dots, g^{k_i}(y)\}$
\end{itemize}
One deduces that the initial ratio of this segment is $\rho^-_i$, as announced.
\end{demo}
\subsection{Proof of Lemma~\ref{l.perturbation}}
Notice that, as the $G_{i,j}$ are assumed to tend uniformly to identity, the condition that the $H_{i,j}$ are $\varepsilon$-close to the $G_{i,j}$ can be replaced by the condition that the $H_{i,j}$ are $\varepsilon$ $C^1$-close to identity (up to shrinking $\varepsilon$ slightly). Furthermore, the condition that $H_{i,j}$ and $G_{i,j}$ coincide in an (arbitrarily small) neighborhood of $0$ and $1$ can be obtained by the use of a bump function, without introducing derivatives larger than $1+2\varepsilon$. Therefore, up to replacing $\varepsilon$ by $\varepsilon/2$, Lemma~\ref{l.perturbation} is a direct consequence of the following:
\begin{lemm}\label{l.perturbation2} Let $w_i$ be a sequence tending to infinity and $\mu_i$ be a sequence tending to one.
Then given any points $t_{i,1}$, $t_{i,2}$ satisfying $\frac{t_{i,\eta}}{1-t_{i,\eta}}\in [\mu_i^{-w_i},\mu_i^{w_i}]$, $\eta\in\{1,2\}$, and given any $\varepsilon>0$. Then there is $i_0$ so that for any $i\geq i_0$, there are families $H_{i,j}$ so that:
\begin{itemize}
\item $H_{i,j}$ are $\varepsilon$ $C^1$-close to $Id$
\item $H_{i,w_i-1}\circ \cdots\circ H_{i,0}(t_{i,1})=t_{i,2}$.
\end{itemize}
\end{lemm}
The main step for the proof is the following elementary
\begin{lemm}\label{l.1step} Given $\varepsilon>0$ small enough and $t\in[-1,1]$ and $y \in[0,1]$ there is a diffeomorphism $\varphi$ of $[0,1]$ which is equal to identity in a neighborhood of $0$ and $1$, $2\varepsilon$-$C^1$-close to the identity, and such that
$$ \frac{|\varphi(y)|}{|\varphi(y)-\varphi(1)|}= (1+t\varepsilon) \frac{|y|}{|1-y|}$$
\end{lemm}
\begin{demo} Let $y_1$ be the point in $[0,1]$ so that $\frac{y_1}{1-y_1}=(1+t\varepsilon) \frac{|y|}{|1-y|}$. An easy calculation shows that
$y_1= \frac{(1+t\varepsilon)y}{1+t\varepsilon y}$
The map $\varphi$ is just obtained by smoothing the piecewise affine homeomorphism, affine from $[0,y]$ to $[0,y_1]$ and from $[y,1]$ to $[y_1,1]$. Notice that the linear parts of the affine segments are
\begin{itemize}
\item $\frac {y_1}y= \frac{1+t\varepsilon}{1+t\varepsilon y}=1+ t\varepsilon\frac{1-y}{1+t\varepsilon y}\in (1-\varepsilon, 1+\varepsilon)$ and
\item $\frac {1-y_1}{1-y}=\frac 1{1+t\varepsilon y}=1-\left(\frac{t y}{1+t\varepsilon y}\right)\varepsilon\in (1-2\varepsilon, 1 +2\varepsilon)$ for $\varepsilon <\frac{1}{2}$.
\end{itemize}
\end{demo}
\begin{demo}[Proof of Lemma~\ref{l.perturbation2}] Applying Lemma~\ref{l.1step} $w_i$ times to $\frac{\varepsilon}{2}$, it is enough that
$\frac{t_{i,2}}{1-t_{i,2}}= (1+t\frac{\varepsilon}{2})^{w_i}\frac{t_{i,1}}{1-t_{i,1}}$ for some $t\in [-1,1]$.
By assumption $\frac{t_{i,1}}{1-t_{i,1}}, \frac{t_{i,2}}{1-t_{i,2}}\in [\mu_i^{-w_i},\mu_i^{w_i}]$, that is
$\frac{t_{i,2}}{1-t_{i,2}} \frac{1-t_{i,1}}{t_{i,1}}\in [\mu_i^{-2w_i},\mu_i^{2w_i}]$
Therefore, one can find $t$ if $\mu_i^2< 1+\frac{\varepsilon}{2}$. As $\mu_i\to 1$ when $i\to\infty$, it is enough to choose $i$ large enough, ending the proof.
\end{demo}
\section{Bibliography}
\noindent Christian Bonatti,
\noindent {\small Institut de Math\'ematiques de Bourgogne\\
UMR 5584 du CNRS}
\noindent {\small Universit\'e de Bourgogne, Dijon 21004, FRANCE}
\noindent {\footnotesize{E-mail : bonatti@u-bourgogne.fr}}
\vskip 5mm
\noindent Nancy Guelman
\noindent {\small I.M.E.R.L. Facultad de Ingenier\'ia.\\ Universidad de la Rep\'ublica}
\noindent {\small C.C. 30, Montevideo, Uruguay.}
\noindent{\footnotesize{E-mail: nguelman@fing.edu.uy}}
\end{document} |
\begin{document}
\title{A finiteness theorem for algebraic cycles}
\author{Peter O'Sullivan}
\address{Centre for Mathematics and its Applications\\
Australian National University, Canberra ACT 0200 \\
Australia}
\email{peter.osullivan@anu.edu.au}
\thanks{The author was partly supported by ARC Discovery Project grant DP0774133.}
\subjclass[2000]{14C15, 14C25}
\keywords{algebraic cycle, Chow ring}
\date{}
\dedicatory{}
\begin{abstract}
Let $X$ be a smooth projective variety.
Starting with a finite set of cycles on powers $X^m$ of $X$, we consider the
$\mathbf Q$\nobreakdash-\hspace{0pt} vector subspaces of the $\mathbf Q$\nobreakdash-\hspace{0pt} linear Chow groups of the $X^m$
obtained by iterating the algebraic operations
and pullback and push forward along those morphisms $X^l \to X^m$ for which
each component $X^l \to X$ is a projection.
It is shown that these $\mathbf Q$\nobreakdash-\hspace{0pt} vector subspaces are all finite-dimensional, provided
that the $\mathbf Q$\nobreakdash-\hspace{0pt} linear Chow motive of $X$ is a direct summand of that of an abelian variety.
\end{abstract}
\maketitle
\section{Introduction}
Let $X$ be a smooth projective variety over a field $F$.
Starting with a finite set of cycles on powers $X^m$ of $X$,
consider the $\mathbf Q$\nobreakdash-\hspace{0pt} vector subspaces $C_m$ of the $\mathbf Q$\nobreakdash-\hspace{0pt} linear Chow groups $CH(X^m)_\mathbf Q$
formed by iterating the
algebraic operations and pullback $p^*$ and push forward $p_*$ along those morphisms
$p:X^l \to X^m$ for which each component $X^l \to X$ is a projection.
It is plausible that the $C_m$ are always finite-dimensional,
because when $F$ is finitely generated over the prime field it is plausible that the
$CH(X^m)_\mathbf Q$ themselves are finite-dimensional.
In this paper we prove the finite-dimensionality of the $C_m$
when the $\mathbf Q$\nobreakdash-\hspace{0pt} linear Chow motive of $X$ is a direct summand of that of an abelian
variety over $F$.
More precisely, suppose given an adequate
equivalence relation $\sim$ on $\mathbf Q$\nobreakdash-\hspace{0pt} linear cycles on smooth projective
varieties over $F$.
We say that $X$ is a \emph{Kimura variety for $\sim$} if, in the
category of $\mathbf Q$\nobreakdash-\hspace{0pt} linear Chow motives modulo $\sim$, the motive of
$X$ is the direct sum of one for which some exterior power is $0$ and
one for which some symmetric power is $0$.
A Kimura variety for $\sim$ is also a Kimura variety for any coarser equivalence relation.
It is known (e.g.\ \cite{Kim}, Corollary~4.4) that if the $\mathbf Q$\nobreakdash-\hspace{0pt} linear Chow motive of $X$ is a direct summand of that of
an abelian variety, then $X$ is a Kimura variety for any $\sim$.
The main result is now Theorem~\ref{t:fin} below.
In addition to a finiteness result, it contains also a nilpotence result.
By a filtration $C^\bullet$ on a graded $\mathbf Q$\nobreakdash-\hspace{0pt} algebra $C$ we mean a descending
sequence $C = C^0 \supset C^1 \supset \dots$ of graded ideals of $C$
such that $C^r.C^s \subset C^{r+s}$ for every $r$ and $s$.
The morphisms $p:X^l \to X^m$ in Theorem~\ref{t:fin}~\ref{i:pullpush} are
exactly those defined by maps $[1,m] \to [1,l]$.
\begin{thm}\label{t:fin}
Let $X$ be a smooth projective variety over $F$ which is a Kimura variety
for the equivalence relation ${\sim}$.
For $n = 0,1,2, \dots $, let $Z_n$ be a finite subset of $CH(X^n)_\mathbf Q/{\sim}$, with $Z_n$ empty
for $n$ large.
Then there exists for each $n$ a graded $\mathbf Q$\nobreakdash-\hspace{0pt} subalgebra
$C_n$ of $CH(X^n)_\mathbf Q/{\sim}$, and a filtration $(C_n)^\bullet$ on $C_n$,
with the following properties.
\begin{enumerate}
\renewcommand{\theenumi}{(\alph{enumi})}
\item\label{i:pullpush}
If $p:X^l \to X^m$ is a morphism for which each component $X^l \to X$ is a projection,
then $p^*$ sends $C_m$ to $C_l$ and $p_*$ sends $C_l$ to $C_m$, and $p^*$ and $p_*$
respect the filtrations on $C_l$ and $C_m$.
\item\label{i:fin}
For every $n$, the $\mathbf Q$\nobreakdash-\hspace{0pt} algebra $C_n$ is finite-dimensional and contains $Z_n$.
\item\label{i:nilp}
For every $n$, the cycles in $C_n$ which are numerically equivalent to $0$ lie in $(C_n)^1$,
and we have $(C_n)^r = 0$ for $r$ large.
\end{enumerate}
\end{thm}
The finiteness result of Theorem~\ref{t:fin} is non-trivial only
if $Z_n$ is non-empty for some $n > 1$.
Indeed it will follow from Proposition~\ref{p:Chowsub} below that
for any smooth projective variety $X$ over $F$ and finite subset $Z_1$ of $CH(X)_\mathbf Q$,
there is a finite-dimensional graded $\mathbf Q$\nobreakdash-\hspace{0pt} subalgebra $C_n$ of $CH(X^n)_\mathbf Q$ for each $n$ such that
$C_1$ contains $Z_1$ and $p^*$ sends $C_m$ to $C_l$ and $p_*$ sends $C_l$ to $C_m$
for every $p$ as in \ref{i:pullpush} of Theorem~\ref{t:fin}.
If $X$ is a Kimura variety for $\sim$, then the ideal of correspondences
numerically equivalent to $0$ in the algebra $CH(X \times_F X)_\mathbf Q/{\sim}$
of self-correspondences of $X$ has been shown by Kimura (\cite{Kim}, Proposition~7.5)
to consist of nilpotent elements, and by Andr\'e and Kahn (\cite{AndKah}, Proposition~9.1.14)
to be in fact nilpotent.
The nilpotence result of Theorem~\ref{t:fin} implies that of Kimura, but neither
implies nor is implied by that of Andr\'e and Kahn.
If $\sim$ is numerical equivalence, then the $CH(X^m)_\mathbf Q/{\sim} = \overline{CH}(X^m)_\mathbf Q$
are all finite dimensional.
The following result shows that they are generated in a suitable sense by
the $\overline{CH}(X^m)_\mathbf Q$ for $m$ not exceeding some fixed $n$.
\begin{thm}\label{t:num}
Let $X$ be a smooth projective variety over $F$ which is a Kimura variety
for numerical equivalence.
Then there exists an integer $n \ge 0$ with the following property:
for every $m$, the $\mathbf Q$\nobreakdash-\hspace{0pt} vector space $\overline{CH}(X^m)_\mathbf Q$
is generated by elements of the form
\begin{equation}\label{e:numgen}
p_*((p_1)^*(z_1).(p_2)^*(z_2). \cdots .(p_r)^*(z_r)),
\end{equation}
where $z_i$ lies in $\overline{CH}(X^{m_i})_\mathbf Q$ with $m_i \le n$,
and $p:X^l \to X^m$ and the $p_i:X^l \to X^{m_i}$
are morphisms for which each component $X^l \to X$ is a projection.
\end{thm}
Theorem~\ref{t:fin} will be proved in Section~\ref{s:finproof} and
Theorem~\ref{t:num} in Section~\ref{s:numproof}.
Both theorems are deduced from the following fact.
Given a Kimura variety $X$ for $\sim$, there is a reductive group $G$
over $\mathbf Q$, a finite-dimensional $G$\nobreakdash-\hspace{0pt} module $E$,
a central $\mathbf Q$\nobreakdash-\hspace{0pt} point $\rho$ of $G$ with $\rho^2 = 1$, and a commutative
algebra $R$ in the tensor category $\mathbf REP(G,\rho)$ of $G$\nobreakdash-\hspace{0pt} modules
with symmetry modified by $\rho$, with the following property.
If we write $\mathcal M_{\sim}(F)$ for the category of ungraded motives over $F$ modulo ${\sim}$ and
$E_R$ for the free $R$\nobreakdash-\hspace{0pt} module $R \otimes_k E$ on $E$, then
there exist isomorphisms
\[
\xi_{r,s}:\Hom_{G,R}((E_R)^{\otimes r},(E_R)^{\otimes s}) \xrightarrow{\sim}
\Hom_{\mathcal M_{\sim}(F)}(h(X)^{\otimes r},h(X)^{\otimes s})
\]
which are compatible with composition and tensor products of morphisms and
with symmetries interchanging the factors $E_R$ and $h(X)$.
These isomorphisms arise because there is a fully faithful tensor functor
from the category of finitely generated free $R$\nobreakdash-\hspace{0pt} modules to $\mathcal M_{\sim}(F)$,
which sends $E_R$ to $h(X)$ (see \cite{O}, Lemma~3.3 for a similar result).
However to keep the exposition as brief as possible, the $\xi_{r,s}$ will
simply be constructed directly here, in Sections~\ref{s:Kimobj} and \ref{s:Kimvar}.
Now we have an equality
\[
CH(X^r)_\mathbf Q/{\sim} = \Hom_{\mathcal M_{\sim}(F)}(\mathbf 1,h(X)^{\otimes r})
\]
of $\mathbf Q$\nobreakdash-\hspace{0pt} algebras, and pullback along a morphism $f:X^l \to X^m$ is given by composition
with $h(f)$.
There is a canonical autoduality $h(X)^{\otimes 2} \to \mathbf 1$ on $h(X)$,
and push forward along $f$ is given by composing with the transpose of $h(f)$ defined
by this autoduality.
Using the isomorphisms $\xi_{r,s}$, Theorems~\ref{t:fin} and \ref{t:num} then reduce to easily
solved problems about the $G$\nobreakdash-\hspace{0pt} algebra $R$.
\section{Generated spaces of cycles}
The following result gives an explicit description of the spaces of cycles
generated by a given set of cycles on the powers of an arbitrary
smooth projective variety $X$.
By the top Chern class of $X$ we mean the element $(\Delta_X)^*(\Delta_X)_*(1)$
of $CH(X)_\mathbf Q$, where $\Delta_X:X \to X^2$ is the diagonal.
We define the tensor product $z \otimes z'$ of $z$ in $CH(X)_\mathbf Q$
and $z'$ in $CH(X')_\mathbf Q$ as $\pr_1{}\!^*(z).\pr_2{}\!^*(z')$ in
$CH(X \times_F X')_\mathbf Q$.
The push forward of $z \otimes z'$ along a morphism $f \times f'$ is then
$f_*(z) \otimes f'{}_*(z')$.
\begin{prop}\label{p:Chowsub}
Let $X$ be a smooth projective variety over $F$.
For $m = 0,1,2, \dots$ let $Z_m$ be a subset of $CH(X^m)_\mathbf Q$,
such that $Z_1$ contains the top Chern class of $X$.
Denote by $C_m$ the $\mathbf Q$\nobreakdash-\hspace{0pt} vector subspace of $CH(X^m)_\mathbf Q$
generated by elements of the form
\begin{equation}\label{e:pullpush}
p_*((p_1)^*(z_1).(p_2)^*(z_2). \cdots .(p_r)^*(z_r)),
\end{equation}
where $z_i$ lies in $Z_{m_i}$,
and $p:X^j \to X^m$ and the $p_i:X^j \to X^{m_i}$
are morphisms for which each component $X^j \to X$ is a projection.
Then $C_m$ is a $\mathbf Q$\nobreakdash-\hspace{0pt} subalgebra of $CH(X^m)_\mathbf Q$ for each $m$.
If $q:X^l \to X^m$ is a morphism for which each component $X^l \to X$
is a projection, then $q^*$ sends $C_m$ into $C_l$ and
$q_*$ sends $C_l$ into $C_m$.
\end{prop}
\begin{proof}
Write $\mathcal P_{l,m}$ for the set of morphisms $X^l \to X^m$ for which each
component $X^l \to X$ is a projection.
Then the composite of an element of $\mathcal P_{j,l}$ with an element of $\mathcal P_{l,m}$
lies in $\mathcal P_{j,m}$.
Thus $q_*(C_l) \subset C_m$ for $q$ in $\mathcal P_{l,m}$.
Similarly the product of two elements of $\mathcal P_{j,m}$ lies in
$\mathcal P_{2j,2m}$.
Thus the tensor product of two elements of $C_m$ lies in $C_{2m}$.
Since the product of two elements of $CH(X^m)_\mathbf Q$ is the pullback
of their tensor product along $\Delta_{X^m} \in \mathcal P_{m,2m}$,
it remains only to show that
\begin{equation}\label{e:qC}
q^*(C_m) \subset C_l
\end{equation}
for $q$ in $\mathcal P_{l,m}$.
This is clear when $l = m$ and $q$ is a symmetry $\sigma$
permuting the factors $X$ of $X^m$,
because $\sigma^* = (\sigma^{-1})_*$.
An arbitrary $q$ factors for some $l'$ as $q'' \circ q'$ with $q'$ in
$\mathcal P_{l,l'}$ a projection and $q''$ in $\mathcal P_{l',m}$ a closed immersion.
It thus is enough to show that \eqref{e:qC} holds when $q$ is a projection
or $q$ is a closed immersion.
Suppose that $q$ is a projection.
If we write
\[
w_{s,n}:X^{s+n} \to X^n
\]
for the projection onto the last $n$ factors,
then $q$ is a composite of a symmetry with $w_{l-m,m}$.
Thus \eqref{e:qC} holds because $w_{l-m,m}{}^* = 1 \otimes -$ sends $C_m$ into $C_l$.
Suppose that $q$ is a closed immersion.
Then $q$ is a composite of the
\[
e_s = X^{s-2} \times \Delta_X:X^{s-1} \to X^s
\]
for $s \ge 2$ and symmetries.
To prove \eqref{e:qC}, we may thus suppose that
$m \ge 2$ and $q = e_m$.
Denote by $W_m$ the $\mathbf Q$\nobreakdash-\hspace{0pt} subalgebra of $CH(X^m)_\mathbf Q$ generated by the
$v^*(Z_{m'})$ for any $m'$ and $v$ in $\mathcal P_{m,m'}$.
Then $u^*$ sends $W_m$ into $W_l$ for any $u$ in $\mathcal P_{l,m}$, and
by the projection formula $C_m$ is a $W_m$\nobreakdash-\hspace{0pt} submodule of $CH(X^m)_\mathbf Q$.
Since $C_m$ is the sum of the $p_*(W_j)$ with $p$ in $\mathcal P_{j,m}$,
it is to be shown that
\begin{equation}\label{e:emp}
(e_m)^*p_*(W_j) \subset C_{m-1}
\end{equation}
for every $p$ in $\mathcal P_{j,m}$.
We have $p = w_{j,m} \circ \Gamma_p$ with $\Gamma_p$ the graph of $p$, and
\[
(e_m)^* \circ (w_{j,m})_* = (w_{j,m-1})_* \circ (e_{j+m})^*.
\]
Thus \eqref{e:emp} will hold provided that $(e_{j+m})^*(\Gamma_p)_*(W_j) \subset C_{j+m-1}$.
Replacing $m$ by $j+m$ and $p$ by $\Gamma_p$, we may thus suppose that
$p$ has a left inverse in $\mathcal P_{m,j}$.
In that case any $y$ in $W_j$ is of the form $p^*(x)$ with $x$ in $W_m$,
and then
\[
(e_m)^*p_*(y) = (e_m)^*(p_*(1).x) = (e_m)^*p_*(1).(e_m)^*(x).
\]
Thus \eqref{e:emp} will hold provided that $(e_m)^*p_*(1)$ lies in $C_{m-1}$.
To see that $(e_m)^*p_*(1)$ lies in $C_{m-1}$, note that $e_m$ has a left inverse
$f$ in $\mathcal P_{m,m-1}$.
Then
\[
(e_m)^*p_*(1) = f_*(e_m)_*(e_m)^*p_*(1) = f_*(p_*(1).(e_m)_*(1)) = f_*p_*p^*(e_m)_*(1).
\]
Since $(e_m)_*(1) = (w_{m-2,2})^*(\Delta_X)_*(1)$, we reduce finally to showing that
$h^*(\Delta_X)_*(1)$ lies in $C_j$ for every $h$ in $\mathcal P_{j,2}$.
Such an $h$ factors as a projection followed by either a symmetry of $X^2$ or $\Delta_X$,
so we may suppose that $j = 1$ and $h = \Delta_X$.
Then $h^*(\Delta_X)_*(1)$ is the top Chern class of $X$, which by hypothesis lies
in $Z_1 \subset C_1$.
\end{proof}
\section{Group representations}\label{s:grprep}
Let $k$ be a field.
By a $k$\nobreakdash-\hspace{0pt} linear category we mean a category equipped with a structure
of $k$\nobreakdash-\hspace{0pt} vector space on each hom-set such that the composition is $k$\nobreakdash-\hspace{0pt} bilinear.
A $k$\nobreakdash-\hspace{0pt} linear category is said to be pseudo-abelian if it has a zero object
and direct sums, and if every idempotent endomorphism has an image.
A \emph{$k$\nobreakdash-\hspace{0pt} tensor category} is a pseudo-abelian $k$\nobreakdash-\hspace{0pt} linear category $\mathcal C$,
together with a structure of symmetric monoidal category on $\mathcal C$ (\cite{Mac},~VII~7)
such that the tensor product $\otimes$ is $k$\nobreakdash-\hspace{0pt} bilinear on hom-spaces.
Thus $\mathcal C$ is equipped with a unit object $\mathbf 1$, and natural isomorphisms
\[
(L \otimes M) \otimes N \xrightarrow{\sim} L \otimes (M \otimes N),
\]
the associativities,
\[
M \otimes N \xrightarrow{\sim} N \otimes M,
\]
the symmetries, and $\mathbf 1 \otimes M \xrightarrow{\sim} M$ and $M \otimes \mathbf 1 \xrightarrow{\sim} M$, which satisfy appropriate
compatibilities.
We assume in what follows that $\mathbf 1 \otimes M \xrightarrow{\sim} M$ and $M \otimes \mathbf 1 \xrightarrow{\sim} M$ are
identities: this can be always arranged by replacing if necessary $\otimes$ by an isomorphic
functor.
Brackets in multiple tensor products will often be omitted when it is of no importance
which bracketing is chosen.
The tensor product of $r$ copies of $M$ will then be written as $M^{\otimes r}$,
and similarly for morphisms.
There is a canonical action $\tau \mapsto M^{\otimes \tau}$ of the symmetric group
$\mathfrak{S}_r$ on $M^{\otimes r}$,
defined using the symmetries.
It extends to a homomorphism of $k$\nobreakdash-\hspace{0pt} algebras from $k[\mathfrak{S}_r]$
to $\End(M^{\otimes r})$.
When $k$ is of characteristic $0$, the symmetrising idempotent in $k[\mathfrak{S}_r]$
maps to an idempotent endomorphism $e$ of $M^{\otimes r}$, and we define the $r$th
symmetric power $S^r M$ of $M$ as the image of $e$.
Similarly we define the $r$th exterior power $\bigwedge^r M$ of $M$ using the
antisymmetrising idempotent in $k[\mathfrak{S}_r]$.
Let $G$ be a linear algebraic group over $k$.
We write $\mathbf REP(G)$ for the category of $G$\nobreakdash-\hspace{0pt} modules.
The tensor product $\otimes_k$ over $k$ defines on $\mathbf REP(G)$
a structure of $k$\nobreakdash-\hspace{0pt} tensor category.
Recall (\cite{Wat}, 3.3) that every $G$\nobreakdash-\hspace{0pt} module is the filtered colimit of its finite-dimensional
$G$\nobreakdash-\hspace{0pt} submodules.
If $E$ is a finite-dimensional $G$\nobreakdash-\hspace{0pt} module, then regarding $\mathbf REP(G)$
as a category of comodules (\cite{Wat}, 3.2) shows that $\Hom_G(E,-)$ preserves filtered
colimits.
When $k$ is algebraically closed, a $k$\nobreakdash-\hspace{0pt} vector subspace of a $G$\nobreakdash-\hspace{0pt} module is a
$G$\nobreakdash-\hspace{0pt} submodule provided it is stable under every $k$\nobreakdash-\hspace{0pt} point of $G$.
This is easily seen by reducing to the finite-dimensional case.
We suppose from now on that $k$ has characteristic $0$.
Let $\rho$ be a central $k$\nobreakdash-\hspace{0pt} point of $G$ with $\rho^2 = 1$.
Then $\rho$ induces a $\mathbf Z/2$\nobreakdash-\hspace{0pt} grading on $\mathbf REP(G)$, with the
$G$\nobreakdash-\hspace{0pt} modules pure of degree $i$ those on which $\rho$ acts as $(-1)^i$.
We define as follows a $k$\nobreakdash-\hspace{0pt} tensor category $\mathbf REP(G,\rho)$.
The underlying $k$\nobreakdash-\hspace{0pt} linear category, tensor product and associativities of $\mathbf REP(G,\rho)$
are the same as those of $\mathbf REP(G)$, but the symmetry $M \otimes N \xrightarrow{\sim} N \otimes M$ is given
by multiplying that in $\mathbf REP(G)$ by $(-1)^{ij}$ when $M$ is of degree $i$ and $N$
of degree $j$, and then extending by linearity.
When $\rho = 1$, the $k$\nobreakdash-\hspace{0pt} tensor categories $\mathbf REP(G)$ and $\mathbf REP(G,\rho)$ coincide.
An algebra in a $k$\nobreakdash-\hspace{0pt} tensor category is defined as an object $R$
together with a multiplication $R \otimes R \to R$ and unit $\mathbf 1 \to R$ satisfying
the usual associativity and identity conditions.
Since the symmetry is not used in this definition, an algebra in $\mathbf REP(G,\rho)$ is
the same as an algebra in $\mathbf REP(G)$, or equivalently a $G$\nobreakdash-\hspace{0pt} algebra.
An algebra $R$ in $\mathbf REP(G,\rho)$ will be said to be finitely generated if its underlying
$k$\nobreakdash-\hspace{0pt} algebra is.
It is equivalent to require that $R$ be generated as a $k$\nobreakdash-\hspace{0pt} algebra
by a finite-dimensional $G$\nobreakdash-\hspace{0pt} submodule.
A (left) module over an algebra $R$ is an object $N$ equipped with an action
$R \otimes N \to N$ satisfying the usual associativity and identity conditions.
If $R$ is an algebra in $\mathbf REP(G,\rho)$ or $\mathbf REP(G)$, we also speak of a $(G,R)$\nobreakdash-\hspace{0pt} module.
A $(G,R)$\nobreakdash-\hspace{0pt} module is said to be finitely
generated if it is finitely generated as a module
over the underlying $k$\nobreakdash-\hspace{0pt} algebra of $R$.
It is equivalent to require that it be generated as a module over the $k$\nobreakdash-\hspace{0pt} algebra $R$ by a
finite-dimensional $G$\nobreakdash-\hspace{0pt} submodule.
An algebra $R$ in a $k$\nobreakdash-\hspace{0pt} tensor category is said to be commutative if
composition with the symmetry interchanging the factors $R$ in $R \otimes R$
leaves the multiplication unchanged.
If $R$ is an algebra in $\mathbf REP(G,\rho)$,
this notion of commutativity does not in general coincide with that of the
underlying $k$\nobreakdash-\hspace{0pt} algebra, but it does
when $\rho$ acts as $1$ on $R$.
Coproducts exist in the category of commutative algebras in a $k$\nobreakdash-\hspace{0pt} tensor category:
the coproduct of $R$ and $R'$ is $R \otimes R'$ with multiplication the tensor
product of the multiplications of $R$ and $R'$ composed with the appropriate
symmetry.
To any map $[1,m] \to [1,l]$ and commutative algebra $R$ there is then associated a
morphism $R^{\otimes m} \to R^{\otimes l}$, defined using symmetries $R^{\otimes \tau}$ and
the unit and multiplication of $R$ and their tensor products and composites,
such that each component $R \to R^{\otimes l}$ is the embedding into one of the factors.
Let $R$ be a commutative algebra in $\mathbf REP(G,\rho)$.
Then the symmetry in $\mathbf REP(G,\rho)$ defines on any $R$\nobreakdash-\hspace{0pt} module a canonical structure
of $(R,R)$\nobreakdash-\hspace{0pt} bimodule.
The category of $(G,R)$\nobreakdash-\hspace{0pt} modules
has a structure of $k$\nobreakdash-\hspace{0pt} tensor category, with the tensor product $N \otimes_R N'$
of $N$ and $N'$ defined in the usual way as the coequaliser of the two
morphisms
\[
N \otimes_k R \otimes_k N' \to N \otimes_k N'
\]
given by the actions of $R$ on $N$ and $N'$, and the tensor product $f \otimes_R f'$
of $f:M \to N$ and $f':M' \to N'$ as the unique morphism rendering the square
\[
\begin{CD}
M \otimes_R M' @>{f \otimes_R f'}>> N \otimes_R N' \\
@AAA @AAA \\
M \otimes_k M' @>{f \otimes_k f'}>> N \otimes_k N'
\end{CD}
\]
commutative.
Let $P$ be an object in $\mathbf REP(G,\rho)$.
We write $P_R$ for the object $R \otimes_k P$ in the $k$\nobreakdash-\hspace{0pt} tensor category of $(G,R)$\nobreakdash-\hspace{0pt} modules.
A morphism of commutative algebras $R' \to R$ in $\mathbf REP(G,\rho)$ induces by tensoring
with $P$ a morphism of $R'$\nobreakdash-\hspace{0pt} modules $P_{R'} \to P_R$.
For each $l$ and $m$, extension of scalars along $R' \to R$
then gives a $k$\nobreakdash-\hspace{0pt} linear map
\begin{equation}\label{e:extscal}
\Hom_{G,R'}((P_{R'})^{\otimes m},(P_{R'})^{\otimes l}) \to
\Hom_{G,R}((P_R)^{\otimes m},(P_R)^{\otimes l})
\end{equation}
Explicitly, \eqref{e:extscal} sends $f'$ to the unique morphism of $(G,R)$\nobreakdash-\hspace{0pt} modules $f$
that renders the square
\[
\begin{CD}
(P_R)^{\otimes m} @>{f}>> (P_R)^{\otimes l} \\
@AAA @AAA \\
(P_{R'})^{\otimes m} @>{f'}>> (P_{R'})^{\otimes l}
\end{CD}
\]
commutative, where the vertical arrows are those defined by $P_{R'} \to P_R$.
If $P$ is finite-dimensional, then for given commutative algebra $R$
and $f$,
there is a finitely generated $G$\nobreakdash-\hspace{0pt} subalgebra $R'$ of $R$ such that $f$ is
in the image of \eqref{e:extscal}.
This can be seen by writing $R$ as the filtered colimit $\colim_\lambda R_\lambda$
of its finitely generated $G$\nobreakdash-\hspace{0pt} subalgebras, and noting that since $P^{\otimes m}$ is finite-dimensional,
the composite of $P^{\otimes m} \to (P_R)^{\otimes m}$ with $f$ factors through some
$(P_{R_\lambda})^{\otimes l}$.
Suppose that $G$ is reductive, or equivalently that $\Hom_G(P,-)$ is exact for every
$G$\nobreakdash-\hspace{0pt} module $P$.
Then $\Hom_G(P,-)$ preserves colimits for $P$ finite-dimensional.
In particular $(-)^G = \Hom_G(k,-)$ preserves colimits.
If $R$ is a commutative algebra in $\mathbf REP(G,\rho)$ with $R^G = k$,
then $R$ has a unique maximal $G$\nobreakdash-\hspace{0pt} ideal.
Indeed $J^G = 0$ for $J \ne R$ a $G$\nobreakdash-\hspace{0pt} ideal of $R$,
while $(J_1)^G = 0$ and $(J_2)^G = 0$ implies $(J_1 + J_2)^G = 0$.
\begin{lem}\label{l:repfin}
Let $G$ be a reductive group over a field $k$ of characteristic $0$ and $\rho$ be a
central $k$\nobreakdash-\hspace{0pt} point of $G$ with $\rho^2 = 1$.
Let $R$ be a finitely generated
commutative algebra in $\mathbf REP(G,\rho)$ with $R^G = k$, and $N$ be a
finitely generated $R$\nobreakdash-\hspace{0pt} module.
\begin{enumerate}
\item\label{i:algfin}
The $k$\nobreakdash-\hspace{0pt} vector space $N^G$ is finite-dimensional.
\item\label{i:idealcompl}
For every $G$\nobreakdash-\hspace{0pt} ideal $J \ne R$ of $R$,
we have
$(J^rN)^G = 0$ for $r$ large.
\end{enumerate}
\end{lem}
\begin{proof}
Every object $P$ of $\mathbf REP(G,\rho)$ decomposes as $P_0 \oplus P_1$
where $\rho$ acts as $(-1)^i$ on $P_i$.
In particular $R = R_0 \oplus R_1$ with $R_0$ a $G$\nobreakdash-\hspace{0pt} subalgebra of $R$.
Suppose that $R$ is generated as an algebra by the finite-dimensional $G$\nobreakdash-\hspace{0pt} submodule $M$.
Then $R_0$ is generated as an algebra by $M_0 + M_1{}\!^2$, and hence is finitely generated.
Since $R$ is a commutative algebra in $\mathbf REP(G,\rho)$, it is generated as an $R_0$\nobreakdash-\hspace{0pt} module
by $M_1$.
Hence any finitely generated $R$\nobreakdash-\hspace{0pt} module is finitely generated
as an $R_0$\nobreakdash-\hspace{0pt} module.
To prove \ref{i:algfin}, we reduce after replacing $R$ by $R_0$
to the case where $R = R_0$.
Then $R$ is a commutative $G$\nobreakdash-\hspace{0pt} algebra in the usual sense.
In this case it is well known that $N^G$ is finite-dimensional over $k = R^G$
(e.g.\ \cite{ShaAlgIV}, II~Theorem~3.25).
To prove \ref{i:idealcompl}, note that $J_0 \ne R_0$ is an ideal of $R_0$.
Since $R$ is a finitely generated $R_0$\nobreakdash-\hspace{0pt} module,
so also is $R_1$.
If $x_1, x_2, \dots ,x_s$ generate $R_1$ over $R_0$, then since each $x_i$ has square $0$
we have $R_1{}\!^r = 0$ and hence $J_1{}\!^r = 0$ for $r>s$.
Thus for $r > s$ we have
\[
J^r N
= (J_0 + J_1)^r N
= J_0{}\!^r N + J_0{}\!^{r-1}J_1 N + \dots + J_0{}\!^{r-s}J_1{}\!^s N
\subset J_0{}\!^{r-s} N.
\]
Replacing $R$ by $R_0$ and $J$ by $J_0$,
we thus reduce again to the case where $R = R_0$ is a commutative $G$\nobreakdash-\hspace{0pt} algebra
in the usual sense.
We may suppose further that $k$ is algebraically closed.
By \ref{i:algfin}, it is enough to show that $\bigcap_{r=0}^\infty J^r N = 0$,
or equivalently
(\cite{BAC-1} III \S 3 No.~2 Proposition~5 and IV \S 1 No.~1 Proposition~2, Corollaire~2)
that $J + \mathfrak{p} \ne R$ for every associated prime
$\mathfrak{p}$ of $N$.
Fix such a $\mathfrak{p}$, and consider the intersection $\mathfrak{p}'$
of the $g\mathfrak{p}$ for $g$ in $G(k)$.
It is stable under $G(k)$, and hence since $k$ is algebraically closed is a $G$\nobreakdash-\hspace{0pt} ideal of $R$.
Thus $J + \mathfrak{p}' \ne R$,
because $J$ and $\mathfrak{p}'$ are contained in the unique maximal $G$\nobreakdash-\hspace{0pt} ideal of $R$.
Since each $g\mathfrak{p}$ lies in the finite set of associated primes of $N$,
it follows that $J + g\mathfrak{p} \ne R$ for some $g$ in $G(k)$.
Applying $g^{-1}$ then shows that $J + \mathfrak{p} \ne R$.
\end{proof}
Let $l_0$ and $l_1$ be integers $\ge 0$.
Write
\begin{equation}\label{e:Gdef}
G = GL_{l_0} \times_k GL_{l_1},
\end{equation}
$E_i$ for the standard representation of $GL_{l_i}$, regarded as a $G$\nobreakdash-\hspace{0pt} module, and
\begin{equation}\label{e:Edef}
E = E_0 \oplus E_1.
\end{equation}
We may identify the endomorphism of $E$ that sends $E_i$ to itself and acts on it as $(-1)^i$
with a central $k$\nobreakdash-\hspace{0pt} point $\rho$ of $G$ with $\rho^2 = 1$.
Consider the semidirect product
\begin{equation}\label{e:semidir}
\Gamma_r = (\mathbf Z/2)^r \rtimes \mathfrak{S}_r,
\end{equation}
where the symmetric group $\mathfrak{S}_r$ acts on $(\mathbf Z/2)^r$ through its action
on $[1,r]$.
For each $r$, the group $\Gamma_r$ acts on $E^{\otimes r}$,
with the action of
$(\mathbf Z/2)^r$ the tensor product of the actions $i \mapsto \rho^i$ of $\mathbf Z/2$ on $E$,
and the action of $\mathfrak{S}_r$ that defined by the
symmetries in $\mathbf REP(G,\rho)$.
Thus we obtain a homomorphism
\begin{equation}\label{e:semidirhom}
k[\Gamma_r] \to \End_G(E^{\otimes r})
\end{equation}
of $k$\nobreakdash-\hspace{0pt} algebras.
For $r \le r'$ we may regard $\Gamma_r$ as a subgroup of $\Gamma_{r'}$, and hence
$k[\Gamma_r]$ as a $k$\nobreakdash-\hspace{0pt} subalgebra of $k[\Gamma_{r'}]$, by identifying
$(\mathbf Z/2)^r$ with the subgroup of $(\mathbf Z/2)^{r'}$ with the last $r'-r$ factors the identity
and $\mathfrak{S}_r$ with the subgroup of $\mathfrak{S}_{r'}$ which leaves
the last $r'-r$ elements of $[1,r']$ fixed.
Write $e_0$ for the idempotent of $k[\mathbf Z/2]$ given by half the sum of the two elements
of $\mathbf Z/2$, and $e_1$ for $1-e_0$.
Given $\pi = (\pi_1,\pi_2,\dots,\pi_r)$ in $(\mathbf Z/2)^r$, we then have an idempotent
\[
e_\pi = e_{\pi_1} \otimes e_{\pi_2} \otimes \dots \otimes e_{\pi_r}
\]
in $k[\mathbf Z/2]^{\otimes r} = k[(\mathbf Z/2)^r] \subset k[\Gamma_r]$.
When every component of $\pi$ is $i \in \mathbf Z/2$,
we write $e_{i,r}$ for $e_\pi$.
We also write $a_{0,r}$ for the antisymmetrising idempotent and
$a_{1,r}$ for the symmetrising idempotent in $k[\mathfrak{S}_r]$, and for $i \in \mathbf Z/2$ we write
\begin{equation}\label{e:xir}
x_{i,r} = e_{i,l_i +1} a_{i,l_i +1} = e_{i,l_i +1} a_{i,l_i +1} e_{i,l_i +1}
= a_{i,l_i +1} e_{i,l_i +1} \in k[\Gamma_{l_i +1}] \subset k[\Gamma_r]
\end{equation}
if $r > l_i$ and $x_{i,r} = 0$ otherwise.
\begin{lem}\label{l:GL}
\begin{enumerate}
\item\label{i:hom0}
If $r \ne r'$ then $\Hom_G(E^{\otimes r},E^{\otimes r'}) = 0$.
\item\label{i:homsurj}
The homomorphism \eqref{e:semidirhom} is surjective, with kernel the ideal of $k[\Gamma_r]$
generated by $x_{0,r}$ and $x_{1,r}$.
\end{enumerate}
\end{lem}
\begin{proof}
\ref{i:hom0}
The action of $G$ on $E$ restricts along the appropriate $\mathbf G_m \to G$ to
the homothetic action of $\mathbf G_m$ on $E$.
\ref{i:homsurj}
Write $I$ for the ideal of
$k[\Gamma_r]$ generated by $x_{0,r}$ and $x_{1,r}$.
The image of $e_\pi$ under \eqref{e:semidirhom} is
the projection onto the direct summand
\[
E_\pi = E_{\pi_1} \otimes_k E_{\pi_2} \otimes_k \dots \otimes_k E_{\pi_r}
\]
of $E^{\otimes r}$.
The $e_\pi$ give a decomposition of the identity of $k[\Gamma_r]$ into orthogonal idempotents,
and \eqref{e:semidirhom} is the direct sum over $\pi$ and $\pi'$ of the homomorphisms
\begin{equation}\label{e:semidirhompi}
e_{\pi'} k[\Gamma_r] e_\pi \to \Hom_G(E_\pi,E_{\pi'})
\end{equation}
it induces on direct summands of $k[\Gamma_r]$ and $\End_G(E^{\otimes r})$.
It is thus enough to show that \eqref{e:semidirhompi} is surjective, with
kernel $e_{\pi'} I e_\pi$.
Restricting to the centre
of $G$ shows that the target of \eqref{e:semidirhompi} is $0$ unless
$\pi'$ and $\pi$ have the same number of components $0$ or $1$,
or equivalently $\pi' = \tau \pi \tau^{-1}$ for some $\tau \in \mathfrak{S}_r$.
The same holds for the source of \eqref{e:semidirhompi}, because
\[
\tau e_\pi \tau^{-1} = e_{\tau \pi \tau^{-1}}
\]
for every $\tau$ and $\pi$.
Since further the image of $\tau \in \mathfrak{S}_r$ under \eqref{e:semidirhom} induces an
isomorphism from $E_\pi$ to $E_{\tau \pi \tau^{-1}}$,
to show that \eqref{e:semidirhompi} has the required properties we may suppose
that $\pi' = \pi$ and that $r = r_0 + r_1$ where the first $r_0$ components of
$\pi$ are $0$ and the last $r_1$ are $1$.
Then the source of \eqref{e:semidirhompi} has a basis
$e_\pi \tau e_\pi = e_\pi \tau$
with $\tau$ in the subgroup $\mathfrak{S}_{r_0} \times \mathfrak{S}_{r_1}$ of
$\mathfrak{S}_r$ that permutes the first $r_0$ and last $r_1$ elements of $[1,r]$
among themselves.
Thus we may identify
\[
k[\mathfrak{S}_{r_0}] \otimes_k k[\mathfrak{S}_{r_1}]
= k[\mathfrak{S}_{r_0} \times \mathfrak{S}_{r_1}]
\]
with the (non-unitary) $k$\nobreakdash-\hspace{0pt} subalgebra $e_\pi k[\Gamma_r] e_\pi$ of $k[\Gamma_r]$.
Similarly we may identify
\[
\End_G(E_0{}\!^{\otimes r_0}) \otimes_k \End_G(E_1{}\!^{\otimes r_1})
= \End_G(E_0{}\!^{\otimes r_0} \otimes_k E_1{}\!^{\otimes r_1})
\]
with the (non-unitary) $k$\nobreakdash-\hspace{0pt} subalgebra $\End_G(E_\pi)$ of $\End_G(E)$.
Now given $\tau$ and $\tau'$ in $\mathfrak{S}_r$, the element $e_\pi \tau' x_{i,r} \tau^{-1} e_\pi$
is $0$ unless both $\tau$ and $\tau'$ send $[1,l_i + 1]$ into $[1,r_0]$ if $i = 0$
or into $[r_0+1,r]$ if $i = 1$.
With the above identifications, $e_\pi I e_\pi$ is thus the ideal of
$k[\mathfrak{S}_{r_0}] \otimes_k k[\mathfrak{S}_{r_1}]$ generated by $y_0 \otimes 1$
and $1 \otimes y_1$, where $y_i$ is $a_{i,l_i+1}$ in
$k[\mathfrak{S}_{l_i +1}] \subset k[\mathfrak{S}_{r_i}]$
if $r_i > l_i$ and $y_i = 0$ otherwise.
Further \eqref{e:semidirhompi} is the tensor product of
the homomorphisms
\begin{equation}\label{e:symhomi}
k[\mathfrak{S}_{r_i}] \to \End_G(E_i{}\!^{\otimes r_i})
\end{equation}
of $k$\nobreakdash-\hspace{0pt} algebras sending $\tau \in \mathfrak{S}_{r_i}$ to
$E_i{}\!^{\otimes \tau}$ in $\mathbf REP(G,\rho)$.
It will thus suffice to prove that \eqref{e:symhomi} is surjective with kernel
generated by $y_i$.
If $i = 0$, \eqref{e:symhomi} may be identified with the homomorphism defined
by the action of $\mathfrak{S}_{r_i}$ by symmetries on the $r_i$th tensor
power in $\mathbf REP(GL_{l_i})$ of the standard representation of $GL_{l_i}$,
while if $i = 1$, the composite of the automorphism
$\tau \mapsto \mathrm{sgn}(\tau) \tau$
of $k[\mathfrak{S}_{r_i}]$ with \eqref{e:symhomi} may be so identified.
The required result is thus classical (e.g.\ \cite{FulHar}, Theorem~6.3).
\end{proof}
\section{Duals}\label{s:dual}
Let $\mathcal C$ be a $k$\nobreakdash-\hspace{0pt} tensor category.
By a duality pairing in $\mathcal C$ we mean a quadruple $(L,L^\vee,\eta,\varepsilon)$
consisting of objects $L$ and $L^\vee$ of $\mathcal C$ and morphisms
$\eta:\mathbf 1 \to L^\vee \otimes L$, the unit, and
$\varepsilon:L \otimes L^\vee \to \mathbf 1$, the counit,
satisfying triangular identities analogous to those of an adjunction
(\cite{Mac}, p.~85).
Explicitly, it is required that, modulo associativities,
the composite of $L \otimes \eta$ with $\varepsilon \otimes L$ should be $1_L$, and of
$\eta \otimes L^\vee$ with $L^\vee \otimes \varepsilon$ should be $1_{L^\vee}$.
When such an $(L,L^\vee,\eta,\varepsilon)$ exists for a given $L$, it is said to be a duality pairing
for $L$, and $L$ is said to be dualisable, and $L^\vee$ to be dual to $L$.
We then have a dual pairing $(L^\vee,L,\widetilde{\eta},\widetilde{\varepsilon})$
for $L^\vee$, with $\widetilde{\eta}$ and $\widetilde{\varepsilon}$
obtained from $\eta$ and $\varepsilon$ by composing with the appropriate symmetries.
In verifying the properties of duals recalled below, it is useful to reduce to the
case where $\mathcal C$ is strict, i.e.\ where all
associativities of $\mathcal C$ are identities.
This can be done by taking (see \cite{Mac},~XI~3, Theorem~1) a $k$\nobreakdash-\hspace{0pt} linear
strong symmetric monoidal functor (\cite{Mac},~XI~2) $\mathcal C \to \mathcal C'$ giving an equivalence
to a strict $k$\nobreakdash-\hspace{0pt} tensor category $\mathcal C'$.
Suppose given duality pairings $(L,L^\vee,\eta,\varepsilon)$ for $L$
and $(L',L'{}^\vee,\eta',\varepsilon')$ for $L'$.
Then we have a tensor product duality pairing for $L \otimes L'$, with dual
$L^\vee \otimes L'{}^\vee$, and unit and counit
obtained from $\eta \otimes \eta'$ and $\varepsilon \otimes \varepsilon'$ by
composing with the appropriate symmetries.
Further any morphism $f:L \to L'$ has a transpose $f^\vee:L'{}^\vee \to L^\vee$,
characterised by the condition
\[
\varepsilon \circ (L \otimes f^\vee) = \varepsilon' \circ (f \otimes L'{}^\vee),
\]
or by a similar condition using $\eta$ and $\eta'$.
Explicitly, $f^\vee$ is given modulo associativities by the composite
of $\eta \otimes L'{}^\vee$ with $L^\vee \otimes f \otimes L'{}^\vee$ and
$L^\vee \otimes \varepsilon'$.
We have $(1_L)^\vee = 1_{L^\vee}$ and $(f' \circ f)^\vee = f^\vee \circ f'{}^\vee$,
and, with the transpose of $f^\vee$ taken using the dual pairing,
we have $f^{\vee \vee} = f$.
In particular taking $L = L'$ shows that a duality pairing for $L$ is unique up
to unique isomorphism.
Let $L$ be a dualisable object of $\mathcal C$.
Then we have a $k$\nobreakdash-\hspace{0pt} linear map
\[
\tr_L:\Hom_\mathcal C(N \otimes L,N' \otimes L) \to \Hom_\mathcal C(N,N'),
\]
natural in $N$ and $N'$,
which sends $f$ to its contraction $\tr_L(f)$ with respect to $L$,
defined as follows.
Modulo associativities, $\tr_L(f)$
is the composite of $N \otimes \widetilde{\eta}$
with $f \otimes L^\vee$ and $N' \otimes \varepsilon$, with $L^\vee$ and $\varepsilon$
as above
and $\widetilde{\eta}$ the composite of $\eta$ with the symmetry interchanging
$L^\vee$ and $L$.
It does not depend on the choice of duality pairing for $L$.
When $N = N' = \mathbf 1$, the contraction $\tr_L(f)$ is the trace $\tr(f)$ of
the endomorphism $f$ of $L$.
The rank of $L$ is defined as $\tr(1_L)$.
Modulo associativities, $\tr_{L \otimes L'}$ is given by
successive contraction with respect to $L'$ and $L$, and $\tr_L$ commutes
with $M \otimes -$.
By the appropriate triangular identity for $L$ we have
\begin{equation}\label{e:gcomp}
g'' \circ g' = \tr_L((g'' \otimes g') \circ \sigma)
\end{equation}
for $g':M' \to L$ and $g'':L \to M''$,
with $\sigma$ the symmetry interchanging $M'$ and $L$.
Let $L$ be a dualisable object of $\mathcal C$, and let $\tau$ be a permutation of $[1,r+1]$
and $f_1,f_2, \dots ,f_{r+1}$ be endomorphisms of $L$.
Write $\tau'$ for the permutation of $[1,r]$ obtained by omitting $r+1$ from the cycle
of $\tau$ containing it,
and define endomorphisms $c$ of $\mathbf 1$ and $f'{}\!_1,f'{}\!_2, \dots ,f'{}\!_r$ of $L$ as follows.
If $\tau$ leaves $r+1$ fixed, then $c = \tr(f_{r+1})$ and $f'{}\!_i = f_i$ for $i\le r$.
If $\tau$ sends $r+1$ to $i_0 \le r$, then $c = 1$, and $f'{}\!_i$ for $i\le r$ is $f_i$
when $i \ne i_0$ and $f_{i_0} \circ f_{r+1}$ when $i = i_0$.
We then have
\begin{equation}\label{e:symcontr}
\tr_L((f_1 \otimes f_2 \otimes \dots \otimes f_{r+1}) \circ L^{\otimes \tau}) =
c ((f'{}\!_1 \otimes f'{}\!_2 \otimes \dots \otimes f'{}\!_r) \circ L^{\otimes \tau'}).
\end{equation}
To see this, reduce to the case where $\tau$ leaves all but the last two elements
of $[1,r+1]$ fixed, by composing on the left and right with appropriate morphisms
$L^{\otimes \tau_0} \otimes L$ with $\tau_0$ a permutation of $[1,r]$.
Let $L$, $L'$, $M$ and $M'$ be objects in $\mathcal C$, and $(L,L^\vee,\eta,\varepsilon)$
and $(L',L'{}^\vee,\eta',\varepsilon')$ be duality pairings for $L$ and $L'$.
Then we have a canonical isomorphism
\[
\Hom_\mathcal C(M,M' \otimes L) \xrightarrow{\sim} \Hom_\mathcal C(M \otimes L^\vee,M')
\]
which modulo associativities sends $f:M \to M'\otimes L$ to the composite of
$f \otimes L^\vee$ with $M' \otimes \varepsilon$.
Its inverse is defined using $\eta$.
We also have a canonical isomorphism
\[
\Hom_\mathcal C(L' \otimes M,M') \xrightarrow{\sim} \Hom_\mathcal C(M,L'{}^\vee \otimes M')
\]
defined using $\eta'$.
Replacing $M$ by $L' \otimes M$ in the first of these isomorphisms
and by $M \otimes L^\vee$ in the second,
and using the symmetries interchanging $M$ and $L'$ and $L'{}^\vee$ and $M'$,
then gives a canonical isomorphism
\[
\delta_{M,L;M',L'}:
\Hom_\mathcal C(M \otimes L',M' \otimes L) \xrightarrow{\sim}
\Hom_\mathcal C(M \otimes L^\vee,M' \otimes L'{}^\vee).
\]
Modulo associativities, $\delta_{M,L;M',L'}$ sends $f$ to the composite of
$M \otimes \widetilde{\eta}' \otimes L^\vee$,
the tensor product of $f$ with the symmetry interchanging $L'{}^\vee$ and $L^\vee$,
and $M' \otimes \varepsilon \otimes L'{}^\vee$, where $\widetilde{\eta}'$
is $\eta'$ composed with the symmetry interchanging $L'{}^\vee$ and $L'$.
With the transpose taken using the chosen duality pairings for $L$ and $L'$, we have
\begin{equation}\label{e:deltahg}
\delta_{M,L;M',L'}(h \otimes g) = h \otimes g^\vee.
\end{equation}
With the duality pairing $(\mathbf 1,\mathbf 1,1_\mathbf 1,1_\mathbf 1)$ for $\mathbf 1$, we have
\begin{equation}\label{e:deltaf}
\delta_{M,L;\mathbf 1,\mathbf 1}(f) = \varepsilon \circ (f \otimes L^\vee).
\end{equation}
With the tensor product duality pairings for $L_1 \otimes L_2$ and $L_1{}\!' \otimes L_2{}\!'$,
we have
\begin{multline}\label{e:deltatens}
\sigma''' \circ
(\delta_{M_1,L_1;M_1{}\!',L_1{}\!'}(f_1) \otimes \delta_{M_2,L_2;M_2{}\!',L_2{}\!'}(f_2))
\circ \sigma''
= \\
= \delta_{M_1 \otimes M_2,L_1 \otimes L_2;M_1{}\!' \otimes M_2{}\!',L_1{}\!' \otimes L_2{}\!'}
(\sigma' \circ (f_1 \otimes f_2) \circ \sigma)
\end{multline}
where each of $\sigma$, $\sigma'$, $\sigma''$ and $\sigma'''$
is a symmetry interchanging the middle two factors in a tensor
product $(- \otimes -) \otimes (- \otimes -)$.
If $M'$ is dualisable, we have
\begin{equation}\label{e:deltaMcomp}
\delta_{M',L';M'',L''}(f') \circ \delta_{M,L;M',L'}(f) =
\delta_{M,L;M'',L''}(\tr_{M' \otimes L'}
(\sigma_2 \circ (f' \otimes f) \circ \sigma_1))
\end{equation}
for $\sigma_1$ the symmetry interchanging $M$ and $M'$ and $\sigma_2$ the symmetry
interchanging $L'$ and $L$.
This can be seen by showing
that modulo associativities both sides of
\eqref{e:deltaMcomp} coincide with a morphism obtained from
\[
f' \otimes f \otimes L^\vee \otimes L''{}^\vee \otimes M'{}^\vee \otimes L'{}^\vee
\]
as follows: compose on the left and right with appropriate symmetries, then on the left
with the tensor product of $M'' \otimes L''{}^\vee$ and the counits for $L$, $L'$ and $M'$
and on the right with the tensor product of $M \otimes L^\vee$ with the units
for $L''$, $L'$ and $M'$.
To show this in the case of the left hand side of \eqref{e:deltaMcomp},
write it as a contraction
with respect to $M' \otimes L'{}^\vee$ using \eqref{e:gcomp}
and contract first with respect to $L'{}^\vee$, using the triangular identity.
With the duality pairing $(\mathbf 1,\mathbf 1,1_\mathbf 1,1_\mathbf 1)$ for $\mathbf 1$ and the tensor product
duality pairing for $L \otimes N$,
we have
\begin{equation}\label{e:deltaNcomp}
\delta_{M,N;\mathbf 1,\mathbf 1}(g \circ f) = (\delta_{M,L;\mathbf 1,\mathbf 1}(f) \otimes \delta_{L,N;\mathbf 1,\mathbf 1}(g))
\circ \sigma \circ \delta_{M,N;M \otimes L,L \otimes N}(\alpha),
\end{equation}
with $\alpha:M \otimes (L \otimes N) \xrightarrow{\sim} (M \otimes L) \otimes N$ the associativity
and $\sigma$ the symmetry interchanging $L$ and $L^\vee$ in the tensor product of
$M \otimes L$ and $L^\vee \otimes N^\vee$.
Indeed modulo associativities $\sigma \circ \delta_{M,N;M \otimes L,L \otimes N}(\alpha)$
is $1_M \otimes \eta \otimes 1_{N^\vee}$ by the
triangular identity for $N$, and \eqref{e:deltaNcomp} then follows by the triangular
identity for $L$.
Let $(L,L^\vee,\eta,\varepsilon)$ be a duality pairing for
the object $L$ of $\mathcal C$.
Its $r$th tensor power $(L^{\otimes r},(L^\vee)^{\otimes r},\eta_r,\varepsilon_r)$
is a duality pairing for $L^{\otimes r}$.
We write
\[
L^{r,s} = L^{\otimes r} \otimes (L^\vee)^{\otimes s}.
\]
Then $L^{r,0} = L^{\otimes r}$.
We define a $k$\nobreakdash-\hspace{0pt} bilinear product $\widetilde{\otimes}$ on morphisms between the $L^{r,s}$
by requiring that the square
\begin{equation}\label{e:tildedef}
\begin{CD}
L^{r_1,s_1} \otimes L^{r_2,s_2} @>{\sim}>> L^{r_1+r_2,s_1+s_2} \\
@V{f_1 \otimes f_2}VV @VV{f_1 \mathbin{\widetilde{\otimes}} f_2}V \\
L^{r_1{}\!',s_1{}\!'} \otimes L^{r_2{}\!',s_2{}\!'} @>{\sim}>>
L^{r_1{}\!'+r_2{}\!',s_1{}\!'+s_2{}\!'}
\end{CD}
\end{equation}
commute, with the top isomorphism the symmetry interchanging the two factors
$(L^\vee)^{\otimes s_1}$ and $L^{\otimes r_2}$ and the bottom
that interchanging $(L^\vee)^{\otimes s'{}\!_1}$ and $L^{\otimes r'{}\!_2}$.
Then $\widetilde{\otimes}$ preserves composites, is associative, and we have
\begin{equation}\label{e:tildecom}
f_2 \mathbin{\widetilde{\otimes}} f_1 =
\sigma' \circ (f_1 \mathbin{\widetilde{\otimes}} f_2) \circ \sigma^{-1},
\end{equation}
where $\sigma$ interchanges the first $r_1$ with the last $r_2$ factors $L$
and the first $s_1$ with the last $s_2$ factors $L^\vee$ of $ L^{r_1+r_2,s_1+s_2}$,
and similarly for $\sigma'$.
We define an isomorphism
\begin{equation}\label{e:deltaLdef}
\delta_{L;r,s;r',s'}:
\Hom_\mathcal C(L^{\otimes (r+s')},L^{\otimes (r'+s)}) \xrightarrow{\sim} \Hom_\mathcal C(L^{r,s},L^{r',s'})
\end{equation}
by taking $L^{\otimes r},L^{\otimes s},L^{\otimes r'},L^{\otimes s'}$
for $M,L,M',L'$ in $\delta_{M,L;M',L'}$.
It follows from \eqref{e:deltahg} that
\begin{equation}\label{e:deltaLhg}
\delta_{L;r,s;r',s'}(h \otimes g) = h \otimes g^\vee,
\end{equation}
and from \eqref{e:deltaf} that
\begin{equation}\label{e:deltaLf}
\delta_{L;r,s;0,0}(f) = \varepsilon_s \circ (f \otimes (L^\vee)^{\otimes s}).
\end{equation}
By \eqref{e:deltatens}, we have
\begin{multline}\label{e:deltaLtens}
\delta_{L;r_1,s_1;r_1{}\!',s_1{}\!'}(f_1) \mathbin{\widetilde{\otimes}}
\delta_{L;r_2,s_2;r_2{}\!',s_2{}\!'}(f_2) = \\
= \delta_{L;r_1 + r_2,s_1 + s_2;r_1{}\!' + r_2{}\!',s_1{}\!' + s_2{}\!'}
(\sigma' \circ (f_1 \otimes f_2) \circ \sigma)
\end{multline}
for appropriate symmetries $\sigma$ and $\sigma'$.
By \eqref{e:deltaMcomp}, we have
\begin{equation}\label{e:deltaLcomp}
\delta_{L;r',s';r'',s''}(f') \circ \delta_{L;r,s;r',s'}(f) =
\delta_{L;r,s;r'',s''}(\tr_{L^{\otimes (r'+s')}}
(\sigma_2 \circ (f' \otimes f) \circ \sigma_1))
\end{equation}
for appropriate symmetries $\sigma_1$ and $\sigma_2$.
We have
\begin{equation}\label{e:deltacomp}
\delta_{L;r,t;0,0}(g \circ f) =
(\delta_{L;r,s;0,0}(f) \mathbin{\widetilde{\otimes}} \delta_{L;s,t;0,0}(g)) \circ
\delta_{L;r,t;r+s,s+t}(1_{L^{\otimes (r+s+t)}}),
\end{equation}
by \eqref{e:deltaNcomp}.
Let $G$ be a linear algebraic group over $k$ and $\rho$ be a central $k$\nobreakdash-\hspace{0pt} point
of $G$ with $\rho^2 = 1$.
Let $E$ be a finite-dimensional $G$\nobreakdash-\hspace{0pt} module and $R$ be a commutative algebra in $\mathbf REP(G,\rho)$.
Then $E$ in $\mathbf REP(G,\rho)$ and $E_R$ in the $k$\nobreakdash-\hspace{0pt} tensor category of
$(G,R)$\nobreakdash-\hspace{0pt} modules are dualisable.
Suppose chosen duality pairings for $E$ and $E_R$.
Then we have a $G$\nobreakdash-\hspace{0pt} module $E^{r,s}$ and a $(G,R)$\nobreakdash-\hspace{0pt} module $(E_R)^{r,s}$
for every $r$ and $s$.
We have canonical embeddings $E \to E_R$ and $E^\vee \to (E_R)^\vee$,
which are compatible with the units and counits of
the chosen duality pairings for $E$ and $E_R$.
They define a canonical embedding
$E^{r,s} \to (E_R)^{r,s}$, which induces an isomorphism of $(G,R)$\nobreakdash-\hspace{0pt} modules
$(E^{r,s})_R \xrightarrow{\sim} (E_R)^{r,s}$.
Given $u:E^{r,s} \to E^{r',s'}$, we write $u_{R;r,s;r',s'}$
for the unique morphism of $(G,R)$\nobreakdash-\hspace{0pt} modules
for which the square
\begin{equation}\label{e:daggerdef}
\begin{CD}
(E_R)^{r,s} @>{u_{R;r,s;r',s'}}>> (E_R)^{r',s'} \\
@AAA @AAA \\
E^{r,s} @>{u}>> E^{r',s'}
\end{CD}
\end{equation}
commutes, with the vertical arrows the canonical embeddings.
Then $(-)_{R;r,s;r',s'}$ preserves identities and composites,
counits $E^{r,r} \to E^{0,0}$ and $(E_R)^{r,r} \to (E_R)^{0,0}$
and (with the identification $E^{r,0} = E^{\otimes r}$)
commutes with the isomorphisms $\delta_E$ and $\delta_{E_R}$.
For each $r$ and $s$ we have an isomorphism
\begin{equation}\label{e:psidef}
\psi_{r,s}:\Hom_{G,R}((E_R)^{r,s},R) \xrightarrow{\sim} \Hom_G(E^{r,s},R),
\end{equation}
given by composing with the canonical embedding $E^{r,s} \to (E_R)^{r,s}$.
Then
\begin{equation}\label{e:psinat}
\psi_{r,s}(w' \circ u_{R;r,s;r',s'}) = \psi_{r',s'}(w') \circ u
\end{equation}
for every $w':(E_R)^{r',s'} \to R$ and $u:E^{r,s} \to E^{r',s'}$.
Suppose that $G$ is reductive and that $R^G = k$,
so that $R$ has a unique maximal $G$\nobreakdash-\hspace{0pt} ideal $J$.
Let $N$ be a dualisable $(G,R)$\nobreakdash-\hspace{0pt} module and $f:R \to N$ be a morphism of $(G,R)$\nobreakdash-\hspace{0pt} modules
which does not factor through $J N$.
Then $f$ has a left inverse.
Indeed $f^\vee$ does not factor through $J$,
because $f$ is the composite of the unit for $N^\vee$ with $N \otimes_R f^\vee$.
Hence $f^\vee$ is surjective, and there is an $x$ in its source
fixed by $G$ with $f^\vee(x) = 1$.
Thus $f^\vee$ has a unique right inverse $g$ with $g(1) = x$, and
$g^\vee$ is left inverse to $f = f^{\vee \vee}$.
\section{Kimura objects}\label{s:Kimobj}
Let $k$ be a field of characteristic $0$ and $\mathcal C$ be a $k$\nobreakdash-\hspace{0pt} tensor category
with $\End_\mathcal C(\mathbf 1) = k$.
An object $L$ of $\mathcal C$ will be called positive (resp.\ negative) if it is dualisable and
$\bigwedge^{r+1} L$ (resp.\ $S^{r+1} L$) is $0$ for some $r$.
An object of $\mathcal C$ will be called a Kimura object if it is the direct sum of a positive
and a negative object of $\mathcal C$.
Let $L$ be a Kimura object of $\mathcal C$.
Then $L = L_0 \oplus L_1$ with $L_0$ positive and $L_1$ negative.
Denote by $l_0$ (resp.\ $l_1$) the least $r$ such that
$\bigwedge^{r+1} L_0$ (resp.\ $S^{r+1} L_1$) is $0$, and let $G$ and $E$ be
as in \eqref{e:Gdef} and \eqref{e:Edef}, and $\rho$ be the central $k$\nobreakdash-\hspace{0pt} point
of $G$ which acts as $(-1)^i$ on $E_i$.
The goal of this section is to construct a commutative algebra
$R$ in $\mathbf REP(G,\rho)$ and an isomorphism
\begin{equation}\label{e:xiiso}
\xi_{r,s}:\Hom_{G,R}((E_R)^{\otimes r},(E_R)^{\otimes s})
\xrightarrow{\sim} \Hom_\mathcal C(L^{\otimes r},L^{\otimes s})
\end{equation}
for every $r$ and $s$, such that the $\xi$ preserve composites and symmetries and are compatible
with $\otimes_R$ and $\otimes$.
Given an object $M$ of $\mathcal C$, write $a_{M,0,r}$ (resp.\ $a_{M,1,r}$) for the image
of the antisymmetrising (resp.\ symmetrising) idempotent of $k[\mathfrak{S}_r]$ under the
$k$\nobreakdash-\hspace{0pt} homomorphism to $\End(M^{\otimes r})$ that sends $\tau$ in
$\mathfrak{S}_r$ to $M^{\otimes \tau}$.
If $M$ is dualisable of rank $d$, then applying \eqref{e:symcontr} with the $f_j$ the identities
shows that
\[
(r+1)\tr_M(a_{M,i,r+1}) = (d - (-1)^i r)a_{M,i,r}
\]
for $i = 0,1$.
If $M$ is positive (resp.\ negative), it follows that
$d$ (resp.\ $-d$) is the least $r$ for which $\bigwedge^{r+1} M$ (resp.\ $S^{r+1} M$)
is $0$.
Thus $L_i$ has rank $(-1)^i l_i$.
Write $b$ for the automorphism of $L$ that sends $L_i$ to $L_i$ and acts on it as $(-1)^i$.
Then for every $r$, the group $\Gamma_r$ of \eqref{e:semidir} acts on $L^{\otimes r}$
with the action of $(\mathbf Z/2)^r$ the $r$th tensor power of the action $i \mapsto b^i$ of
$\mathbf Z/2$ on $L$, and the action of $\mathfrak{S}_r$ that given by $\tau \mapsto L^{\otimes \tau}$.
Thus we obtain a homomorphism
\[
\alpha_r:k[\Gamma_r] \to \End_\mathcal C(L^{\otimes r})
\]
of $k$\nobreakdash-\hspace{0pt} algebras.
If $l_i < r$, then $\alpha_r$ sends the element $x_{i,r}$ of \eqref{e:xir} to the projection onto the
direct summand $\bigwedge^{l_0 + 1}L_0 \otimes_k L^{\otimes (r-l_0-1)}$ when $i = 0$ and
$S^{l_1 + 1}L_1 \otimes_k L^{\otimes (r-l_1-1)}$ when $i = 1$.
Thus both $x_{0,r}$ and $x_{1,r}$ lie in the kernel of $\alpha_r$.
If we write $\beta_r$ for \eqref{e:semidirhom}, it follows by Lemma~\ref{l:GL}~\ref{i:homsurj} that
the kernel of $\alpha_r$ contains that of $\beta_r$.
Hence by Lemma~\ref{l:GL}
there is for each $r$ and $r'$ a unique $k$\nobreakdash-\hspace{0pt} linear map
\[
\varphi_{r;r'}:\Hom_G(E^{\otimes r},E^{\otimes r'}) \to \Hom_\mathcal C(L^{\otimes r},L^{\otimes r'})
\]
such that
\[
\alpha_r = \varphi_{r;r} \circ \beta_r
\]
for every $r$.
By construction, the $\varphi_{r;r'}$ preserve symmetries, identities and composites,
and they are compatible with $\otimes_k$ and $\otimes$.
Applying \eqref{e:symcontr} $t$ times shows that for $v:E^{\otimes (r+t)} \to E^{\otimes (r'+t)}$ we have
\begin{equation}\label{e:phicontr}
\varphi_{r;r'}(\tr_{E^{\otimes t}}(v)) = \tr_{L^{\otimes t}}(\varphi_{r + t;r' + t}(v)),
\end{equation}
because $\tr(\rho^i) = \tr(b^i) = l_0 - (-1)^il_1$.
For every $r,s$ and $r',s'$, we define a $k$\nobreakdash-\hspace{0pt} linear map
$\varphi_{r,s;r',s'}$ by requiring that the square
\[
\begin{CD}
\Hom_G(E^{r,s},E^{r',s'}) @>{\varphi_{r,s;r',s'}}>> \Hom_\mathcal C(L^{r,s},L^{r',s'}) \\
@A{\delta_{E;r,s;r',s'}}AA @AA{\delta_{L;r,s;r',s'}}A \\
\Hom_G(E^{\otimes (r+s')},E^{\otimes (r'+s)}) @>{\varphi_{r+s';r'+s}}>>
\Hom_\mathcal C(L^{\otimes (r+s')},L^{\otimes (r'+s)})
\end{CD}
\]
commute, with the $\delta$ the isomorphisms of \eqref{e:deltaLdef}.
Then by \eqref{e:deltaLhg} the $\varphi_{r,s;r',s'}$ preserve identities,
and by \eqref{e:deltaLcomp} and
\eqref{e:phicontr} they preserve composites.
By \eqref{e:deltaLtens}, they are compatible
with the bilinear products, defined as in \eqref{e:tildedef},
$\widetilde{\otimes}_k$ on $G$\nobreakdash-\hspace{0pt} homomorphisms
between the $E^{r,s}$ and $\widetilde{\otimes}$ on morphisms between the $L^{r,s}$.
By \eqref{e:deltaLhg},
they send symmetries permuting the factors $E$ or $E^\vee$
of $E^{r,s}$ to the corresponding symmetries of $L^{r,s}$.
We now define as follows a commutative algebra $R$ in $\mathbf REP(G,\rho)$.
Consider the small category $\mathcal L$ whose objects are triples $(r,s,f)$ with $r$ and
$s$ integers $\ge 0$ and $f:L^{r,s} \to \mathbf 1$, where
a morphism from $(r,s,f)$ to $(r',s',f')$ in $\mathcal L$ is a morphism
$u:E^{r,s} \to E^{r',s'}$ such that
\[
f = f' \circ \varphi_{r,s;r',s'}(u).
\]
Then we define $R$ as the colimit
\[
R = \colim_{(r,s,f) \in \mathcal L} E^{r,s}
\]
in $\mathbf REP(G,\rho)$.
Write the colimit injection at $(r,s,f)$ as
\[
i_{(r,s,f)}:E^{r,s} \to R.
\]
We define the unit $\mathbf 1 \to R$ of $R$ as $i_{(0,0,1_\mathbf 1)}$.
We define the multiplication $R \otimes_k R \to R$ by requiring that
for every $((r_1,s_1,f_1),(r_2,s_2,f_2))$ in $\mathcal L \times \mathcal L$ the square
\begin{equation}\label{e:musquare}
\begin{CD}
E^{r_1,s_1} \otimes_k E^{r_2,s_2} @>{\sim}>> E^{r_1+r_2,s_1+s_2} \\
@V{i_{(r_1,s_1,f_1)} \otimes_k i_{(r_2,s_2,f_2)}}VV
@VV{i_{(r_1+r_2,s_1+s_2,f_1 \mathbin{\widetilde{\otimes}} f_2)}}V \\
R \otimes_k R @>>> R
\end{CD}
\end{equation}
should commute,
where the top isomorphism is that of \eqref{e:tildedef} with $E$ for $L$.
Such an $R \otimes_k R \to R$ exists and is unique because
the left vertical arrows of the squares \eqref{e:musquare} form a colimiting cone
by the fact that $\otimes_k$ preserves colimits, while their top right legs form
a cone by the compatibility of the $\varphi_{r,s;r's'}$ with $\widetilde{\otimes}_k$
and $\widetilde{\otimes}$.
The associativity of the multiplication can be checked by writing
$R \otimes_k R \otimes_k R$ as a colimit over $\mathcal L \times \mathcal L \times \mathcal L$
and using the associativity of $\widetilde{\otimes}$.
The commutativity follows from \eqref{e:tildecom} and the compatibility of
the $\varphi_{r,s;r's'}$ with the symmetries.
Since $G$ is reductive, each $\Hom_G(E^{r,s},-)$ preserves colimits.
Hence
the
\[
\Hom_G(E^{r,s},i_{(r',s',f')}):\Hom_G(E^{r,s},E^{r',s'}) \to \Hom_G(E^{r,s},R)
\]
form a colimiting cone of $k$\nobreakdash-\hspace{0pt} vector spaces.
Thus for every $r$ and $s$ there is a unique homomorphism
\[
\theta_{r,s}:\Hom_G(E^{r,s},R) \to \Hom_\mathcal C(L^{r,s},\mathbf 1)
\]
whose composite with $\Hom_G(E^{r,s},i_{(r',s',f')})$ sends $u:E^{r,s} \to E^{r',s'}$ to
\[
f' \circ \varphi_{r,s;r',s'}(u).
\]
Further $\theta_{r,s}$ is an isomorphism, with inverse sending $f:L^{r,s} \to \mathbf 1$
to $i_{(r,s,f)}$.
Thus every $E^{r,s} \to R$ can be written uniquely in the form $i_{(r,s,f)}$.
It follows that
\begin{equation}\label{e:theta0nat}
\theta_{r,s}(v' \circ u) = \theta_{r',s'}(v') \circ \varphi_{r,s;r',s'}(u)
\end{equation}
for $v':E^{r',s'} \to R$, that $\theta_{0,0}$ sends the identity $k \to R$ of $R$ to $1_{\mathbf 1}$,
and that
\begin{equation}\label{e:theta0tens}
\theta_{r_1+r_2,s_1+s_2}(v) =
\theta_{r_1,s_1}(v_1) \mathbin{\widetilde{\otimes}} \theta_{r_2,s_2}(v_2)
\end{equation}
for $v_1:E^{r_1,s_1} \to R$ and $v_2:E^{r_2,s_2} \to R$, where $v$ is defined by a diagram of the
form \eqref{e:musquare} with left arrow $v_1 \otimes_k v_2$ and right arrow $v$.
Composing the isomorphisms $\psi_{r,s}$ of \eqref{e:psidef} and $\theta_{r,s}$
gives an isomorphism
\[
\widehat{\theta}_{r,s} = \theta_{r,s} \circ \psi_{r,s}:
\Hom_{G,R}((E_R)^{r,s},R) \xrightarrow{\sim} \Hom_\mathcal C(L^{r,s},\mathbf 1).
\]
Then with $u_{R;r,s;r',s'}$ as in \eqref{e:daggerdef},
we have by \eqref{e:psinat} and \eqref{e:theta0nat}
\begin{equation}\label{e:thetanat}
\widehat{\theta}_{r,s}(w' \circ u_{R;r,s;r',s'}) =
\widehat{\theta}_{r',s'}(w') \circ \varphi_{r,s;r',s'}(u)
\end{equation}
for every $w':(E_R)^{r',s'} \to R$ and $u:E^{r,s} \to E^{r',s'}$.
Also $\widehat{\theta}_{0,0}(1_R) = 1_{\mathbf 1}$, and
\begin{equation}\label{e:thetatens}
\widehat{\theta}_{r_1+r_2,s_1+s_2}(w_1 \mathbin{\widetilde{\otimes}}_R w_2) =
\widehat{\theta}_{r_1,s_1}(w_1) \mathbin{\widetilde{\otimes}} \widehat{\theta}_{r_2,s_2}(w_2)
\end{equation}
for every $w_1:(E_R)^{r_1,s_1} \to R$ and $w_2:(E_R)^{r_2,s_2} \to R$, by \eqref{e:theta0tens}.
We now define the isomorphism \eqref{e:xiiso} by requiring that the square
\[
\begin{CD}
\Hom_{G,R}((E_R)^{\otimes r},(E_R)^{\otimes s}) @>{\xi_{r,s}}>> \Hom_\mathcal C(L^{\otimes r},L^{\otimes s}) \\
@V{\delta_{E_R;r,s,0,0}}VV @VV{\delta_{L;r,s,0,0}}V\\
\Hom_{G,R}((E_R)^{r,s},R) @>{\widehat{\theta}_{r,s}}>> \Hom_\mathcal C(L^{r,s},\mathbf 1)
\end{CD}
\]
commute.
The $\xi$ preserve composites by
\eqref{e:deltacomp}, \eqref{e:thetanat}, \eqref{e:thetatens},
and the fact that $(-)_{R;r,s;r',s'}$ preserves identities and is compatible
with $\delta_E$ and $\delta_{E_R}$.
They are compatible with $\otimes_R$ and $\otimes$ by
\eqref{e:deltaLtens}, where the relevant $\sigma$ and $\sigma'$ reduce to associativities,
and \eqref{e:thetatens}.
They are compatible with the symmetries by \eqref{e:thetanat}
with $w' = 1_R$ and $u$ the composite of $\sigma \otimes_k (E^\vee)^{\otimes r}$
for $\sigma$ a symmetry of $E^{\otimes r}$ with the counit $E^{r,r} \to k$,
using \eqref{e:deltaLf} and the compatibility of $(-)_{R;r,s;r',s'}$ with symmetries,
composites, and counits.
\section{Kimura varieties}\label{s:Kimvar}
We denote by $\mathcal M_{\sim}(F)$ the category of ungraded $\mathbf Q$\nobreakdash-\hspace{0pt} linear motives over $F$
for the equivalence relation ${\sim}$.
It is a $\mathbf Q$\nobreakdash-\hspace{0pt} tensor category.
There is a contravariant functor $h$ from the category $\mathcal V_F$ of
smooth projective varieties over $F$ to $\mathcal M_{\sim}(F)$,
which sends products in $\mathcal V_F$ to tensor products in $\mathcal M_{\sim}(F)$.
We then have
\begin{equation}\label{e:HomChow}
\Hom_{\mathcal M_{\sim}(F)}(h(X'),h(X)) = CH(X' \times_F X)_\mathbf Q/{\sim},
\end{equation}
and the composite $z \circ z'$ of $z':h(X'') \to h(X')$ with
$z:h(X') \to h(X)$ is given by
\[
z \circ z' = (\pr_{13})_*((\pr_{12})^*(z').(\pr_{23})^*(z)),
\]
where the projections are from $X'' \times_F X' \times_F X$.
Further $h(q)$ for $q:X \to X'$ is the push forward of $1$ in $CH(X)_\mathbf Q/{\sim}$
along $X \to X' \times_F X$ with components $q$ and $1_X$.
The images under $h$ of the structural morphism and diagonal of $X$
define on $h(X)$ a canonical structure of commutative
algebra in $\mathcal M_{\sim}(F)$.
With this structure \eqref{e:HomChow} reduces when $X' = \Spec(F)$ to an
equality of algebras
\[
\Hom_{\mathcal M_{\sim}(F)}(\mathbf 1,h(X)) = CH(X)_\mathbf Q/{\sim}.
\]
Also $h(X)$ is canonically autodual: we have a canonical duality pairing
\[
(h(X),h(X),\eta_X,\varepsilon_X),
\]
with both $\eta_X$ and $\varepsilon_X$
the class in $CH(X \times_F X)_\mathbf Q/{\sim}$ of the diagonal of $X$.
The canonical duality pairing for $h(X \times_F X')$ is the tensor product of those for
$h(X)$ and $h(X')$.
The canonical duality pairings define a transpose $(-)^\vee$ for morphisms $h(X') \to h(X)$,
given by pullback of cycles along
the symmetry interchanging $X$ and $X'$.
For $q:X \to X'$ and $z \in CH(X)_\mathbf Q/{\sim}$ and $z' \in CH(X')_\mathbf Q/{\sim}$, we have
\begin{equation}\label{e:pull}
q^*(z') = h(q) \circ z'
\end{equation}
and
\begin{equation}\label{e:push}
q_*(z) = h(q)^\vee \circ z.
\end{equation}
A \emph{Kimura variety for ${\sim}$} is a smooth projective variety $X$ over $F$ such
that $h(X)$ is a Kimura object in $\mathcal M_{\sim}(F)$.
If the motive of $X$ in the category of \emph{graded} motives for $\sim$ is a Kimura object,
then $X$ is a Kimura variety for $\sim$.
The converse also holds, as can be seen by factoring out the tensor ideals
of tensor nilpotent morphisms, but this will not be needed.
Let $X$ be a Kimura variety for ${\sim}$.
We may apply the construction of Section~\ref{s:Kimobj} with $k = \mathbf Q$, $\mathcal C = \mathcal M_{\sim}(F)$
and $L = h(X)$.
For appropriate $l_0$ and $l_1$, we then have with $G$, $E$ and $\rho$ as in
Section~\ref{s:Kimobj} a commutative algebra $R$ in $\mathbf REP(G,\rho)$ and isomorphisms
\[
\xi_{r,s}:\Hom_{G,R}((E_R)^{\otimes r},(E_R)^{\otimes s}) \xrightarrow{\sim}
\Hom_{\mathcal M_{\sim}(F)}(h(X)^{\otimes r},h(X)^{\otimes s})
\]
which are compatible with composites, tensor products, and symmetries.
The homomorphisms of $R$\nobreakdash-\hspace{0pt} modules $\iota$ and $\mu$ with
respective images under $\xi_{0,1}$ and $\xi_{2,1}$ the unit
and multiplication of $h(X)$ define a structure of
commutative $R$\nobreakdash-\hspace{0pt} algebra on $E_R$.
Also the homomorphisms $\eta_1$ and
$\varepsilon_1$ with respective images $\eta_X$ and $\varepsilon_X$
under $\xi_{0,2}$ and $\xi_{2,0}$ are the unit and counit of
a duality pairing $(E_R,E_R,\eta_1,\varepsilon_1)$ for $E_R$.
We denote by
\[
((E_R)^{\otimes r},(E_R)^{\otimes r},\eta_r,\varepsilon_r)
\]
its $r$th tensor power.
Then $\xi_{0,2r}(\eta_r) = \eta_{X^r}$ and
$\xi_{2r,0}(\varepsilon_r) = \varepsilon_{X^r}$.
For any $(G,R)$\nobreakdash-\hspace{0pt} homomorphism $f$ from $(E_R)^{\otimes m}$ to $(E_R)^{\otimes l}$
we have
\[
\xi_{l,m}(f^\vee) = \xi_{m,l}(f)^\vee,
\]
where the transpose of $f$ is taken using duality pairings just defined.
Further
\[
\xi_{0,n}:\Hom_{G,R}(R,(E_R)^{\otimes n}) \xrightarrow{\sim} \Hom_{\mathcal M_{\sim}(F)}(\mathbf 1,h(X)^{\otimes n})
\]
is an isomorphism of $\mathbf Q$\nobreakdash-\hspace{0pt} algebras.
We note that
\[
R^G = \Hom_{G,R}(R,R) = CH(\Spec(F))_\mathbf Q/{\sim} = \mathbf Q,
\]
by the isomorphism $\xi_{0,0}$.
\section{Proof of Theorem~\ref{t:fin}}\label{s:finproof}
To prove Theorem~\ref{t:fin}, we may suppose that $Z_1$ contains the classes of
the equidimensional components of $X$, and that $Z_n$ contains the homogeneous
components of each of its elements for the grading of $CH(X^n)_\mathbf Q/{\sim}$.
Denote by $\mathcal A$ the set of those families $C = ((C_n)^i)_{n,i \in \mathbf N}$
with $(C_n)^0$ a $\mathbf Q$\nobreakdash-\hspace{0pt} subalgebra $C_n$ of $CH(X^n)_\mathbf Q/{\sim}$
and $((C_n)^i)_{i \in \mathbf N}$ a filtration of the algebra $C_n$, such that \ref{i:pullpush},
\ref{i:fin} and \ref{i:nilp} of Theorem~\ref{t:fin} hold.
It is to be shown that there is a $C$ in $\mathcal A$ which is graded, i.e.\
such that $(C_n)^i$ is a graded $\mathbf Q$\nobreakdash-\hspace{0pt} vector subspace of $CH(X^n)_\mathbf Q/{\sim}$ for each $n$ and $i$.
For $\lambda \in \mathbf Q^*$, define an endomorphism $z \mapsto \lambda * z$ of the algebra
$CH(X^n)_\mathbf Q/{\sim}$ by taking $\lambda * z = \lambda^j z$ when $z$ is homogeneous
of degree $j$.
Then the graded subspaces of $CH(X^n)_\mathbf Q/{\sim}$ are those that are stable under each
$\lambda * -$.
For each $C$ in $\mathcal A$ we have a $\lambda * C$ in $\mathcal A$ with $((\lambda * C)_n)^i$
the image under $\lambda * -$ of $(C_n)^i$.
Indeed $(\lambda * C)_n$ contains $Z_n$
by the homogeneity assumption on $Z_n$, and
$p_*$ sends $((\lambda * C)_l)^i$ to $((\lambda * C)_m)^i$ for $p$ as in
\ref{i:pullpush}, because $C_l$ contains the classes of the equidimensional
components of each factor $X$ of $X^l$ by the assumption on $Z_1$.
The $C$ in $\mathcal A$ that are graded are then those fixed by each $\lambda * -$.
Now if $\mathcal A$ is non-empty, it has a least element for the ordering of the $C$
by inclusion of the $(C_n)^i$.
Such a least element will be
fixed by the $\lambda * -$, and hence graded.
It will thus suffice to show that $\mathcal A$ is non-empty.
Let $G$, $E$, $\rho$, $R$, $\xi_{r,s}$, $\eta_r$, $\varepsilon_r$, $\iota$ and $\mu$ be as in Section~\ref{s:Kimvar}.
With the identification
\begin{equation}\label{e:CHXn}
\Hom_{\mathcal M_{\sim}(F)}(\mathbf 1,h(X)^{\otimes n}) = CH(X^n)_\mathbf Q/{\sim},
\end{equation}
there exists a finitely generated $G$\nobreakdash-\hspace{0pt} subalgebra $R'$ of $R$ such that
if we write $\beta_{m,n}$ for the homomorphism \eqref{e:extscal} with $P = E$,
then $(\xi_{0,n})^{-1}(Z_n)$ is contained in the image of $\beta_{0,n}$ for every $n$,
and $\eta_1 = \beta_{0,2}(\eta'{}\!_1)$,
$\varepsilon_1 = \beta_{2,0}(\varepsilon'{}\!_1)$, $\iota = \beta_{0,1}(\iota')$
and $\mu = \beta_{2,1}(\mu')$ for some $\varepsilon'{}\!_1$, $\eta'{}\!_1$,
$\iota'$ and $\mu'$.
We then have a duality pairing $(E_{R'},E_{R'},\eta'{}\!_1,\varepsilon'{}\!_1)$ for $E_{R'}$,
and if its $r$th tensor power is
\[
((E_{R'})^{\otimes r},(E_{R'})^{\otimes r},\eta'{}\!_r,\varepsilon'{}\!_r),
\]
we have $\eta_r = \beta_{0,2r}(\eta'{}\!_r)$
and $\varepsilon_r = \beta_{2r,0}(\varepsilon'{}\!_r)$.
Further $\iota'$ and $\mu'$ define a structure of commutative $(G,R')$\nobreakdash-\hspace{0pt} algebra on $E_{R'}$.
The $\beta_{m,l}$, and hence their composites
\[
\xi'{}\!_{m,l}:\Hom_{G,R'}((E_{R'})^{\otimes m},(E_{R'})^{\otimes l})
\to \Hom_{\mathcal M_{\sim}(F)}(h(X)^{\otimes m},h(X)^{\otimes l})
\]
with the $\xi_{m,l}$,
preserve identities, composition, tensor products, and transposes
defined using the $\eta'{}\!_r$ and $\varepsilon'{}\!_r$.
Further $\xi'{}\!_{0,n}$ is a homomorphism of $\mathbf Q$\nobreakdash-\hspace{0pt} algebras.
We have $R'{}^G = R^G = \mathbf Q$.
Thus by Lemma~\ref{l:repfin}~\ref{i:algfin}, the $\mathbf Q$\nobreakdash-\hspace{0pt} algebra
\[
\Hom_{G,R'}(R',(E_{R'})^{\otimes n}) \xrightarrow{\sim} \Hom_G(k,(E_{R'})^{\otimes n})
= ((E_{R'})^{\otimes n})^G
\]
is finite-dimensional for every $n$.
Denote by $J'$ the unique maximal $G$\nobreakdash-\hspace{0pt} ideal of $R'$.
Then we have for each $n$ a filtration of the $(G,R')$\nobreakdash-\hspace{0pt} algebra $(E_{R'})^{\otimes n}$
by the $G$\nobreakdash-\hspace{0pt} ideals
\[
J'{}^r (E_{R'})^{\otimes n},
\]
and hence a filtration of the $\mathbf Q$\nobreakdash-\hspace{0pt} algebra $\Hom_{G,R'}(R',(E_{R'})^{\otimes n})$
by the ideals
\begin{equation}\label{e:homideal}
\Hom_{G,R'}(R',J'{}^r(E_{R'})^{\otimes n}).
\end{equation}
Since \eqref{e:homideal} is isomorphic to $(J'{}^r(E_{R'})^{\otimes n})^G$,
it is $0$ for $r$ large, by Lemma~\ref{l:repfin}~\ref{i:idealcompl}.
We now define an element $C$ of $\mathcal A$ as follows.
With the identification \eqref{e:CHXn}, take for $C_n$ the image of $\xi'{}\!_{0,n}$,
and for $(C_n)^r$ the image under $\xi'{}\!_{0,n}$ of \eqref{e:homideal}.
Then \ref{i:fin} holds.
Let $z = \xi'{}\!_{0,n}(x)$ be an element of $C_n$ which does not lie in $(C_n)^1$.
Then $x$ does not factor through $J'(E_{R'})^{\otimes n}$.
As was seen at the end of Section~\ref{s:dual}, this implies that $x$ has a left
inverse.
Hence $z$ has a left inverse $y:h(X)^{\otimes n} \to \mathbf 1$.
Identifying $y$ with an element of $CH(X^n)_\mathbf Q/{\sim}$, the composite
$y \circ z = 1_\mathbf 1$ is the push forward of $y.z$ along the structural morphism of $X^n$.
Thus $z$ is not numerically equivalent to $0$.
The first statement of \ref{i:nilp} follows.
The second statement of \ref{i:nilp} follows from the fact that
\eqref{e:homideal} is $0$ for $r$ large.
Let $p:X^l \to X^m$ be as in \ref{i:pullpush}.
If $p$ is defined by $\nu:[1,m] \to [1,l]$,
then
\[
h(p):h(X)^{\otimes m} \to h(X)^{\otimes l}
\]
is the morphism of commutative algebras in $\mathcal M_{\sim}(F)$ defined by $\nu$.
Thus
\[
h(p) = \xi'{}\!_{m,l}(f)
\]
for $f:(E_{R'})^{\otimes m} \to (E_{R'})^{\otimes l}$ the morphism of
commutative $(G,R')$\nobreakdash-\hspace{0pt} algebras
defined by $\nu$.
That $p^*$ sends $C_m$ to $C_l$ and respects the filtrations now follows from
\eqref{e:pull} and the compatibility of the $\xi'{}\!_{m,l}$ with composites.
That $p_*$ sends $C_l$ to $C_m$ and respects the filtrations follows from
\eqref{e:push} and the compatibility of the $\xi'{}\!_{m,l}$ with composites
and transposes.
Thus \ref{i:pullpush} holds.
\section{Proof of Theorem~\ref{t:num}}\label{s:numproof}
Let $G$, $E$, $\rho$, $R$, $\xi_{r,s}$, $\eta_r$ and $\varepsilon_r$ be as in
Section~\ref{s:Kimvar}, and suppose that the equivalence relation $\sim$
is numerical equivalence.
We show first that $R$ is $G$\nobreakdash-\hspace{0pt} simple, i.e.\ has no $G$\nobreakdash-\hspace{0pt} ideals other
than $0$ and $R$.
Any non-zero $z:h(X)^{\otimes m} \to \mathbf 1$ has a right inverse
$y$, because $z \circ y$ is the push forward of $z.y$
along the structural morphism of $X^m$.
The isomorphisms $\xi$ then show that any non-zero $(E_R)^{\otimes m} \to R$
has a right inverse, and is thus surjective.
Let $J \ne 0$ be a $G$\nobreakdash-\hspace{0pt} ideal of $R$.
Since $G$ is reductive and $E$ is a faithful representation of $G$,
the category of finite-dimensional representations of $G$ is the pseudo-abelian hull
of its full subcategory with objects the $E^{r,s}$ (\cite{Wat},~3.5).
Thus for some $r,s$ there is a non-zero homomorphism of $G$\nobreakdash-\hspace{0pt} modules from $E^{r,s}$ to $R$
which factors through $J$.
It defines by the isomorphism \eqref{e:psidef} a non-zero homomorphism of $(G,R)$\nobreakdash-\hspace{0pt} modules $f$ from
$(E_R)^{r,s}$ to $R$ which also factors through $J$.
Since $(E_R)^{r,s}$ is isomorphic by autoduality of $E_R$ to $(E_R)^{r+s}$,
it follows that $f$ is surjective, so that $J = R$.
Thus $R$ has no $G$\nobreakdash-\hspace{0pt} ideals other than $0$ and $R$.
If $R_1$ is the $G$\nobreakdash-\hspace{0pt} submodule of $R$ on which $\rho$ acts as $-1$, then
the ideal of $R$ generated by $R_1$ is a $G$\nobreakdash-\hspace{0pt} ideal $\ne R$, because the elements
of $R_1$ have square $0$.
Thus $R_1 = 0$, so that $R$ is commutative as an algebra in $\mathbf REP(G)$.
By a theorem of Magid (\cite{Mag}, Theorem~4.5),
the $G$\nobreakdash-\hspace{0pt} simplicity of $R$ and the fact that $R^G = \mathbf Q$ then imply that $\Spec(R)_k$
is isomorphic to $G_k/H$ for some extension $k$ of $\mathbf Q$ and closed subgroup $H$ of $G_k$.
Thus $R$ is a finitely generated $\mathbf Q$\nobreakdash-\hspace{0pt} algebra.
Hence there exists an $n$ such that a set of generators of
$R$ is contained
in the sum of the images of the $G$\nobreakdash-\hspace{0pt} homomorphisms $E^{r,s} \to R$ for $r+s \le n$.
We may suppose that $n \ge 2$.
We show that $n$ satisfies the requirements of Theorem~\ref{t:num}.
Denote by $U_m$ the $\mathbf Q$\nobreakdash-\hspace{0pt} vector subspace of $\overline{CH}(X^m)_\mathbf Q = CH(X^m)_\mathbf Q/\sim$
generated by the elements \eqref{e:numgen}, and by
\[
U_{m,l} \subset
\Hom_{G,R}((E_R)^{\otimes m},(E_R)^{\otimes l})
\]
the inverse image of
\[
U_{m+l} \subset
\Hom_{\mathcal M_{\sim}(F)}(h(X)^{\otimes m},h(X)^{\otimes l})
= \overline{CH}(X^{m+l})_\mathbf Q
\]
under $\xi_{m,l}$.
The symmetries of $(E_R)^{\otimes m}$ lie in $U_{m,m}$,
because by Proposition~\ref{p:Chowsub} the symmetries of $h(X)^{\otimes m}$ lie
in $U_{2m}$.
Similarly the composite of an element of $U_{m,m'}$ with an element of $U_{m',m''}$ lies in
$U_{m,m''}$, the tensor product of an element of $U_{m,l}$ with an element of
$U_{m',l'}$ lies in $U_{m+m',l+l'}$,
and $\eta_m$ lies in $U_{0,2m}$ and $\varepsilon_m$ lies in $U_{2m,0}$.
Also $U_{m,l}$ coincides with
$\Hom_{G,R}((E_R)^{\otimes m},(E_R)^{\otimes l})$
for $m+l \le n$.
Since $E_R$ is canonically autodual, we may identify $(E_R)^{r,s}$ with $(E_R)^{\otimes (r+s)}$.
The morphism $u_{R;r,s;r',s'}$ of \eqref{e:daggerdef} may then be identified with a morphism
of $R$\nobreakdash-\hspace{0pt} modules
\[
u_{R;r,s;r',s'}:(E_R)^{\otimes (r+s)} \to (E_R)^{\otimes (r'+s')},
\]
and the isomorphism $\psi_{r,s}$ of \eqref{e:psidef} with an isomorphism
\[
\psi_{r,s}:\Hom_{G,R}((E_R)^{\otimes (r+s)},R) \xrightarrow{\sim} \Hom_G(E^{r,s},R).
\]
Then \eqref{e:psinat} still holds. Also we have a commutative square
\begin{equation}\label{e:psicompat}
\begin{CD}
E^{r_1,s_1} \otimes_\mathbf Q E^{r_2,s_2} @>{\sim}>> E^{r_1 + r_2,s_1 + s_2} \\
@V{\psi_{r_1,s_1}(f_1) \otimes_\mathbf Q \psi_{r_2,s_2}(f_2)}VV @VV{\psi_{r_1+r_2,s_1+s_2}(f)}V \\
R \otimes_\mathbf Q R @>>> R
\end{CD}
\end{equation}
where the top isomorphism is that of \eqref{e:tildedef} with $E$ for $L$, the bottom
arrow is the multiplication of $R$, and $f$ is the composite of the appropriate
symmetry of $(E_R)^{\otimes (r_1+r_2+s_1+s_2)}$ with $f_1 \otimes_R f_2$.
By Lemma~\ref{l:GL}, a non-zero $w:E^{r',0} \to E^{r,0}$ exists only if $r = r'$, when any such $w$
is a composite of symmetries and tensor products of endomorphisms of $E$.
Thus $w_{R;r',0;r,0}$ lies in $U_{r',r}$ for such a $w$, because $n \ge 2$.
Since $(-)_{R;r,s;r',s'}$ commutes with the isomorphisms $\delta$ of \eqref{e:deltaLdef}, it follows
that $w_{R;r',s';r,s}$ lies in $U_{r'+s',r+s}$ for any $w:E^{r',s'} \to E^{r,s}$.
To prove Theorem~\ref{t:num}, write
\[
W_{r,s} = \psi_{r,s}(U_{r+s,0}).
\]
Consider the smallest $G$\nobreakdash-\hspace{0pt} submodule $R'$ of $R$
such that $a:E^{r,s} \to R$ factors through $R'$ for each $r$, $s$, and $a$ in $W_{r,s}$.
By \eqref{e:psicompat}, $R'$ is a subalgebra of $R$.
Since every $E^{r,s} \to R$ lies in $W_{r,s}$ when $r+s \le n$, the algebra $R'$ contains
a set of generators of $R$.
Hence $R' = R$.
Given $a:E^{r,s} \to R$, there are thus $a_i$ in $W_{r_i,s_i}$ for $i = 1,2,\dots,t$
such that the image of $a$ lies in the sum of the images of the $a_i$.
By semisimplicity of $\mathbf REP(G,\rho)$, it follows that
\[
a = a_1 \circ w_1 + a_2 \circ w_2 + \dots + a_t \circ w_t
\]
for some $w_i$.
Hence by \eqref{e:psinat}, $a$ lies in $W_{r,s}$.
Thus $W_{r,s}= \Hom_G(E^{r,s},R)$ for every $r$ and $s$.
It follows that $U_{m,0} = \Hom_{G,R}((E_R)^{\otimes m},R)$,
and hence $U_m = \overline{CH}(X^m)_\mathbf Q$, for every $m$.
This proves Theorem~\ref{t:num}.
\section{Concluding remarks}
Theorem~\ref{t:fin} is easily generalised to the case where instead of cycles on the powers
of a single Kimura variety $X$ for ${\sim}$, we consider also cycles on products of a finite number
of such varieties: it suffices to take for $X$ their disjoint union
and to include in $Z_1$ their fundamental classes.
Similarly in the condition on $X^l \to X^m$ in \ref{i:pullpush}, we may consider
a finite number of morphisms $X^l \to X$ additional to the projections: it suffices
to include in the $Z_i$
the classes of their graphs.
Suppose for example that $X$ is an abelian variety, and
let $\Gamma$ be a finitely generated subgroup of $X(k)$.
Then we may consider in \ref{i:pullpush} pullback and push forward along any morphism
$X^l \to X^m$ which sends the identity of $X(k)^l$ to an element of $\Gamma^m$.
More generally, we can construct a small category $\mathcal V$, an equivalence $T$ from
$\mathcal V$ to the category Kimura varieties over $F$ for ${\sim}$, a filtered family
$(\mathcal V_\lambda)_{\lambda \in \Lambda}$ of (not necessarily full) subcategories $\mathcal V_\lambda$ of $\mathcal V$
with union $\mathcal V$,
and for each $\lambda$ in $\Lambda$ and $V$ in $\mathcal V_\lambda$ a
finite-dimensional graded $\mathbf Q$\nobreakdash-\hspace{0pt} subalgebra $C_\lambda(V)$ of $CH(T(V))_\mathbf Q/{\sim}$
and a filtration $C_\lambda(V)^\bullet$ on $C_\lambda(V)$,
with the following properties.
\begin{enumerate}
\renewcommand{\labelenumi}{(\alph{enumi})}
\item
Finite products exist in the $\mathcal V_\lambda$, and
the embeddings $\mathcal V_\lambda \to \mathcal V$ preserve them.
\item
We have $C_\lambda(V)^r \subset C_{\lambda'}(V)^r$ for
$\lambda \le \lambda'$ and $V$ in $\mathcal V_\lambda$, and $CH(T(V))_\mathbf Q/{\sim}$ for $V$ in $\mathcal V_\lambda$
is the union of the $C_{\lambda'}(V)$ for $\lambda' \ge \lambda$.
\item
$T(f)^*$ sends $C_\lambda(V')$ into $C_\lambda(V)$
and $T(f)_*$ sends $C_\lambda(V)$ into $C_\lambda(V')$ for $f:V \to V'$ in $\mathcal V_\lambda$,
and $T(f)^*$ and $T(f)_*$ preserve the filtrations.
\item\label{i:surjnilp}
For $V$ in $\mathcal V_\lambda$, the projection from $C_\lambda(V)$ to $\overline{CH}(T(V))_\mathbf Q$
is surjective with kernel $C_\lambda(V)^1$, and $C_\lambda(V)^r$ is $0$ for $r$ large.
\end{enumerate}
By applying the usual construction for motives (say ungraded)
to $\mathcal V$ and the $CH(T(V))_\mathbf Q/{\sim}$ we obtain a $\mathbf Q$\nobreakdash-\hspace{0pt} tensor category
$\mathcal M$ and a cohomology functor from $\mathcal V$ to $\mathcal M$,
and $T$ defines a fully faithful functor from $\mathcal M$ to $\mathcal M_{\sim}(F)$.
Similarly we obtain from $\mathcal V_\lambda$ and the $C_\lambda(V)$ a (not necessarily full)
$\mathbf Q$\nobreakdash-\hspace{0pt} tensor subcategory $\mathcal M_\lambda$ of $\mathcal M$.
Then each $\mathcal M_\lambda$ has finite-dimensional hom-spaces, and $\mathcal M$ is the filtered
union of the $\mathcal M_\lambda$.
A question involving a finite number of Kimura varieties, a finite number of morphisms
between their products, and a finite number of morphisms between the motives of such
products, thus reduces to a question in some $\mathcal V_\lambda$ and $\mathcal M_\lambda$.
By \ref{i:surjnilp}, the projection from $\mathcal M_\lambda$ to the quotient of $\mathcal M_{\sim}(F)$
by its unique maximal tensor ideal is full,
with kernel the unique maximal
tensor ideal $\mathcal J_\lambda$ of $\mathcal M_\lambda$.
Further $\mathcal M_\lambda$ is the limit of the $\mathcal M_\lambda/(\mathcal J_\lambda)^r$.
Thus we can argue by lifting successively from
the semisimple abelian category $\mathcal M_\lambda/\mathcal J_\lambda$ to the
$\mathcal M_\lambda/(\mathcal J_\lambda)^r$.
Theorems~\ref{t:fin} and \ref{t:num} extend easily to the case where the
base field $F$ is replaced by a non-empty connected smooth quasi-projective scheme $S$ over $F$.
For the category of ungraded motives over $S$ we then have $\End(\mathbf 1) = CH(S)_\mathbf Q/{\sim}$,
which is a local $\mathbf Q$\nobreakdash-\hspace{0pt} algebra with residue field $\mathbf Q$ and nilpotent
maximal ideal.
All the arguments carry over to this case, provided that Lemma~\ref{l:repfin}
is proved in the more general form where the hypothesis ``$R^G = k$'' is
replaced by ``$R^G$ is a local $k$\nobreakdash-\hspace{0pt} algebra with residue field $k$''.
\end{document} |
\begin{document}
\title[Gaussian estimates]{Gaussian estimates for fundamental solutions of
second order parabolic systems with time-independent coefficients}
\author{Seick Kim}
\address{Mathematics Department, University of Missouri, Columbia, Missouri 65211}
\email{seick@math.missouri.edu}
\subjclass[2000]{Primary 35A08, 35B45; Secondary 35K40}
\keywords{Gaussian estimates, a priori estimates, parabolic system}
\begin{abstract}
Auscher, McIntosh and Tchamitchian studied the heat kernels of second
order elliptic operators in divergence form with complex bounded measurable
coefficients on $\mathbb R^n$.
In particular, in the case when $n=2$
they obtained Gaussian upper bound estimates
for the heat kernel without imposing further assumption on the coefficients.
We study the fundamental solutions of the systems of second order
parabolic equations in the divergence form with bounded, measurable,
time-independent coefficients, and
extend their results to the systems of parabolic equations.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:I}
In 1967, Aronson \cite{Aronson} proved Gaussian upper and lower bounds
for the fundamental solutions of parabolic equations in divergence form
with bounded measurable coefficients.
To establish the Gaussian lower bound Aronson made use of the Harnack
inequality for nonnegative solutions which was proved by Moser in 1964
(see \cite{Moser}).
Related to Moser's parabolic Harnack inequality, we should mention
Nash's earlier paper \cite{Nash} where the H\"{o}lder
continuity of weak solutions to parabolic equations in divergence form was
established.
In 1985, Fabes and Stroock \cite{FS} showed that the idea of Nash
could be used to establish a Gaussian upper and lower bound on the fundamental
solution.
They showed that actually
such Gaussian estimates could be used to prove Moser's Harnack inequality.
We note that Aronson also obtained Gaussian upper bound estimates of
the fundamental solution without using Moser's Harnack inequality.
In \cite{Auscher}, Auscher proposed a new proof of Aronson's Gaussian
upper bound estimates for the fundamental solution of second order
parabolic equations with time-independent coefficients.
His method relies crucially on the assumption that the coefficients are
time-independent and thus it does not exactly reproduce Aronson's result,
which is valid even for the time-dependent coefficients case.
However, his method is interesting in the sense that it
carries over to equations with complex coefficients provided that the
complex coefficients are a small perturbation of real coefficients.
Along with this direction,
Auscher, McIntosh and Tchamitchian also showed that
the heat kernel of second order elliptic operators in divergence form
with complex bounded measurable coefficients in the two dimensional space
has a Gaussian upper bound
(see \cite{AMT} and also \cite{AT}).
We would like to point out that
a parabolic equation with complex coefficients is, in fact,
a special case of a system of parabolic equations.
From this point of view,
Hofmann and the author showed that the fundamental solution
of a parabolic system
has an upper Gaussian bound if the system is
a small perturbation of a diagonal system, which, in particular,
generalized the result of Auscher mentioned above to
the time-dependent coefficients case (see \cite{HK}).
However, the above mentioned result of Auscher, McIntosh and Tchamitchian
regarding the heat kernel of two dimensional elliptic
operators with complex coefficients
does not follow directly from our result.
One of the main goals of this article is to provide a proof that
weak solutions of the parabolic system of divergence type
with time-independent coefficients associated to an
elliptic system in two dimensions enjoy the parabolic local boundedness
property and to show that its fundamental solution has a
Gaussian upper bound. More generally, we show that if weak solutions
of an elliptic system satisfy H\"{o}lder estimates at every scale,
then weak solutions of the corresponding parabolic system with time-independent
coefficients also satisfy similar parabolic H\"{o}lder estimates
from which, in particular,
the parabolic local boundedness property follows easily.
Also, such an argument allows one to derive
H\"{o}lder continuity estimates for weak solutions of parabolic equations
with time-independent coefficients directly from
De Giorgi's theorem \cite{DG57}
on elliptic equations, bypassing Moser's parabolic Harnack inequality.
In fact, this is what Auscher really proved in the
setting of complex coefficients equations by using a functional calculus method
(see \cite{Auscher} and also \cite{AQ}, \cite{AT}).
Even in those complex coefficients settings,
we believe that our approach is much more straightforward and thus
appeals to a wider readership.
Finally, we would like to point out that
in this article,
we are mainly interested in global estimates and that
we do not attempt to treat, for example,
the systems with lower order terms, etc.
However, let us also mention that, with some extra technical details,
our methods carry over to those cases as well as to the systems of higher order;
see e.g. \cite{AQ}, \cite{AT} for the details, and also Remark~\ref{rmk:local}.
The remaining sections are organized in the following way.
In Section~\ref{sec:N} we give notations, definitions, and some known facts.
We state the main results in Section~\ref{sec:M}
and give the proofs in Section~\ref{sec:P}.
\section{Notation and definitions}
\label{sec:N}
\subsection{Geometric notation}
\begin{enumerate}
\item
$\mathbb R^n=\text{$n$-dimensional real Euclidean space.}$
\item
$x=(x_1,\cdots,x_n)$ is an arbitrary point of $\mathbb R^{n}$.
\item
$X=(x,t)$ denotes an arbitrary point in $\mathbb R^{n+1}$,
where $x\in\mathbb R^n$ and $t\in\mathbb R$.
\item
$B_r(x)=\set{y\in\mathbb R^n:\abs{y-x}<r}$ is an open ball in $\mathbb R^n$ with center
$x$ and radius $r>0$.
We sometimes drop the reference point $x$ and
write $B_r$ for $B_r(x)$ if there is no danger of confusion.
\item
$Q_r(X)=\set{(y,s)\in\mathbb R^{n+1}: \abs{y-x}<r\text{ and } t-r^2<s<t}$.
We sometimes drop the reference point $X$ and write $Q_r$ for $Q_r(X)$.
\item
$Q^{*}_r(X)=\set{(y,s)\in\mathbb R^{n+1}: \abs{y-x}<r\text{ and } t<s<t+r^2}$.
\item
$Q_{r,s}(X)=\set{(y,s)\in Q_r(X)}$; i.e.,
$Q_{r,s}(X)=B_r(x)\times\set{s}$ if $s\in(t-r^2,t)$ and
$Q_{r,s}(X)=\emptyset$ otherwise.
We sometimes drop the reference point $X$ and write $Q_{r,s}$ for $Q_{r,s}(X)$.
\item
For a cylinder $Q=\Omega\times (a,b)\subset \mathbb R^{n+1}$,
$\partial_P Q$ denotes its parabolic boundary, namely,
$\partial_P Q=\partial\Omega\times (a,b)\cup \overline{\Omega}\times\set{a}$,
where $\partial\Omega$ is the usual topological boundary of
$\Omega\subset\mathbb R^n$ and $\overline\Omega$ is its closure.
\end{enumerate}
\subsection{Notation for functions and their derivatives}
\begin{enumerate}
\item For a mapping from $\Omega\subset\mathbb R^n$ to $\mathbb R^N$,
we write $\vec{f}(x)=(f^1(x),\ldots,f^N(x))^T$ as a column vector.
\item
$\overline{f}_{Q}=\frac{1}{\abs{Q}}\int_{Q}f$,
where $\abs{Q}$ denotes the volume of $Q$.
\item
$u_t=\partial u/\partial t$.
\item
$D_{x_i} u= D_i u= u_{x_i}=\partial u/\partial x_i$.
\item
$D u=(u_{x_1},\ldots,u_{x_n})^T$ is the spatial gradient of $u=u(x,t)$.
\item
For $\vec{f}=(f^1,\ldots,f^N)^T$,
$D\vec{f}=(Df^1,\ldots,Df^N)$; that is $D\vec{f}$ is the $n\times N$ matrix
whose $i$-th column is $Df^i$.
\end{enumerate}
\subsection{Function spaces}
\begin{enumerate}
\item
For $\Omega\subset\mathbb R^n$ and $p\ge 1$, $L^p(\Omega)$ denotes the space
of functions with the following norms:
\begin{equation*}
\norm{u}_{L^p(\Omega)}=\left(\int_\Omega\abs{u(x)}^p\,dx\right)^{1/p}\quad\text{and}\quad
\norm{u}_{L^\infty(\Omega)}=\esssup_\Omega\abs{u}.
\end{equation*}
\item
$C^{\mu}(\Omega)$ denotes the space of
functions that are H\"{o}lder continuous with the exponent $\mu\in (0,1]$,
and
\begin{equation*}
[u]_{C^{\mu}(\Omega)}
=\sup_{x\neq x'\in \Omega}\frac{\abs{u(x)-u(x')}}{\abs{x-x'}^\mu}<\infty.
\end{equation*}
\item
The Morrey space $M^{2,\mu}(\Omega)$ is the set of all functions
$u\in L^2(\Omega)$ such that
\begin{equation*}
\norm{u}_{M^{2,\mu}(\Omega)}=\sup_{B_\rho(x)\subset \Omega}
\left(\rho^{-\mu}\int_{B_\rho(x)}\abs{u}^2\right)^{1/2}<\infty.
\end{equation*}
\item
$C^{\mu}_{P}(Q)$ denotes the space of
functions defined on $Q\subset\mathbb R^{n+1}$ such that
\begin{equation*}
[u]_{C^{\mu}_{P}(Q)}
=\sup_{X\neq X'\in Q}\frac{\abs{u(X)-u(X')}}{d_P(X,X')^\mu}<\infty,
\end{equation*}
where $d_P(X,X')=\max\left(\abs{x-x'},\sqrt{\abs{t-t'}}\right)$.
\end{enumerate}
\subsection{Elliptic and parabolic systems and their adjoints}
\begin{definition}
We say that the coefficients $A^{\alpha\beta}_{ij}(x)$
satisfy the uniform ellipticity condition
if there exist numbers $\nu_0, M_0>0$ such that for all $x\in\mathbb R^n$ we have
\begin{equation}
\label{eqn:para}
\ip{\vec{A}^{\alpha\beta}(x)\vec{\xi}_\beta,\vec{\xi}_\alpha}\ge
\nu_0\abs{\vec{\xi}}^2\quad\text{and }
\abs{\ip{\vec{A}^{\alpha\beta}(x)\vec{\xi}_\beta,\vec{\eta}_\alpha}}\le
M_0\abs{\vec{\xi}}\abs{\vec{\eta}},
\end{equation}
where we used the following notation.
\begin{enumerate}
\item
For $\alpha,\beta=1,\ldots,n$,
$\vec{A}^{\alpha\beta}(x)$ are
$N\times N$ matrices with $(i,j)$-entries $A^{\alpha\beta}_{ij}(x)$.
\item
$\vec{\xi}_\alpha=(\xi_\alpha^1,\cdots,\xi_\alpha^N)^T$
and $\abs{\vec{\xi}}^2=\sum\limits_{\alpha=1}^n\sum\limits_{i=1}^N\abs{\xi_\alpha^i}^2$.
\item
$\ip{\vec{A}^{\alpha\beta}(x)\vec{\xi}_\beta,\vec{\eta}_\alpha}
=\sum\limits_{\alpha,\beta=1}^n \sum\limits_{i,j=1}^N
A_{ij}^{\alpha\beta}(x)\xi_\beta^j \eta_\alpha^i$.
\end{enumerate}
We emphasize that we do not assume that the coefficients are symmetric.
\end{definition}
\begin{definition}
We say that a system of $N$ equations on $\mathbb R^n$
\begin{equation*}
\sum_{j=1}^N\sum_{\alpha,\beta=1}^n
D_{x_\alpha}(A^{\alpha\beta}_{ij}(x) D_{x_\beta}u^j)=0\qquad
(i=1,\ldots,N)
\end{equation*}
is elliptic if the coefficients satisfy
the uniform ellipticity condition.
We often write the above system in a vector form
\begin{equation}
\label{eqn:E-01}
L\vec{u}:=\sum_{\alpha,\beta=1}^n D_\alpha(\vec{A}^{\alpha\beta}(x)
D_\beta\vec{u})=0,
\quad\vec{u}=(u^1,\ldots,u^N)^T.
\end{equation}
The adjoint system of \eqref{eqn:E-01} is given by
\begin{equation}
\label{eqn:E-02}
L^{*}\vec{u}:=\sum_{\alpha,\beta=1}^n
D_\alpha\left((\vec{A}^{\alpha\beta}){}^{*}(x) D_\beta\vec{u}\right)=0,
\end{equation}
where $(\vec{A}^{\alpha\beta}){}^{*}=(\vec{A}^{\beta\alpha})^T$,
the transpose of $\vec{A}^{\beta\alpha}$.
\end{definition}
\begin{definition}
We say that a system of $N$ equations on $\mathbb R^{n+1}$
\begin{equation*}
u^i_t-\sum_{j=1}^N\sum_{\alpha,\beta=1}^n
D_{x_\alpha}(A^{\alpha\beta}_{ij}(x) D_{x_\beta}u^j)=0\qquad
(i=1,\ldots,N)
\end{equation*}
is parabolic if the (time-independent) coefficients satisfy
the uniform ellipticity condition.
We often write the above system in a vector form
\begin{equation}
\label{eqn:P-01}
\vec{u}_t-L\vec{u}
:=\vec{u}_t-\sum_{\alpha,\beta=1}^n D_\alpha(\vec{A}^{\alpha\beta}(x)
D_\beta\vec{u})=0.
\end{equation}
The adjoint system of \eqref{eqn:P-01} is given by
\begin{equation}
\label{eqn:P-02}
\vec{u}_t+L^{*}\vec{u}
:=\vec{u}_t+\sum_{\alpha,\beta=1}^n
D_\alpha\left((\vec{A}^{\alpha\beta}){}^{*}(x) D_\beta\vec{u}\right)=0,
\end{equation}
where $(\vec{A}^{\alpha\beta}){}^{*}=(\vec{A}^{\beta\alpha})^T$,
the transpose of $\vec{A}^{\beta\alpha}$.
\end{definition}
\subsection{Weak solutions}
In this article, the term ``weak solution'' is used in a rather abusive way.
To avoid unnecessary technicalities,
we may assume that all the coefficients involved are smooth so that
all weak solutions are indeed classical solutions.
However, this extra smoothness assumption will not
be used quantitatively in our estimates.
This is why we shall make clear the dependence of constants.
\begin{enumerate}
\item
We say that $\vec{u}$ is a weak solution of \eqref{eqn:E-01} in $\Omega\subset\mathbb R^n$ if $\vec{u}$ is a (classical) solution of \eqref{eqn:E-01} in $\Omega$ and $\vec{u}, D\vec{u}\in L^2(\Omega)$.
\item
We say that $\vec{u}$ is a weak solution of \eqref{eqn:P-01} in a cylinder
$Q=\Omega\times (a,b)\subset\mathbb R^{n+1}$ if $\vec{u}$
is a (classical) solution of \eqref{eqn:P-01} in $Q$ and
$\vec{u}, D\vec{u}\in L^2(Q)$, $\vec{u}(\cdot,t)\in L^2(\Omega)$ for all
$a\le t\le b$, and $\sup_{a\le t\le b}
\norm{\vec{u}(\cdot,t)}_{L^2(\Omega)}<\infty$.
\end{enumerate}
\subsection{Fundamental solution}
By a fundamental solution (or fundamental matrix) $\vec{\Gamma}(x,t;y)$
of the parabolic system \eqref{eqn:P-01}
we mean an $N\times N$ matrix of functions defined for $t>0$ which,
as a function of $(x,t)$, is a solution of \eqref{eqn:P-01}
(i.e., each column is a solution of \eqref{eqn:P-01}),
and is such that
\begin{eqnarray}
\lim_{t\downarrow 0}\int_{\mathbb R^n}\vec{\Gamma}(x,t;y)\vec{f}(y)\,dy
=\vec{f}(x)
\end{eqnarray}
for any bounded continuous function $\vec{f}=(f^1,\ldots,f^N)^T$,
where $\vec{\Gamma}(x,t;y)\vec{f}(y)$ denotes the usual matrix multiplication.
\subsection{Notation for estimates}
We employ the letter $C$ to denote a universal constant usually depending
on the dimension and ellipticity constants.
It should be understood that $C$ may vary from line to line.
We sometimes write $C=C(\alpha,\beta,\ldots)$ to emphasize the
dependence on the prescribed quantities $\alpha,\beta,\ldots$.
\subsection{Some preliminary results and known facts}
\begin{lemma}[Energy estimates]
\label{lem:P-03}
Let $\vec{u}$ be a weak solution of \eqref{eqn:P-01} in $Q_R=Q_R(X)$.
Then for $0<r<R$, we have
\begin{equation*}
\sup_{t-r^2\le s\le t}
\int_{Q_{r,s}}\abs{\vec{u}(\cdot,s)}^2+
\int_{Q_r}\abs{D\vec{u}}^2\le \frac{C}{(R-r)^2} \int_{Q_R}\abs{\vec{u}}^2.
\end{equation*}
\end{lemma}
\begin{proof}
See e.g., \cite[Lemma 2.1, p. 139]{LSU}.
\end{proof}
\begin{lemma}[Parabolic Poincar\'{e} inequality]
\label{lem:P-02}
Let $\vec{u}$ be a weak solution of \eqref{eqn:P-01} in $Q_R=Q_R(X)$.
Then there is some constant $C=C(n,M_0)$ such that
\begin{equation*}
\int_{Q_R}\abs{\vec{u}-\overline{\vec{u}}_{Q_R}}^2\le C R^2
\int_{Q_R}\abs{D\vec{u}}^2.
\end{equation*}
\end{lemma}
\begin{proof}
See e.g., \cite[Lemma 3]{Struwe}.
\end{proof}
\begin{lemma}
\label{lem:P-04}
Let $Q_{2R}=Q_{2R}(X_0)$ be a cylinder in $\mathbb R^{n+1}$.
Suppose $\vec{u}\in L^2(Q_{2R})$ and
there are positive constants $\mu\le 1$ and $M$ such that
for any $X\in Q_R$ and any $r\in (0,R)$ we have
\begin{equation*}
\int_{Q_r(X)}\abs{\vec{u}-\overline{\vec{u}}_{Q_{r}(X)}}^2\le M^2r^{n+2+2\mu}.
\end{equation*}
Then $\vec{u}$ is H\"{o}lder continuous in $Q_R$ with the exponent $\mu$
and $[\vec{u}]_{C^{\mu}_P(Q_R)}\le C(n,\mu)M$.
\end{lemma}
\begin{proof}
See e.g., \cite[Lemma 4.3, p. 50]{L}.
\end{proof}
\begin{definition}[Local boundedness property]
We say that the system \eqref{eqn:P-01}
satisfies the local boundedness property for weak solutions
if there is a constant
$M$ such that all weak solutions $\vec{u}$ of \eqref{eqn:P-01}
in $Q_{2r}(X)$ satisfy the estimates
\begin{equation*}
\sup_{Q_r(X)}\abs{\vec{u}}\le M\left(\frac{1}{\abs{Q_{2r}}}
\int_{Q_{2r}(X)}\abs{\vec{u}}^2 \right)^{1/2}.
\end{equation*}
Similarly, we say that the adjoint system \eqref{eqn:P-02}
satisfies the local boundedness property if the corresponding estimates hold
for weak solutions $\vec{u}$ of \eqref{eqn:P-02} in $Q_{2r}^{*}(X)$.
\end{definition}
\begin{theorem}[Theorem~1.1, \cite{HK}]
\label{thm:P-01}
Assume that the system \eqref{eqn:P-01} and its adjoint system
\eqref{eqn:P-02}
satisfy the local boundedness property for weak solutions.
Then the fundamental solution of the system \eqref{eqn:P-01} has an
upper bound
\begin{equation}
\abs{\vec{\Gamma}(x,t;y)}_{op}\le C_0
t^{-{n/2}}\exp\left(-\frac{k_0\abs{x-y}^2}{t}\right),
\end{equation}
where $\abs{\vec{\Gamma}(x,t;y)}_{op}$ denotes the operator norm
of the fundamental matrix $\vec{\Gamma}(x,t;y)$.
Here, $C_0=C_0(n,\nu_0,M_0,M)$ and $k_0=k_0(\nu_0,M_0)$.
\end{theorem}
\section{Main results}
\label{sec:M}
\begin{definition}
We say that an elliptic system \eqref{eqn:E-01}
satisfies the H\"{o}lder estimates for weak solutions at every scale
if there exist constants $\mu_0>0$ and $H_0$ such that
all weak solutions $\vec{u}$ of the system
in $B_{2r}=B_{2r}(x_0)$ satisfy the following estimates
\begin{equation}
\label{eqn:M-04}
[\vec{u}]_{C^{\mu_0}(B_r)}\le H_0 r^{-(n/2+\mu_0)}
\norm{\vec{u}}_{L^2(B_{2r})}.
\end{equation}
Similarly, we say that a parabolic system \eqref{eqn:P-01}
satisfies H\"{o}lder estimates for weak solutions at every scale
if there exist constants $\mu_1>0$ and $H_1$ such that
all weak solutions $\vec{u}$ of the system
in $Q_{2r}=Q_{2r}(X_0)$ satisfy the following estimates
\begin{equation}
\label{eqn:M-05}
[\vec{u}]_{C^{\mu_1}_P(Q_r)}\le H_1 r^{-(n/2+1+\mu_1)}
\norm{\vec{u}}_{L^2(Q_{2r})}.
\end{equation}
\end{definition}
\begin{remark}
Elliptic systems with constant coefficients satisfy the above property,
and in that case, the ellipticity condition \eqref{eqn:para}
can be weakened and replaced by the Legendre-Hadamard condition.
De Giorgi's theorem \cite{DG57} states that the property is satisfied if $N=1$.
The property is also satisfied if $n=2$ and
it is due to Morrey (see Corollary~\ref{thm:M-04}).
Some other examples include, for instance,
a certain three dimensional elliptic system which was studied
by Kang and the author in \cite{KK}.
\end{remark}
We shall prove the following main results in this paper:
\begin{theorem}
\label{thm:M-02}
If an elliptic system \eqref{eqn:E-01} satisfies
the H\"{o}lder estimates for weak solutions at every scale, then
the corresponding parabolic system \eqref{eqn:P-01} with time-independent
coefficients also satisfies the H\"{o}lder estimates for weak solutions at
every scale.
\end{theorem}
\begin{theorem}
\label{thm:M-03}
Suppose that the elliptic system \eqref{eqn:E-01}
and its adjoint system \eqref{eqn:E-02} defined on $\mathbb R^n$
both satisfy the H\"{o}lder estimates for weak solutions at every scale with
constants $\mu_0, H_0$.
Let $\vec{\Gamma}(x,t;y)$ be the fundamental solution of
the parabolic system
\eqref{eqn:P-01}
with the time-independent coefficients
associated to the elliptic system \eqref{eqn:E-01}.
Then $\vec{\Gamma}(x,t;y)$ has an upper bound
\begin{equation}
\label{bound}
\abs{\vec{\Gamma}(x,t;y)}_{op}\le C_0
t^{-n/2}\exp\left(-\frac{k_0\abs{x-y}^2}{t}\right),
\end{equation}
where $C_0=C_0(n,\nu_0,M_0,\mu_0,H_0)$ and $k_0=k_0(\nu_0,M_0)$.
Here, $\abs{\vec{\Gamma}(x,t;y)}_{op}$ denotes the operator norm of fundamental
matrix $\vec{\Gamma}(x,t;y)$.
\end{theorem}
\begin{remark}
\label{rmk:local}
We would like to point out that \eqref{bound} is a global estimate.
Especially, the bound \eqref{bound} holds for all time $t>0$.
Suppose that the elliptic system \eqref{eqn:E-01} and its adjoint system
\eqref{eqn:E-02} enjoy the H\"{o}lder estimates
for weak solutions up to a fixed scale $R_0$; that is,
there is a number $R_0>0$ such that if $\vec{u}$ is a weak solution of
either \eqref{eqn:E-01} or \eqref{eqn:E-02} in $B_r=B_r(x)$
with $0<r\le R_0$, then
$\vec{u}$ is H\"{o}lder continuous and satisfies
\begin{equation*}
[\vec{u}]_{C^{\mu_0}(B_r)}\le H_0 r^{-(n/2+\mu_0)}
\norm{\vec{u}}_{L^2(B_{2r})}.
\end{equation*}
Then, the statement regarding the bound \eqref{bound}
for the fundamental solution should be localized as follows:
For any given $T>0$, there are constants
$k_0=k_0(\nu_0,M_0)$
and $C_0=C_0(n,\nu_0,M_0,\mu_0,H_0,R_0,T)$
such that \eqref{bound} holds for $0<t\le T$.
\end{remark}
\begin{corollary}
\label{thm:M-04}
Let $\vec{\Gamma}(x,t;y)$ be the fundamental solution
of the parabolic system
\eqref{eqn:P-01}
with time-independent coefficients
associated to an elliptic
system \eqref{eqn:E-01} defined on $\mathbb R^2$.
Then $\vec{\Gamma}(x,t;y)$ has an upper bound
\eqref{bound} with the constants $C_0, k_0$
depending only on the ellipticity constants
$\nu_0, M_0$.
\end{corollary}
\begin{proof}
First, let us recall the well known theorem of Morrey
which states that any two dimensional elliptic system
\eqref{eqn:E-01} with bounded measurable coefficients
satisfies the H\"{o}lder estimates
for weak solutions at every scale, with the constants $\mu_0, H_0$
depending only on the ellipticity constants
(see, \cite[pp. 143--148]{Morrey}).
Next, note that the ellipticity constants $\nu_0, M_0$ in
\eqref{eqn:para} remain unchanged for
$\tilde{A}{}^{\alpha\beta}_{ij}(x)=A^{\beta\alpha}_{ji}(x)$.
Therefore, the corollary is an immediate consequence of Theorem~\ref{thm:M-03}.
\end{proof}
\begin{remark}
In fact, the converse of Theorem~\ref{thm:P-01} is also true
(see \cite[Theorem~1.2]{HK}).
Therefore, in order to extend the above corollary to
the parabolic system with time-dependent coefficients, one needs to show
that the system satisfies the local boundedness property for weak solutions.
Unfortunately, we do not know whether it is true or not if the coefficients
are allowed to depend on the time variable.
If $n\ge 3$, it is not true in general,
even for the time-independent coefficients case
since there is a famous counter-example due to De Giorgi (see \cite{DG68}).
\end{remark}
\section{Proof of Main Results}
\label{sec:P}
\subsection{Some technical lemmas and proofs}
\begin{lemma}
\label{lem:M-01}
If $\vec{u}$ is a weak solution of
the parabolic system with time-independent coefficients \eqref{eqn:P-01}
in $Q_{R}=Q_{R}(X_0)$, then
$\vec{u}_t\in L^2(Q_{r})$ for $r<R$ and
satisfies the estimates
\begin{equation}
\norm{\vec{u}_t}_{L^2(Q_{r})} \le C(R-r)^{-1} \norm{D\vec{u}}_{L^2(Q_{R})}.
\end{equation}
In particular,
if $\vec{u}$ is a weak solution of \eqref{eqn:P-01}
in $Q_{2r}$, then the above estimates together with the energy estimates
yield
\begin{equation}
\label{eqn:M-00}
\norm{\vec{u}_t}_{L^2(Q_{r})} \le Cr^{-2}
\norm{\vec{u}}_{L^2(Q_{2r})}.
\end{equation}
\end{lemma}
\begin{proof}
We first note that if the coefficients are symmetric,
(i.e., $A_{ij}^{\alpha\beta}=A_{ji}^{\beta\alpha}$)
this is a well known result; a proof for such a case
is found, for example,
in \cite[pp. 172--181]{LSU} or in \cite[pp. 360--364]{Evans}.
However, the standard proof does not carry over to the non-symmetric
coefficients case and for that reason, we provide a self-contained proof here.
Fix positive numbers $\sigma,\tau$ such that $\sigma<\tau\le R$.
Let $\zeta$ be a smooth cut-off function such that $\zeta\equiv 1$
in $Q_\sigma$, vanishes near $\partial_P Q_{\tau}$, and
satisfies
\begin{equation*}
\label{eqn:K-01}
0\le \zeta \le 1\quad\text{and}\quad
\abs{\zeta_t}+\abs{D\zeta}^2 \le C(\tau-\sigma)^{-2}.
\end{equation*}
Note that on each slice $Q_{\tau,s}$, we have
\begin{equation*}
\begin{split}
0&=\int_{Q_{\tau,s}}\left(\vec{u}_t-D_{\alpha}(\vec{A}^{\alpha\beta}D_\beta
\vec{u})\right)\cdot\zeta^2\vec{u}_t\\
&=\int_{Q_{\tau,s}}\zeta^2\abs{\vec{u}_t}^2+\int_{Q_{\tau,s}}\zeta^2
\ip{\vec{A}^{\alpha\beta}D_\beta\vec{u},D_\alpha\vec{u}_t}+
\int_{Q_{\tau,s}}
2\zeta\ip{\vec{A}^{\alpha\beta}D_\beta\vec{u},D_\alpha\zeta\vec{u}_t}.
\end{split}
\end{equation*}
Therefore, we find by using the Cauchy-Schwarz inequality that
\begin{equation*}
\begin{split}
\int_{Q_{\tau,s}}\zeta^2\abs{\vec{u}_t}^2
&\le C\int_{Q_{\tau,s}}\zeta^2\abs{D\vec{u}}\abs{D\vec{u}_t}
+C\int_{Q_{\tau,s}}\zeta\abs{D\vec{u}}\abs{D\zeta}\abs{\vec{u}_t}\\
&\le \frac{\epsilon}{2}\int_{Q_{\tau,s}}\zeta^2\abs{D\vec{u}_t}^2+
\frac{C}{\epsilon}\int_{Q_{\tau,s}}\zeta^2\abs{D\vec{u}}^2+
C\int_{Q_{\tau,s}}\abs{D\zeta}^2\abs{D\vec{u}}^2 \\
&+\frac{1}{2}\int_{Q_{\tau,s}}\zeta^2\abs{\vec{u}_t}^2.
\end{split}
\end{equation*}
Thus we have
\begin{equation}
\label{eqn:K-02}
\qquad
\int_{Q_\tau}\zeta^2\abs{\vec{u}_t}^2
\le \epsilon\int_{Q_\tau}\zeta^2\abs{D\vec{u}_t}^2+
\frac{C}{\epsilon}\int_{Q_\tau}\zeta^2\abs{D\vec{u}}^2+
C\int_{Q_\tau}\abs{D\zeta}^2\abs{D\vec{u}}^2.
\end{equation}
Since $\vec{u}_t$
also satisfies \eqref{eqn:P-01}, the energy estimates
yield
\begin{equation}
\label{eqn:K-03}
\int_{Q_{\tau}}\zeta^2\abs{D\vec{u}_t}^2
\le \frac{C_0}{(\tau-\sigma)^2}\int_{Q_{\tau}}\abs{\vec{u}_t}^2.
\end{equation}
This is the part where we exploit the assumption
that the coefficients are time-independent.
Combining \eqref{eqn:K-02} and \eqref{eqn:K-03}, we have
\begin{equation*}
\int_{Q_{\sigma}}\abs{\vec{u}_t}^2
\le \frac{C_0\epsilon}{(\tau-\sigma)^2}\int_{Q_{\tau}}\abs{\vec{u}_t}^2+
\frac{C}{\epsilon}\int_{Q_\tau}\abs{D\vec{u}}^2+
\frac{C}{(\tau-\sigma)^2}\int_{Q_{\tau}}\abs{D\vec{u}}^2.
\end{equation*}
If we set $\epsilon=(\tau-\sigma)^2/2C_0$, we finally obtain
\begin{equation*}
\int_{Q_{\sigma}}\abs{\vec{u}_t}^2
\le \frac{1}{2}\int_{Q_{\tau}}\abs{\vec{u}_t}^2+
\frac{C}{(\tau-\sigma)^2}\int_{Q_{\tau}}\abs{D\vec{u}}^2.
\end{equation*}
Here, we emphasize that
$C$ is a constant independent of $\sigma,\tau$.
Then by a standard iteration argument
(see e.g. \cite[Lemma~{3.1}, p. 161]{Giaq83}),
we have
\begin{equation}
\label{eqn:K-05}
\int_{Q_{r}}\abs{\vec{u}_t}^2
\le \frac{C}{(R-r)^2}\int_{Q_{R}}\abs{D\vec{u}}^2
\quad\text{for } 0<r<R.
\end{equation}
The proof is complete.
\end{proof}
\begin{lemma}
\label{lem:M-02}
If $\vec{u}$ is a weak solution of
the parabolic system with time-independent coefficients \eqref{eqn:P-01}
in $Q_{2r}=Q_{2r}(X_0)$, then
$D\vec{u}(\cdot,s), \vec{u}_t(\cdot,s)\in L^2(Q_{r,s})$
for all $s\in[t_0-r^2,t_0]$,
and satisfy the following estimates uniformly in $s\in[t_0-r^2,t_0]$.
\begin{eqnarray}
\label{eqn:M-11}
\norm{D\vec{u}(\cdot,s)}_{L^2(Q_{r,s})}
\le C r^{-2} \norm{\vec{u}}_{L^2(Q_{2r})}, \\
\label{eqn:M-12}
\norm{\vec{u}_t(\cdot,s)}_{L^2(Q_{r,s})}
\le C r^{-3} \norm{\vec{u}}_{L^2(Q_{2r})}.
\end{eqnarray}
\end{lemma}
\begin{proof}
By the energy estimates applied to $\vec{u}_t$ we obtain
\begin{equation}
\label{eqn:M-13}
\sup_{t_0-r^2\le s\le t_0}\int_{Q_{r,s}}\abs{\vec{u}_t(\cdot,s)}^2
\le \frac{C}{r^2}\int_{Q_{3r/2}}
\abs{\vec{u}_t}^2.
\end{equation}
On the other hand, the estimates \eqref{eqn:K-05} and
the energy estimates (this time, applied to $\vec{u}$ itself) yield
\begin{eqnarray}
\label{eqn:K-12}
\int_{Q_{3r/2}} \abs{\vec{u}_t}^2 \le
\frac{C}{r^2} \int_{Q_{7r/4}} \abs{D\vec{u}}^2\le
\frac{C}{r^4} \int_{Q_{2r}}\abs{\vec{u}}^2.
\end{eqnarray}
Combining \eqref{eqn:M-13} and \eqref{eqn:K-12} together,
we have the estimates \eqref{eqn:M-12}.
Next, assume that $\vec{u}$ is a weak solution
of \eqref{eqn:P-01} in $Q_{4r}=Q_{4r}(X_0)$.
Let $\zeta$ be a smooth cut-off function such that
$\zeta\equiv 1$ in $Q_r$, vanishes near $\partial_P Q_{2r}$, and satisfies
\begin{equation}
\label{eqn:K-30}
0\le \zeta \le 1\quad\text{and}\quad
\abs{\zeta_t}+\abs{D\zeta}^2 \le Cr^{-2}.
\end{equation}
Note that on each slice $Q_{2r,s}$, we have
\begin{equation*}
\begin{split}
0&=\int_{Q_{2r,s}}\left(\vec{u}_t-D_{\alpha}(\vec{A}^{\alpha\beta}D_\beta
\vec{u})\right)\cdot\zeta^2\vec{u}\\
&=\int_{Q_{2r,s}}\zeta^2\vec{u}_t\cdot\vec{u}+\int_{Q_{2r,s}}\zeta^2
\ip{\vec{A}^{\alpha\beta}D_\beta\vec{u},D_\alpha\vec{u}}+
\int_{Q_{2r,s}}
2\zeta\ip{\vec{A}^{\alpha\beta}D_\beta\vec{u},D_\alpha\zeta\vec{u}}.
\end{split}
\end{equation*}
Using the ellipticity condition and the Cauchy-Schwarz inequality,
we find
\begin{equation*}
\begin{split}
\nu_0\int_{Q_{2r,s}}\zeta^2\abs{D\vec{u}}^2
&\le \int_{Q_{2r,s}}\zeta^2\abs{\vec{u}_t}\abs{\vec{u}}
+C\int_{Q_{2r,s}}\zeta\abs{D\vec{u}}\abs{D\zeta}\abs{\vec{u}}\\
&\le \frac{\epsilon\nu_0}{2}\int_{Q_{2r,s}}\zeta^2\abs{\vec{u}_t}^2+
\frac{C}{\epsilon}\int_{Q_{2r,s}}\zeta^2\abs{\vec{u}}^2+
C\int_{Q_{2r,s}}\abs{D\zeta}^2\abs{\vec{u}}^2 \\
&+\frac{\nu_0}{2}\int_{Q_{2r,s}}\zeta^2\abs{D\vec{u}}^2.
\end{split}
\end{equation*}
Then by \eqref{eqn:K-30}, \eqref{eqn:M-12},
and the energy estimates, for all $s\in[t_0-r^2,t_0]$, we have
\begin{equation}
\label{eqn:M-99}
\begin{split}
\int_{Q_{r,s}}\abs{D\vec{u}}^2
&\le \epsilon\int_{Q_{2r,s}}\abs{\vec{u}_t}^2+
\frac{C}{\epsilon}\int_{Q_{2r,s}}\abs{\vec{u}}^2+
\frac{C}{r^2}\int_{Q_{2r,s}}\abs{\vec{u}}^2 \\
&\le \frac{C\epsilon}{r^6}\int_{Q_{4r}}\abs{\vec{u}}^2+
\frac{C}{\epsilon r^2}\int_{Q_{4r}}\abs{\vec{u}}^2+
\frac{C}{r^4}\int_{Q_{4r}}\abs{\vec{u}}^2.
\end{split}
\end{equation}
If we set $\epsilon=r^2$, then the above estimates \eqref{eqn:M-99}
now become
\begin{equation*}
\int_{Q_{r,s}}\abs{D\vec{u}}^2 \le
\frac{C}{r^4}\int_{Q_{4r}}\abs{\vec{u}}^2,
\end{equation*}
from which the estimates \eqref{eqn:M-11} follow
by a well known covering argument.
\end{proof}
\begin{lemma}
\label{lem:M-03}
Assume that the elliptic system \eqref{eqn:E-01} satisfies
the H\"{o}lder estimates for weak solutions at every scale with constants
$\mu_0, H_0$.
Let $\vec{u}$ be a weak solution of the inhomogeneous elliptic system
\begin{equation}
D_{\alpha}(\vec{A}^{\alpha\beta}(x) D_{\beta}\vec{u})
=\vec{f}\quad\text{in}\quad B_{2}=B_{2}(x_0),
\end{equation}
where $\vec{f}$ belongs to the Morrey space $M^{2,\lambda}(B_{2})$
with $\lambda\ge 0$.
Then, for any $\gamma\ge 0$
with $\gamma<\gamma_0=\min(\lambda+4,n+2\mu_0)$
(we may take $\gamma=\gamma_0$ if $\gamma_0<n$)
there exists
$C=C(n,\nu_0,M_0,\mu_0,H_0,\lambda,\gamma)$ such that
$\vec{u}$ satisfies the following local estimates
\begin{equation}
\label{eqn:M-08}
\int_{B_r(x)}\abs{D\vec{u}}^2
\le C \left( r^{\gamma-2}
\int_{B_2}\abs{D\vec{u}}^2+ r^{\gamma-2}
\norm{\vec{f}}_{M^{2,\lambda}(B_2)}^2
\right)
\end{equation}
uniformly for all $x\in B_1=B_1(x_0)$ and $0<r\le 1$.
Moreover,
if $\gamma<n$,
then $\vec{u}$ belongs to the
Morrey space $M^{2,\gamma}(B_{1})$ and
\begin{equation}
\label{eqn:M-10}
\norm{\vec{u}}_{M^{2,\gamma}(B_{1})}\le
C\left(\norm{\vec{u}}_{L^2(B_{2})}+
\norm{D\vec{u}}_{L^{2}(B_{2})}+
\norm{\vec{f}}_{M^{2,\lambda}(B_2)}\right).
\end{equation}
\end{lemma}
\begin{proof}
First, we note that the property \eqref{eqn:M-04} implies that
for all $0<\rho<r$ and $x\in\mathbb R^n$, we have
\begin{equation*}
\int_{B_\rho(x)}\abs{D\vec{u}}^2\le C\cdot H_0
\left(\frac{\rho}{r}\right)^{n-2+2\mu_0}
\int_{B_r(x)}\abs{D\vec{u}}^2.
\end{equation*}
In the light of the above observation,
the estimates \eqref{eqn:M-08} are quite standard and are found, for example,
in \cite[Chapter~3]{Giaq83}. Then, by Poincar\'{e} inequality we have
\begin{equation}
\label{eqn:M-07}
\int_{B_r(x)}\abs{\vec{u}-\overline{\vec{u}}_{B_r(x)}}^2
\le C r^{\gamma} \left(
\norm{D\vec{u}}_{L^2(B_2)}^2+
\norm{\vec{f}}_{M^{2,\lambda}(B_2)}^2\right)
\end{equation}
uniformly for all $x\in B_1=B_1(0)$ and $0<r\le 1$.
It is well known that if $\gamma<n$, then the estimates \eqref{eqn:M-07}
yield \eqref{eqn:M-10} (see e.g. \cite[Chapter~3]{Giaq83}).
\end{proof}
\subsection{Proof of Theorem~\ref{thm:M-02}}
Let
$\vec{u}$ be a weak solution of \eqref{eqn:P-01} in a cylinder $Q_4=Q_4(0)$.
We rewrite \eqref{eqn:P-01} as $L\vec{u}=\vec{u}_t$.
By Lemma~\ref{lem:M-02}, we find that
$\vec{u}_t(\cdot,s)$ is in $L^2(Q_{2,s})$ and
satisfies
\begin{displaymath}
\norm{\vec{u}_t(\cdot,s)}_{L^2(Q_{2,s})}\le C \norm{\vec{u}}_{L^2(Q_4)}
\quad\text{for all }-4\le s\le 0.
\end{displaymath}
Therefore, we may apply Lemma~\ref{lem:M-03} with $\vec{f}=\vec{u}_t$
and $\lambda=0$, and then apply Lemma~\ref{lem:M-02} to find that
for all $x\in B_{1}(0)$ and $0<r\le 1$, we have
\begin{equation}
\label{eqn:X-11}
\begin{split}
\int_{B_{r}(x)}\abs{D\vec{u}(\cdot,s)}^2
&\le C r^{\gamma-2} \left(
\norm{D\vec{u}(\cdot,s)}_{L^2(Q_{2,s})}^2+
\norm{\vec{u}_t(\cdot,s)}_{L^{2}(Q_{2,s})}^2\right)\\
&\le C r^{\gamma-2} \norm{\vec{u}}_{L^{2}(Q_{4})}^2
\quad\text{uniformly in }s\in[-4,0]
\end{split}
\end{equation}
for all $\gamma<\min(4,n+2\mu_0)$.
By Lemma~\ref{lem:P-02} and then by \eqref{eqn:X-11} we find that
for all $X=(x,t)\in Q_1$ and $r\le 1$
\begin{equation}
\label{eqn:X-12}
\begin{split}
\int_{Q_r(X)}\abs{\vec{u}-\overline{\vec{u}}_{Q_r(X)}}^2
&\le C r^2\int_{t-r^2}^t\int_{B_{r}(x)}
\abs{D\vec{u}(y,s)}^2\,dy\,ds \\
&\le C r^{2+\gamma} \norm{\vec{u}}_{L^{2}(Q_{4})}^2.
\end{split}
\end{equation}
Note that if $n\le 3$, then we may write $\gamma=n+2\mu$ for some $\mu>0$.
In that case, \eqref{eqn:X-12} now reads
\begin{equation}
\int_{Q_r(X)}\abs{\vec{u}-\overline{\vec{u}}_{Q_r(X)}}^2
\le C r^{n+2+2\mu} \norm{\vec{u}}_{L^{2}(Q_{4})}^2
\end{equation}
for all $X\in Q_1$ and $r\le 1$.
Therefore, if $n\le 3$, then Lemma~\ref{lem:P-04} yields the estimates
\begin{equation}
\label{eqn:X-13}
[\vec{u}]_{C^{\mu}_P(Q_{1/2})}
\le C \norm{\vec{u}}_{L^{2}(Q_{4})}.
\end{equation}
We have thus shown that in the case when $n\le 3$, any weak solution
$\vec{u}$ of \eqref{eqn:P-01}
in a cylinder $Q_4=Q_4(0)$ satisfies
the above a priori estimates \eqref{eqn:X-13}
provided that the associated elliptic system
satisfies the H\"{o}lder estimates for weak solutions at every scale.
The general case is recovered as follows.
For given $X_0=(x_0,t_0)$ and $r>0$,
let us consider the new system
\begin{equation}
\label{eqn:scale}
\vec{u}_t-\tilde{L}\vec{u}
:=\vec{u}_t-\sum_{\alpha,\beta=1}^n
D_\alpha(\tilde{\vec{A}}{}^{\alpha\beta}(x) D_\beta\vec{u})=0,
\end{equation}
where $\tilde{\vec{A}}{}^{\alpha\beta}(x)=\vec{A}^{\alpha\beta}(x_0+rx)$.
Note that the associated elliptic system $\tilde{L}\vec{u}=0$
also satisfies the H\"{o}lder estimates for weak solutions at every scale.
Moreover, the ellipticity constants $\nu_0, M_0$ remain the same for
the new coefficients $\tilde{\vec{A}}{}^{\alpha\beta}$.
Let $\vec{u}$ be a weak solution of \eqref{eqn:P-01} in $Q_{4r}(X_0)$.
Then $\tilde{\vec{u}}(X)=\tilde{\vec{u}}(x,t):=\vec{u}(x_0+rx,t_0+r^2t)$
is a weak solution of \eqref{eqn:scale} in $Q_{4}(0)$ and thus
$\tilde{\vec{u}}$ satisfies
the estimates \eqref{eqn:X-13}. By rescaling back to $Q_{4r}(X_0)$,
the estimates \eqref{eqn:X-13} become
\begin{equation}
\label{eqn:X-16}
[\vec{u}]_{C^{\mu}_P(Q_{r/2})}
\le C r^{-(n/2+1+\mu)}\norm{\vec{u}}_{L^{2}(Q_{4r})}.
\end{equation}
Thus, when $n\le 3$, the theorem now
follows from a well known covering argument.
In the case when $n\ge 4$, we invoke a bootstrap argument.
For the sake of simplicity, let us momentarily assume that $4\le n \le 7$.
Let $\vec{u}$ be a weak solution of \eqref{eqn:P-01} in
$Q_{8}=Q_{8}(0)$.
Let us fix $X_0=(x_0,t_0)\in Q_2(0)$ and observe that
$\vec{u}_t$ also satisfies the system \eqref{eqn:P-01} in $Q_{4}(X_0)$.
Thus, by a similar argument that led to \eqref{eqn:X-11}, we find
that for all $x\in B_{1}(x_0)$ and $0<r\le 1$ we have
\begin{equation}
\int_{B_{r}(x)}\abs{D\vec{u}_t(\cdot,s)}^2
\le C r^{\gamma-2} \norm{\vec{u}_t}_{L^{2}(Q_{4}(X_0))}^2
\quad\text{uniformly in }s\in [t_0-4,t_0],
\end{equation}
for all $\gamma<4$ (we may take $\gamma=4$ if $n>4$).
Then, by \eqref{eqn:M-10} in Lemma~\ref{lem:M-03},
Lemma~\ref{lem:M-01}, and Lemma~\ref{lem:M-02} we conclude that
\begin{equation}
\label{eqn:X-14}
\norm{\vec{u}_t(\cdot,s)}_{M^{2,\gamma}(B_{1}(x_0))}
\le C \norm{\vec{u}}_{L^{2}(Q_{8}(0))}
\quad\text{for all }s\in [t_0-4,t_0].
\end{equation}
Since the above estimates \eqref{eqn:X-14} hold
for all $X_0=(x_0,t_0)\in Q_{2}(0)$,
we find that, in particular,
$\vec{u}_t(\cdot, s)$ belongs to $M^{2,\gamma}(B_{2}(0))$
for all $-4\le s \le 0$,
and satisfies
\begin{equation}
\label{eqn:X-15}
\norm{\vec{u}_t(\cdot,s)}_{M^{2,\gamma}(B_{2}(0))}
\le C \norm{\vec{u}}_{L^{2}(Q_{8}(0))}
\quad\text{for all }s\in [-4,0],
\end{equation}
where we also used \eqref{eqn:M-12} of Lemma~\ref{lem:M-02}.
The above estimates \eqref{eqn:X-15} for $\vec{u}_t$ now
allows us to invoke Lemma~\ref{lem:M-03} with $\vec{f}=\vec{u}_t$ and
$\lambda=\gamma$. Then, by Lemma~\ref{lem:M-03} and Lemma~\ref{lem:M-02},
we find that for all $x\in B_{1}(0)$ and $0<r\le 1$, we have
\begin{equation*}
\int_{B_{r}(x)}\abs{D\vec{u}(\cdot,s)}^2
\le C r^{\overline{\gamma}-2} \norm{\vec{u}}_{L^{2}(Q_{8}(0))}^2
\quad\text{uniformly in }s\in [-4,0]
\end{equation*}
for all $\overline{\gamma}<\min(\gamma+4,n+2\mu_0)$.
Since we assume that $n\le 7$, we may write
$\overline{\gamma}=n+2\overline{\mu}$ for
some $\overline{\mu}>0$.
By the exactly same argument we used in the case when $n\le 3$,
we derive the estimates
\begin{equation*}
[\vec{u}]_{C^{\mu}_P(Q_{1/2})}
\le C \norm{\vec{u}}_{L^{2}(Q_{8})},
\end{equation*}
and the theorem follows as before.
Finally, if $n\ge 8$, we repeat the above process;
if $\vec{u}$ is a weak solution of \eqref{eqn:P-01} in $Q_{16}(0)$,
then $\vec{u}_t(\cdot,s)$ is
in $M^{2,\gamma}(B_{1}(0))$ for all $\gamma<8$ and so on.
The process cannot go on indefinitely and
it stops in $k=[n/4]+1$ steps. The proof is complete.
\hfil\qed
\subsection{Proof of Theorem~\ref{thm:M-03}}
The proof is based on Theorem~\ref{thm:P-01}, the proof of which, in turn,
is found in \cite{HK}.
By Theorem~\ref{thm:P-01}, we only need to establish the local boundedness
property for weak solutions of the parabolic system \eqref{eqn:P-01}
and for those of its adjoint system \eqref{eqn:P-02}.
From the hypothesis that the elliptic system \eqref{eqn:E-01}
satisfies the H\"{o}lder estimates for weak solutions at every scale,
we find, by Theorem~\ref{thm:M-02}, that the parabolic system
\eqref{eqn:P-01}
with the associated time-independent coefficients
also satisfies the H\"{o}lder estimates
for weak solutions at every scale; that is,
there exist some constants $\mu>0$ and $C$,
depending on the prescribed quantities,
such that if $\vec{u}$ is a
weak solution of \eqref{eqn:P-01} in $Q_{4r}(X)$, then it satisfies
the estimates
\begin{equation*}
[\vec{u}]_{C^{\mu}_P(Q_{2r})}\le C r^{-(n/2+1+\mu)}
\norm{\vec{u}}_{L^2(Q_{4r})}.
\end{equation*}
Let us fix $Y\in Q_r=Q_{r}(X)$.
Then, for all $Z\in Q_{r}(Y)\subset Q_{2r}(X)$, we have
\begin{equation}
\label{eqn:final}
\abs{\vec{u}(Y)}
\le \abs{\vec{u}(Z)}+
d_P(Y,Z)^{\mu}\cdot
[\vec{u}]_{C^{\mu}_P(Q_{2r})}
\le \abs{\vec{u}(Z)}+ C r^{-(n/2+1)}
\norm{\vec{u}}_{L^2(Q_{4r})}.
\end{equation}
By averaging \eqref{eqn:final} over $Q_{r}(Y)$ with respect to $Z$,
we derive
(note $\abs{Q_r}=Cr^{n+2}$)
\begin{equation*}
\abs{\vec{u}(Y)}
\le C r^{-(n+2)} \norm{\vec{u}}_{L^1(Q_r(Y))}
+ C r^{-(n/2+1)} \norm{\vec{u}}_{L^2(Q_{4r})}.
\end{equation*}
Since $Y\in Q_r$ is arbitrary,
we find, by H\"{o}lder's inequality, that $\vec{u}$ satisfies
\begin{equation*}
\norm{\vec{u}}_{L^\infty(Q_r)}
\le C r^{-(n/2+1)} \norm{\vec{u}}_{L^2(Q_{4r})}
\end{equation*}
for some constant $C=C(n,\nu_0,M_0,\mu_0,H_0)$.
To finish the proof, we also need to show that
if $\vec{u}$ is a weak solution
of the adjoint system \eqref{eqn:P-02} in $Q^{*}_{4r}=Q^{*}_{4r}(X)$,
then it satisfies
the local boundedness property
\begin{equation}
\label{last}
\norm{\vec{u}}_{L^\infty(Q^{*}_r)}
\le C r^{-(n/2+1)} \norm{\vec{u}}_{L^2(Q^{*}_{4r})}.
\end{equation}
The verification of \eqref{last} requires
only a slight modification of the previous
arguments (mostly, one needs to replace $Q_r$ by $Q_r^{*}$ and so on),
but it is rather routine and we skip the details.
\hfil\qed
\end{document} |
\begin{document}
\title{Bell inequalities from group actions: Three parties and non-Abelian groups}
\author{V. U\u{g}ur G\"{u}ney and Mark Hillery}
\affiliation{Department of Physics, Hunter College of the City University of New York, 695 Park Avenue, New York, NY 10065 USA \\ Graduate Center of the City University of New York, 365 Fifth Avenue, New York, NY 10016}
\begin{abstract}
In a previous publication, we showed how group actions can be used to generate Bell inequalities. The group action yields a set of measurement probabilities whose sum is the basic element in the inequality. The sum has an upper bound if the probabilities are a result of a local, realistic theory, but this bound can be violated if the probabilities come from quantum mechanics. In our first paper, we considered the case of only two parties making the measurements and single-generator groups. Here we show that the method can be extended to three parties, and it can also be extended to non-Abelian groups. We discuss the resulting inequalities in terms of nonlocal games.
\end{abstract}
\pacs{03.65.Ud}
\maketitle
\section{Introduction}
A Bell inequality is an inequality containing probabilities of measurement results that will be obeyed by probabilities resulting from a local, realistic theory \cite{bell}. Initial interest in them was confined to people working in the foundations of quantum mechanics, but more recently they have provided the basis for protocols in quantum cryptography and for tests of entanglement. There is now an extensive literature on the subject, and considerable progress has been made in classifying and tabulating Bell inequalities. Two recent reviews provide a good overview of the field \cite{brunner,liang}.
The standard scenario for a Bell inequality is that there are $N$ parties making measurements, each party can make one of $M$ possible measurements, and each measurement has $K$ outcomes. Perhaps the most famous Bell inequality, the Clauser-Horne-Shimony-Holt (CHSH) inequality, is for the case $N=M=K=2$ \cite{clauser}. Kaszlikowski et al.\ showed that by increasing the number of outcomes, $K$, one could more strongly violate local realism \cite{kaszlikowski}. The case of full correlation Bell inequalities for $M=K=2$ and general $N$ has been fully characterized by Werner and Wolf \cite{werner}. Bell inequalities for the case $N=2$, $M=2$, and general $K$ were found by Collins et al.\ \cite{collins}, and this was generalized to the case of general $N$, $M=2$, and general $K$ by Son et al.\ \cite{son}.
Recently, an approach to Bell inequalities based on graph theory was developed by Cabello, Severini, and Winter \cite{cabello}. The vertices of a graph correspond to events, where an event is a particular choice of measurement and a measurement outcome for each party. The probabilities of these events are what appear in the inequality, in particular their sum. Two vertices are connected by an edge if the events corresponding to them cannot be true simultaneously. The properties of the graph can be used to find an upper bound to the classical sum of the probabilities, that is the sum when the probabilities come from a local, realistic theory, and also an upper bound to the quantum sum, where the probabilities come from quantum mechanics.
In a previous paper, we explored an approach to Bell inequalities based on group actions of single generator Abelian groups \cite{guney}. In that case the Bell inequalities also involve sums of the probabilities of events, but instead of starting from a graph, we start from a group. The events are generated by the application of operators that form a representation of a group to an initial state. As an example, suppose we have two parties, Alice and Bob, and they each have a qubit so that their joint states are elements of a tensor product Hilbert space, $\mathbb{C}^{2}\otimes \mathbb{C}^{2}$. We will denote the computational, or $z$, basis of $\mathbb{C}^{2}$ by $\{ |0\rangle , |1\rangle \}$ and the $x$ basis by $\{ |\pm x\rangle = (|0\rangle \pm |1\rangle )/\sqrt{2} \}$. Consider the operator $U= |+x\rangle\langle +x| -i|-x\rangle\langle -x|$, and note that $U^{2}= |+x\rangle\langle +x|-|-x\rangle\langle-x| = \sigma_{x}$, and $U^{4}=I$. We have that $\{j\rightarrow U^{j}|j=0,1,2,3\}$ is a representation of the cyclic group $\mathbb{Z}_{4}$, the group of addition modulo $4$, and so is $\{ j\rightarrow U^{j}\otimes U^{j}|j=0,1,2,3\}$. The map from $|\Psi\rangle \in \mathbb{C}^{2}\otimes \mathbb{C}^{2}$ and $j\in \mathbb{Z}_{4}$ to the state $U^{j}\otimes U^{j} |\Psi\rangle$ is an example of a group action \cite{rotman}.
The definition of a group action is the following. If $G$ is a group and $X$ is a set, a group action is a function $\alpha: G\times X\rightarrow X$ such that $\alpha (e,x)=x$ and $\alpha (g,\alpha (h,x))=\alpha (gh,x)$. Here $e,g,h\in G$ and $e$ is the identity element of the group. The subset of $X$ given by $\{ \alpha (g,x)| g\in G\}$ is called the orbit of $x$. Any two orbits are either distinct or identical, so the set of orbits forms a partition of $X$. In the case of our example, the orbits will be sets of the form $\{ U^{j}\otimes U^{j} |\Psi\rangle |j=0,1,2,3\}$. Now let us set $|\Psi\rangle = |0,0\rangle$, in which case the orbit is $\{ |0,0\rangle , |v_{0},v_{0}\rangle , |1,1\rangle , |v_{1},v_{1}\rangle \}$, where $|v_{j}\rangle =U|j\rangle$ for $j=0,1$. Define the observables for Alice to be $a_{0}=|1\rangle\langle 1|$ and $a_{1}=|v_{1}\rangle\langle v_{1}|$ and similarly for Bob, $b_{0}=|1\rangle\langle 1|$ and $b_{1}=|v_{1}\rangle\langle v_{1}|$. The orbit then corresponds to the events $(a_{0}=0, b_{0}=0)$, $(a_{1}=0,b_{1}=0)$, $(a_{0}=1, b_{0}=1)$, and $(a_{1}=1,b_{1}=1)$. We can also choose a second orbit starting with the state $|0,v_{0}\rangle$, which generates four more events. We will not demonstrate this here (a closely related example appeared in \cite{guney}), but the sum of the probabilities for these eight events cannot be larger than $3$ if the probabilities come from a local realistic theory, whereas it can reach the value of $2+\sqrt{2}$ if the probabilities come from quantum mechanics.
In this paper we would like to extend these results in two directions. First, we previously only considered the case of two parties. In the next section we again consider cyclic groups but for the case of three parties. In the following section, we go back to the case of two parties, but look at non-Abelian groups, in particular dihedral groups. We compare the nonlocal games that result from Bell inequalities for the cyclic group $\mathbb{Z}_{6}$ and the dihedral group $D_{3}$, both of which have $6$ members. Finally, we show how group representation theory can be used to find quantum states that violate a Bell inequality that results from the dihedral group $D_{6}$, a group with $12$ members.
\section{Three-party case}
We shall consider a scenario in which three parties, Alice, Bob, and Charlie, share a system of three particles, each party possessing one of the particles. Each of them can measure one of two observables, and for each observable the possible values for the result of the measurement are $0$, $1$, or $2$. Alice's observables are $a_{0}$ and $a_{1}$, Bob's are $b_{0}$ and $b_{1}$, and Charlie's are $c_{0}$ and $c_{1}$.
In the quantum mechanical version of this scenario, Alice, Bob, and Charlie, each has a qutrit. The computational basis is $\{ |j\rangle\, |\, j=0,1,2\}$ and corresponds to the observables $a_{0}$, $b_{0}$, and $c_{0}$; for example, $a_{0}=|1\rangle\langle 1| + 2|2\rangle\langle 2|$, and similarly
for $b_{0}$ and $c_{0}$. In order to define a second basis, consider the operator
\begin{equation}
U=|w_{0}\rangle\langle w_{0}| + e^{-i\pi /3} |w_{1}\rangle\langle w_{1}| + e^{i\pi /3}|w_{2}\rangle\langle w_{2}| ,
\end{equation}
where
\begin{equation}
|w_{j}\rangle = \frac{1}{\sqrt{3}} \sum_{k=0}^{2} e^{2\pi ijk/3} |k\rangle ,
\end{equation}
for $j=0,1,2$. We have that $U^{2}=T$, where $T|j\rangle = |j+1\rangle$, and the addition is modulo $3$. Note that $U^{6}=I$, which implies that $\{ U^{m}|\, m=0,1,\ldots 5\}$ is a representation of the group $\mathbb{Z}_{6}$, addition modulo $6$. We now define a second basis, $\{ |v_{j}\rangle = U|j\rangle\, |\, j=0,1,2\}$, and this basis corresponds to the observables $a_{1}$, $b_{1}$, and $c_{1}$, where, for example, $a_{1}=|v_{1}\rangle\langle v_{1}| + 2|v_{2}\rangle\langle v_{2}|$. Each of the three parties can measure their qutrit in either of the two bases. We now choose four states, and apply to each of them the operator $(U\otimes U\otimes U)^{m}$, for $m=0, 1, \ldots 5$, which generates a total of $24$ states. The four states are:
\begin{equation}
\label{3-party-orbits}
|021\rangle \hspace{5mm} |00v_{1}\rangle \hspace{5mm} |0v_{0}0\rangle \hspace{5mm} |v_{0}20\rangle .
\end{equation}
Note that in the first state all three bases are the same, in the second, the first two bases are the same, in the third, the first and third bases are the same, and in the fourth the second and third bases are the same. This feature remains the same under application of $(U\otimes U\otimes U)^{m}$, so, for example, for all states generated by application of this operator to the first state the bases will be the same. Note that these basis combinations exhaust all possible choices of measurement bases by the parties. Because there are only two measurement bases, either each party chooses the same basis or two of them do and the third party chooses a different one. This second alternative can happen in three different ways. This results in $24$ three-qutrit states each of which is a product of single-qutrit states from one of the two bases.
Note that $m\in \mathbb{Z}_{6}\rightarrow (U\otimes U\otimes U)^{m}$ is also a representation of $\mathbb{Z}_{6}$. Application of these operators to $\mathbb{C}^{3}\otimes \mathbb{C}^{3}\otimes \mathbb{C}^{3}$ defines a group action, and the set resulting from the application of all six operators to a particular state defines the orbit associated with that state. Two orbits are either distinct or identical. Each of the four states in Eq.\ (\ref{3-party-orbits}) generates an orbit. These states were found by means of a random search in the set of states that are threefold tensor products of the states in the computational and $v$ bases. The search identified sets of states whose orbits lead to Bell inequality violations. More details of how the random searches in this paper are performed can be found in Appendix A.
Each of the $24$ states in our set corresponds to a particular choice of measurements by the three parties and a particular set of measurement results. For example, the state $|00v_{1}\rangle$ corresponds to Alice measuring $a_{0}$ and obtaining $0$, Bob measuring $b_{0}$ and obtaining $0$, and Charlie measuring $c_{1}$ and obtaining $1$. In order to maximize the sum of probabilities corresponding to these measurement choices and the specified results, we need to find the state, $|\phi\rangle$, that maximizes the expectation value $\langle\phi |A| \phi\rangle$, where
\begin{equation}
A= \sum_{m=0}^{5} (U\otimes U\otimes U)^{m} L (U^{\dagger}\otimes U^{\dagger}\otimes U^{\dagger})^{m} ,
\end{equation}
and
\begin{eqnarray}
L & = & |021\rangle\langle 021|+ |00v_{1}\rangle\langle 00v_{1}| + |0v_{0}0\rangle\langle 0v_{0}0|
\nonumber \\
& & + |v_{0}20\rangle\langle v_{0}20| .
\end{eqnarray}
The expectation value of $A$ in the state $|\phi\rangle$ is just the sum of the 24 probabilities if Alice, Bob, and Charlie share the three-qutrit state $|\phi\rangle$. The largest value of the expectation value occurs when $|\phi\rangle$ is the eigenstate of $A$ corresponding to its largest eigenvalue.
We want to find the largest eigenvalue of $A$, but before proceeding let us note something that will simplify the calculation. If we define $B=U\otimes U\otimes U$ then we see that the eigenstates of $B$ are states of the form $|w_{j}\rangle |w_{k}\rangle |w_{l}\rangle$, and the possible eigenvalues are $1$, $-1$, $e^{\pm i\pi /3}$, and $e^{\pm 2\pi i/3}$, all of which are degenerate. Let $P_{\lambda}$ be the projection onto the subspace corresponding to the eigenvalue, $\lambda$, of $B$. Because $[P_{\lambda},B]=0$, we have that
\begin{eqnarray}
\label{3-party-proj}
A & = & \left(\sum_{\lambda}P_{\lambda}\right) \sum_{j=0}^{5} B^{j}L(B^{\dagger})^{j}\left( \sum_{\lambda^{\prime}} P_{\lambda^{\prime}} \right) \nonumber \\
& = & \sum_{\lambda} \sum_{\lambda^{\prime}} \left( \sum_{j=0}^{5} \lambda^{j}(\lambda^{\prime \ast})^{j} \right) P_{\lambda}L P_{\lambda^{\prime}} \nonumber \\
& = & \sum_{\lambda} \sum_{\lambda^{\prime}} 6 \delta_{\lambda , \lambda^{\prime}} P_{\lambda}L P_{\lambda^{\prime}} \nonumber \\
& = & 6 \sum_{\lambda} P_{\lambda}L P_{\lambda} .
\end{eqnarray}
Therefore, in order to diagonalize $A$ we only have to diagonalize it within the subspaces corresponding to the eigenvalues of $B$.
We find that the eigenvector corresponding to the maximum eigenvalue of $A$ lies in the subspace where $B$ has an eigenvalue of $1$. This space is seven dimensional, so we are faced with diagonalizing a seven-dimensional matrix. However, because of the form of the matrix,
\begin{equation}
M=\sum_{j=1}^{4} |\mu_{j}\rangle\langle \mu_{j}| ,
\end{equation}
where $|\mu_{1}\rangle = P_{1}|021\rangle$, $|\mu_{2}\rangle = P_{1}|00v_{1}\rangle$, $|\mu_{3}\rangle = P_{1}|0v_{0}0\rangle$, and $|\mu_{4}\rangle = P_{1}|v_{0}20\rangle$, the problem can be reduced to a four-dimensional one. If we express the eigenvector as $\sum_{j=1}^{4} c_{j}|\mu_{j}\rangle$, then the eigenvalue equation becomes
\begin{equation}
\sum_{k=1}^{4} |\mu_{k}\rangle \left( \sum_{j=1}^{4}c_{j}\langle \mu_{k}|\mu_{j}\rangle \right) = \lambda
\sum_{k=1}^{4}c_{k}|\mu_{k}\rangle .
\end{equation}
Finding the overlaps of the vectors, we obtain
\begin{equation}
\frac{1}{27}\left(\begin{array}{cccc} 7 & 2 & -1 & -1 \\ 2 & 7 & -1 & -1\\ -1 & -1 & 7 & -1 \\ -1 & -1 & -1 & 7 \end{array} \right) \left( \begin{array}{c} c_{1} \\ c_{2} \\ c_{3} \\ c_{4} \end{array} \right) = \lambda \left( \begin{array}{c} c_{1} \\ c_{2} \\ c_{3} \\ c_{4} \end{array} \right) .
\end{equation}
The largest eigenvalue is $10/27$, which gives $6(10/27) = 20/9$ (see Eq.\ (\ref{3-party-proj})) as the largest eigenvalue of $A$. The eigenvector is given in the computational basis by
\begin{eqnarray}
\label{3-party-state}
|\phi\rangle & = & \frac{1}{30\sqrt{3}} [ -10 (|000\rangle + |111\rangle + |222\rangle ) + 14 ( |001\rangle
\nonumber \\
& & + |112\rangle + |220\rangle ) + 11(|002\rangle + |110\rangle \nonumber \\
& & + |221\rangle ) -7 ( |010\rangle + |121\rangle + |202\rangle ) \nonumber \\
& & - ( |011\rangle + |022\rangle + |100\rangle + |122\rangle \nonumber \\
&& +|200\rangle + |211\rangle ) -4(|012\rangle + |020\rangle \nonumber \\
& & + |101\rangle + |120\rangle + |201\rangle + |212\rangle )\nonumber \\
& & + 20 (|021\rangle + |102\rangle + |210\rangle ) ] .
\end{eqnarray}
The maximum value of the sum of the classical probabilities is $2$, so we get a violation.
The classical bound is found by assuming that there is a joint distribution for all of the variables, $P(a_{0},b_{0},c_{0};a_{1},b_{1},c_{1})$. Then each of the $24$ measurement probabilities can be expressed in terms of the joint distribution, and their sum can be expressed as
\begin{equation}
\sum_{j=0}^{1} \sum_{a_{j},b_{j},c_{j}=0}^{2} c_{a_{0},b_{0},c_{0};a_{1},b_{1},c_{1}} P(a_{0},b_{0},c_{0};a_{1},b_{1},c_{1}) ,
\end{equation}
where the $c_{a_{0},b_{0},c_{0};a_{1},b_{1},c_{1}}$ are integers. The sum is maximized when the probability distribution is chosen to have the value $1$ for one of the values of $(a_{0},b_{0},c_{0},a_{1},b_{1},c_{1})$ corresponding to the largest value of $c_{a_{0},b_{0},c_{0};a_{1},b_{1},c_{1}}$, which implies that the maximum value of the sum is just the maximum value of $c_{a_{0},b_{0},c_{0};a_{1},b_{1},c_{1}}$. In this case we find that the largest value is $2$, which is, therefore, the largest value the sum of the probabilities can assume.
It is also possible to phrase this inequality as a nonlocal game. Alice, Bob, and Charlie each receive a bit, $s$, $t$, and $u$, respectively, where $s,t,u=0$ or $1$ from an arbitrator. Each then transmits to the arbitrator a $0$, a $1$, or a $2$. The arbitrator then decides whether they have won the game. The winning conditions depend on $s$, $t$, and $u$ and the values returned by Alice, Bob and Charlie. For this game, the winning conditions are listed in Table 1. The values of $(s,t,u)$ are listed on the left, and the corresponding winning combinations of the values sent by Alice, Bob and Charlie are listed on the right. The values of $(s,t,u)$ are grouped according to which of the values are the same. The first two have all the values the same, the next two have their first two values the same, the next two have the first and third values the same, and the last two have the second and third values the same. The sum of the digits in the winning sequences modulo $3$ in each row is the same, $0$ in rows $1,\ 2,\ 5,\ 8$, $1$ in rows $3,\ 6$, and $2$ in rows $4,7$.
\newline
\begin{table}
\centering
\begin{tabular}{|c|c|} \hline
s,t,u & Alice, Bob, Charlie \\ \hline
$000$ & $021$, $102$, $210$ \\ $111$ & $021$, $102$, $210$ \\ $001$ & $001$, $112$, $220$ \\
$110$ & $002$, $110$, $221$ \\ $010$ & $000$, $111$, $222$ \\ $101$ & $010$, $121$, $202$ \\
$100$ & $020$, $101$, $212$ \\ $011$ & $012$, $120$, $201$ \\ \hline
\end{tabular}
\caption{Winning conditions for the nonlocal game}
\end{table}
Let us now look at classical strategies for winning this game. We assume that all of the eight possible values of the triplet $(s,t,u)$ are equally likely. We then note that if Alice always returns $0$, Bob always returns $2$ and Charlie always returns $1$, then they win the game with a probability of $1/4$. The next step is to show that this is the best that can be done. A deterministic classical strategy can be specified by three functions, $f_{A}(s)$, $f_{B}(t)$, and $f_{C}(u)$. Each of these functions takes values in the set $\{ 0,1,2\}$, and if Alice receives the value $s$, she returns $f_{A}(s)$, and similarly for Bob and Charlie. Let $F(a,b,c;s,t,u)$, where $a,b,c\in \{ 0,1,2\}$ be equal to $1$ when $(a,b,c;s,t,u)$ is a winning condition for the game and $0$ otherwise. Then the success probability for the strategy represented by $f_{A}(s)$, $f_{B}(t)$, and $f_{C}(u)$ is
\begin{equation}
\label{classwinstrat}
\frac{1}{8} \sum_{a,b,c=0}^{2} \sum_{s,t,u=0}^{1} F(a,b,c;s,t,u) \delta_{a,f_{A}(s)} \delta_{b,f_{B}(t)} \delta_{c,f_{C}(u)} .
\end{equation}
Let us compare this with the expression for the sum of our $24$ probabilities, which can be expressed as
\begin{equation}
\sum_{a,b,c=0}^{2} \sum_{s,t,u=0}^{1} F(a,b,c;s,t,u) p(a_{s}=a,b_{t}=b,c_{u}=c) ,
\end{equation}
which, we found, is bounded above by $2$ if the probabilities $p(a_{s}=a,b_{t}=b,c_{u}=c)$ are derived from a joint distribution. Noting that $\delta_{a,f_{A}(s)} \delta_{b,f_{B}(t)} \delta_{c,f_{C}(u)}$ can be derived from a joint distribution, in particular
\begin{eqnarray}
P(a_{0},b_{0},c_{0};a_{1},b_{1},c_{1}) & = & \delta_{a_{0},f_{A}(0)} \delta_{a_{1},f_{A}(1)} \nonumber \\ & & \delta_{b_{0},f_{B}(0)} \delta_{b_{1},f_{B}(1)} \nonumber \\
& & \delta_{c_{0},f_{C}(0)}\delta_{c_{1},f_{C}(1)} ,
\end{eqnarray}
we see that the sum in Eq.\ (\ref{classwinstrat}) is less than or equal to $2$, which implies that the classical winning strategy must be less than or equal to $1/4$. A deterministic strategy is an optimal one \cite{cleve}, so this means that the maximum classical probability of winning the game is $1/4$.
In the quantum strategy, Alice, Bob, and Charlie share the quantum state in Eq.\ (\ref{3-party-state}) and make measurements on their respective qutrits. Which measurement they make is dictated by the value of the bit they receive from the arbitrator. In particular, Alice measures $a_{s}$, Bob measures $b_{t}$, and Charlie measures $c_{u}$. They then just send the results of their measurements to the arbitrator. Their probability of winning is just $1/8$ times the sum of the probabilities of the winning configurations, which we have seen is $20/9$. This gives an overall probability of $5/18$, which is approximately $0.28$, and this is greater than the winning probability of the classical strategy.
\section{Dihedral groups}
So far, in this paper and in our previous one, we have only made use of Abelian groups to generate Bell inequalities. Now we would like to show, by way of an example, that non-Abelian groups can also be used. We shall give two examples using dihedral groups. There is a family of dihedral groups, and the dihedral group $D_{n}$ is the group consisting of rotations and reflections in the plane that leave an $n$-sided regular polygon invariant. It is generated by two elements, a rotation, $r$, by angle $2\pi /n$ and a reflection $s$. For example, in the case of an equilateral triangle, the reflection would be about an axis passing through one of the vertices and the midpoint of the opposite side. $D_{n}$ has $2n$ elements, $e$, $r^{j}$ for $j=1,2,\ldots n-1$, and $r^{j}s$ for $j=1,2,\ldots n-1$, where $e$ is the identity element. The group is specified by its presentation, which consists of the elements $r$ and $s$, and the relations $r^{n}=e$, $s^{2}=e$, and $srs=r^{-1}$.
Our approach will make use of the representations of dihedral groups. We will look at two examples, one making use of $D_{3}$ and the other making use of $D_{6}$.
\subsection{$D_{3}$}
The group $D_{3}$ consists of rotations and reflections in the plane that leave an equilateral triangle invariant. It consists of the elements $\{ e,r,r^{2}, s, rs, r^{2}s \}$, where $r^{3}=e$ and $s^{2}=e$. The group has three conjugacy classes $C_{e}=\{ e\}$, $C_{r}=\{ r,r^{2}\}$, and $C_{s}=\{ s, rs, r^{2}s \}$. It has three irreducible representations, $\Gamma^{(j)}$ for $j=1,2,3$, where $\Gamma^{(1)}$ and $\Gamma^{(2)}$ are one-dimensional and $\Gamma^{(3)}$ is two dimensional. The character table for the group is given in Table 2.
\begin{table}
\centering
\begin{tabular}{|c|c|c|c|} \hline
& $C_{e}$ & $C_{r}$ & $C_{s}$ \\ \hline $\Gamma^{(1)}$ & $1$ & $1$& $1$ \\ \hline $\Gamma^{(2)}$ & $1$ & $1$ & $-1$ \\ \hline $\Gamma^{(3)}$ & $2$ & $-1$ & $0$ \\ \hline
\end{tabular}
\caption{Character table for $D_{3}$.}
\end{table}
For the representation $\Gamma^{(3)}$, we can take for the matrices corresponding to $r$ and $s$
\begin{equation}
U=\left( \begin{array}{cc} -1/2 & -\sqrt{3}/2 \\ \sqrt{3}/2 & -1/2 \end{array} \right) \hspace{5mm} V=\left( \begin{array}{cc} 1 & 0 \\ 0 & -1 \end{array}\right) ,
\end{equation}
respectively, where these matrices are expressed in the computational basis $\{ |0\rangle ,|1\rangle \}$. Let us now define three bases of $\mathbb{C}^{2}$, which will correspond to eigenstates of observables (see Figure 1). We start with the basis $\{ |+x\rangle , |-x\rangle \}$, where $|\pm x\rangle = (|0\rangle \pm |1\rangle )/\sqrt{2}$. In addition, define $\{ |u_{0}\rangle = U|+x\rangle ,\ |u_{1}\rangle =U|-x\rangle \}$ and
$\{ |v_{0}\rangle = U^{2}|+x\rangle ,\ |v_{1}\rangle =U^{2}|-x\rangle \}$. Noting that $V|\pm x\rangle = |\mp x\rangle$, we see that if we apply the matrices corresponding to the remaining group elements, $V$, $UV$, or $U^{2}V$ to the states $|\pm x\rangle$ we will just obtain elements of one of the three bases we have just defined.
\begin{figure}
\caption{The three different bases. Solid is $|\pm x\rangle$, dashed is $\{ |u_{0}
\label{di3_bases}
\end{figure}
We now consider a situation in which two parties, Alice and Bob, perform measurements on two qubits, each party possessing one of the qubits. Each party can perform one of three measurements, and each measurement has two possible outcomes. Alice's observables are $a_{0}$, $a_{1}$, and $a_{2}$, and Bob's are $b_{0}$, $b_{1}$, and $b_{2}$, and in each case the result of the measurement will be either $0$ or $1$. In the case that the measurements are described by quantum mechanics, we will have $a_{0}=|-x\rangle\langle -x|$, $a_{1} =|u_{1}\rangle\langle u_{1}|$, and $a_{2}=|v_{1}\rangle\langle v_{1}|$, and similarly for the $b_{j}$, $j=0,1,2$.
Next we find a set of probabilities whose sum will give us a Bell inequality. We begin with the representation of $D_{3}$ on $\mathbb{C}^{2}\otimes \mathbb{C}^{2}$ given by $g\in D_{3} \rightarrow \Gamma^{(3)}(g)\otimes \Gamma^{(3)}(g)$, where $\Gamma^{(3)}(g)$ is the matrix in the representation $\Gamma^{(3)}$ corresponding to the group element $g$ (this matrix will be a product of powers of the matrices $U$ and $V$). Application of these matrices to elements of $\mathbb{C}^{2}\otimes \mathbb{C}^{2}$ gives us a group action, and we are going to be interested in particular orbits. Starting with the state $|+x,+x\rangle$ and applying to it the matrices $\Gamma^{(3)}(g)\otimes \Gamma^{(3)}(g)$, for all $g\in D_{3}$, we have the orbit
\begin{equation}
\{ |+x,+x\rangle , |u_{0},u_{0}\rangle , |v_{0},v_{0}\rangle , |-x,-x\rangle , |u_{1},u_{1}\rangle , |v_{1},v_{1}\rangle \} .
\end{equation}
Projections onto these states correspond to the measurement probabilities for Alice and Bob
\begin{eqnarray}
\label{orbit-prob-1}
\{ p(a_{0}=0,b_{0}=0), p(a_{1}=0, b_{1}=0), \nonumber \\
p(a_{2}=0, b_{2}=0), p(a_{0}=1,b_{0}=1), \nonumber \\
p(a_{1}=1,b_{1}=1), p(a_{2}=1,b_{2}=1) \} .
\end{eqnarray}
We obtain a second orbit by starting with $|-x,v_{0}\rangle$,
\begin{equation}
\{ |-x,v_{0}\rangle , |u_{1},+x\rangle , |v_{1},u_{0}\rangle , |+x,u_{1}\rangle , |u_{0},v_{1}\rangle , |v_{0},-x\rangle \} .
\end{equation}
Projections onto these states correspond to the measurement probabilities
\begin{eqnarray}
\label{orbit-prob-2}
\{ p(a_{0}=1,b_{2}=0), p(a_{1}=1, b_{0}=0), \nonumber \\
p(a_{2}=1, b_{1}=0), p(a_{0}=0,b_{1}=1), \nonumber \\
p(a_{1}=0,b_{2}=1), p(a_{2}=0,b_{0}=1) \} .
\end{eqnarray}
We now want to find the quantum state that maximizes the sum of the probabilities in Eqs.\ (\ref{orbit-prob-1}) and (\ref{orbit-prob-2}). This can be accomplished by finding the largest eigenvalue of the operator
\begin{equation}
A=\sum_{g\in D_{3}} \left( \Gamma^{(3)}(g)\otimes \Gamma^{(3)}(g)\right) L \left( \Gamma^{(3)}(g)\otimes \Gamma^{(3)}(g)\right)^{\dagger} ,
\end{equation}
where $L=|+x,+x\rangle\langle +x,+x|+ |-x,v_{0}\rangle\langle -x,v_{0}|$, and its corresponding eigenstate. The expectation value of $A$ in a state $|\phi\rangle \in \mathbb{C}^{2}\otimes \mathbb{C}^{2}$ is just the sum of the probabilities in Eqs.\ (\ref{orbit-prob-1}) and (\ref{orbit-prob-2}) for that state.
One can simply do a brute force calculation to find the largest eigenvalue of $A$, but it is also possible to make use of group representation theory. The representation $\Gamma^{(3)}\otimes \Gamma^{(3)}$ is reducible, and can be split into its irreducible components
\begin{equation}
\Gamma^{(3)}\otimes \Gamma^{(3)} = \Gamma^{(1)}\oplus \Gamma^{(2)}\oplus \Gamma^{(3)} ,
\end{equation}
where each irreducible component acts on an invariant subspace. This follows from the relation \cite{cornwell}
\begin{equation}
\label{decomposition}
n_{p}=\frac{1}{|G|} \sum_{g\in G}\chi(g) \chi^{(p)}(g)^{\ast} ,
\end{equation}
which gives the number of times, $n_{p}$, an irreducible representation of a group $G$, $\Gamma^{(p)}$, appears in the decomposition of a representation $\Gamma$. Here $|G|$ is the order (number of elements) of $G$, $\chi (g)$ is the character of $\Gamma (g)$, and $\chi^{(p)}(g)$ is the character of $\Gamma^{(p)}(g)$.
We find that the space corresponding to $\Gamma^{(1)}$ is spanned by $(|00\rangle + |11\rangle )/\sqrt{2}$, the space corresponding to $\Gamma^{(2)}$ is spanned by $(|01\rangle - |10\rangle )/\sqrt{2}$, and the space corresponding to $\Gamma^{(3)}$ is spanned by $(|00\rangle - |11\rangle )/\sqrt{2}$ and $(|01\rangle + |10\rangle )/\sqrt{2}$.
We can now make use of the following relation \cite{cornwell}. If $G$ is a group and $\Gamma^{(p)}$ and $\Gamma^{(q)}$ are irreducible representations of $G$, then
\begin{equation}
\label{sum-two-irreps}
\frac{1}{|G|} \sum_{g\in G} \Gamma^{(p)}(g)_{jk}^{\ast} \Gamma^{(q)}(g)_{st} = \frac{1}{d_{p}}\delta_{pq} \delta_{js} \delta_{kt} ,
\end{equation}
where $d_{p}$ is the dimension of the irreducible representation $\Gamma^{(p)}$. Now let $\{ |\alpha_{j}^{(p)}\rangle \}$ be an orthonormal basis of a carrier space for the irreducible representation $\Gamma^{(p)}$, $|X_{p}\rangle$ a vector in that space, $\{ |\beta_{j}^{(q)}\rangle \}$ an orthonormal basis for a carrier space for the irreducible representation $\Gamma^{(q)}$, and $|X_{q}\rangle$ a vector in that space. We then have that
\begin{eqnarray}
\frac{1}{|G|} \sum_{g\in G}\langle \alpha_{j}^{(p)}|\Gamma^{(p)}(g)|X_{p}\rangle \langle X_{q}|\Gamma^{(q)\dagger}(g)|\beta_{j^{\prime}}^{(q)}\rangle \nonumber \\
= \frac{1}{|G|} \sum_{g\in G} \sum_{k,k^{\prime}} \langle \alpha_{j}^{(p)}|\Gamma^{(p)}(g)|\alpha_{k}^{(p)}\rangle \langle \beta_{j^{\prime}}^{(q)}| \Gamma^{(q)}(g)|\beta_{k^{\prime}}^{(q)}\rangle^{\ast} \nonumber \\
\langle \alpha_{k}^{(p)}|X_{p}\rangle \langle X_{q}|\beta_{k^{\prime}}^{(q)}\rangle .
\end{eqnarray}
Making use of Eq.\ (\ref{sum-two-irreps}) this becomes
\begin{equation}
\label{two-irreps-final}
\frac{1}{d_{p}}\delta_{pq}\delta_{jj^{\prime}}\sum_{k} \langle X_{q}|\alpha_{k}^{(q)}\rangle \langle\beta_{k}^{(p)}|X_{p}\rangle .
\end{equation}
If, in the case $p=q$, the carrier spaces are the same, this reduces to
\begin{equation}
\label{same-carrier}
\frac{1}{d_{p}}\delta_{pq}\delta_{jj^{\prime}} \| X_{p}\|^{2} .
\end{equation}
Note that when $p=q$ and the carrier spaces are not the same, we are assuming that the basis elements $|\alpha_{j}^{(p)}\rangle$ and $|\beta_{j}^{(p)}\rangle$ transform in the same way under the action of $\Gamma^{(p)}$. Finally, suppose we have a representation of $G$, $\Gamma (g)$, which is the direct sum of irreducible representations each of which only appears once. In this case we will have that $|\alpha_{j}^{(p)}\rangle = |\beta_{j}^{(p)}\rangle$ when $p=q$, so that we can make use of Eq.\ (\ref{same-carrier}). If we then have a vector
\begin{equation}
|\psi\rangle = \sum_{q,j} c_{q,j}|\alpha_{j}^{(q)}\rangle = \sum_{q} |\psi_{q}\rangle ,
\end{equation}
where $|\psi_{q}\rangle = \sum_{j} c_{q,j} |\alpha_{j}^{(q)}\rangle$ is the component of $|\psi\rangle$ that is in the subspace that transforms according to $\Gamma^{(q)}$, then
\begin{equation}
\frac{1}{|G|} \sum_{g\in G} \Gamma (g)|X\rangle\langle X|\Gamma^{\dagger}(g)|\psi\rangle = \sum_{p} \frac{1}{d_{p}} \| X_{p}\|^{2} |\psi_{p}\rangle ,
\end{equation}
where $X_{p}$ is the projection of $|X\rangle$ onto the subspace that transforms according to $\Gamma^{(p)}$.
Now let us apply this to find the eigenstates of $A$. Setting $|X^{(1)}\rangle = |+x,+x\rangle$ and $|X^{(2)}\rangle = |-x,v_{0}\rangle$, we have that
\begin{eqnarray}
A|\psi\rangle & = & 6\sum_{p=1}^{2} (\| X_{p}^{(1)}\|^{2} + \| X_{p}^{(2)}\|^{2}) |\psi_{p}\rangle \nonumber \\
& & + 3(\| X_{3}^{(1)}\|^{2} + \| X_{3}^{(2)}\|^{2}) |\psi_{3}\rangle .
\end{eqnarray}
From this we see that the eigenvectors of $A$ are just vectors lying in the invariant subspaces, and the eigenvalues are
\begin{eqnarray}
6(\| X_{1}^{(1)}\|^{2} + \| X_{1}^{(2)}\|^{2}) & = & \frac{21}{4} \nonumber \\
6(\| X_{2}^{(1)}\|^{2} + \| X_{2}^{(2)}\|^{2}) & = & \frac{3}{4} \nonumber \\
3(\| X_{3}^{(1)}\|^{2} + \| X_{3}^{(2)}\|^{2}) & = & 3 .
\end{eqnarray}
Therefore, the largest eigenvalue is $21/4$ and the corresponding eigenvector is $(|00\rangle + |11\rangle )/\sqrt{2}$.
The classical bound on the sum of the $12$ probabilities is found as before. We assume that the probabilities can be derived from a joint distribution, $P(a_{0},b_{0}; a_{1},b_{1};a_{2},b_{2})$ and calculate their sum in terms of the joint distribution. The largest coefficient multiplying a probability from the joint distribution gives the upper bound to the sum, and in this case it is $5$. Because $21/4 > 5$, the quantum result violates the classical inequality.
\subsection{Comparison of nonlocal games}
The Bell inequality derived in the last section can be rephrased in terms of a nonlocal game, and we shall do so shortly. It is useful, however to compare that nonlocal game to one resulting from a Bell inequality that is produced by an abelian group, in particular the group $\mathbb{Z}_{6}$.
The group $\mathbb{Z}_{6}$ has a single generator whose sixth power is just the identity element. We will choose the representation of $\mathbb{Z}_{6}$ generated by the matrix, $U$, on $\mathbb{C}^{2}$ (qubits) given by
\begin{equation}
U=|+x\rangle\langle +x|+e^{i\pi /3}|-x\rangle\langle -x| .
\end{equation}
Note that $U^{6}=I$ and $U^{3}=|+x\rangle\langle +x| - |-x\rangle\langle -x| =\sigma_{x}$. We can use $U$ to define three bases, the computational basis $\{ |0\rangle , |1\rangle \}$, $\{ |u_{j}\rangle =U|j\rangle | j=0,1\}$, and $\{ |v_{j}\rangle = U^{2}|j\rangle | j=0,1 \}$. These bases are the eigenstates of three observables, $a_{0}=|1\rangle\langle 1|$, $a_{1}=|u_{1}\rangle\langle u_{1}|$, and $a_{2}=|v_{1}\rangle\langle v_{1}|$.
We now consider the tensor product space $\mathbb{C}^{2}\otimes \mathbb{C}^{2}$ (two qubits) and the representation of $\mathbb{Z}_{6}$ generated by $U\otimes U$. The observables on the first qubit are $a_{j}$ for $j=0,1,2$, and we denote the identical observables on the second qubit by $b_{j}$, for $j=0,1,2$. We now look at two orbits. The first starts with the state $|0,0\rangle$ and is given by
\begin{equation}
\{ |0,0\rangle , |u_{0}, u_{0}\rangle , |v_{0}, v_{0}\rangle , |1,1\rangle , |u_{1}, u_{1}\rangle , |v_{1}, v_{1}\rangle \} ,
\end{equation}
corresponding to the measurement probabilities
\begin{eqnarray}
\label{orbit-prob-1-z6}
\{ p(a_{0}=0,b_{0}=0), p(a_{1}=0, b_{1}=0), \nonumber \\
p(a_{2}=0, b_{2}=0), p(a_{0}=1,b_{0}=1), \nonumber \\
p(a_{1}=1,b_{1}=1), p(a_{2}=1,b_{2}=1) \} .
\end{eqnarray}
The second orbit begins with the state $|0,u_{0}\rangle$ and is
\begin{equation}
\{ |0,u_{0}\rangle , |u_{0}, v_{0}\rangle , |v_{0}, 1\rangle , |1,u_{1}\rangle , |u_{1}, v_{1}\rangle , |v_{1}, 0\rangle \} ,
\end{equation}
which corresponds to the measurement probabilities
\begin{eqnarray}
\label{orbit-prob-2-z6}
\{ p(a_{0}=0,b_{1}=0), p(a_{1}=0, b_{2}=0), \nonumber \\
p(a_{2}=0, b_{0}=1), p(a_{0}=1,b_{1}=1), \nonumber \\
p(a_{1}=1,b_{2}=1), p(a_{2}=1,b_{0}=0) \} .
\end{eqnarray}
We next want to find the quantum state that maximizes the sum of the probabilities in Eqs.\ (\ref{orbit-prob-1-z6}) and (\ref{orbit-prob-2-z6}). As before, it will be the eigenstate of the operator
\begin{equation}
A=\sum_{j=0}^{5} (U\otimes U)^{j}(|0,0\rangle\langle 0,0| + |0,u_{0}\rangle\langle 0,u_{0}|) (U^{\dagger}\otimes U^{\dagger})^{j} ,
\end{equation}
corresponding to the largest eigenvalue. The eigenstates of $U\otimes U$ will also be the eigenstates of $A$ \cite{guney}, and the eigenvalues of $U\otimes U$ are $1$ and $e^{2\pi i/3}$, which are non-degenerate, and $e^{i\pi /3}$, which is doubly degenerate. We find that the eigenstate of $A$ with the largest eigenvalue lies in the space corresponding to the $e^{i\pi /3}$ eigenvalue of $U\otimes U$, and this space is spanned by the vectors $|+x, -x\rangle$ and $|-x, +x\rangle$. In this space, $A$ reduces to the $2\times 2$ matrix
\begin{equation}
\frac{1}{4}\left( \begin{array}{cc} 1 & 1+e^{i\pi /3} \\ 1+e^{-i\pi /3} & 2 \end{array}\right) ,
\end{equation}
and the largest eigenvalue is $3 + (3/2)\sqrt{3}$ with the corresponding eigenvector
\begin{equation}
\label{phi-z6}
|\phi\rangle = \frac{1}{\sqrt{6}}[ (1+e^{i\pi /3})|+x,-x\rangle + \sqrt{3}|-x,+x\rangle ] .
\end{equation}
The maximum classical value of the sum of the probabilities in Eqs.\ (\ref{orbit-prob-1-z6}) and (\ref{orbit-prob-2-z6}), that is, the sum if all of the probabilities come from a joint distribution of all six observables, is $5$. Since $3 + (3/2)\sqrt{3} > 5$, the quantum probabilities violate the classical bound.
We can now proceed to a discussion of this Bell inequality in terms of a nonlocal game. The two players are Alice and Bob, and an arbitrator sends Alice a value of $s\in \{0,1,2\}$ and Bob a value of $t\in \{0,1,2\}$. Not all values of $(s,t)$ are allowed. In particular, either $s=t$ or $(s,t)$ must be $(0,1)$, $(1,2)$, or $(2,0)$, so that six out of the nine possibilities are allowed, and they will be assumed to be equally probable. Alice and Bob then each send a bit to the arbitrator. They win if their bit values differ and $(s,t)=(2,0)$ or their bit values are the same and $(s,t)$ is any of the other allowed values. Note that for each allowed value of $(s,t)$ there are two winning possibilities. Classically their winning probability is $5/6$, and it can be achieved if Alice and Bob each always send the bit value $0$. In the quantum case, Alice and Bob share the state $|\phi\rangle$ in Eq.\ (\ref{phi-z6}), and the values of $(s,t)$ determine which observable they measure, in particular, Alice measures $a_{s}$ and Bob measures $b_{t}$. The bit values they send to the arbitrator are simply the results of their measurements. In this scenario, all of the probabilities in Eqs.\ (\ref{orbit-prob-1-z6}) and (\ref{orbit-prob-2-z6}) are the same, and are equal to $(2+\sqrt{3})/8$. For each value of $(s,t)$ there are two winning possibilities, so the overall winning probability for Alice and Bob using the quantum strategy is $2(2+\sqrt{3})/8 = (2+\sqrt{3})/4$. Comparing the classical and quantum strategies, we see that the best classical strategy has a winning probability of approximately $0.83$ while the quantum strategy has a winning probability of $0.93$, so there is a quantum advantage.
Now let us go back and rephrase the Bell inequality that resulted from $D_{3}$ as a nonlocal game. As we shall see, its structure is different than that of the game that resulted from $\mathbb{Z}_{6}$. The basic situation is as before, Alice receives a value of $s\in \{0,1,2\}$ and Bob receives a value of $t\in \{0,1,2\}$, but in this case all nine combinations of $(s,t)$ are possible. Alice and Bob then send a bit to an arbitrator. They win if $s=t$ and they return the same bit value or if $s$ and $t$ are different, they return the bit values $(a,b)$ that are shown in Table 3. Note that in this case when $s=t$ there are two winning possibilities for $(a,b)$, but for $s\neq t$ there is only one. This is different from the previous game where for each allowed value of $(s,t)$ there were two winning possibilities.
\begin{table}
\label{win-values}
\centering
\begin{tabular}{|c|c|}\hline
(s,t) & (a,b) \\ \hline (0,1) & (0,1) \\ (1,0) & (1,0) \\ (0,2) & (1,0) \\ (2,0) & (0,1) \\ (1,2) & (0,1) \\ (2,1) & (1,0)
\\ \hline
\end{tabular}
\caption{Winning values for the nonlocal game derived from $D_{3}$ when $s\neq t$. }
\end{table}
Now let us look at the classical and quantum winning probabilities. As we saw, the sum of the probabilities resulting from $D_{3}$ is $5$, and by an argument similar to that in Section 2 that implies that the classical winning probability is less than or equal to $5/9\simeq 0.556$. This bound can be achieved with the following strategy. If Alice receives $s$ from the arbitrator, she returns $f_{A}(s)$, and when Bob receives $t$, he returns $f_{B}(t)$, where $f_{A}(0)=f_{B}(0)=1$, $f_{A}(1)=f_{B}(1)=0$, and $f_{A}(2)=f_{B}(2)=0$. In the quantum case, Alice and Bob share the state $|\phi\rangle = (|00\rangle + |11\rangle )/\sqrt{2}$, and Alice measures $a_{s}$ and Bob measures $b_{t}$, where $a_{s}$ and $b_{t}$ are the observables appropriate for the Bell inequality that resulted from $D_{3}$, i.e.\ $a_{0}=|-x\rangle\langle -x|$, $a_{1} =|u_{1}\rangle\langle u_{1}|$, and $a_{2}=|v_{1}\rangle\langle v_{1}|$, and similarly for the $b_{j}$, $j=0,1,2$. They then report their measurement results as their bit values. The quantum winning probability is then just $1/9$ times the sum of the probabilities in Eqs.\ (\ref{orbit-prob-1}) and (\ref{orbit-prob-2}), which is $7/12 \simeq 0.583$. This is larger than the classical winning probability. In this case, it is worth noting that the quantum probabilities for the two different orbits are not the same. For the case that $s=t$, i.e. the probabilities in Eq.\ (\ref{orbit-prob-1}), the probabilities are all equal to $1/2$. This implies that if $s=t$, Alice and Bob always win the game if they are using the quantum strategy. When $s\neq t$, the probabilities in Eq.\ (\ref{orbit-prob-2}), are all equal to $3/8$.
To summarize the results of this section, we have constructed two nonlocal games, one based on $\mathbb{Z}_{6}$ and the other based on $D_{3}$. For both, Alice received a value of $s$ and Bob received a value of $t$, and each had to return a bit value. In one game, the set of allowed values of $(s,t)$ was restricted, in the other it was not. In addition, in one game for each allowed value of $(s,t)$ there were always two winning values of $(a,b)$, while in the second game this was true if $s=t$, but there was only one winning value otherwise. Therefore, the nonlocal games had rather different structures.
\subsection{$D_{6}$}
To conclude we will look at a larger group, $D_{6}$. This group has the generators $r$ and $s$, where $s^{2}=e$, as before, but now $r^{6}=e$. This group has six conjugacy classes: $C_{e}=\{ e\}$, $C_{r}=\{ r,r^{5}\}$, $C_{r^{2}}=\{ r^{2}, r^{4}\}$, $C_{r^{3}}=\{ r^{3}\}$, $C_{s}=\{ s, r^{2}s, r^{4}s \}$, and $C_{rs}=\{ rs, r^{3}s, r^{5}s \}$. It has six irreducible representations, $\Gamma^{(j)}$ for $j=1,2,3,4$, which are one-dimensional, and $\Gamma^{(5)}$ and $\Gamma^{(6)}$, which are two-dimensional. The character table for this group is given in Table 4.
\begin{table}
\centering
\begin{tabular}{|c|c|c|c|c|c|c|} \hline
& $C_{e}$ & $C_{r}$ & $C_{r^{2}}$ & $C_{r^{3}}$ & $C_{s}$ & $C_{rs}$ \\ \hline $\Gamma^{(1)}$ & $1$ & $1$& $1$& $1$ & $1$& $1$ \\ \hline $\Gamma^{(2)}$ & $1$ & $1$ & $1$ & $1$ & $-1$ & $-1$ \\ \hline $\Gamma^{(3)}$ & $1$ & $-1$ & $1$ & $-1$ & $1$ & $-1$ \\ \hline $\Gamma^{(4)}$ & $1$ & $-1$ & $1$ & $-1$ & $-1$ & $1$ \\ \hline $\Gamma^{(5)}$ & $2$ & $1$ & $-1$ & $-2$ & $0$ & $0$ \\ \hline $\Gamma^{(6)}$ & $2$ & $-1$ & $-1$ & $2$ & $0$ & $0$ \\ \hline
\end{tabular}
\caption{Character table for $D_{6}$.}
\end{table}
We will make use of the following representation of $D_{6}$ on $\mathbb{C}^{3}$. The computational basis is $\{ |j\rangle | j=0,1,2\}$, and let us define another basis
\begin{equation}
|u_{j}\rangle = \frac{1}{\sqrt{3}}\sum_{k=0}^{2} e^{2\pi ijk/3} |k\rangle .
\end{equation}
Corresponding to the group element $r$, we choose
\begin{equation}
U=|u_{0}\rangle\langle u_{0}| + e^{-i\pi /3}|u_{1}\rangle\langle u_{1}| + e^{i\pi /3}|u_{2}\rangle\langle u_{2}| ,
\end{equation}
and corresponding to $s$ we choose
\begin{equation}
V=|u_{0}\rangle\langle u_{0}| + i(|u_{1}\rangle\langle u_{2}| - |u_{2}\rangle\langle u_{1}|) .
\end{equation}
This choice for $U$ was used in a previous paper as a generator of a representation of $\mathbb{Z}_{6}$ \cite{guney}. Note that it has the property that $U^{2}|j\rangle = |j+1\rangle$, where the addition is modulo $3$. If we denote the representation generated by $U$ and $V$ by $\Gamma$, then application of Eq.\ (\ref{decomposition}) gives us that
\begin{equation}
\Gamma = \Gamma^{(1)} \oplus \Gamma^{(5)} .
\end{equation}
Application of powers and products of the operators $U$ and $V$ to the computational basis yield three additional bases, $\{ |v_{j}\rangle = U|j\rangle | j=0,1,2\} $, $\{ |w_{j}\rangle = V|j\rangle | j=0,1,2\}$, and $\{ |x_{j}\rangle = UV|j\rangle | j=0,1,2\}$. We can now define four observables that take values in the set $\{ 0,1,2\}$
\begin{eqnarray}
a_{0}= \sum_{j=1}^{2} j|j\rangle\langle j| & &a_{1}=\sum_{j=1}^{2}j|v_{j}\rangle\langle v_{j}| \nonumber \\
a_{2}=\sum_{j=1}^{2}j|w_{j}\rangle\langle w_{j}| & &a_{3}=\sum_{j=1}^{2}j|x_{j}\rangle\langle x_{j}| .
\end{eqnarray}
In the bipartite case, Alice and Bob will choose among these observables for their measurements. That is, they each possess a qutrit and decide to measure one of the four observables above (we will denote Bob's observables by $b_{j}$).
Next we will choose two orbits. These were again identified by means of a random search that checked for Bell inequality violations. The orbits start on the states $(U^{4}\otimes U^{2}V)|0,0\rangle = |2, w_{2}\rangle$ and $(I\otimes U^{5}V)|0,0\rangle = |0,x_{1}\rangle$ and further elements of the orbits are found by applying $\Gamma (g)\otimes \Gamma (g)$, for $g\in D_{6}$, to the initial states. Each orbit contains $12$ states and gives rise to $12$ corresponding measurement probabilities. The $24$ probabilities that result from these two orbits are listed in Appendix B. If all of these probabilities come from a joint distribution, their sum cannot be greater than $6$.
We now need to see if we can find a quantum state that violates the classical bound. The operator $A$ is now given by
\begin{equation}
\label{d6-A}
A=\sum_{g\in D_{6}} \left( \Gamma (g)\otimes \Gamma (g)\right) L \left( \Gamma (g)\otimes \Gamma (g)\right)^{\dagger} ,
\end{equation}
where now $L=|2,w_{2}\rangle\langle 2,w_{2}|+ |0,x_{1}\rangle\langle 0,x_{1}|$. Application of Eq.\ (\ref{decomposition}) gives us that
\begin{equation}
\Gamma \otimes \Gamma =2\Gamma^{(1)} \oplus \Gamma^{(2)} \oplus 2\Gamma^{(5)}\oplus \Gamma^{(6)} .
\end{equation}
Using the representation of $U$ and $V$ in the $\{ |u_{j}\rangle \}$ basis, we find that $|u_{0}, u_{0}\rangle$ and $(|u_{1}, u_{2}\rangle + |u_{2}, u_{1}\rangle )/\sqrt{2}$ transform as $\Gamma^{(1)}$, $(|u_{1}, u_{2}\rangle - |u_{2}, u_{1}\rangle )/\sqrt{2}$ transforms as $\Gamma^{(2)}$, both $\{ |u_{0}, u_{1}\rangle , |u_{0}, u_{2}\rangle \}$ and $\{ |u_{1}, u_{0}\rangle , |u_{2}, u_{0} \rangle \}$ transform as $\Gamma^{(5)}$, and $\{ |u_{1}, u_{1}\rangle , |u_{2} , u_{2}\rangle \}$ transform as $\Gamma^{(6)}$.
We find that the largest eigenvalue of $A$ corresponds to an eigenvector that lies in the subspace spanned by the two vectors that transform as $\Gamma^{(1)}$. The details of the calculation are in Appendix B, but here we note the following. According to Eq.\ (\ref{sum-two-irreps}), the eigenvectors of $A$ will lie in invariant subspaces corresponding to the representations $\Gamma^{(1)}$, $\Gamma^{(2)}$, $\Gamma^{(5)}$, and $\Gamma^{(6)}$, and these subspaces are orthogonal. The components of both $|2,w_{2}\rangle$ and $|0,x_{1}\rangle$ that lie in the $\Gamma^{(1)}$ subspace are the same, and are given by
\begin{equation}
|X_{1}\rangle = \frac{1}{3}|u_{0},u_{0}\rangle - \frac{1}{2\sqrt{3}}(|u_{1},u_{2}\rangle + |u_{2},u_{1}\rangle ) .
\end{equation}
Because it transforms as $\Gamma^{(1)}$, this vector is invariant under the actions of $U$ and $V$, and this implies that in the $\Gamma^{(1)}$ space, $A$ is just $2(12)|X_{1}\rangle\langle X_{1}|$. Therefore, the two eigenvectors of $A$ in this subspace are the vector orthogonal to $|X_{1}\rangle$, which has an eigenvalue of $0$, and a normalized version of $|X_{1}\rangle$, which is $|\phi\rangle = 3(\sqrt{2/5})|X_{1}\rangle$, whose eigenvalue is $2(12)\| X_{1}\|^{2}=20/3$. As $(20/3) > 6$, the sum of the probabilities for the state $|\phi\rangle$ violates the classical bound, so the sum of the $24$ probabilities in Table 5 gives us a Bell inequality.
\section{Conclusion}
We have shown how group actions can be used to generate Bell inequalities. In particular, we provided an example of a three-party Bell inequality using an Abelian group, and two examples of two-party inequalities but with non-Abelian groups. The orbits of the group action are used to generate events, the sum of whose probabilities is the main object appearing in the Bell inequality. This approach has the benefit of providing a set of quantum observables that can be measured to test the Bell inequality and a quantum state that violates it.
There are a number of areas in which the research presented here could be extended. The choice of the orbits that led to the Bell inequalities was done by using a random search (see Appendix A). It would be useful to have a criterion for choosing them. This would also allow us to gain a better understanding of how the structures of Bell inequalities are related to the underlying groups. The Bell inequalities depend on both the group and the choice of orbits, and at the moment we do not have a good way of disentangling these two effects. A better understanding of how to choose the orbits would, we hope, lead to a better idea of the relation between the group and the Bell inequality.
\section*{Acknowledgment}
This research was supported by a grant from the John Templeton Foundation.
\section*{Appendix A}
Here we provide more detail about how the random search to determine the orbits for the group $D_{3}$ was performed. The two orbits that yield a Bell inequality were found by a random search in the space of all possible orbit pairs made with the SAGE (\url{http://www.sagemath.org/}), an open source computer algebra system. SAGE includes group theory and symbolic manipulation packages that are suitable for this task.
First, using SAGE the $D_3$ group is generated and its elements $g\in{}D_3$ are calculated. Then the group generators $r$ and $s$ are associated with the corresponding representation matrices, $U=\Gamma{}\left(r\right)$, $V=\Gamma{} \left( s \right)$. We know how the rest of the group elements are generated from the generators $\{g_i | i=0,1,\ldots 5\}=\{ e,r,r^{2}, s, rs, r^{2}s \}$. The associated representation matrices are calculated accordingly, $\{ \Gamma\left( g_i \right) = \Gamma_i | i=0,\ldots 5 \} = \{ I, U, U^2, V, UV, U^2V \}$. To associate the representation matrices with quantum measurement outcome states, the matrices are applied to a chosen initial state, which in our case was $|+x\rangle$, giving $|\psi_i\rangle = \Gamma_i |+x\rangle$.
In the code, then, the orthogonality relations among these states are analyzed. A table $T_{ij} = |\langle \psi_i | \psi_j \rangle|$ of the absolute values of inner products is calculated. From the table these states are classified into different orthonormal bases, with each basis corresponding to the different possible eigenstates of a single observable. States for which the inner products are $0$ or $1$ are in the same basis. In this way the Bell scenario for the number of measurements and outcomes is determined. The choice of initial state is essential to be able to get useful orthonormal bases. To be specific, each state is associated with an event $E$, namely an observable and its outcome, $|\psi_i\rangle \leftrightarrow a_{m(g_i)}=o(g_i)$, where $m$ is the choice of observable, and $o$ is the outcome. For our choice of $U$, $V$ and the initial state, $|+x\rangle$, the 6 states $|\psi_i \rangle$ belong to 3 two dimensional orthonormal bases. $\{ E_i | i=0, \ldots 5 \} = \{ a_{0}=0, a_{1}=0, a_{2}=0, a_{0}=1, a_{1}=1, a_{2}=1\}$.
We have two parties, and we want to see whether two orbits are sufficient. For each orbit we need two group elements, $g_\mu$ and $g_\nu$, to set the initial joint state $|\Psi_{\mu,\nu}\rangle = \Gamma(g_\mu) |+x\rangle \otimes \Gamma(g_\nu) |+x\rangle$. Then, the orbit will give us the $A$ operator
\begin{equation}
A_{\mu,\nu} = \sum_i \left( \Gamma(g_i)\otimes \Gamma(g_i)\right) |\Psi_{\mu,\nu}\rangle \langle \Psi_{\mu,\nu}| \left( \Gamma(g_i)^{\dagger}\otimes \Gamma^\dagger{}(g_i) \right).
\end{equation}
The $A$ corresponding to both orbits is $A = A_{\mu_1,\nu_1} + A_{\mu_2,\nu_2}$. The choice of $\mu_1,\nu_1, \mu_2,\nu_2$ also determines the set of joint probabilities
\begin{eqnarray}
\mathcal{P} & = & \left\{ P\left( a_{m(g_i g_{\mu_j})} =o(g_i g_{\mu_j}), b_{n(g_i g_{\nu_j})} = o(g_i g_{\nu_j}) \right) \right. \nonumber \\
& & \left. |i=0,\ldots 5, j=1,2 \right\}.
\end{eqnarray}
Because the size of the search space increases exponentially with respect to the group size a random search is implemented. The size is $|G|^{N_o N_p}$ where $|G|$ is the order of the group, $N_o$ is the number of orbits we want, and $N_p$ is the number of parties. For a random choice of $\{ \mu_1,\nu_1, \mu_2,\nu_2 \}$ the biggest eigenvalue of $A$, $\lambda_{max}$, is compared with the classical bound of the sum of the joint probabilities in $\mathcal{P}$, $c$. A violation is found when $\lambda_{max} > c$. The code can be downloaded from \url{http://www.github.com/vug/bell-group-actions} .
\section*{Appendix B}
Table 5 gives the probabilities corresponding to the two orbits for $D_{6}$. The starting states for the orbits are given in the first line, and the group representation element that is applied to the initial state to give the resulting probability is given in the first column.
\begin{table}
\centering
\begin{tabular}{|c|c|c|}\hline & $|2,w_{2}\rangle$ & $|0,x_{1}\rangle$ \\ \hline $I$ & $p(a_{0}=2, b_{2}=2)$ & $p(a_{0}=0, b_{3}=1)$ \\ $U$ & $p(a_{1}=2, b_{3}=2)$ & $p(a_{1}=0, b_{2}=0)$ \\ $U^{2}$ & $p(a_{0}=0, b_{2}=1)$ & $p(a_{0}=1, b_{3}=0)$ \\ $U^{3}$ & $p(a_{1}=0, b_{3}=1)$ & $p(a_{1}=1, b_{2}=2)$ \\ $U^{4}$ & $p(a_{0}=1, b_{2}=0)$ & $p(a_{0}=2, b_{3}=2)$ \\ $U^{5}$ & $p(a_{1}=1, b_{3}=0)$ & $p(a_{1}=2, b_{2}=1)$ \\ $V$ & $p(a_{2}=2, b_{0}=2)$ & $p(a_{2}=0, b_{1}=0)$ \\ $UV$ & $p(a_{3}=2, b_{1}=2)$ & $p(a_{3}=0, b_{0}=1)$ \\ $U^{2}V$ & $p(a_{2}=1, b_{0}=0)$ & $p(a_{2}=2, b_{1}=1)$ \\ $U^{3}V$ & $p(a_{3}=1, b_{1}=0)$ & $p(a_{3}=2, b_{0}=2)$ \\ $U^{4}V$ & $p(a_{2}=0, b_{0}=1)$ & $p(a_{2}=1, b_{1}=2)$ \\ $U^{5}V$ & $p(a_{3}=0, b_{1}=0)$ & $p(a_{3}=1, b_{0}=0)$ \\ \hline
\end{tabular}
\caption{Probabilities generated by orbits for $D_{6}$.}
\end{table}
Next, we move on to the calculation of the eigenvalues and eigenstates of the operator $A$ for the group $D_{6}$ given in Eq.\ (\ref{d6-A}) . The eigenvalues and eigenvectors for the $\Gamma^{(1)}$ subspace have already been discussed in the text. The $\Gamma^{(2)}$ and $\Gamma^{(6)}$ subspaces are straightforward, since these representations only appear once in the decomposition of $\Gamma$ and we can use Eq.\ (\ref{same-carrier}). For $\Gamma^{(2)}$ we find that the component of $|2,w_{2}\rangle$ in this subspace is
\begin{equation}
|X_{2}^{(1)}\rangle = \frac{-i}{6}(|u_{1},u_{2}\rangle - |u_{2},u_{1}\rangle )
\end{equation}
while the component of $|0,x_{1}\rangle$ is just $|X_{2}^{(2)}\rangle = - |X_{2}^{(1)}\rangle$. The eigenvalue corresponding to the $\Gamma^{(2)}$ space is then $4/3$. The components of $|2,w_{2}\rangle$ and $|0,x_{1}\rangle$ in the $\Gamma^{(6)}$ subspace are
\begin{eqnarray}
|X_{6}^{(1)}\rangle & = & \frac{i}{3} (-|u_{1},u_{1}\rangle + |u_{2},u_{2}\rangle ) \nonumber \\
|X_{6}^{(2)}\rangle & = & -\frac{1}{3\sqrt{3}}[(1-e^{-2\pi i /3}) |u_{1},u_{1}\rangle \nonumber \\
& & + (1-e^{2\pi i/3}) |u_{2},u_{2}\rangle ] ,
\end{eqnarray}
respectively. This gives $8/3$ as the eigenvalue corresponding to $\Gamma^{(6)}$, and this eigenvalue is two-fold degenerate.
The $\Gamma^{(5)}$ subspace is more complicated. It is four dimensional and consists of two copies of the $\Gamma^{(5)}$ irreducible representation. We first note that because $|u_{0}\rangle$ is invariant under the actions of $U$ and $V$, the states $|u_{0},u_{1}\rangle$ and $|u_{1},u_{0}\rangle$ transform in the same way, and the states $|u_{0},u_{2}\rangle$ and $|u_{2},u_{0}\rangle$ transform in the same way. Now suppose that $|X_{5}\rangle$ is a vector in the $\Gamma^{(5)}$ subspace. Setting $|\alpha_{j}\rangle = |u_{0},u_{j}\rangle$ and $|\beta_{j}\rangle = |u_{j},u_{0}\rangle$, for $j=1,2$, we find from Eq.\ (\ref{two-irreps-final}), that
\begin{eqnarray}
\sum_{g\in D_{6}} \Gamma (g)|X_{5}\rangle\langle X_{5}|\Gamma^{\dagger}(g) \nonumber \\
=6\left( \begin{array}{cccc} \|X_{5\alpha}\|^{2} & 0 & z & 0 \\ 0 & \|X_{5\alpha}\|^{2} & 0 & z \\ z^{\ast} & 0 & \|X_{5\beta}\|^{2} & 0 \\ 0 & z^{\ast} & 0 & \|X_{5\beta}\|^{2} \end{array} \right) ,
\end{eqnarray}
where the matrix is in the $\{ \alpha_{1}, \alpha_{2}, \beta_{1}, \beta_{2} \}$ basis, and
\begin{eqnarray}
\|X_{5\alpha}\|^{2} & = & \sum_{j=1}^{2} |\langle X_{5}|\alpha_{j}\rangle |^{2} \nonumber \\
\|X_{5\beta}\|^{2} & = & \sum_{j=1}^{2} |\langle X_{5}|\beta_{j}\rangle |^{2} \nonumber \\
z & = & \sum_{j=1}^{2} \langle X_{5}|\beta_{j}\rangle \langle \alpha_{j}|X_{5}\rangle .
\end{eqnarray}
The component of $|2,w_{2}\rangle$ transforming as $\Gamma^{(5)}$ is
\begin{eqnarray}
|X_{5}^{(1)}\rangle & = & \frac{1}{3\sqrt{3}}[ (1-e^{-2\pi i/3})|u_{0},u_{1}\rangle \nonumber \\
& & + (1-e^{2\pi i/3})|u_{0},u_{2}\rangle \nonumber \\
& & + \frac{1}{3}(e^{-2\pi i /3}|u_{1},u_{0}\rangle + e^{2\pi i/3}|u_{2},u_{0}\rangle )] ,
\end{eqnarray}
and the component of $|0,x_{1}\rangle$ transforming as $\Gamma^{(5)}$ is
\begin{eqnarray}
|X_{5}^{(2)}\rangle & = &- \frac{1}{3\sqrt{3}}[ (1-e^{-2\pi i/3})|u_{0},u_{1}\rangle \nonumber \\
& & + (1-e^{2\pi i/3})|u_{0},u_{2}\rangle \nonumber \\
& & + \frac{1}{3}(|u_{1},u_{0}\rangle + |u_{2},u_{0}\rangle )] .
\end{eqnarray}
For both $|X_{5}^{(1)}\rangle$ and $|X_{5}^{(2)}\rangle$ we find $\|X_{5\alpha}\|^{2} = \|X_{5\beta}\|^{2} =2/9$ and $z=-1/(3\sqrt{3})$. Putting these together, we find that the eigenvalues of $A$ in the $\Gamma^{(5)}$ subspace are $(4/3)(2\pm \sqrt{3})$ each of which is two-fold degenerate.
\end{document} |
\begin{document}
\title{Notes on multiplicativity of maximal output purity for completely positive qubit maps}
\author{Koenraad M.R.~Audenaert}
\email{k.audenaert@imperial.ac.uk}
\affiliation{Institute for Mathematical Sciences, Imperial College London,
53 Prince's Gate, London SW7 2PG, UK}
\affiliation{Dept.\ of Mathematics, Royal Holloway, University of London, Egham, Surrey TW20 0EX, UK}
\date{\today}
\begin{abstract}
A problem in quantum information theory that has received
considerable attention in recent years is the question of multiplicativity
of the so-called maximal output purity (MOP) of a quantum channel.
This quantity is defined as the maximum value of the purity one can get
at the output of a channel by varying over all physical input states,
when purity is measured by the Schatten $q$-norm, and is denoted by
$\nu_q$. The multiplicativity problem is the question whether two
channels used in parallel have a combined $\nu_q$ that is the product of the
$\nu_q$ of the two channels. A positive answer would imply
a number of other additivity results in QIT.
Very recently, P.\ Hayden has found counterexamples for every value of $q>1$.
Nevertheless, these counterexamples require that
the dimension of these channels increases with $1-q$ and therefore do not rule out
multiplicativity for $q$ in intervals $[1,q_0)$ with $q_0$ depending on the channel dimension.
I argue that this would be enough to prove additivity of entanglement of formation
and of the classical capacity of quantum channels.
More importantly, no counterexamples have as yet been found
in the important special case where one of the channels
is a qubit-channel, i.e.\ its input states are 2-dimensional.
In this paper I focus attention to this qubit case
and I rephrase the multiplicativity conjecture
in the language of block matrices and prove the conjecture in a number
of special cases.
\end{abstract}
\maketitle
\section{Introduction\label{sec:intro}}
Additivity problems are amongst the most important, and notorious, open problems of quantum information theory.
Basically, the question is whether or not certain information theoretic properties of composite quantum systems consisting
of independent parts decompose as simple sums over these parts.
One of the more important instances of this question concerns the classical information carrying capacity
of quantum channels. Is the total capacity of two quantum channels taken in parallel equal to the sum of the capacities
of the separate channels? Roughly speaking, the classical capacity of a quantum channel quantifies the maximal achievable rate
of error-free communication of classical information through the channel, when the classical information is encoded onto
quantum states that are subsequently transmitted through the quantum channel and then decoded into classical information again \cite{holevo99}.
By judiciously choosing encoding and decoding, the transmission errors incurred when passing through the quantum channel can be corrected.
Theoretically speaking, for every channel, error correcting block codes can be devised so that the remaining probability of error
vanishes asymptotically, when block size goes to infinity. The rate of a code is the number of classical bits of information carried,
on average, by one quantum bit (qubit).
The capacity of the channel is then the maximal rate of an error correcting code that asymptotically corrects all errors for that
particular channel.
A basic result of classical information theory is that the capacity of two classical channels in parallel is just the sum of the
two capacities.
The additivity question for the classical capacity of a quantum channel asks whether this is still true for quantum channels
with encoding/decoding based on quantum error correcting codes. If not, this means that the rate of transmission through the
two channels can be increased by encoding the two streams of classical bits into one stream of \textit{entangled} quantum states,
rather than into two independent streams of quantum states.
Other additivity questions in QIT concern the \textit{entanglement of formation} of bipartite quantum states, which is an important
measure of entanglement, and the \textit{minimal output entropy} of a quantum channel.
A surprising result of quantum information theory is that all these additivity questions are in fact equivalent \cite{ka03,shor},
despite the seemingly different contexts in which they are formulated.
In this paper, I concentrate on what looks like the simplest instance of the additivity questions, namely the additivity of
the minimal output entropy of a quantum channel.
When a pure state is sent through a quantum channel, i.e.\ when a quantum operation acts on a pure quantum state, the resulting
state will in general be no longer pure but will be mixed. By expressing the purity of the resulting state in terms of a
mathematical measure of purity, one can ask for the largest possible value of purity an output state can have when
one can choose the input state freely. One such measure of purity is the von Neumann entropy $S(\rho):=-\mathop{\rm Tr}\nolimits[\rho\log\rho]$.
As this is an inverted measure of purity (0 for pure states, positive for mixed states), this has to be minimised, yielding the \textit{minimal
output entropy} of a quantum channel. Its precise definition is
$$
\nu_S(\Phi) := \min_\psi S(\Phi(\ket{\psi}\bra{\psi})),
$$
where $\Phi(\cdot)$ denotes the action of the quantum channel on a state. As is well-known, a quantum channel is
mathematically defined as a trace-preserving completely positive map.
A quantity that is closely related to the minimal output entropy is the \textit{maximal output $q$-purity} (MOP). Here, purity is measured
by the Schatten $q$-norm
$$
||\rho||_q := \mathop{\rm Tr}\nolimits[\rho^q]^{1/q},
$$
a non-commutative generalisation of the familiar $\ell_q$ vector norm.
This yields for the MOP:
$$
\nu_q(\Phi) := \max_\psi ||\Phi(\ket{\psi}\bra{\psi})||_q.
$$
The entropy is related to the Schatten norms via the limit
$$
-x\log x = \lim_{q\to 1} \frac{1-x^q}{q-1}.
$$
In \cite{ahw}, it was proven that the minimal output entropy is additive if the maximal output $q$-norm is multiplicative for
all values of $q$ ``close to 1''; more precisely, if for a pair of given channels $\Phi$ and $\Omega$ there exists a number $q_0>1$ such that
for all $1\le q< q_0$,
$$
\nu_q(\Phi\otimes\Omega) = \nu_q(\Phi)\,\,\nu_q(\Omega)
$$
holds.
Most of the recent efforts on additivity has been directed towards this multiplicativity problem, because of its simple formulation
(and because of the wealth of available techniques for dealing with Schatten norms).
Indeed, when comparing the multiplicativity problem to the other additivity problems, this one almost looks too simple.
However, closer investigation of the equivalence theorems reveal that the complexity is hidden in the dimension
of the channel. More precisely, Theorem 2 from \cite{ka03} states that ``if there exists a real number $q_0>1$ such that
$\nu_q(\Lambda)$ is multiplicative for all $1\le q\le q_0$
and for any CP map $\Lambda$,
then the entanglement of formation is strongly superadditive'', while according to the main result in \cite{msw}, strong superadditivity
of the entanglement of formation implies additivity of the classical capacity of quantum channels.
These two theorems do not mention dimensionality of the states/CP maps/channels involved, because they are stated, in a global manner, in terms of
the set of \textit{all} states/CP maps/channels.
However, on closer inspection of the proofs one finds that something more specific has actually been proven, in terms of the sets of all
CP maps/channels with specified input and output dimension (note, however, that Shor's equivalence theorems do not offer this possibility):
multiplicativity of $\nu_q$, with $q\downarrow 1$, for all CP maps of dimension
\begin{eqnarray*}
\Lambda_1:&& {\cal H}_{1,in}\mapsto{\cal H}_{1,out} \\
\Lambda_2:&& {\cal H}_{2,in}\mapsto{\cal H}_{2,out},
\end{eqnarray*}
implies
additivity of classical capacity for all channels of dimension
\begin{eqnarray*}
\Phi_I: && {\cal H}_{I,in}\mapsto{\cal H}_{I,out} \\
\Phi_{II}:&& {\cal H}_{II,in}\mapsto{\cal H}_{II,out},
\end{eqnarray*}
with
\begin{eqnarray*}
{\cal H}_{1,in} &=& {{\cal H}}_{I,out}^{\otimes 2} \otimes {\cal H}_{I,in} \\
{\cal H}_{1,out} &=& {\cal H}_{I,out} \\
{\cal H}_{2,in} &=& {{\cal H}}_{II,out}^{\otimes 2} \otimes {\cal H}_{II,in} \\
{\cal H}_{2,out} &=& {\cal H}_{II,out}.
\end{eqnarray*}
As an important example, to prove additivity of the classical capacity of a pair of channels,
where one channel is a qubit channel ($2\mapsto 2$),
one needs to prove multiplicativity of MOP for all pairs of CP maps,
where the first one is of dimension $8\mapsto 2$.
Hence, indeed, as regards the multiplicativity of MOP, the complexity of the classical capacity has been hidden in
the increased input dimension of the channels that have to be considered.
Originally, the hope was that $q_0$ in the statement of the multiplicativity question
could be taken to be infinity. Soon after appearance of \cite{ahw}, however, a counterexample
was found for $q>4.78$, involving two identical channels of dimension $3\mapsto 3$ \cite{hw}.
Very recently, the existence of channels was discovered (in a non-constructive way) that violate multiplicativity
for $q$ arbitrarily close to 1 \cite{hayden,winter}. Note, however, that this does not (yet)
disprove additivity of minimal output entropy because the dimension of the channels involved increases
with the minimal value of $q$ for which they violate multiplicativity.
For fixed dimension, there is always room for multiplicativity for $q$ closer to 1, and by the dimension argument mentioned above
this is all one needs. Thus, the claim mentioned in the title of \cite{hayden} that
``the maximal $p$-norm multiplicativity conjecture is false'' is not entirely correct.
In any case, these counterexamples show that even if multiplicativity holds, proving it in some neighbourhood of 1 will be very hard;
most, if not all, known results on Schatten $q$-norms hold over intervals for $q$ like $[1,\infty)$ or $[1,2]$ and not on such intervals
as $[1,q_0]$ with $q_0$ dimension dependent.
On the other hand, the more interesting channels are the lower-dimensional ones, esp.\ the qubit channels, and by the above-mentioned
dimension argument, one can restrict attention to multiplicativity for channels with equally low output dimension.
For qubit channels, no counterexamples have yet been found. In fact,
multiplicativity of MOP when one of the channels is a $2\mapsto 2$ channel has been proven for $q=2$ and $q\ge 4$ \cite{king_p4},
and for all $q\ge1$ when one of the channels is a unital $2\mapsto 2$ channel \cite{king_unital}. Other positive results include multiplicativity
for all $q\ge1$ and for
all dimensions \cite{king} when one of the channels is entanglement breaking (EB) \cite{holevo99},
i.e.\ is of the form $\Phi(\rho) = \sum_k A_k \mathop{\rm Tr}\nolimits[B_k\rho]$, for $A_k,B_k\ge0$
(that is, the Choi matrix of $\Phi$ corresponds to a separable state).
In this paper I study the multiplicativity problem for the important case when one of the channels has input dimension 2,
and reduce the problem to a number of simpler forms, some of which do not hold in general but can be proven
in specific instances. While the results I obtain here do not boil down to new multiplicativity results, I
do explore new mathematical methods, and the hope is that this will provide new inspiration to tackle the additivity problem.
\section{Notations\label{sec:not}}
In this paper, I call a qubit map any linear map from ${\mathbb{C}}^2$ to ${\mathbb{C}}^d$, $d\ge 2$.
This is more general than the customary definition, by which $d=2$.
The reason for this deviation is that the Theorems and Conjectures extend naturally
to these generalised qubit maps.
I will employ overloaded notation where the symbol $\Phi$ either refers to the map or to the Choi matrix of that map.
For example, in expressions like $\Phi(\rho)$, $\Phi$ refers to the map; when used ``stand-alone'',
as in $||\Phi||_q$, it refers to the Choi matrix.
I denote the blocks of the Choi matrix of $\Phi$ by $\Phi_{ij}:=\Phi(e^{ij})$.
Unitarily invariant (UI) matrix norms are denoted $|||.|||$ and are norms that have
the property $|||UAV|||=|||A|||$ for any unitary $U$ and $V$.
For such norms the equality $|||AA^*|||=|||A^*A|||$ holds. This follows from the inequality $|||AB|||\le|||BA|||$
which holds for all $A$ and $B$ such that $AB$ is normal (\cite{bhatia}, Proposition IX.1.1).
When $B=A^*$, both $AB$ and $BA$ are normal, hence equality must then hold.
\section{A Conjecture for Qubit CP Maps\label{sec:conj}}
Let $\Phi$ be a CP qubit map from ${\mathbb{C}}^2$ to ${\mathbb{C}}^{d_1}$,
and let $\rho$ be a $2\times d_2$ state, block partitioned as
$$
\rho = \twomat{B}{C}{C^*}{D}.
$$
In \cite{kingconj}, C.\ King conjectured, and proved in specific instances, that for $q\ge 1$
\begin{equation}\label{eq:chris0}
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)} \le
\nu_q(\Phi)\,\, (\beta+\delta),
\end{equation}
where $\beta=\schatten{q}{B}$ and $\delta=\schatten{q}{D}$.
He also noted that this Conjecture would imply multiplicativity of MOP when one of the channels
is a qubit channel.
While this Conjecture is already a major simplification of the multiplicativity problem (it involves only one channel),
it is still non-trivial due to the fact that a maximisation occurs in the RHS (in the factor $\nu_q$).
It would clearly be very helpful if the remaining maximisation could be removed in one way or another.
An initially rather promising idea was that
the following inequality would imply (\ref{eq:chris0}) \cite{kingpriv}:
\begin{equation}\label{eq:case3}
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)} \le
\max_\theta \schatten{q}{\Phi(\twomat{\beta}{\exp(i\theta)\sqrt{\beta\delta}}{\exp(-i\theta)\sqrt{\beta\delta}}{\delta})}.
\end{equation}
That is, the maximisation over all pure qubit input states is replaced by a maximisation over a single angle $\theta$.
To see how this implies multiplicativity, note first that the matrix
$$\frac{1}{\beta+\delta}\twomat{\beta}{\exp(i\theta)\sqrt{\beta\delta}}{\exp(-i\theta)\sqrt{\beta\delta}}{\delta}$$
represents a state (in fact, a pure one), so that the RHS of (\ref{eq:case3}) is bounded above by $(\beta+\delta)\nu_q(\Phi)$,
thereby implying (\ref{eq:chris0}).
Now put $\rho=(\mathrm{\openone}\otimes\Omega)(\tau)$, with $\tau$ a $2\times d$ state,
then the LHS of (\ref{eq:case3}) is $\schatten{q}{(\Phi\otimes\Omega)(\tau)}$.
The block structure of $\rho$ is then given by
$B=\Omega(\tau^{11})$, $D=\Omega(\tau^{22})$,
yielding the inequality $\beta+\delta\le \nu_q(\Omega)(\mathop{\rm Tr}\nolimits(\tau^{11})+\mathop{\rm Tr}\nolimits(\tau^{22}))$,
so that the RHS of (\ref{eq:chris0}) is indeed bounded above by $\nu_q(\Omega)\nu_q(\Phi)$, implying multiplicativity of the MOP.
Note that, when the off-diagonal block $\Phi_{12}$ (and thus $\Phi_{21}$) is Hermitian,
the optimal value of $\exp(i\theta)$ in the RHS of (\ref{eq:case3}) is $\pm1$. Indeed,
\begin{eqnarray*}
\Phi(\twomat{\beta}{\exp(i\theta)\sqrt{\beta\delta}}{\exp(-i\theta)\sqrt{\beta\delta}}{\delta})
&=& \beta \Phi_{11}+\delta \Phi_{22} + 2\cos\theta\,\sqrt{\beta\delta}\,\Phi_{12}.
\end{eqnarray*}
This is linear in $\cos\theta$, hence the RHS of (\ref{eq:case3}) is the maximisation of a convex function
(the $q$-norm of the matrix) in $\cos\theta$. As $\cos\theta$ runs over a convex set (the interval $[-1,1]$), the maximum is obtained
in an extreme point, hence $\pm1$.
Unfortunately, numerical experiments revealed that (\ref{eq:case3}) does not hold in general;
I will present such a counterexample in the next Section.
Nevertheless, it is the purpose of this paper to study the statement and introduce a number of techniques to prove it in
a variety of special cases.
I start, in the next Section, with the idea of `taking square roots' of CP maps and states.
\section{Taking `Square Roots'\label{sec:sqrt}}
Positivity of $\rho$ and complete positivity of the map $\Phi$ allow us to `take their square roots' and obtain a `square-rooted' version
of inequality (\ref{eq:case3}), in the following sense.
Since $\rho$ is PSD, it can be written as $\rho = X^* X$, where $X$ is a $1\times 2$ block matrix of size
$R\times d_{in}$ ($R$ being the rank of $\rho$).
Denoting $X=(X_1|X_2)$, we have $B=X_1^*X_1$, $D=X_2^*X_2$,
and $C=X_1^*X_2$.
Similarly, $\Phi$ is CP, thus its Choi-matrix can be written as
$\Phi=G^* G$, where $G$ is a $1\times 2$ block matrix with blocks of size $K\times d_{out}$
($K$ is the number of Kraus elements, $d_{out}$ is the dimension of the output Hilbert
space): $G=(G_1|G_2)$.
The LHS of (\ref{eq:chris0}) and (\ref{eq:case3}) is equal to the square of the $2q$-norm of the `square root'
of $(\Phi\otimes\mathrm{\openone})(\rho)$:
$$
(\Phi\otimes\mathrm{\openone})(\rho) = (G_1\otimes X_1+G_2\otimes X_2)^* (G_1\otimes X_1+G_2\otimes X_2),
$$
so
$$
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)} =
\schatten{2q}{G_1\otimes X_1+G_2\otimes X_2}^2.
$$
Likewise, the `square-root' of the RHS of (\ref{eq:chris0}) is
$$
\max_\psi ||\sum_i \psi_i G_i||_{2q}^2\,\,(||X_1||_{2q}^2+||X_2||_{2q}^2)
$$
so that (\ref{eq:chris0}) is equivalent to
\begin{equation}\label{eq:chris0sqrt}
\schatten{2q}{G_1\otimes X_1+G_2\otimes X_2} \le
\max_\psi \frac{||\sum_i \psi_i G_i||_{2q}}{\sqrt{|\psi_1|^2+|\psi_2|^2}}\,\,\sqrt{||X_1||_{2q}^2+||X_2||_{2q}^2}.
\end{equation}
This says that
$$
\frac{||G_1\otimes X_1+G_2\otimes X_2||_{2q}}{\sqrt{||X_1||_{2q}^2+||X_2||_{2q}^2}}
$$
attains its maximum over all $X_i$ when $X_2=\alpha X_1$, for certain (complex) values of the scalar $\alpha$.
The square-root of the RHS of (\ref{eq:case3}) is
$$
\Phi(\twomat{\beta}{\exp(i\theta)\sqrt{\beta\delta}}{\exp(-i\theta)\sqrt{\beta\delta}}{\delta})
= \left(G_1 \sqrt{\beta} +G_2 \sqrt{\delta} \exp(i\theta)\right)^*
\left(G_1 \sqrt{\beta} +G_2 \sqrt{\delta} \exp(i\theta)\right),
$$
which can be written as
$$
\schatten{2q}{G_1 ||X_1||_{2q}+G_2 ||X_2||_{2q} e^{i\theta}}^2,
$$
where I used $\beta = ||X_1^* X_1||_q = ||X_1||_{2q}^2$.
In this way, (\ref{eq:case3}) is equivalent with
\begin{equation}
\schatten{2q}{G_1\otimes X_1+G_2\otimes X_2}
\le \max_\theta \schatten{2q}{G_1 ||X_1||_{2q} + e^{i\theta}G_2 ||X_2||_{2q}}. \label{eq:case3c}
\end{equation}
I now present the promised counterexample to inequality (\ref{eq:case3}), in its square-rooted form (\ref{eq:case3c}).
Consider the diagonal matrices
$G_1=X_1=\mathop{\rm Diag}\nolimits(1,b), G_2=X_2=\mathop{\rm Diag}\nolimits(b,-1)$, with $0\le b\le 1$;
then the inequality is violated when $2<2q< p_0$, where $p_0$ is a root of
the equation $((1+b)^p+(1-b)^p)(1+b^p) = 2(1+b^2)^p$ in $p$.
Fortunately, this counterexample does not violate multiplicativity since it corresponds to block-diagonal $\rho$ and $\Phi$;
thus $\Phi$ is EB and $\rho$ is separable, whence multiplicativity holds.
\section{Rank One Case}
In this Section, I describe a technique called the method of conjugation, and use it to obtain results
for the cases where either the CP map or the state has rank 1.
The method of conjugation amounts to transforming existing relations into new ones by replacing
the `components' of the expressions by their Hermitian conjugates, and exploiting in one way or another
the fact that for any UI norm $|||AA^*||| = |||A^*A|||$.
What exactly is meant by `components' here very much depends on the situation, and I will describe here
a number of applications to illustrate the method.
This method is not new; it appears, for example, in \cite{bhatia94}.
Suppose we have a $d\times2$ bipartite state $\rho$ in block-matrix form, and we decompose
it as
$$
\rho = \twovec{X^*}{Y^*} \twovect{X}{Y},
$$
then a possible way of conjugating $\rho$ is to conjugate its components $X$ and $Y$.
This gives rise to a new matrix, of different dimensions, which I denote by $\tilde{\rho}$, and which is given by
$$
\tilde{\rho} = \twovec{X}{Y} \twovect{X^*}{Y^*}.
$$
I want to stress here that the tilde is just a label and not a functional operation, quite simply
because that operation is not uniquely defined; infinitely many $X$ and $Y$ exist for one and the same $\rho$, each giving
rise to different $\tilde{\rho}$.
Exactly the same can be done for a $2\mapsto d$ CP map $\Phi$. Let us decompose its Choi matrix as
$$
\Phi = \twovec{G^*}{H^*} \twovect{G}{H},
$$
then conjugation yields the new map
$$
\tilde{\Phi} = \twovec{G}{H} \twovect{G^*}{H^*}.
$$
If $\Phi$ is a map from ${\mathbb{C}}^2$ to ${\mathbb{C}}^d$ of rank $R$ (that is, it can be represented by a minimal number of $R$ Kraus
elements) then one can find blocks $G$ and $H$ of size $R\times d$, so that
$\tilde{\Phi}$ is a map from ${\mathbb{C}}^2$ to ${\mathbb{C}}^R$ of rank at most $d$.
The relation linking conjugated state and map to their originals is:
for any UI norm
\begin{equation}\label{eq:conjug}
|||(\tilde{\Phi}\otimes\mathrm{\openone})(\tilde{\rho})||| = |||(\Phi\otimes\mathrm{\openone})(\rho)|||.
\end{equation}
This is proven by writing the expressions out in terms of the blocks and exploiting $|||AA^*|||=|||A^*A|||$.
Indeed, $(\Phi\otimes\mathrm{\openone})(\rho) = (G\otimes X+H\otimes Y)^*(G\otimes X+H\otimes Y)$, and
$(\tilde{\Phi}\otimes\mathrm{\openone})(\tilde{\rho}) = (G\otimes X+H\otimes Y)(G\otimes X+H\otimes Y)^*$.
A simple consequence of (\ref{eq:conjug}) is that $\nu_q(\tilde{\Phi}) = \nu_q(\Phi)$. One just applies (\ref{eq:conjug})
for qubit states $\rho$ (the `blocks' of $\rho$ are scalars) and notes that $\tilde{\rho}$ is the complex conjugate
of $\rho$, whence the maximisation over all $\rho$ coincides with the maximisation over all $\tilde{\rho}$.
The concept of a \textit{complementary channel} introduced in \cite{devetak03,holevo05}
is essentially a specific instance of such a conjugated map.
Let a channel $\Phi$ on a space ${\cal H}$ be represented in Stinespring form by
$$
\Phi(\rho) = \mathop{\rm Tr}\nolimits_{aux}(U(\rho\otimes\omega)U^*),
$$
where $\omega$ is a fixed ancilla state on the space ${\cal H}_{aux}$, and $U$ is a unitary on ${\cal H}\otimes{\cal H}_{aux}$.
The, or rather `a' complementary channel is then defined as a channel with Stinespring form
$$
\Phi'(\rho) = \mathop{\rm Tr}\nolimits_{{\cal H}}(U(\rho\otimes\omega)U^*).
$$
Again, for a given $\Phi$, $\Phi'$ is not unique as it depends on the choice of $\omega$ and $U$ \cite{holevo05}.
\begin{proposition}
The complementary channel $\Phi'$ defined above is a conjugated map of the complex conjugation of $\Phi$.
\end{proposition}
\textit{Proof.}
Let the ancilla state $\omega$ be the pure state $\ket{0}\bra{0}$.
If $\Phi$ has Kraus representation $\Phi(\rho)=\sum_k A_k \rho A_k^*$ (where the $A_k$ are defined by
$\langle j|A_k|m\rangle = \langle j,k|U|m,0\rangle$), then the complementary channel $\Phi'$ satisfies
the relation
$\langle k|\Phi'(\rho)|j\rangle = \mathop{\rm Tr}\nolimits[A_k \rho A_j^*]$.
The Choi matrix of $\Phi$ can thus be decomposed as $\Phi = \twovec{G_1^*}{\vdots} \twovect{G_1}{\cdots}$,
with $G_m^*\ket{k} = A_k\ket{m}$.
Likewise, the Choi matrix of $\Phi'$ can be decomposed as $\Phi' = \twovec{{G'}_1^*}{\vdots} \twovect{{G'}_1}{\cdots}$.
By taking $\rho=\ket{m}\bra{l}$, we find
$$
\langle k|\Phi'(\ket{m}\bra{l})|j\rangle = \mathop{\rm Tr}\nolimits[A_k \ket{m}\bra{l} A_j^*] = \langle l|A_j^* A_k|m\rangle,
$$
while on the other hand
$$
\langle k|\Phi'(\ket{m}\bra{l})|j\rangle = \langle k|{G'}_m^*\,\,G'_l|j\rangle = \overline{\bra{j}{G'}_l^*\,\,{G'}_m\ket{k}}.
$$
We can, therefore, make the identification
$\overline{{G'}_m}\ket{k} = A_k\ket{m} = G_m^*\ket{k}$, so that, indeed, $G'_m = \overline{G}_m^* = G_m^T$.
$\square$\par\vskip24pt
Using this method of conjugation, we can prove three special cases of inequality (\ref{eq:case3}).
The first special case is when $\Phi$ is the identity map.
In that case the RHS of (\ref{eq:case3}) reduces to $\beta+\delta$, and we get:
\begin{theorem}
For $\rho\ge0$ partitioned as below, and $q\ge 1$,
\begin{equation}
\schatten{q}{\rho}=\schatten{q}{\twomat{B}{C}{C^*}{D}} \le ||B||_q + ||D||_q.
\end{equation}
\end{theorem}
This is well-known, and rather easy to prove. In fact, it holds not only for the Schatten norms, but for any UI norm,
and not only for $2\times2$ partitionings, but for any symmetric partitioning.
\textit{Proof.}
The general structure of the proof is: conjugate, apply the triangle inequality, then conjugate again.
By positivity of $\rho$, we can write
$$
\rho = \twovec{X^*}{Y^*} \twovect{X}{Y},
$$
where $X$ and $Y$ are general $d\times 2d$ matrices. Then after conjugating the two factors (not the blocks, but the whole matrix),
we can exploit the triangle inequality
to find
\begin{eqnarray*}
\schatten{q}{\rho} &=& \schatten{q}{\twovec{X^*}{Y^*}\twovect{X}{Y}} \\
&=& \schatten{q}{\twovect{X}{Y}\twovec{X^*}{Y^*}} \\
&=& ||XX^*+YY^*||_q \\
&\le& ||XX^*||_q + ||YY^*||_q \\
&=& ||X^*X||_q + ||Y^*Y||_q \\
&=& ||B||_q+||D||_q.
\end{eqnarray*}
$\square$\par\vskip24pt
An elaboration of the previous argument yields the case
of single-element CP maps $\Phi$, that is maps of the form $\Phi(\rho)=A\rho A^*$.
\begin{theorem}
Inequality (\ref{eq:case3}) holds for $q\ge1$, for $\Phi$ a single-element CP map, and for any state $\rho$.
\end{theorem}
\textit{Proof.}
In this case $A$ has 2 columns, say $A_1$ and $A_2$, and the Choi matrix of $\Phi$
is given by the rank-1 matrix
$$
\Phi = \twovec{A_1}{A_2} \twovect{A_1^*}{A_2^*}.
$$
Let again
$$
\rho = \twovec{X^*}{Y^*} \twovect{X}{Y},
$$
then, by the conjugation identity (\ref{eq:conjug}),
\begin{eqnarray*}
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)} \quad = \quad \schatten{q}{(\tilde{\Phi}\otimes\mathrm{\openone})(\tilde{\rho})}
&=& \schatten{q}{A_1^*A_1\, XX^* + A_2^*A_2\, YY^* + A_2^*A_1\, Y^*X + A_1^*A_2\, X^*Y},
\end{eqnarray*}
where I exploited the fact that $A_1$ and $A_2$ are vectors, so that the quantities $A_j^*A_k$ are scalars.
We can do the same thing for the RHS, and get the \textit{scalar} quantity
\begin{equation}\label{eq:ii}
\schatten{q}{\Phi(\twomat{\beta}{\exp(i\theta)\sqrt{\beta\delta}}{\exp(-i\theta)\sqrt{\beta\delta}}{\delta})}
= \left| A_1^*A_1\, \beta + A_2^*A_2\, \delta
+ A_2^*A_1\,\exp(i\theta)\sqrt{\beta\delta} + A_1^*A_2\,\exp(-i\theta)\sqrt{\beta\delta} \right|
\end{equation}
Comparison of LHS and RHS in this form invites the idea of using the triangle inequality again.
\begin{eqnarray*}
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)}
&\le& A_1^*A_1 \schatten{q}{XX^*} + A_2^*A_2 \schatten{q}{YY^*} + |A_2^*A_1| \schatten{q}{Y^*X}
+ |A_1^*A_2| \schatten{q}{X^*Y}.
\end{eqnarray*}
Now we know that $\schatten{q}{XX^*} = \beta$ and $\schatten{q}{YY^*} = \delta$.
Furthermore, by the Cauchy-Schwarz inequality for UI norms,
$\schatten{q}{Y^*X}\le \schatten{q}{X^*X}^{1/2}\schatten{q}{Y^*Y}^{1/2}=\sqrt{\beta\delta}$.
Thus
\begin{eqnarray*}
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)}
&\le& A_1^*A_1 \beta + A_2^*A_2 \delta + |A_2^*A_1| \sqrt{\beta\delta}
+ |A_1^*A_2| \sqrt{\beta\delta}.
\end{eqnarray*}
By taking $\theta$ such that $|A_2^*A_1| = \exp(i\theta) A_2^*A_1$, the last expression coincides
with RHS(\ref{eq:ii}).
$\square$\par\vskip24pt
As a third and final special case, we can in a similar fashion prove (\ref{eq:case3}) for any pure input state $\rho$.
\begin{theorem}
Inequality (\ref{eq:case3}) holds for $q\ge1$, for $\Phi$ a CP map, and for pure states $\rho$.
\end{theorem}
\textit{Proof.}
Let
$$
\Phi = \twovec{X^*}{Y^*} \twovect{X}{Y},
$$
and let $\rho=\ket{\psi}\bra{\psi}$, with
$$
\psi = \twovec{\psi_1}{\psi_2}.
$$
Conjugation of $\rho$, via conjugation of its components $\psi_1$ and $\psi_2$, then gives the qubit state
$$
\tilde{\rho} = \twomat{\inpr{\psi_1}{\psi_1}}{\inpr{\psi_1}{\psi_2}}{\inpr{\psi_2}{\psi_1}}{\inpr{\psi_2}{\psi_2}}.
$$
Thus
$$
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)} =
\schatten{q}{\inpr{\psi_1}{\psi_1}XX^* + \inpr{\psi_1}{\psi_2}XY^* + \inpr{\psi_2}{\psi_1}YX^* + \inpr{\psi_2}{\psi_2}YY^*}.
$$
Since we're dealing with a pure state, $\beta=\schatten{q}{\psi_1 \psi_1^*}=\mathop{\rm Tr}\nolimits(\psi_1 \psi_1^*)=\inpr{\psi_1}{\psi_1}$,
and similarly, $\delta=\inpr{\psi_2}{\psi_2}$.
Also, for some $\theta$, $\inpr{\psi_1}{\psi_2} = e^{i\theta} |\inpr{\psi_1}{\psi_2}| = s e^{i\theta} \sqrt{\beta\delta}$,
with $0\le s\le1$ (by the Cauchy-Schwarz inequality).
Then
$$
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)} =
\schatten{q}{\beta X{X}^* + s e^{i\theta}\sqrt{\beta\delta}X{Y}^* + s e^{-i\theta}\sqrt{\beta\delta}Y{X}^* + \delta Y{Y}^*}.
$$
Now notice $s e^{i\theta} = p e^{i\theta}+(1-p)(-e^{i\theta})$ for $p=(1+s)/2$.
Thus
\begin{eqnarray*}
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)} &\le&
p\schatten{q}{\beta X{X}^* + e^{i\theta}\sqrt{\beta\delta}X{Y}^* + e^{-i\theta}\sqrt{\beta\delta}Y{X}^* + \delta Y{Y}^*} \\
&& +(1-p)\schatten{q}{\beta X{X}^* - e^{i\theta}\sqrt{\beta\delta}X{Y}^* - e^{-i\theta}\sqrt{\beta\delta}Y{X}^* + \delta Y{Y}^*} \\
&\le& \max_\theta \schatten{q}{\beta X{X}^* + e^{i\theta}\sqrt{\beta\delta}X{Y}^* + e^{-i\theta}\sqrt{\beta\delta}Y{X}^* + \delta Y{Y}^*} \\
&=& \max_\theta \schatten{q}{\tilde{\Phi}(\twomat{\beta}{e^{i\theta}\sqrt{\beta\delta}}{e^{-i\theta}\sqrt{\beta\delta}}{\delta})} \\
&=& \max_\theta \schatten{q}{\Phi(\twomat{\beta}{e^{-i\theta}\sqrt{\beta\delta}}{e^{i\theta}\sqrt{\beta\delta}}{\delta})}.
\end{eqnarray*}
$\square$\par\vskip24pt
\section{Positive Off-Diagonal Blocks}
In the case where the off-diagonal block $\Phi_{12}$ is PSD,
a very general Theorem can be proven for linear maps with general
input and output dimensions.
First we need an Araki-Lieb-Thirring (A-L-T) type inequality for general operators, proven in \cite{kaijiss}:
\begin{proposition}\label{prop:LTG}
For general operators $F$ and $H$, and for $q\ge 1$,
\begin{equation}
\mathop{\rm Tr}\nolimits|FHF^*|^q \le \mathop{\rm Tr}\nolimits\left((F^*F)^q \frac{|H|^q+|H^*|^q}{2}\right).
\end{equation}
\end{proposition}
The following Proposition has appeared before as Lemma 2 in \cite{king}, in somewhat different form, for the case
where all matrices involved are PSD. In that form, the proof relied on the A-L-T inequality. Having now the
stronger inequality from Proposition \ref{prop:LTG} at our disposal, we can lift the original Proposition
to the following more general setting:
\begin{proposition}\label{prop:EB1}
For $A_k\ge 0$ and general $B_k$, and any $q\ge 1$,
\begin{equation}\label{eq:Apos0}
\schatten{q}{\sum_k A_k\otimes B_k} \le \schatten{q}{\sum_k A_k} \, \max_j \schatten{q}{B_j}.
\end{equation}
\end{proposition}
\textit{Proof.}
Proceeding as in the proof of Lemma 2 in \cite{king},
I introduce the following notations (which are possible because the $A_k$ are PSD):
\begin{eqnarray*}
F &=& (\sqrt{A_1} \otimes \mathrm{\openone} \ldots \sqrt{A_K} \otimes \mathrm{\openone}) \\
G &=& (\sqrt{A_1} \ldots \sqrt{A_K}) \\
H &=& \bigoplus_k \mathrm{\openone}\otimes B_k.
\end{eqnarray*}
I denote by $X_{kk}$ the $k$-th diagonal block of a matrix in the
same partitioning as $H$.
For example, $H_{kk}=\mathrm{\openone}\otimes B_k$.
Using these notations, $\sum_k A_k\otimes B_k$ can be written as $FHF^*$.
By Proposition \ref{prop:LTG},
\begin{eqnarray*}
\schatten{q}{\sum_k A_k\otimes B_k}^q &=& \mathop{\rm Tr}\nolimits[|FHF^*|^q] \\
&\le& \mathop{\rm Tr}\nolimits[(F^* F)^q \, (|H|^q+|H^*|^q)]/2 \\
&=& \sum_k \mathop{\rm Tr}\nolimits[[(F^* F)^q]_{kk} \, (\mathrm{\openone} \otimes (|B_k|^q+|B_k^*|^q))]/2 \\
&=& \sum_k \mathop{\rm Tr}\nolimits[[(G^* G)^q]_{kk}] \, \mathop{\rm Tr}\nolimits[|B_k|^q] \\
&\le& \max_j \mathop{\rm Tr}\nolimits[|B_j|^q] \, \sum_k \mathop{\rm Tr}\nolimits[[(G^* G)^q]_{kk}] \\
&=& \max_j \mathop{\rm Tr}\nolimits[|B_j|^q] \, \mathop{\rm Tr}\nolimits[(G^* G)^q].
\end{eqnarray*}
Then noting
$$
\mathop{\rm Tr}\nolimits[(G^* G)^q] = \mathop{\rm Tr}\nolimits[(G G^*)^q] = \mathop{\rm Tr}\nolimits[(\sum_k A_k)^q],
$$
and taking $q$-th roots yields the Proposition.
$\square$\par\vskip24pt
\begin{corollary}\label{cor:EB1}
For $A_k\ge 0$ and general $B_k$, and any $q\ge 1$,
\begin{equation}\label{eq:Apos}
\schatten{q}{\sum_k A_k\otimes B_k} \le \schatten{q}{\sum_k A_k \schatten{q}{B_k}}.
\end{equation}
\end{corollary}
\textit{Proof.}
Define $A'_k = ||B_k||_q A_k$ and $B'_k = B_k/||B_k||_q$.
Then $||B'_k||_q=1$ and, by (\ref{eq:Apos0}),
\begin{eqnarray*}
\schatten{q}{\sum_k A_k\otimes B_k} &=& \schatten{q}{\sum_k A'_k\otimes B'_k}
\le \max_j \schatten{q}{B'_j} \, \schatten{q}{\sum_k A'_k}
= \schatten{q}{\sum_k ||B_k||_q A_k}.
\end{eqnarray*}
$\square$\par\vskip24pt
In fact, it is easy to see that the Corollary is equivalent to Proposition \ref{prop:EB1}.
Just note that, by positivity of the $A_k$, $\sum_k ||B_k||_q A_k \le \max_j ||B_j||_q \sum_k A_k$.
The same inequality then holds for the $q$-norm.
Using the above machinery, we can now prove:
\begin{theorem}\label{th:case2}
For linear maps $\Phi$ where \textit{all} the blocks $\Phi_{ij}:=\Phi(e^{ij})$ are positive,
and for general block-partitioned operators $X=[X_{ij}]$:
\begin{equation}\label{eq:case2}
\schatten{q}{(\Phi\otimes\mathrm{\openone})(X)} \le
\schatten{q}{\Phi([\schatten{q}{X_{ij}}])}.
\end{equation}
\end{theorem}
\textit{Proof.}
By assumption, all blocks $\Phi_{ij}$ are positive.
Corollary \ref{cor:EB1} therefore yields
$$
\schatten{q}{(\Phi\otimes\mathrm{\openone})(X)}
= \schatten{q}{\sum_{i,j} \Phi_{ij}\otimes X_{ij}}
\le \schatten{q}{\phantom{\big|}\sum_{i,j} \schatten{q}{X_{ij}}\,\Phi_{ij}}
= \schatten{q}{\Phi([\schatten{q}{X_{ij}}])}.
$$
$\square$\par\vskip24pt
Proposition \ref{prop:EB1} can also be applied in the `square-root' case.
\begin{corollary}
Let $\Phi$ be a CP map of the form
$$
\Phi=\twovec{G_1^*}{G_2^*}\twovect{G_1}{G_2},
$$
where $G_1$ and $G_2$ are PSD up to scalar phase factors $e^{i\theta_i}$.
Then (\ref{eq:case3}) holds for any state $\rho$ and for $1/2\le q$.
\end{corollary}
Note that this case includes values of $q$ where the ``Schatten $q$-norm'' is not a norm at all.
\textit{Proof.}
Let $G_i=e^{i\theta_i} H_i$, with $H_i$ PSD.
Straightforward application of Corollary \ref{cor:EB1} to LHS(\ref{eq:case3c}) yields, for $2q\ge 1$,
\begin{eqnarray*}
\schatten{2q}{G_1\otimes X_1+G_2\otimes X_2} &\le&
\schatten{2q}{H_1\otimes e^{i\theta_1} X_1+H_2\otimes e^{i\theta_2}X_2} \\
&=& \schatten{2q}{\schatten{2q}{e^{i\theta_1} X_1} H_1+\schatten{2q}{e^{i\theta_2} X_2} H_2} \\
&=& \schatten{2q}{\schatten{2q}{X_1}G_1+e^{i(\theta_1-\theta_2)}\schatten{2q}{X_2}G_2} \\
&\le& \max_\theta\schatten{2q}{\schatten{2q}{X_1} G_1+e^{i\theta}\schatten{2q}{X_2}G_2},
\end{eqnarray*}
which yields (\ref{eq:case3c}), and hence (\ref{eq:case3}), in this case.
$\square$\par\vskip24pt
Proposition \mathop{\rm Re}\nolimitsf{prop:EB1} has some further consequences.
\begin{equation}gin{corollary}\label{cor:sep}
For $\Phi$ a CP map, and $\rho$ a separable state,
\begin{equation}
||(\Phi\otimes\mathrm{\openone})(\rho)||_q \le \nu_q(\Phi)\,\, ||\mathop{\rm Tr}\nolimits_1\rho||_q.
\end{equation}
\end{corollary}
\textit{Proof.}
Since $\rho$ is separable, it can be written in the form
$\rho=\sum_k \sigma_k\otimes B_k$, where all $\sigma_k$ are normalised states,
and all $B_k$ are positive (not necessarily normalised).
As a consequence, $\sum_k B_k = \mathop{\rm Tr}\nolimits_1 \rho$.
By Proposition \ref{prop:EB1}, we get
\begin{eqnarray*}
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)}
&=& \schatten{q}{\sum_k \Phi(\sigma_k)\otimes B_k} \\
&\le& \max_k \schatten{q}{\Phi(\sigma_k)} \,\, \schatten{q}{\sum_k B_k} \\
&\le& \nu_q(\Phi)\,\, \schatten{q}{\sum_k B_k} \\
&=& \nu_q(\Phi)\,\, \schatten{q}{\mathop{\rm Tr}\nolimits_1\rho}.
\end{eqnarray*}
$\square$\par\vskip24pt
\begin{theorem}[King]
The MOP is multiplicative for any $q$ when at least one of the CP maps involved is EB.
\end{theorem}
\textit{Proof.}
Let $\Omega$ be an EB CP map, and $\Phi$ any other CP map.
Let $\rho=(\mathrm{\openone}\otimes\Omega)(\tau)$, with $\tau$ a state.
Because $\Omega$ is EB, $\rho$ is (proportional to) a separable state.
By Corollary \ref{cor:sep}, and the fact that $\mathop{\rm Tr}\nolimits_1\tau$ is a state, we get
\begin{eqnarray*}
\schatten{q}{(\Phi\otimes\Omega)(\tau)}
&=& \schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)} \\
&\le& \nu_q(\Phi)\,\, \schatten{q}{\mathop{\rm Tr}\nolimits_1\rho} \\
&=& \nu_q(\Phi)\,\, \schatten{q}{\Omega(\mathop{\rm Tr}\nolimits_1\tau)} \\
&\le& \nu_q(\Phi)\,\nu_q(\Omega).
\end{eqnarray*}
$\square$\par\vskip24pt
\section{Block-Hankel and Block-Toeplitz Matrices}
Gurvits has proven in \cite{gurvits} that a state whose density matrix is block-Hankel is separable.
For a published reference, see Ando \cite{ando04}, who uses the term `super-positivity' for separability.
For $2\times d$ states, that also follows from the semidefinite programming test in \cite{woerdeman} (using the $n=0$ case).
Gurvits has also proven that states with block-Toeplitz density matrices are separable.
This follows from a representation by Ando (\cite{ando04}, just after Theorem 4.9) which says that
such matrices can be decomposed in terms of a PSD matrix-valued measure $dP(\cdot)$ on the
interval $[0,2\pi)$. For the $2\times d$ case this reads
$$
\rho=\twomat{B}{C}{C^*}{B} = \int_0^{2\pi} \twomat{1}{e^{i\theta}}{e^{-i\theta}}{1} \otimes dP(\theta).
$$
One clearly sees that every factor of the tensor product is positive; $\rho$ is therefore a separable state.
Actually, from the proofs of Lemma 4.8 and Theorem 4.9 in \cite{ando04} one can see that an integral is not needed, and,
instead, we can use a finite sum
$$
\twomat{B}{C}{C^*}{B} = \sum_{k=1}^d \twomat{1}{e^{i\theta_k}}{e^{-i\theta_k}}{1} \otimes P_k.
$$
Using these representations, we can prove one more special instance of (\ref{eq:case3}).
\begin{theorem}\label{th:gen1}
For $\rho=\twomat{B}{C}{C^*}{B}\ge0$, and for $\Phi$ any linear map, (\ref{eq:case3}) holds for all $q\ge1$.
\end{theorem}
\textit{Proof.}
According to Ando's representation mentioned in the previous Section,
$\rho$ can be written in the form
$$
\twomat{B}{C}{C^*}{B} = \sum_{k=1}^d \twomat{1}{e^{i\theta_k}}{e^{-i\theta_k}}{1} \otimes P_k,
$$
with $P_k\ge0$.
Applying the map $\Phi\otimes\mathrm{\openone}$ gives
$$
(\Phi\otimes\mathrm{\openone})(\rho) = \sum_k \Phi(\twomat{1}{e^{i\theta_k}}{e^{-i\theta_k}}{1}) \otimes P_k.
$$
Because $\Phi$ need not be CP, the first tensor factor is no longer positive. However, the $P_k$ still are,
allowing us to employ Proposition \ref{prop:EB1},
which gives us
$$
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)} \le \max_{\theta} \schatten{q}{\Phi(\twomat{1}{e^{i\theta}}{e^{-i\theta}}{1})} \,\,
\schatten{q}{\sum_k P_k}.
$$
Noticing that the second factor is just $||B||_q=\beta=\delta$ yields (\ref{eq:case3}).
$\square$\par\vskip24pt
When $B$ is different from $D$, restrictions have in general to be imposed on $\Phi$; the previous Theorem being
an exception.
For $C=C^*$, the RHS of (\ref{eq:case3}) only depends on $X:=\Phi_{11}+\Phi_{22}$ and $Y:=\Phi_{12}+\Phi_{21}$,
so that we are led to maximise the LHS over all maps $\Phi$, keeping $X$ and $Y$ fixed.
Now the LHS is
$$
\schatten{q}{(\Phi\otimes\mathrm{\openone})(\rho)} = \schatten{q}{\Phi_{11} \otimes B + \Phi_{22}\otimes D + Y\otimes C}
= \schatten{q}{X \otimes \frac{B+D}{2} + \Delta\otimes \frac{B-D}{2} + Y\otimes C},
$$
where $\Delta:=\Phi_{11} - \Phi_{22}$.
So if $\Phi$ is unconstrained, and $B-D\neq0$, then the LHS could be made arbitrarily large
by letting $\Delta$ become arbitrarily large.
If, however, $\Phi$ is CP, say, then that can no longer happen.
Indeed, by positivity of $\Phi_{11}$ and $\Phi_{22}$,
$X\pm\Delta\ge0$, whence $-X\le \Delta \le X$.
\acknowledgments
This work was supported by The Leverhulme Trust (grant F/07 058/U),
and is part of the QIP-IRC (www.qipirc.org) supported by EPSRC (GR/S82176/0).
I thank Chris King for many stimulating conversations and his suggestion to study (\ref{eq:case3}).
Dedicated with love to young Ewout Audenaert, whose constant calls for attention kept me awake during the course of this work.
\begin{thebibliography}{99}
\bibitem{ahw} G.G. Amosov, A.S. Holevo and R.F. Werner, Problems in Information Transmission {\bf 36}, 25--34;
see also arXiv preprint math-ph/0003002 (2000).
\bibitem{ando04} T.~Ando, ``Cones and norms in the tensor product of matrix spaces'',
Lin.\ Alg.\ Appl.\ \textbf{379}, 3--41 (2004).
\bibitem{kaijiss} K.M.R.~Audenaert, ``On the Araki-Lieb-Thirring inequality,'' to be published in Int.\ J.\ Inf.\ Syst.\ Sci.; see also
arXiv preprint math.FA/0701129 (2007).
\bibitem{ka03} K.M.R.~Audenaert and S.L.~Braunstein, Commun.\ Math.\ Phys.\ \textbf{246}, 443--452 (2004).
\bibitem{bcl} K.~Ball, E.A.~Carlen and E.H.~Lieb, ``Sharp uniform convexity and smoothness
inequalities for trace norms,'' Invent.\ Math.\ \textbf{115}, 463--482 (1994).
\bibitem{bhatia} R.~Bhatia, \textit{Matrix Analysis}, Springer, Heidelberg (1997).
\bibitem{bhatia94} R.~Bhatia and F.~Kittaneh, ``Norm inequalities for partitioned operators and an application,''
Math.\ Ann.\ \textbf{287}, 719--726 (1990).
\bibitem{devetak03} I.~Devetak and P.~Shor, ``The capacity of a quantum channel for simultaneous
transmission of classical and quantum information'', Comm.\ Math.\ Phys.\ \textbf{256}, 287--303 (2005).
\bibitem{gurvits} L.~Gurvits, (unpublished).
\bibitem{hayden} P.~Hayden, arXiv preprint 0707.3291 (2007).
\bibitem{holevo99} A.~S.~Holevo, ``Quantum coding theorems'', Russian Math.\ Surveys, \textbf{53},
1295--1331 (1999).
\bibitem{holevo05} A.~S.~Holevo, ``On Complementary Channels and the Additivity Problem'', Probab.\ Theory and Appl.\ \textbf{51}, 133--143; see also
arXiv preprint quant-ph/0509101 (2005).
\bibitem{kingconj} C.~King, ``Maximization of capacity and $\ell_p$ norms for some product channels,'' J.\ Math.\ Phys.\ \textbf{43}, 1247--1260 (2002).
\bibitem{king} C.~King, Quantum Information and Computation \textbf{3}, 186--190 (2003).
\bibitem{king_unital} C.~King, J.~Math.~Phys. {\bf 43}, 4641--4653 (2003).
\bibitem{kingpriv} C.~King, private communication (2005).
\bibitem{king_p4} C.~King and N.~Koldan, J.\ Math.\ Phys.\ \textbf{47}, 042106 (2006).
\bibitem{msw} K.~Matsumoto, T.~Shimono and A.~Winter, Commun.\ Math.\ Phys.\ \textbf{246}, 427--442 (2004).
\bibitem{shor} P.W.~Shor, Commun.\ Math.\ Phys.\ \textbf{246}, 453--472 (2004).
\bibitem{hw} R.F.~Werner and A.S.~Holevo, J.~Math.~Phys. 43(9), 4353--4357 (2002).
\bibitem{winter} A.~Winter, arXiv preprint 0707.0402 (2007).
\bibitem{woerdeman} H.~Woerdeman, ``Checking $2\times M$ quantum separability via semidefinite programming'',
Phys.\ Rev.\ A \textbf{67}, 010303 (2003).
\end{thebibliography}
\end{document}
\begin{document}
\baselineskip24pt
\title{Bayesian Inference for Big Spatial Data Using Non-stationary Spectral Simulation}
\begin{abstract}
It is increasingly understood that the assumption of stationarity is unrealistic for many spatial processes. In this article, we combine dimension expansion with a spectral method to model big non-stationary spatial fields in a computationally efficient manner. Specifically, we use \cite{mejia1974synthesis}'s spectral simulation approach to simulate a spatial process with a covariogram at locations that have an expanded dimension. We introduce Bayesian hierarchical modelling to dimension expansion, which originally has only been modeled using a method of moments approach. In particular, we simulate from the posterior distribution using a collapsed Gibbs sampler. Our method is both full rank and non-stationary, and can be applied to big spatial data because it does not involve storing and inverting large covariance matrices. Additionally, we have fewer parameters than many other non-stationary spatial models. We demonstrate the wide applicability of our approach using a simulation study, and an application using ozone data obtained from the National Aeronautics and Space Administration (NASA).
\end{abstract}
\section{Introduction}
There is increasing interest in using spatial statistical methods to model environmental processes. This is partially due to the emergence of remote sensing instruments and the popularity of Geographic Information Systems (GIS) software \citep[e.g. see,][for standard references]{stein2006spatial,kalkhan2011spatial}. The main goal of these analyses is to make predictions at observed and unobserved locations and provide uncertainty quantification. Early works make the assumption that the process is weakly stationary \citep[e.g., see][for a review]{cressie1993statistics}; that is, the covariance between the response at two different locations is a function of the spatial lag. However, non-stationary processes are much more common in environmental systems observed over large heterogeneous spatial domains \citep[see][for a discussion]{bradley2016comparison}. There are many models for non-stationary spatial data, and reduced rank basis function expansions have become a popular choice \citep{banerjee2008gaussian,cressie2008fixed}. However, there are inferential issues with reduced rank methods in the spatial setting \citep{stein2014limitations}, and consequently, there is renewed interest in proposing computationally efficient full-rank models \citep{nychka2015multiresolution,datta2016hierarchical,katzfuss2017multi,bradleyhierarchical,katzfuss2018vecchia}. Thus, in this article our primary goal is to develop an efficient full rank non-stationary spatial statistical model.
There are numerous methods available to model non-stationary spatial data. For example, process convolution \citep{higdon1998process,paciorek2006spatial,neto2014accounting} convolves a known spatially referenced function with a spatial process typically assumed to be Gaussian. There are several related, but different approaches available. For example, using a finite integral representation of a process convolution results in a basis function expansion \citep[][page 157]{cressie2011statistics}. Several parameterizations of basis function expansions are available, including: fixed rank kriging \citep{cressie2008fixed}, lattice kriging \citep{nychka2015multiresolution}, the predictive process \citep{banerjee2008gaussian}, and a stochastic partial differential equation approach \citep{lindgren2011explicit}, among others.
An alternative to modelling nonstationarity with spatial basis functions is to assume a deformation \citep{sampson1992nonparametric}. Here, Euclidean space is ``deformed," or warped, so that far away locations can be more correlated, and vice versa. The parameter space for this method is considerably smaller than many parameterizations using spatial basis function expansions \citep[e.g., see][for examples]{cressie2008fixed,kang-cressie-2011}, and is full rank. A similar but different approach to deformation is referred to as ``dimension expansion" \citep{bornn2012modeling}. This method involves extending the dimension of the locations to a higher dimensional space. This methodology is based on the surprising result that every non-stationary covariance function in $\mathbb{R}^{d}$ can be written as a stationary covariogram defined on locations in $\mathbb{R}^{2d}$ \citep{perrin2003nonstationarity}. Recently, \citet{bornn2012modeling} proposed a method of moments approach to analyzing spatio-temporal data using dimension expansion. To our knowledge the dimension expansion approach has not been implemented using a Bayesian framework.
Thus, our first contribution is to introduce dimension expansion to the Bayesian setting to analyze big spatial data. To achieve a computationally efficient approach to dimension expansion in the Bayesian setting we offer three technical results.
In our first technical result, we provide a ``non-stationary version" of Bochner's Theorem \citep{bochner1959lectures}. That is, we show that a non-stationary covariance function can be written as a convolution of the cosine function with a spectral density. The proof of this result simply involves combining \citet{perrin2003nonstationarity}'s dimension expansion result with Bochner's Theorem. This result opens up new opportunities to use spectral methods to model non-stationary spatial process. Other methods exist \citep[e.g. see,][]{priestley1965evolutionary,martin1982time} to model non-stationary data using spectral densities. However, these methods involve difficult to interpret types of ``quasi-stationarity" assumptions \citep[see,][for a discussion]{sayeed1995optimal}, while our approach can be easily interpreted through dimension expansion. \citet{castruccio2017evolutionary} have also proposed an approach that uses evolutionary spectrum and incorporates an axial symmetric structure into their model.
The second technical result developed in this manuscript follows from our non-stationary version of Bochner's Theorem. Specifically, we extend \citet{mejia1974synthesis}'s method for spectral simulation of a stationary spatial processes to non-stationary spatial processes. This makes it straightforward to simulate in the high-dimensional non-stationary setting because spectral simulation does not require the inverse and storage of a high-dimensional covariance matrix (i.e., is matrix free). In practice, Gaussian spatial datasets correspond to a likelihood that is difficult to compute in high dimensions (i.e., when the dimension of the data $n$ is large) because this requires $O(n^{3})$ computation and $O(n^{2})$ dynamic memory.
Our algorithm is a type of collapsed Gibbs sampler \citep{liu1994collapsed} and it involves two steps. The first step is to augment the likelihood with an $n$-dimensional random vector. Then non-stationary spectral simulation is used within each step of the Gibbs sampler to simulate this random vector from its prior distribution. This strategy is computationally feasible, full-rank, does not require storage of large matrices, and can be implemented on irregularly spaced locations. This last feature is particularly important as spectral methods based on the discrete Fourier transform often require regularly spaced locations \citep{fuentes2002spectral,fuentes2008class}.
The remaining sections of this article are organized as follows. Section 2 introduces our proposed statistical model, and our first two theoretical results. In Section 3, we describe our implementation, which includes inference using a collapsed Gibbs sampler. In Section 4, we present a simulation study, and compare our approach to two different state-of-the art methods in spatial statistics referred to as the Nearest Neighbor Gaussian Process \citep[NNGP;][]{datta2016nearest} and the general Vecchia approximation \citep{katzfuss2017general}. In Section 5, we implement our model using the benchmark ozone dataset analyzed in \citep{cressie2008fixed} and \citep{zhang2018smoothed}. Finally, Section 6 contains a discussion. For ease of exposition all proofs are given in the appendices.
\section{Methodology}
Let $Z(\cdot)$ be a spatial process defined for all $\textbf{s}$$\in$$D$$\subset$$\mathbb{R}^{d}$, where $D$ is the spatial domain of interest in $d$-dimensional Euclidean space, $\mathbb{R}^{d}$. We observe the value of $Z(\cdot)$ at a finite set of locations $\textbf{s}_{1}$, $\ldots$, $\textbf{s}_{n}$$\in$$D$. The data is decomposed additively with
\begin{equation*}
Z(\textbf{s})=Y(\textbf{s})+\epsilon(\textbf{s}),
\end{equation*}
where $\textbf{s}$$\in$$D$, $Y(\cdot)$ is the Gaussian process of principal interest, and the Gaussian process $\epsilon(\cdot)$ represents measurement error. The measurement error $\epsilon(\cdot)$ is assumed to be uncorrelated with mean-zero and variance function $V_{\epsilon}(\cdot)=\sigma_{\epsilon}^2\textbf{I}_{n}$.
The process $Y(\cdot)$ is further decomposed as
\begin{equation*}
Y(\textbf{s})=\textbf{X}^{\prime}(\textbf{s})\pmb{\beta}+\pmb{\nu}(\textbf{s}); \hspace{2pt} \textbf{s}\in D,
\end{equation*}
where $\textbf{X}(s)$ is a known $p$-dimensional vector of covariates, and $\pmb{\beta}\in\mathbb{R}^{p}$ is unknown. For any collection of locations $\textbf{u}_{1}, \ldots, \textbf{u}_{m}$, the random vector $\pmb{\nu}=(\nu(\textbf{u}_{1}), \ldots, \nu(\textbf{u}_{m}))'$ is assumed to have the probability density function (pdf),
\begin{equation}
f(\pmb{\nu}\mid\pmb{\theta})=\int_{\mathbb{R}^{m}}f(\pmb{\nu}\mid\pmb{\theta}, \widetilde{{\pmb{\nu}}},\delta^2)f(\widetilde{\pmb{\nu}}\mid\pmb{\theta})d\widetilde{\pmb{\nu}}, \label{originalcase}
\end{equation}
where $f(\pmb{\nu}\mid\pmb{\theta},\widetilde{\pmb{\nu}},\delta^2)$ is the multivariate normal distribution with mean $\widetilde{\pmb{\nu}}\in\mathbb{R}^{m}$, covariance matrix $\delta^2\textbf{I}_{m}$, and $\textbf{I}_{m}$ is an $m\times m$ identity matrix. The pdf $f(\widetilde{\pmb{\nu}}\mid\pmb{\theta})$ will be specified in Section 2.2, but is approximately normal with mean zero, and covariance matrix $\textit{C}(\pmb{\theta})$, where the $(i,j)$-th element of $\textit{C}(\pmb{\theta})$ is
\begin{equation*}
C(\textbf{s}_{i},\textbf{s}_{j})=\sigma_{\nu}^{2}\exp\left(-\frac{E(\textbf{s}_i,\textbf{s}_j)}{\phi}\right),
\end{equation*}
where
\begin{equation*}
E(\textbf{s}_i,\textbf{s}_j)=\|\bigl(\begin{smallmatrix}
\textbf{s}_{i}\\ \pmb{\psi}^{\prime}(\textbf{s}_{i})\pmb{\eta}
\end{smallmatrix}\bigr)-\bigl(\begin{smallmatrix}
\textbf{s}_{j}\\ \pmb{\psi}^{\prime}(\textbf{s}_{j})\pmb{\eta}
\end{smallmatrix}\bigr)\|,
\end{equation*}
$\pmb{\theta}=(\pmb{\eta}^{\prime},\phi,\sigma_{\nu}^{2})^{\prime}$, and $\|\cdot\|$ is a Euclidean distance. This covariance function uses the aforementioned dimension expansion approach from \cite{bornn2012modeling}. Here, $\pmb{\psi}(\textbf{s}_{i})$ is an $r\times d$ matrix consisting of known basis functions. This use of spatial basis functions is similar to the model in \cite{shand2017modeling}. It will be useful to organize the $n$-dimensional vectors $\textbf{Z}=\{Z(\textbf{s}_{1})\ldots Z(\textbf{s}_{n})\}^{\prime}$, $\textbf{Y}=\{Y(s_{1})\ldots Y(s_{n})\}^{\prime}$ and $\pmb{\epsilon}=\{\epsilon(s_{1})\ldots \epsilon(s_{n})\}^{\prime}$. To model $\textbf{Z}$, we define the prediction locations $\{\textbf{u}_{1}, \ldots, \textbf{u}_{m}\}$ such that the observed locations $\{\textbf{s}_{1}, \ldots, \textbf{s}_{n}\}\subset\{\textbf{u}_{1}, \ldots, \textbf{u}_{m}\}$. Define the corresponding $n\times n$ diagonal matrix $\textbf{V}_{\epsilon}$ $\equiv$ $cov(\pmb{\epsilon})=diag(\text{V}_{\epsilon}(\textbf{s}_{i}): i=1,\ldots,n)$.
\subsection{The Bayesian Hierarchical Model}
In this section, we summarize the statistical model used for inference. The model is organized using the ``data model", ``process model", and ``parameter model" notation used in \cite{cressie2011statistics}, as follows:
\begin{align}
\nonumber
&\textbf{Data Model}: \textbf{Z}\mid\pmb{\beta},\pmb{\nu}\sim\text{N}(\textbf{X}\pmb{\beta}+\textbf{O}\pmb{\nu},\sigma_{\epsilon}^2\textbf{I}_{n})\\
\nonumber
&\textbf{Process Model 1}: \pmb{\nu}\mid\pmb{\theta},\widetilde{\pmb{\nu}},\delta\sim\text{N}(\widetilde{\pmb{\nu}},\delta^2\textbf{I}_{n})\\
\nonumber
&\textbf{Process Model 2}: \widetilde{\pmb{\nu}}\mid\pmb{\theta}\sim f(\widetilde{\pmb{\nu}}\mid\pmb{\theta})\\
\nonumber
&\textbf{Parameter Model 1}: \pmb{\beta}\sim\text{N}(\textbf{0},\sigma^{2}_{\beta}\textbf{I}_{p})\\
\nonumber
&\textbf{Parameter Model 2}: \pmb{\eta}\sim\text{N}(\textbf{0},\sigma^{2}_{\eta}\textbf{I}_{r})\\
\nonumber
&\textbf{Parameter Model 3}: \sigma^{2}_{\nu}\sim \text{IG}(\alpha_{1},\beta_{1})\\
\nonumber
&\textbf{Parameter Model 4}: \sigma^{2}_{\beta}\sim\text{IG}(\alpha_{2},\beta_{2})\\
\nonumber
&\textbf{Parameter Model 5}: \sigma^{2}_{\eta}\sim\text{IG}(\alpha_{3},\beta_{3})\\
\nonumber
&\textbf{Parameter Model 6}: \phi\sim\text{U}(1,\text{U})\\
\nonumber
&\textbf{Parameter Model 7}: \delta^{2}\sim\text{IG}(\alpha_{4},\beta_{4}).\\
&\textbf{Parameter Model 8}: \sigma_{\epsilon}^2\sim\text{IG}(\alpha_{5},\beta_{5}).\label{model}
\end{align}
\noindent
In Equation (\ref{model}), \textbf{O} is an $n\times m$ incidence matrix, $\textbf{0}_{p}$ is a p-dimensional vector of zeros; ``$\text{N}(\pmb{\mu},\pmb{\Sigma})$" is a shorthand for a multivariate normal distribution with mean $\pmb{\mu}$ and positive definite covariance matrix $\pmb{\Sigma}$; ``$\text{IG}(\alpha,\kappa)$" is a shorthand for the inverse gamma distribution with shape $\alpha>0$ and scale $\kappa>0$; and ``$\text{U}(L,U)$" is a shorthand for a uniform distribution with lower bound $L$ and upper bound $U$. All hyperparameters are chosen so that the corresponding prior distribution is ``flat". Example specifications are provided in Section 4 and Section 5.
Process Model 1, Parameter Model 1, and Parameter Models 3$\--$5 are fairly standard assumptions for Gaussian data, as they lead to easy to sample full-conditional distributions within a Gibbs sampler \citep{cressie2011statistics}. Parameter Models 6 and 7 are used to avoid identifiability issues and lead to a conjugate full-conditional distribution \citep[see][page 124]{banerjee2014hierarchical}. It is common to assume that Process Model 2, $f(\widetilde{\pmb{\nu}}\mid\pmb{\theta})$, is the multivariate normal distribution with mean zero and covariance matrix $\textbf{C}(\pmb{\theta})$ \citep{banerjee2014hierarchical,cressie2011statistics}. However, $f(\widetilde{\pmb{\nu}}\mid\pmb{\theta})$ is only approximately normal with mean-zero and covariance matrix $\textbf{C}(\pmb{\theta})$ (see Section 2.2 for details).
\\
\subsection{Theoretical Considerations}
A non-stationary extension of Bochner's Theorem is stated in Theorem~1.\\
\noindent
\textit{Theorem 1:} \textit{Let $C(\textbf{s}_{i},\textbf{s}_{j})$ be a positive definite function on $D$, which is assumed to be compact and bounded. Then there exists a function $f:D\rightarrow\mathbb{R}^{d}$ and a measure $G(\pmb{\omega})$ such that for any pair of locations $\textbf{s}_{i},\textbf{s}_{j}\in D$,}
\begin{equation}
C(\textbf{s}_{i},\textbf{s}_{j})=\int_{-\infty}^{\infty}\cos[\{\textit{\textbf{f}}(\textbf{s}_{i})-\textit{\textbf{f}}(\textbf{s}_{j})\}^{\prime}\pmb{\omega}_{1}+(\textbf{s}_{i}-\textbf{s}_{j})^{\prime}\pmb{\omega}_{2}]G(d\pmb{\omega}),\label{thm1}
\end{equation}
\textit{where the 2d-dimensional vector} $\pmb{\omega}=(\pmb{\omega}_{1}^{\prime},\pmb{\omega}_{2}^{\prime})^{\prime}$.\\
\noindent
\textit{Proof}: See Appendix A.\\
\noindent
The proof of Theorem 1 involves a simple combination of the result in \citet{perrin2003nonstationarity} and Bochner's Theorem. We use Theorem 1 to define a nonstationary covariance function $C(\cdot,\cdot)$. That is, in our model we choose a specific form for $G(d\omega)$ and $f(\cdot)$, and we use Equation \eqref{thm1} to define our nonstationary covariance function.
Additionally, in practice we assume $\textbf{\textit{f}}(\cdot)=\pmb{\psi}^{\prime}(\textbf{s})\pmb{\eta}$, which is similar to the strategy used in \citet{bornn2012modeling} and \citet{shand2017modeling}. This leads naturally to questions on how to specify spatial basis functions. In general, we use radial basis functions with equally spaced knot locations as suggested in \citet{nychka} and \citet{cressie2008fixed}. One might also consider the use of information criteria to adaptively select knot locations \citep{bradley2011,TzengHuang}.
There are several things we can learn from Theorem 1. First, every non-stationary covariance function can be written as a convolution in 2\textit{d}-dimensional space according to \eqref{thm1}. Second, $\{\textit{\textbf{f}}(\textbf{s}_{i})-\textit{\textbf{f}}(\textbf{s}_{j})\}^{\prime}\pmb{\omega}_{1}$ is a deformation, which shows an explicit connection between dimension expansion and deformation. Furthermore, this deformation induces non-stationarity, since $\{\textit{\textbf{f}}(\textbf{s}_{i})-\textit{\textbf{f}}(\textbf{s}_{j})\}^{\prime}\pmb{\omega}_{1}=0$ leads to the classical version of Bochner's Theorem. Third, if we assume a specific form of $G(d\pmb{\omega})$, we can use Equation (\ref{thm1}) to approximate the covariance function. For example, when $C(\cdot,\cdot)$ is the exponential covariance function (as is the case in \eqref{model}), then $G(d\pmb{\omega})$ has a corresponding Cauchy density \citep{steinML}. We provide a discussion and consider several other covariograms. Our empirical results suggest that the results appear robust to the choice of covariogram. Denote the density corresponding to $G(d\pmb{\omega})$ with $\frac{g(\omega)}{C(0,0)}$. Moreover, the ability to simulate from the spectral density without mathematical operations of covariance matrices allows us to completely circumvent computing and storing a covariance matrix \citep{mejia1974synthesis}.
\\
\noindent
\textit{Theorem 2:}
\textit{Let $\pmb{\omega}_{i} = (\pmb{\omega}_{1,i}^{\prime}, \pmb{\omega}_{2,i}^{\prime})^{\prime}$, $\pmb{\omega}_{i}\overset{ind}{\sim} G(d\pmb{\omega})$, and $\kappa_i\overset{ind}{\sim}U(-\pi,\pi)$. Then for a given $f:\mathbb{R}^{d}\to\mathbb{R}^{d}$ the random process,}
\begin{equation}
\widetilde{\nu}(s)\equiv\sigma_{\nu}\left(\frac{2}{K}\right)^{\frac{1}{2}}\sum_{i=1}^{\text{K}}\cos(\textbf{\textit{f}}(\textbf{s})^{\prime}\pmb{\omega}_{1,i}+\textbf{s}^{\prime}\pmb{\omega}_{2,i}+\kappa_i),\label{3}
\end{equation}
\textit{$E\{\widetilde{\nu}(s)\}=0$, and $E\{\widetilde{\nu}(\textbf{s}_{i})\widetilde{\nu}(\textbf{s}_{j})\}=C(\textbf{s}_{i},\textbf{s}_{j})$, and converges in distribution (as $K\to\infty$) to a mean-zero Gaussian process with covariance $C(\textbf{s}_{i},\textbf{s}_{j})$ in Equation \eqref{thm1} with spectral density $\prod \frac{g(\omega_{jk})}{\textit{C}(0,0)}$.}
\\
\noindent
\textit{Proof}: See Appendix A.\\
\noindent
The proof of Theorem 2 involves a simple combination of the result in \citet{perrin2003nonstationarity} and \citet[][pg. 204]{cressie1993statistics}. In practice, to use Theorem 2, we need to specify the spectral density. In our implementation, we assume that $\omega_{j,i} \overset{ind}{\sim} \frac{g(\omega)}{C(0,0)}$, where for each $i$, $\pmb{\omega}_{i} = (\omega_{1,i},\ldots, \omega_{2d,i})^{\prime}$ and $g(\cdot)$ is the Cauchy density. This choice of the Cauchy density leads to the exponential covariogram \citep{cressie1993statistics}.
It is arguably more common to simulate $\pmb{\nu}$ using a Cholesky decomposition. However, this requires order $n^{3}$ computation and order $n^{2}$ memory. Theorem 2 allows us to simulate $\pmb{\nu}$ without these memory and computational problems. It follows from the transformation theorem \citep{resnick2013probability} that the pdf of $\widetilde{\pmb{\nu}}$ is given, under our specification, by
\begin{align}
\nonumber
&f(\widetilde{\pmb{\nu}}\mid\pmb{\theta})\\
&=\int_{\{\widetilde{\pmb{\nu}}:\widetilde{\nu}(s_i)=\sigma_{\nu}\left(\frac{2}{K}\right)^{\frac{1}{2}}\sum_{i=1}^{\text{K}}\cos(\textbf{\textit{f}}(s_i)^{\prime}\pmb{\omega}_{1,i}+s_i^{\prime}\pmb{\omega}_{2,i}+\kappa_i)\}}\prod_{jk} \frac{g(\omega_{j,k})}{\textit{C}(0,0)}\prod_{i=1}^{n}\frac{1}{2\pi}I(-\pi<\kappa_{i}<\pi)d\pmb{\omega}_{1,i}d\pmb{\omega}_{2,i}d\kappa_{1}\ldots d\kappa_{n},
\label{thm333}
\end{align}
where $I(\cdot)$ is the indicator function. Again, from \citet[][pg. 204]{cressie1993statistics} and Theorem 2 the pdf in \eqref{thm333} is roughly Gaussian with mean zero and covariance $C(\pmb{\theta})$.
\section{Collapsed Gibbs Sampling}
In this section, we outline the steps needed for collapsed Gibbs sampling. Gibbs sampling requires simulating from full-conditional distributions \citep{gelfand1990sampling}. In a collapsed Gibbs sampler, some of the events conditioned on in the full-conditional distribution are integrated out \citep{liu1994collapsed}. For simplicity, we use the bracket notation, where [$X\mid Y$] represents the conditional distribution of a \textit{X} given \textit{Y} for generic random variables \textit{X} and \textit{Y}. In Algorithm 1, we present the steps needed for our proposed collapsed Gibbs sampler.
\begin{algorithm}[H]
\caption{Implementation: Collapsed Gibbs sampler}
\begin{algorithmic}[1]
\STATE Initialize ${\pmb{\beta}}^{[1]}$, ${\pmb{\nu}}^{[1]}$,
$\pmb{\eta}^{[1]}$,$\sigma_{\nu}^{2[1]}$,$\sigma_{\beta}^{2[1]}$,
$\sigma_{\eta}^{2[1]}$, $\phi^{[1]}$ and $\delta^{2[1]}$
\STATE Set \textit{b} = 2.
\STATE Simulate ${\pmb{\beta}}^{[b]}$ from $[{\pmb{\beta}}\vert {\pmb{\nu}}^{[b-1]}, \pmb{\eta}^{[b-1]}, \sigma_{\nu}^{2[b-1]}, \sigma_{\beta}^{2[b-1]}, \sigma_{\eta}^{2[b-1]},\phi^{[b-1]},\delta^{2[b-1]},\textbf{Z}]$.
\STATE Simulate $\widetilde{\pmb{\nu}}$ from $f(\widetilde{\pmb{\nu}}\mid\pmb{\theta})$ using Theorem 2 with $K$ ``large".
\STATE Simulate ${\pmb{\nu}}^{[b]}$ from $[{\pmb{\nu}}\vert {\pmb{\beta}}^{[b]}, \pmb{\eta}^{[b-1]}, \sigma_{\nu}^{2[b-1]}, \sigma_{\beta}^{2[b-1]}, \sigma_{\eta}^{2[b-1]},\phi^{[b-1]},\delta^{2[b-1]},\{\widetilde{\pmb{\nu}}\},\textbf{Z}]$.
\STATE Simulate ${\pmb{\eta}}^{[b]}$ from $[{\pmb{\eta}}\vert {\pmb{\beta}}^{[b]}, {\pmb{\nu}}_{m}^{[b]}, \sigma_{\nu}^{2[b-1]}, \sigma_{\beta}^{2[b-1]}, \sigma_{\eta}^{2[b-1]},\phi^{[b-1]},\delta^{2[b-1]}, \textbf{Z}]$, ${\pmb{\nu}}_{m}^{[b]}$ is $m$-dimensional ($m<n$) consisting of $m$ distinct elements of ${\pmb{\nu}}^{[b]}$.
\STATE Simulate ${\sigma}_{\nu}^{2[b]}$ from $[{\sigma}_{\nu}^2\vert {\pmb{\beta}}^{[b]}, {\pmb{\nu}}^{[b]}, \pmb{\eta}^{[b]}, \sigma_{\beta}^{2[b-1]}, \sigma_{\eta}^{2[b-1]},\phi^{[b-1]},\delta^{2[b-1]},\{\widetilde{\pmb{\nu}}\},\textbf{Z}]$.
\STATE Simulate ${\sigma}_{\beta}^{2[b]}$ from $[{\sigma}_{\beta}^2\vert {\pmb{\beta}}^{[b]}, {\pmb{\nu}}^{[b]}, \pmb{\eta}^{[b]}, \sigma_{\nu}^{2[b]}, \sigma_{\eta}^{2[b-1]},\phi^{[b-1]},\delta^{2[b-1]},\textbf{Z}]$.
\STATE Simulate ${\sigma}_{\eta}^{2[b]}$ from $[{\sigma}_{\eta}^2\vert {\pmb{\beta}}^{[b]}, {\pmb{\nu}}^{[b]}, \pmb{\eta}^{[b]}, \sigma_{\nu}^{2[b]}, \sigma_{\beta}^{2[b]},\phi^{[b-1]},\delta^{2[b-1]},\textbf{Z}]$.
\STATE Simulate ${\phi}^{[b]}$ from $[\phi\vert {\pmb{\beta}}^{[b]}, {\pmb{\nu}}_{m}^{[b]}, \pmb{\eta}^{[b]}, \sigma_{\nu}^{2[b]}, \sigma_{\beta}^{2[b]},\sigma_{\eta}^{2[b]},\delta^{2[b-1]}, \textbf{Z}]$.
\STATE Simulate ${\delta}^{2[b]}$ from $[\delta^2\vert {\pmb{\beta}}^{[b]}, {\pmb{\nu}}^{[b]}, \pmb{\eta}^{[b]}, \sigma_{\nu}^{2[b]}, \sigma_{\beta}^{2[b]},\sigma_{\eta}^{2[b]},\phi^{[b]},\{\widetilde{\pmb{\nu}}\},\textbf{Z}]$.
\STATE Let $b = b+1$.
\STATE If $b < B$ (a prespecified value) repeat Steps 3--12; otherwise stop.
\end{algorithmic}
\end{algorithm}
\noindent The expressions for the full-conditional distributions listed in Algorithm 1 are derived in Appendix B. This collapsed Gibbs sampler can easily be modified to allow for heterogeneous variances, and allow for other choices for prior distributions.
The main motivation for collapsed Gibbs sampling is that Step 4 of Algorithm 1 is computationally straightforward. Additionally, in Step 5, the full-conditional distribution has a known, and easy to sample from expression. This is significant, as this full-conditional distribution traditionally involves inverses and determinants of high-dimensional matrices. Specifically, the following relationship holds,
\begin{equation*}
f(\pmb{\nu}\vert\cdot)\propto\exp\left\lbrace-\frac{(\textbf{Z}-\textbf{X}\pmb{\beta}-\textbf{O}\pmb{\nu})^{\prime}\textbf{V}_{\epsilon}^{-1}(\textbf{Z}-\textbf{X}\pmb{\beta}-\textbf{O}\pmb{\nu})}{2}\right\rbrace f(\pmb{\nu}\vert\widetilde{\pmb{\nu}},\delta,\pmb{\theta}),
\end{equation*}
where $f(\pmb{\nu}\vert\widetilde{\pmb{\nu}}, \delta,\pmb{\theta})=\exp\left\lbrace-\frac{(\pmb{\nu}-\widetilde{\pmb{\nu}})^{\prime}(\pmb{\nu}-\widetilde{\pmb{\nu}})}{2\delta^{2}}\right\rbrace$. Then,
\begin{equation*}
f(\pmb{\nu}\vert\cdot)\propto\exp\left\lbrace-\frac{\pmb{\nu}^{\prime}(\delta^{-2}\textbf{I}+\textbf{O}'\textbf{V}_{\epsilon}^{-1}\textbf{O})\pmb{\nu}}{2}+\pmb{\nu}^{\prime}\left(\textbf{O}'\textbf{V}_{\epsilon}^{-1}(\textbf{Z}-\textbf{X}\pmb{\beta})+\frac{1}{\delta^{2}}\widetilde{\pmb{\nu}}\right)\right\rbrace.
\end{equation*}
This gives
\begin{equation*}
f(\pmb{\nu}\vert\cdot)=\text{N}(\pmb{\mu}^{*},\pmb{\Sigma}^{*}),
\end{equation*}
where $\pmb{\mu}^{*}=\pmb{\Sigma}^{*}\{\textbf{O}'\textbf{V}_{\epsilon}^{-1}(\textbf{Z}-\textbf{X}\pmb{\beta})+\frac{1}{\delta^{2}}\widetilde{\pmb{\nu}}\}$, and $(\pmb{\Sigma}^{*})^{-1}=\delta^{-2}\textbf{I}+\textbf{O}'\textbf{V}_{\epsilon}^{-1}\textbf{O}$, where we emphasize that $\pmb{\Sigma}^{*}$ is a computationally advantageous diagonal matrix.
\section{Simulation Studies}
We simulate data in a variety of settings, and compare to the current state-of-the-art in spatial statistics, the nearest neighbor Gaussian process (NNGP) model \citep{datta2016nearest} and the general Vecchia approximation \citep{katzfuss2017general,katzfuss2018vecchia}. The data are generated in several different ways, all of which differ from the model we fit. That is, we assume $\textbf{Z}=\textbf{Y}+\pmb{\epsilon}$, where $\textbf{Y}$ is a fixed and known $n$-dimensional vector and $\pmb{\epsilon}\sim N(0,\sigma_{\epsilon}^2\textbf{I}_n)$. We choose $\sigma_{\epsilon}^2$ based on the signal-to-noise ratios (SNR equal to 2, 3, 5, and 10). For each SNR we allow for three different missing data assumptions. Specifically, 5\% missing at random, 10\% missing at random, and 20\% missing at random. In total, this produces 60 settings.
To define $f_0(\cdot)$ we use a nonlinear function proposed by \citet{friedman1991multivariate}, where for $\textbf{x}=(x_1,\ldots, x_5)'$:
\begin{align*}
&f_0(\textbf{x})=10\sin(\pi x_1 x_2)+20(x_3-0.5)^2+10x_4+5x_5,
\end{align*}
which implies that far away observations may be less similar, suggesting nonstationarity. Then we propose five simulation cases,
\begin{align*}
&\text{Case 1:}\quad Y(i)= f_0(\textbf{x}_i)\\
&\text{Case 2:}\quad Y(i)= 0.85f_0(\textbf{x}_i)+0.15\pmb{\zeta}(i)\\
&\text{Case 3:}\quad Y(i)= 0.5f_0(\textbf{x}_i)+0.5\pmb{\zeta}(i)\\
&\text{Case 4:}\quad Y(i)= 0.15f_0(\textbf{x}_i)+0.85\pmb{\zeta}(i)\\
&\text{Case 5:}\quad Y(i)= \pmb{\zeta}(i)
\end{align*}
and $\pmb{\zeta}=(\pmb{\zeta}(1),\ldots,\pmb{\zeta}(n))'\sim\text{N}(0,R(\theta))$, where the $n\times n$ matrix $R(\theta)=\{\sigma_{\zeta}^{2}\exp\left(-\phi_{\zeta}\|i-j\|\right)\}$. So $\pmb{\zeta}$ is a stationary term. In Case 1, the data is generated from a highly nonstationary process and $\textbf{x}_i$ is a five-dimensional vector consisting of independent draws from a uniform distribution. In Case 3, we weight the process half with a nonlinear term and half with a stationary term. In Case 5, we only have a stationary term. So the data is rough in Case 1 and gradually becomes smoother as we consider other cases. We show examples of the data in Figure \ref{DataSNR5}.
We generate 1,000 observations over this one-dimensional domain $\left[0,1\right]$ for each case. For Case 2 to Case 4, $\sigma^2_{\zeta}$ is set to be equal to the sample variance of the elements in the vector $(f(\textbf{x}_1),\ldots, f(\textbf{x}_n))'$. We fixed $\phi_{\zeta}=0.3$. SNR is defined to be
\begin{equation*}
\text{SNR}=\frac{\sum_{i=1}^{n}(Y(i)-\frac{1}{n}\sum_{j=1}^{n}Y(j))^2}{(n-1)\sigma_{\epsilon}^2}
\end{equation*}
so that
\begin{equation*}
\sigma_{\epsilon}^2=\frac{\sum_{i=1}^{n}(Y(i)-\frac{1}{n}\sum_{j=1}^{n}Y(j))^2}{(n-1)\text{SNR}}.
\end{equation*}
Each SNR has five different cases as we mention above. In practice, we often do not observe all the useful covariates. Thus, when implementing methods for spatial prediction, we only use $X=(x_1,x_3,x_4,x_5)$, which removes $x_2$.
In our model, the 20-dimensional vector $\pmb{\psi}(i)$ was chosen to consist of Gaussian radial basis functions over equally spaced knots. That is, $\pmb{\psi}(i)=(\psi_{1}(i)\ldots \psi_{20}(i))^{\prime}$, where
\begin{equation*}
\psi_{k}(i)=\exp(-\tau\|i-{c}_{k}\|);\hspace{2pt}i = 1,\ldots, 1,000, \hspace{2pt} k=1,\ldots,20,
\end{equation*}
and $\{{c}_{1},\ldots,{c}_{20}\}$ are the equally-spaced knots points over $\{1,\ldots,1,000\}$, and $\tau$ is equal to 1.5 times the median of non-zero distances between the points in $\{1,\ldots,1,000\}$. We set the spectral density equal to
\begin{equation*}
g(\omega)=\frac{1}{\pi(1+\omega^2)}.
\end{equation*}
In Table \ref{Table:Estimation}, we provide the root mean squared prediction error (RMSPE) of each model for the data in the first row in Figure \ref{DataSNR5}. The RMSPE is defined as
\begin{equation}
\sqrt{\frac{1}{1,000}\left\lbrace\sum_{i=1}^{1,000}\{Y(i)-\hat{Y}(i)\}^{2}\right\rbrace},\label{mspe}
\end{equation}
where $\hat{Y}(i)$ is the posterior mean from fitting each model. Here we see that our method (referred to as the Expanded Spectral Density (ESD) method) clearly outperforms NNGP and the Vecchia approximation. Our model performs well in terms of estimation as well. Using the data in the first row of Figure \ref{DataSNR5}, and a half-t prior for $\sigma_{\epsilon}$, we have a posterior mean of $\sigma_{\epsilon}^2$ equal to 5.79 and highest posterior density interval, (2.68, 9.96). The true value is 4.813, which is contained in the interval.
\begin{figure}
\caption{Simulation data with SNR=5; First row to last row are examples of Case 1 to Case 5.}
\label{DataSNR5}
\end{figure}
\begin{table}[H]
\begin{center}
\caption{RMSPE of each method for the data in the first row of Figure \ref{DataSNR5}. We call our method the Expanded Spectral Density (ESD) method.}
\label{Table:Estimation}
\begin{tabular}{c|c}
\hline
\textbf{Method} &\textbf{RMSPE} \\
\hline
\textbf{ESD} &1.94 \\
\textbf{NNGP} &3.24 \\
\textbf{Vecchia Approximation} &3.05 \\
\hline
\end{tabular}
\end{center}
\end{table}
\subsection{Comparisons Over Multiple Replicates}
To implement NNGP, we use 15 nearest neighbors which is consistent with what is suggested in \citet{datta2016hierarchical}. For the general Vecchia approximation, we use 15 nearest neighbors which is the same as NNGP. We also use the R-package \texttt{spNNGP} \citep{spNNGP} and \texttt{GpGp} \citep{guinness2018gpgp}. We record the performance (in terms of RMSPE) over the signal-to-noise ratio (SNR) and the proportion of missing data in the datasets for each case. The results are shown in Figure \ref{mspeplotS1} to Figure \ref{mspeplotS5} for Case 1 to Case 5, respectively. For each figure, the first row is for SNR equal 2, the second row is for SNR equal 3, the third row is for SNR equal 5 and the fourth row is for SNR equal 10. For each row, the left plot has 5\% of the data missing, the middle plot has 10\% of the data missing and the right plot has 20\% of the data missing. The boxplot is computed over 100 independent replicates of the one thousand dimensional dataset. We compare each simulation with NNGP and the general Vecchia approximation.
For both Case 1 and Case 2, we find that our method outperforms the NNGP and the general Vecchia approximation when SNR equals 3, 5 and 10. However, we have a similar result with NNGP and perform slightly worse than the general Vecchia approximation when SNR=2. In Case 3, we find that our method outperforms the NNGP and general Vecchia approximation when SNR equals 5 and 10, and only slightly outperforms NNGP and general Vecchia approximation when SNR=3. Additionally, ESD performs slightly worse than general Vecchia approximation when SNR=2 and we are in Case 3. In Case 4, we find that general Vecchia approximation outperforms our method, and our method outperforms NNGP in a few replicates. In Case 5, the stationary case, general Vecchia approximation and NNGP outperform our method, which is not surprising considering that our method is derived for nonstationary processes. Based on the results, we believe our model performs well in highly nonlinear settings with moderate to high signal-to-noise ratios. However, we find the RMSPE is worse than NNGP and general Vecchia approximation as the process becomes smoother.
\begin{figure}
\caption{RMSPE for Case 1. First column to last column are results over 5\%, 10\% and 20\% missing at random. First row to last row are example of SNR equal to 2,3,5 and 10.}
\label{mspeplotS1}
\end{figure}
\begin{figure}
\caption{RMSPE for Case 2. First column to last column are results over 5\%, 10\% and 20\% missing at random. First row to last row are example of SNR equal to 2,3,5 and 10.}
\label{mspeplotS2}
\end{figure}
\begin{figure}
\caption{RMSPE for Case 3. First column to last column are results over 5\%, 10\% and 20\% missing at random. First row to last row are example of SNR equal to 2,3,5 and 10.}
\label{mspeplotS3}
\end{figure}
\begin{figure}
\caption{RMSPE for Case 4. First column to last column are results over 5\%, 10\% and 20\% missing at random. First row to last row are example of SNR equal to 2,3,5 and 10.}
\label{mspeplotS4}
\end{figure}
\begin{figure}
\caption{RMSPE for Case 5. First column to last column are results over 5\%, 10\% and 20\% missing at random. First row to last row are example of SNR equal to 2,3,5 and 10.}
\label{mspeplotS5}
\end{figure}
\section{Real Data Application}
\subsection{Ozone Data Application: Data Description}
As an illustration, we analyze the ozone dataset used in \citet{cressie2008fixed}, which has become a benchmark dataset in the spatial statistics literature \citep{zhang2018smoothed}. This dataset consists of $\textit{n}=173,405$ values of total column ozone (TCO) in Dobson units (see Figure \ref{originalozone} for a plot of the data). The dataset was obtained through a Dobson spectrophotometer on board the Nimbus-7 polar orbiting satellite on October 1st, 1988. For details on how these data were collected see \citet{cressie2008fixed}. This dataset is made publicly available by the Centre for Environmental Informatics at the University of Wollongong's National Institute for Applied Statistics Research Australia (\url{https://hpc.niasra.uow.edu.au/ckan/}).
\begin{figure}
\caption{Level 2 total column ozone data (in Dobson units) collected on October 1st, 1988, and analyzed by \citet{cressie2008fixed}.}
\label{originalozone}
\end{figure}
\subsection{Analysis}
We present an analysis of the ozone dataset using ESD. We partition the data into a training set and a prediction set. We randomly generated three different 5\% missing at random datasets for evaluating the prediction performance of all methods. A total of 5,000 MCMC iterations of the Gibbs sampler in Algorithm 1 were used. The first 1,000 iterations were treated as a burn-in. We informally check trace plots for convergence, and no lack of convergence was detected. Since \textit{d}=2, $\pmb{\psi}(\textbf{s})$ is an $r\times 2$ dimensional matrix, which we denote with $\pmb{\psi}(\textbf{s})=\left\lbrace\pmb{\phi}_{1}(\textbf{s}),\pmb{\phi}_{2}(\textbf{s})\right\rbrace$, where $\pmb{\phi}_{i}(\textbf{s})$ is an \textit{r}-dimensional vector, \textit{i}=1,2. Using the R-package \texttt{FRK}, we choose 92, 364 and 591 equally-spaced bisquare basis functions, which defines a 92, 364 and 591-dimensional vector $\pmb{\zeta}(\textbf{s})$ \citep{zammit2017frk}. Then, we set $\pmb{\phi}_{1}(\textbf{s})=\left\lbrace\textbf{0}_{3}^{\prime},\pmb{\zeta}(\textbf{s})^{\prime}\right\rbrace^{\prime}$, and we take $\pmb{\phi}_{2}(\textbf{s})=(1,\textbf{s}^{\prime},\textbf{0}_{r}^{\prime})^{\prime}$. This choice of $\pmb{\phi}_{2}(\textbf{s})$ isolates the effect of the latitude and longitude on the non-stationarity of the process. The covariates are defined to be $\textbf{X}(\textbf{s})=(1, \pmb{\zeta}(\textbf{s})^{\prime})^{\prime}$.
Figure \ref{r92} displays the prediction and prediction variances using non-stationary spectral simulation. Upon comparison of Figure \ref{originalozone} to Figure (\ref{r92}a), Figure (\ref{r364}a) and Figure (\ref{r591}a), we see that we obtain small in-sample error. Additionally, Figure (\ref{r92}b), Figure (\ref{r364}b) and Figure (\ref{r591}b) show that our prediction error is relatively constant over the globe. We randomly select 5\% of the total observations to act as a validation dataset, and compute the root mean squared prediction error (RMSPE). Specifically, we compute the average square distance between the validation data and its corresponding prediction, and then we take the square root. RMSPE for our method is around 16.55, 9.86 and 7.52 for 92, 364 and 591 basis functions, respectively (see Table \ref{table2}). We also computed the RMSPE for the fixed rank kriging method as implemented through the R-package \texttt{FRK} \citep{zammit2017frk}. The FRK predictor is based on $\textbf{X}(\textbf{s})=1$ and uses $\pmb{\zeta}(\textbf{s})$ as its basis set. The RMSPE for FRK is approximately 76.41, and hence, we outperform the FRK predictor in terms of RMSPE. We also compare to other methods. \citet{zhang2018smoothed} compare the Smoothed Full-Scale Approximation (SFSA), Full-Scale Approximation using a block modulating function (FSAB) \citep{sang2012full}, NNGP, and a local Gaussian process method with adaptive local designs (LaGP) \citep{gramacy2015local}. Their results show that the RMSPE for SFSA, NNGP and FSAB are all around 27, and the RMSPE for LaGP is around 38. Thus, our method also outperforms these methods in terms of RMSPE. However, the general Vecchia approximation has a slightly better result, with RMSPE equal to roughly 5, which is smaller than our RMSPE of 7.52.
This is consistent with our simulation results that showed that in smoother nonstationary settings the general Vecchia approximation performs similarly to or better than ESD.
We also include the computation times in Table \ref{Time}. Our method is less competitive. Although we avoid storing and inverting a high-dimensional covariance matrix, we require nested loops, which can be computationally intensive (i.e., a loop in the Gibbs sampler and a loop over $i=1,\ldots,K$ in Theorem 2). However, we are able to produce spatial predictions.
\begin{figure}
\caption{Results for 92 basis functions. In (a), we plot the posterior means (in Dobson units) from the model in (\ref{model}).}
\label{r92}
\end{figure}
\begin{figure}
\caption{Results for 364 basis functions. In (a), we plot the posterior means (in Dobson units) from the model in (\ref{model}).}
\label{r364}
\end{figure}
\begin{figure}
\caption{Results for 591 basis functions. In (a), we plot the posterior means (in Dobson units) from the model in (\ref{model}).}
\label{r591}
\end{figure}
\begin{table}[H]
\begin{center}
\caption{Sensitivity to the number of basis functions}
\label{table2}
\begin{tabular}{c|c}
\textbf{Total size of basis function} & \textbf{RMSPE}\\
\hline
92 & 16.55\\
\hline
364& 9.86\\
\hline
591& 7.52\\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}[H]
\begin{center}
\caption{Computation Time by Basis Function.}
\label{Time}
\begin{tabular}{c|c|c}
\textbf{Method} &\textbf{Number of basis function}& \textbf{Time (seconds)}\\
\hline
ESD &92 &13925\\
&364 &111685\\
&591 &273024\\
\hline
Vecchia Approximation&- &200 \\
\hline
\end{tabular}
\end{center}
\end{table}
In terms of inference on parameters, we are particularly interested in $\pmb{\eta}$. This is because when $\pmb{\eta}$ is zero we obtain a stationary process (see Theorem 1). In Figure \ref{PosVar} we plot the posterior covariance matrix. As $r$ increases, we see the variances and covariances appear to be close to zero, suggesting that $\pmb{\eta}$ is close to zero. This suggests that the process is smooth, which is consistent with our previous results. However, several credible intervals for elements of $\pmb{\eta}$ do not contain zero, which suggests that nonstationarity is present in this dataset.
\begin{figure}
\caption{Posterior Covariance Matrix Image Plot. From left to right: 92, 364, and 591 basis functions.}
\label{PosVar}
\end{figure}
\section{Discussion}
Bayesian analysis of big Gaussian spatial data is a challenging and important problem. We propose a Bayesian approach using non-stationary spectral simulation. To develop non-stationary spectral simulation we combine Bochner's theorem with dimension expansion \citep{perrin2003nonstationarity}, and apply \citet{mejia1974synthesis}'s spectral simulation method. The advantage is that no large matrix inversion or storage is needed to approximately simulate a non-stationary full-rank Gaussian process. Additionally, the proposed method is extremely broad, since every positive definite non-stationary covariance function can be written according to \eqref{thm1}.
In Section 4, the simulation study is used to show a scenario where our approach outperforms the nearest neighbor Gaussian process \citep[NNGP;][]{datta2016hierarchical} model and Vecchia approximation \citep{katzfuss2017general}. We generate data that is different from our model, and we find our method has better result in different scenarios based on how nonlinear the process is. In Section 5, we analyze the total column ozone dataset from \citet{cressie2008fixed}. We obtain predictions that have small in-sample error, and outperforms fixed rank kriging (FRK), SFSA, FSAB, NNGP, and LaGP in terms of out-of-sample error. The Vecchia approximation has a slightly better RMSPE. Additionally, our framework allows one to perform inference on the presence of nonstationarity.
Environmental studies are often based on high-dimensional spatial Gaussian datasets with complex patterns of non-stationarity. Several studies focus on simplifying matrix valued operations and storage \citep{higdon1999non,paciorek2006spatial,cressie2008fixed,banerjee2008gaussian,lindgren2011explicit,nychka2015multiresolution}. Thus, our ``matrix free" approach offers a unique solution to this important problem.
\section*{Acknowledgments} Jonathan Bradley's research was partially supported by the U.S. National Science Foundation (NSF) grant SES-1853099.
\section*{Appendix A: Proofs}
\noindent
\textbf{Proof of Theorem 1:}\\
It follows from \citet{perrin2003nonstationarity} that for every non-stationary positive definite function $\textit{C}$ and every pair of locations $s_{1}$ and $s_{2}$ there exists a $\textbf{w}_{1}$ and $\textbf{w}_{2}$ such that $C(\textbf{s}_{1},\textbf{s}_{2})=\rho\left\lbrace\bigl(\begin{smallmatrix}
\textbf{s}_{1}\\ \textbf{w}_{1}
\end{smallmatrix}\bigr),\bigl(\begin{smallmatrix}
\textbf{s}_{2}\\ \textbf{w}_{2}
\end{smallmatrix}\bigr)\right\rbrace$, where $\rho$ is a stationary covariogram. Let $f$ be the function that maps a generic location $\textbf{s}\in D$ to its corresponding expanded dimension $\textbf{w}\in\mathbb{R}^{d}$.
It follows from Bochner's theorem \citep{bochner1959lectures} that $\rho\left\lbrace\bigl(\begin{smallmatrix}
\textbf{s}_{i}\\ \textbf{\textit{f}}(\textbf{s}_{i})
\end{smallmatrix}\bigr),\bigl(\begin{smallmatrix}
\textbf{s}_{j}\\ \textbf{\textit{f}}(\textbf{s}_{j})
\end{smallmatrix}\bigr)\right\rbrace$ is positive definite (and equivalently so is $C(s_{i},s_{j})$) if and only if
\begin{align*}
&C(s_{i},s_{j})=\rho\left\lbrace\bigl(\begin{smallmatrix}
\textbf{s}_{i}\\ \textbf{\textit{f}}(\textbf{s}_{i})
\end{smallmatrix}\bigr),\bigl(\begin{smallmatrix}
\textbf{s}_{j}\\ \textbf{\textit{f}}(\textbf{s}_{j})
\end{smallmatrix}\bigr)\right\rbrace=\int_{-\infty}^{\infty}\cos\left\lbrace\{\textbf{\textit{f}}(\textbf{s}_{i})-\textbf{\textit{f}}(\textbf{s}_{j})\}^{\prime}\pmb{\omega}_{1}+(\textbf{s}_{i}-\textbf{s}_{j})^{\prime}\pmb{\omega}_{2}\right\rbrace G(d\omega).
\end{align*}
This completes the result.\\
\textbf{Proof of Theorem 2:}\\
We have that,
\begin{equation*}
E\{\hat{\pmb{\nu}}(\textbf{s})\}=\frac{(2)^{\frac{1}{2}}}{2\pi\sigma_{\nu}}\int_{-\infty}^{\infty}\int_{-\pi}^{\pi}\cos(\textbf{\textit{f}}(\textbf{s})^{\prime}\pmb{\omega}_{1}+\textbf{s}^{\prime}\bm{\omega}_{2}+\kappa)g(\pmb{\omega})d\kappa d\pmb{\omega}=0,
\end{equation*}
since $\int_{-\pi}^{\pi}\cos(\kappa)d\kappa=\int_{-\pi}^{\pi}\sin(\kappa)d\kappa=0$. Also,
\begin{align*}
&E\{\hat{\pmb{\nu}}(s_{i})\hat{\pmb{\nu}}(s_{j})\}\\
&=2\sigma^{2}_{\nu}E[\cos(\textbf{\textit{f}}(\textbf{s}_{i})^{\prime}\pmb{\omega}_{1}+\textbf{s}_{i}^{\prime}\pmb{\omega}_{2}+\kappa_{1})\cos(\textbf{\textit{f}}(\textbf{s}_{j})^{\prime}\pmb{\omega}_{1}+\textbf{s}_{j}^{\prime}\pmb{\omega}_{2}+\kappa_{1})]\\
&=\int\cos\{(\textbf{\textit{f}}(\textbf{s}_{i})^{\prime}-\textbf{\textit{f}}(\textbf{s}_{j})^{\prime})\pmb{\omega}_{1}+(\textbf{s}_{i}-\textbf{s}_{j})^{\prime}\pmb{\omega}_{2}\}g(\pmb{\omega}_{1})g(\pmb{\omega}_{2})d\pmb{\omega}_{1}d\pmb{\omega}_{2}\\
&=\rho\left\lbrace\bigl(\begin{smallmatrix}
\textbf{s}_{i}\\ \textbf{f}(\textbf{s}_{i})
\end{smallmatrix}\bigr)-\bigl(\begin{smallmatrix}
\textbf{s}_{j}\\ \textbf{f}(\textbf{s}_{j})
\end{smallmatrix}\bigr)\right\rbrace=C(s_{i},s_{j}).
\end{align*}
Since $\rho(\cdot)$ is a stationary covariogram, it follows from \citet[][pg. 204]{cressie1993statistics} that $\widetilde{\pmb{\nu}}$ converges to a Gaussian process.
\section*{Appendix B: Full Conditional Distributions}
In this section, we derive the full conditional distribution of our parameters and random effects, which we use within the Gibbs sampler outlined in Algorithm 1.
\begin{enumerate}
\item The full conditional distribution for $\pmb{\beta}$ is
\begin{equation*}
f(\pmb{\beta}\vert\cdot)\propto\exp\left\lbrace-\frac{(\textbf{Z}-\textbf{X}\pmb{\beta}-\textbf{O}\pmb{\nu})^{\prime}\textbf{V}_{\epsilon}^{-1}(\textbf{Z}-\textbf{X}\pmb{\beta}-\textbf{O}\pmb{\nu})}{2}\right\rbrace\exp\left\lbrace-\frac{\pmb{\beta}^{\prime}\pmb{\beta}}{2\sigma_{\beta}^{2}}\right\rbrace
\end{equation*}
\begin{equation*}
\propto\exp\left\lbrace-\frac{\pmb{\beta}^{\prime}(\textbf{X}^{\prime}\textbf{V}_{\epsilon}^{-1}\textbf{X}+\sigma_{\beta}^{-2}\textbf{I}_{p})\pmb{\beta}}{2}+\pmb{\beta}^{\prime}\textbf{X}^{\prime}\textbf{V}_{\epsilon}^{-1}(\textbf{Z}-\textbf{O}\pmb{\nu})\right\rbrace
\end{equation*}
\begin{equation*}
\propto\exp\left\lbrace-\frac{\pmb{\beta}^{\prime}\pmb{\Sigma}_{*}^{-1}\pmb{\beta}}{2}+\pmb{\beta}^{\prime}\pmb{\Sigma}_{*}^{-1}\pmb{\mu}_{*}\right\rbrace
\end{equation*}
\begin{equation*}
\propto \exp\left\lbrace-\frac{1}{2}(\pmb{\beta}-\pmb{\mu}_{*})^{\prime}\pmb{\Sigma}_{*}^{-1}(\pmb{\beta}-\pmb{\mu}_{*})\right\rbrace.
\end{equation*}
Thus, $\pmb{\beta}\sim \text{N}(\pmb{\mu}_{*},\pmb{\Sigma}_{*})$, where $\pmb{\mu}_{*}=\pmb{\Sigma}_{*}\pmb{X}'\textbf{V}_{\epsilon}^{-1}(\textbf{Z}-\textbf{O}\pmb{\nu})$ and $\pmb{\Sigma}_{*}^{-1}=\pmb{X}'\textbf{V}_{\epsilon}^{-1}\textbf{X}+\sigma_{\beta}^{-2}\textbf{I}_{p}$, and $\textbf{X}=(\textbf{X}(\textbf{s}_{1})\ldots \textbf{X}(\textbf{s}_{n}))^{\prime}$.
\item First, we sample $\widetilde{\pmb{\nu}}$ using non-stationary spectral simulation. Then the full-conditional distribution is
\begin{equation*}
f(\pmb{\nu}\vert\cdot)\propto\exp\left\lbrace-\frac{(\textbf{Z}-\textbf{X}\pmb{\beta}-\textbf{O}\pmb{\nu})^{\prime}\textbf{V}_{\epsilon}^{-1}(\textbf{Z}-\textbf{X}\pmb{\beta}-\textbf{O}\pmb{\nu})}{2}\right\rbrace f(\pmb{\nu}\vert\widetilde{\pmb{\nu}},\delta,\pmb{\theta}),
\end{equation*}
where $f(\pmb{\nu}\vert\widetilde{\pmb{\nu}}, \delta,\pmb{\theta})=\exp\left\lbrace-\frac{(\pmb{\nu}-\widetilde{\pmb{\nu}})^{'}(\pmb{\nu}-\widetilde{\pmb{\nu}})}{2\delta^{2}}\right\rbrace$. Then,
\begin{equation*}
f(\pmb{\nu}\vert\cdot)\propto\exp\left\lbrace-\frac{\pmb{\nu}^{\prime}(\delta^{-2}\textbf{I}+\textbf{O}'\textbf{V}_{\epsilon}^{-1}\textbf{O})\pmb{\nu}}{2}+\nu^{\prime}\left(\textbf{O}'\textbf{V}_{\epsilon}^{-1}(\textbf{Z}-\textbf{X}\pmb{\beta})+\frac{1}{\delta^{2}}\widetilde{\pmb{\nu}}\right)\right\rbrace.
\end{equation*}
This gives
\begin{equation*}
f(\pmb{\nu}\vert\cdot)\propto \text{N}(\pmb{\mu}^{*},\pmb{\Sigma}^{*}),
\end{equation*}
where $\pmb{\mu}^{*}=\pmb{\Sigma}^{*}\{\textbf{O}'\textbf{V}_{\epsilon}^{-1}(\textbf{Z}-\textbf{X}\pmb{\beta})+\frac{1}{\delta^{2}}\widetilde{\pmb{\nu}}\}$, and $(\pmb{\Sigma}^{*})^{-1}=\delta^{-2}\textbf{I}+\textbf{O}'\textbf{V}_{\epsilon}^{-1}\textbf{O}$.
\item The approximated full conditional distribution for $\pmb{\eta}$ is given by
\begin{equation*}
f(\pmb{\eta}\vert\cdot) \propto \exp\left\lbrace-\frac{\pmb{\eta}^{\prime}\pmb{\eta}}{2\sigma_{\eta}^{2}}\right\rbrace\exp(-\frac{(\pmb{\nu}-\widetilde{\pmb{\nu}})^{\prime}(\pmb{\nu}-\widetilde{\pmb{\nu}})}{2\delta^{2}}),
\end{equation*}
where recall $\widetilde{\pmb{\nu}}$ is a function of $\pmb{\eta}$. We use Metropolis-Hasting to sample $\pmb{\eta}$ and we use a multivariate normal distribution for the proposal distribution.
\item For $\sigma^2_{\nu}$, we use an inverse gamma prior distribution,
\begin{equation*}
f(\sigma_{\nu}^{2}\vert\cdot) \propto (\sigma_{\nu}^{2})^{-(\alpha_{1}+\frac{n}{2})-1} \exp\left\lbrace{-\frac{\pmb{\nu}'_{m}C(\pmb{\theta})^{-1}\pmb{\nu}_{m}}{2\sigma_{\nu}^{2}}-\sigma_{\nu}^{-2}\beta_{1}}\right\rbrace,
\end{equation*}
which is an inverse gamma distribution with shape parameter $\alpha_{1}+\frac{n}{2}$ and scale parameter $\frac{\pmb{\nu}'_{m}C(\pmb{\theta})^{-1}\pmb{\nu}_{m}}{2}+\beta_{1}$.
\item The full conditional distribution of $\sigma_{\beta}^{2}$ is easily obtained and given by,
\begin{equation*}
f(\sigma_{\beta}^{2}\vert\cdot) \propto (\sigma_{\beta}^{2})^{-(\alpha_{2}+\frac{p}{2})-1} \exp\left\lbrace{-\frac{\pmb{\beta}^{\prime}\pmb{\beta}}{2\sigma_{\beta}^{2}}-\sigma_{\beta}^{-2}\beta_{2}}\right\rbrace,
\end{equation*}
which is an inverse gamma distribution with shape parameter $\alpha_{2}+\frac{p}{2}$ and scale parameter $\frac{\pmb{\beta}^{\prime}\pmb{\beta}}{2}+\beta_{2}$.
\item The full conditional distribution of $\sigma_{\eta}^{2}$ is easily obtained and given by,
\begin{equation*}
f(\sigma_{\eta}^{2}\vert\cdot) \propto (\sigma_{\eta}^{2})^{-(\alpha_{3}+\frac{r}{2})-1} \exp\left\lbrace{-\frac{\pmb{\eta}^{\prime}\pmb{\eta}}{2\sigma_{\eta}^{2}}-\sigma_{\eta}^{-2}\beta_{3}}\right\rbrace,
\end{equation*}
which is an inverse gamma distribution with shape parameter $\alpha_{3}+\frac{r}{2}$ and scale parameter $\frac{\pmb{\eta}^{\prime}\pmb{\eta}}{2}+\beta_{3}$.
\item The prior for $\phi$ is $\text{Uniform Distribution}(L,U)$, and the full conditional distribution for $\phi$ follows that,
\begin{equation*}
f(\phi\vert\cdot)\propto f(\pmb{\nu}_{m}\mid\widetilde{\pmb{\nu}}, \sigma_{\nu}^2,\phi)I_{(L\le\phi\le U)}
\end{equation*}
\item The full conditional distribution of $\delta^2$ is easily obtained and given by,
\begin{equation*}
f(\delta^2\vert\cdot)\propto(\delta^{2})^{-(\alpha_{4}+\frac{n}{2})-1}\exp\left\lbrace-\frac{(\pmb{\nu}-\widetilde{\pmb{\nu}})^{\prime}(\pmb{\nu}-\widetilde{\pmb{\nu}})}{2\delta^2}-\frac{\beta_{4}}{\delta^2}\right\rbrace,\label{delta}
\end{equation*}
which is an inverse gamma distribution with shape parameter $\alpha_{4}+\frac{n}{2}$ and scale parameter
$\frac{(\pmb{\nu}-\widetilde{\pmb{\nu}})'(\pmb{\nu}-\widetilde{\pmb{\nu}})}{2}+\beta_{4}$.
\item The full conditional distribution of $\sigma_{\epsilon}^2$ is easily obtained and given by,
\begin{equation*}
f(\sigma_{\epsilon}^2\vert\cdot)\propto(\sigma_{\epsilon}^2)^{-(\alpha_{5}+\frac{n}{2})-1}\exp\left\lbrace-\frac{(\textbf{Z}-\textbf{X}\pmb{\beta}-\pmb{\nu})^{\prime}(\textbf{Z}-\textbf{X}\pmb{\beta}-\pmb{\nu})}{2\sigma_{\epsilon}^2}-\frac{\beta_{5}}{\sigma_{\epsilon}^2}\right\rbrace,\label{tau}
\end{equation*}
which is an inverse gamma distribution with shape parameter $\alpha_{5}+\frac{n}{2}$ and scale parameter
$\frac{(\textbf{Z}-\textbf{X}\pmb{\beta}-\pmb{\nu})^{\prime}(\textbf{Z}-\textbf{X}\pmb{\beta}-\pmb{\nu})}{2}+\beta_{5}$.
\end{enumerate}
\nocite{*}
\end{document} |
\begin{document}
\title{{Affine\hspace*{0.1em} planes,\hspace*{0.1em} ternary\hspace*{0.1em} rings,\hspace*{0.1em}
and\hspace*{0.1em} examples\hspace*{0.1em}\\ of\hspace*{0.1em} non-Desarguesian\hspace*{0.1em} planes}}
\date{}
\author{\textnormal{Nikolai\hspace*{0.1em} V.\hspace*{0.1em} Ivanov}}
\renewcommand{\fnsymbol{footnote}}{\fnsymbol{footnote}}
\maketitle
\footnotetext{\hspace*{-0.8em}\copyright\ Nikolai V. Ivanov, 2008, 2014.\hskip.15em\
Neither the work reported in this paper,
nor its preparation were supported by any governmental
or non-governmental agency, foundation, or institution.}
\vspace*{6ex}
\myit{\hspace*{0em}\large Contents}\vspace*{1ex} \vspace*{
amount}\\
\myit{Preface}\hspace*{0.5em} \hspace*{0.5em} \vspace*{1ex}\\
\myit{\phantom{1}1.}\hspace*{0.5em} Introduction \hspace*{0.5em} \vspace*{0.25ex}\\
\myit{\phantom{1}2.}\hspace*{0.5em} Affine planes and ternary rings \hspace*{0.5em} \vspace*{0.25ex}\\
\myit{\phantom{1}3.}\hspace*{0.5em} Isomorphisms of ternary rings \hspace*{0.5em} \vspace*{0.25ex}\\
\myit{\phantom{1}4.}\hspace*{0.5em} Isotopisms of ternary rings \hspace*{0.5em} \vspace*{0.25ex}\\
\myit{\phantom{1}5.}\hspace*{0.5em} Veblen-Wedderburn systems \hspace*{0.5em} \vspace*{0.25ex}\\
\myit{\phantom{1}6.}\hspace*{0.5em} Near-fields, skew-fields, and isomorphisms \hspace*{0.5em} \vspace*{0.25ex}\\
\myit{\phantom{1}7.}\hspace*{0.5em} Translations \hspace*{0.5em} \vspace*{0.25ex}\\
\myit{\phantom{1}8.}\hspace*{0.5em} Andr\'{e} quasi-fields \hspace*{0.5em} \vspace*{0.25ex}\\
\myit{\phantom{1}9.}\hspace*{0.5em} Conclusion: non-Desarguesian planes \hspace*{0.5em} \vspace*{1ex}\\
\myit{Bibliography}\hspace*{0.5em} \hspace*{0.5em} \vspace*{0.25ex}
\mynonumbersection{Preface}{preface}
{\small
The main goal of this paper is to present a detailed self-contained exposition of a part of the theory of affine planes
leading to a construction of affine (or, equivalently, projective) planes not satisfying the Desargues axiom.
Unfortunately, most expositions of the theory of affine and projective planes stop before such a construction.
Perhaps, the reason is that this theory is usually presented as a part of combinatorics, while such constructions
are in the spirit of the abstract algebra.
This article is intended to be a complement to the introductory expositions of the theory of affine and projective planes,
and as an easy reading for mathematicians with a taste for abstract algebra (and the geometry of points, lines, and planes).
We start with an axiomatic definition of affine planes
and show that all of them admit a coordinate system over a so-called \emph{ternary ring}
(in fact, ternary rings are defined in such a way as to make this statement true).
It is well known that the Desargues axiom for an affine plane is equivalent to the existence of a coordinate system over a skew-field.
Since there are excellent presentations of this equivalence (see, for example, the elegant book \cite{Har} by R. Harstshorne),
we simply define a Desarguesian plane as a plane which admits a coordinate system over a skew-field (see, for example, the elegant book \cite{Har} by R. Hartshorne for a presentation of this equivalence).
Similarly, since the correspondence between affine and projective planes is very well presented in the literature,
we do not consider the projective planes in this article.
A novelty of our exposition is the notation $(a,x,b)\longmapsto\langle ax+b \rangle$ for the ternary operation in a ternary ring,
replacing the standard notation $(a,x,b)\longmapsto x\cdot a \circ b$\hspace*{-0.2em}.\hspace*{0.2em}\
The author hopes that the much more suggestive notation $\langle ax+b \rangle$ will make this beautiful theory accessible to a wider audience.
}
\vspace*{4ex}
\renewcommand{\baselinestretch}{1.01}
\selectfont
\mysection{Introduction}{introduction}
\myitpar{Affine planes.} An\hskip.1em\ \emph{affine plane} $\mathbb{A}$ is a set,
the elements of which are called\hskip.1em\ \emph{points},\hskip.05em\ together with a collection of subsets,
called\hskip.1em\ \emph{lines}, satisfying the following three axioms.
\begin{description}
\item[A1.] \emph{For every two different points there is a unique line containing them.}
\item[A2.] \emph{For every line $l$ and a point $P$ not in $l$, there is a unique line containing $P$ and disjoint from $l$.}
\item[A3.] \emph{There are three points such that no line contains all three of them.}
\end{description}
Two lines are called\hskip.1em\ \emph{parallel}\hskip.15em\ if they are either equal, or disjoint.
Note that being parallel is an equivalence relation.
Indeed, this relation is obviously reflexive and symmetric.
If two different lines $l_1{\hskip.1em},{\hskip.1em} l_2$ are parallel to a line $l$\hspace*{-0.2em},\hspace*{0.2em}\
then the intersection of $l_1$ and $l_2$ is empty by the axiom {\bf A2}, i.e. $l_1$ is parallel to $l_2$\hspace*{-0.2em}.\hspace*{0.2em}\
An \emph{isomorphism} of an affine plane $\mathbb{A}$ with an affine plane $\mathbb{A}'$
is defined as a bijection $\mathbb{A}\rightarrow\mathbb{A}'$ taking lines to lines.
Two affine planes $\mathbb{A}$\hspace*{-0.2em},\hspace*{0.2em}\ $\mathbb{A}'$ are called\hskip.05em\ \emph{isomorphic},
if there exists an isomorphism $\mathbb{A}\rightarrow\mathbb{A}'$\hspace*{-0.2em}.\hspace*{0.2em}\
\myitpar{Affine planes and skew-fields.} A \emph{skew-field}\hskip.05em\ is defined in the same way as a field, except that the commutativity of the multiplication is not assumed.
Skew-fields (in particular, fields) lead to the main examples of affine planes.
Namely{\hskip.025em}, for a skew-field $K$\hspace*{-0.2em},\hspace*{0.2em}\ let $\mathbb{A}=K^2$ and let $(x,y)$ be the canonical coordinates in $K^2$\hspace*{-0.2em}.\hspace*{0.2em}\
A \emph{line}\hskip.05em\ in $\mathbb{A}$ is defined as a subset of $\mathbb{A}$ described by an equation having either the form $y=ax+b$ for some $a,b\in K$\hspace*{-0.2em},\hspace*{0.2em}\
or the form $x=c$ for some $c\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
An easy exercise shows that $K^2$ with lines defined in this way is indeed an affine plane.
If an affine plane is isomorphic to $K^2$\hspace*{-0.2em},\hspace*{0.2em}\ then we say that it is\hskip.1em\ \emph{defined over $K$\hspace*{-0.2em}.\hspace*{0.2em}\ }
The class of affine planes defined over skew-fields can be characterized in purely geometric terms,
i.e. in terms involving only points and lines.
Namely, an affine plane $\mathbb{A}$ is defined over a skew-field if and only if $\mathbb{A}$
satisfies the so-called \emph{Minor \emph{and} Major Desargues axioms}.
Another way to look at this characterization involves \emph{projective planes}
(which are not used in this article and by this reason are not even defined).
Every affine plane can be canonically embedded in a projective plane, called its \emph{projective completion},
by adding a \emph{line at the infinity}\hskip.05em\ to it.
In particular, one can construct a projective plane starting from a skew-field $K$\hspace*{-0.2em}.\hspace*{0.2em}\
A projective plane constructed in this way is said to be \emph{defined over $K$\hspace*{-0.2em}.\hspace*{0.2em}\ }
A projective plane is defined over a skew-field
if and only if it satisfies the \emph{Desargues axiom} for projective planes.
Also, the projective completion of an affine plane $\mathbb{A}$
is defined over a skew-field $K$\hspace*{-0.2em},\hspace*{0.2em}\
i.e. can be obtained from some affine plane by adding the line at infinity
(which may be different from the original line at infinity added to $\mathbb{A}$\hspace*{-0.2em})
if and only if $\mathbb{A}$ is defined over the same skew-field $K$\hspace*{-0.2em}.\hspace*{0.2em}\
In the present paper these characterizations serve only as a justification of the following term.
Namely, an affine plane is said to be \emph{non-Desarguesian} if it is not defined over a skew-field.
Our main goal is to construct examples of non-Desarguesian planes.
\myitpar{Prerequisites.} The prerequisites for reading this article are rather modest.
It is not even strictly necessary to be familiar beforehand with the notion of an affine plane.
But the reader is expected to be familiar with the notions of rings and fields.
At some places we speak about vector spaces over skew-fields;
without much loss the reader may assume that these skew-fields are actually fields.
In Section \ref{andre} we use one basic result from the Galois theory, but it can be well taken on faith.
Mainly, only a taste for abstract algebra is expected, especially in Section \ref{andre}.
\myitpar{The organization of the paper.} In Section \ref{ternary} we introduce the notion of coordinates in an affine plane,
not necessarily defined over a skew-field.
The coordinates of a point are taken from a set with a ternary operation, called a \emph{ternary ring}.
Conversely, every ternary ring defines an affine plane, as explained in Section \ref{ternary}.
A source of difficulties is the fact that isomorphic affine planes can be coordinatized by non-isomorphic ternary rings.
They are isomorphic under an obvious additional condition; this is discussed in Section \ref{isomorphisms}.
In Section \ref{isotopisms} we discuss a weaker notion of an isomorphism for ternary rings
(namely, the notion of an {\em isotopism}), which is better related to isomorphisms of affine planes.
But, in fact, this notion is not needed for our main goal, namely, for construction of non-Desarguesian planes,
and Section \ref{isotopisms} may be skipped without loss of the continuity.
In Section \ref{quasi-fields} we introduce the most tractable class of ternary rings, namely, the class of
\emph{Veblen-Wedderburn systems}, also called \emph{quasi-fields}.
Like the fields, they are sets with two binary operations, but satisfying only a fairly weak version of the axioms of a field.
Sections \ref{near-fields} and \ref{translations} are devoted to two different ways to prove that an affine plane is not defined over a skew-field.
Section \ref{andre} is devoted to a construction of quasi-fields not isomorphic to a skew-field.
Finally, in Section \ref{conclusion} we combine the results of the previous sections in order to construct non-Desarguesian planes.
The main ideas are contained in Sections \ref{ternary}, \ref{quasi-fields}, and \ref{andre}.
\myitpar{Further reading.} There are several excellent books exploring deeper these topics,
in particular, exploring the role of the Desargues axioms.
For a systematic introduction to the theory of affine and projective planes the reader may turn
to the classical unsurpassed books by E. Artin \cite{Ar} (see \cite{Ar}, Chapter II),
M. Hall \cite{Ha1} (see \cite{Ha1}, Chapter 20), and R. Hartshorne \cite{Har}.
The book by Hartshorne is the most elementary one of them.
The reader is not assumed even to be familiar with the notions of a group and of a field;
in fact, the affine and projective geometries are used to motivate these notions.
The book by M. Hall (or, rather, its last chapter, which actually does not depend much on the previous ones)
gives an in-depth exposition, directed at mature mathematicians, of the theory of projective planes and its connections with algebra.
E. Artin's elegant exposition is written on an intermediate level between books of R. Hartshorne and M. Hall.
All these books present in detail the characterization of planes defined over skew-fields in terms of Desargues axioms.
A lot of books in combinatorics discuss some elementary parts of the theory of affine and projective planes,
but very rarely include a construction of a non-Desarguesian plane.
An exception is another classic by M. Hall, namely \cite{Ha2}.
We followed \cite{Ha2} in that we deal with affine planes and not with projective ones,
and in the way we coordinatize affine planes in Section \ref{ternary}.
The book of D. Hughes and F. Piper \cite{HP} is, probably,
the most comprehensive exposition of the theory of projective planes
(the study of which is essentially equivalent to the study of affine planes).
The state of the art as of 2007 is discussed by Ch. Weibel \cite{W}.
\mysection{Affine planes and ternary rings}{ternary}
Let $\mathbb{A}$ be an affine plane. We start by introducing some sort of cartesian coordinates in $\mathbb{A}$\hspace*{-0.2em}.\hspace*{0.2em}\
We follow the approach of M. Hall (see \cite{Ha2}, Section 12.4).
Then we use these coordinates in order to define a ternary operation on any line in $\mathbb{A}$\hspace*{-0.2em}.\hspace*{0.2em}\
Next, we will turn the main properties of this operation into axioms.
This leads to the notion of a \emph{ternary ring}.
Our main novelty here is the notation $(a,x,b)\mapsto\langle ax+b \rangle$ for the ternary operation in ternary rings,
replacing the notation $(a,x,b)\mapsto x\cdot a \circ b$ used by M. Hall and other authors.
The notation $\langle ax+b \rangle$ seems to be much more suggestive than $x\cdot a \circ b$
and makes the whole theory much more transparent.
\myitpar{The simplest form of cartesian coordinates on $\mathbb{A}$\hspace*{-0.2em}.\hspace*{0.2em}\ }\emph{Fix
two non-parallel lines $l$\hspace*{-0.2em},\hspace*{0.2em}\ \hspace*{-0.2em}$m$ in $\mathbb{A}$\hspace*{-0.2em}.\hspace*{0.2em}\ }
The lines $l$\hspace*{-0.2em},\hspace*{0.2em}\ $m$ allow us to identify $\mathbb{A}$ with the cartesian product $l\times m$ in the same way as one does this
while introducing the cartesian coordinates in the usual Euclidean plane.
Namely, given a point $p\in\mathbb{A}$ we assign to it the pair $(x,y)\in l\times m$\hspace*{-0.2em},\hspace*{0.2em}\
where $x$ is the point of intersection with the line $l$ of the line containing $p$ and parallel to $m$
(such a line cannot be parallel to $l$\hspace*{-0.2em},\hspace*{0.2em}\ because otherwise $l$ would be parallel to $m$ contrary to the assumption),
and where $y$ is defined in a similar manner.
This leads to a map $\mathbb{A}\rightarrow l\times m$\hspace*{-0.2em}.\hspace*{0.2em}\
One can also define a map $l\times m\rightarrow\mathbb{A}$ by assigning to $(x,y)$
the intersection point of the line containing $x$ and parallel to $m$ and the line containing $y$ and parallel to $l$\hspace*{-0.2em}.\hspace*{0.2em}\
Clearly, these two maps are the inverses of each other.
We will use them to identify $\mathbb{A}$ with $l\times m$\hspace*{-0.2em}.\hspace*{0.2em}\
Such an identification is the simplest form of the \emph{cartesian coordinates} on $\mathbb{A}$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $\bf 0$ be the point of intersection of $l$ and $m$\hspace*{-0.1em};\hskip.05em\ the point $\bf 0$ serves as the origin of our coordinate system.
\myitpar{Identifying $l$ and $m$\hspace*{-0.2em}.\hspace*{0.2em}\ } Next, we would like to identify the lines $l$ and $m$\hspace*{-0.2em}.\hspace*{0.2em}\
In order to do this, we need a line $d$ passing through $\bf 0$ and different from $l$, $m$\hspace*{-0.2em}.\hspace*{0.2em}\
One can get such a line, for example, by taking any line $d_0$ intersecting both $l$ and $m$
in such a way that the points of intersection are different from $\bf 0$\hspace*{-0.2em},\hspace*{0.2em}\
and then take as $d$ the line parallel to $d_0$ and passing through $\bf 0$\hspace*{-0.2em}.\hspace*{0.2em}\
\emph{From now on, we will assume that such a line $d$ is fixed.}
Using $d$\hspace*{-0.2em},\hspace*{0.2em}\ we can construct a natural bijection between $l$ and $m$\hspace*{-0.2em}.\hspace*{0.2em}\
Namely, given $x\in l$\hspace*{-0.2em},\hspace*{0.2em}\ let $z(x)\in d$ be the intersection point with $d$ of the line containing $x$ and parallel to $m$\hspace*{-0.2em},\hspace*{0.2em}\
and let $y(x)\in m$ be the intersection point with $m$ of the line containing $z(x)$ and parallel to $l$\hspace*{-0.2em}.\hspace*{0.2em}\
Clearly, $x\mapsto y(x)$ is a bijection $l\rightarrow m$\hspace*{-0.2em}.\hspace*{0.2em}\
Now, let $K$ be any set endowed with a bijection $K\rightarrow l$\hspace*{-0.2em}.\hspace*{0.2em}\
By composing it with the bijection from the previous paragraph, we get a bijection $K\rightarrow m$\hspace*{-0.2em}.\hspace*{0.2em}\
Formally, we can simply set $K=l$\hspace*{-0.2em},\hspace*{0.2em}\
but we are going to treat $K$ and $l$ differently, and by this reason it is better to consider them as different objects.
The set $K$ is going to play a role similar to the role of $\bf R$ in Euclidean geometry.
\myitpar{The cartesian coordinates on $\mathbb{A}$\hspace*{-0.2em}.\hspace*{0.2em}\ } Our bijections allow us to identify $\mathbb{A}$ with $K^2$\hspace*{-0.2em}.\hspace*{0.2em}\
We consider this identification as the \emph{cartesian coordinates} on $\mathbb{A}$\hspace*{-0.2em}.\hspace*{0.2em}\
This identification, obviously, turns the line $d$ into the \emph{diagonal}\hskip.1em\ $\{(x,x) : x\in K\}$\hspace*{-0.2em}.\hspace*{0.2em}\
Guided by the construction of the usual cartesian coordinates, we denote
the element of $K$ corresponding to the point ${\bf 0}\in l$ by $0$\hspace*{-0.2em}.\hspace*{0.2em}\
Then ${\bf 0}=(0,0)$\hspace*{-0.2em}.\hspace*{0.2em}\
We would like also to have an analogue of the number $1$\hspace*{-0.2em}.\hspace*{0.2em}\
In fact, we can choose as $1$ an arbitrary element of $K$ different from $0$\hspace*{-0.2em}.\hspace*{0.2em}\
This freedom of choice of $1$ corresponds to the freedom of choice of the unit of measurement in the Euclidean geometry.
\emph{From now on, we will assume that such an element $1\in K$ is fixed.}
\myitpar{The slopes of lines.} Next, we define the \emph{slope} of a line $L$ in $\mathbb{A}$\hspace*{-0.2em}.\hspace*{0.2em}\
If $L$ is parallel to $l$\hspace*{-0.2em},\hspace*{0.2em}\ its slope is defined to be $0$\hspace*{-0.2em}.\hspace*{0.2em}\
Such lines are called \emph{horizontal}.
If $L$ is parallel to $m$\hspace*{-0.2em},\hspace*{0.2em}\ its \emph{slope} is defined to be $\infty$\hspace*{-0.2em}.\hspace*{0.2em}\
Such lines are called \emph{vertical}.
If the line $L$ is not vertical, consider the line $L'$ parallel to $L$ and passing through $(0,0)$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $(1,a)$ be the intersection point of $L'$ with the line $\{(1,z) : z\in K\}$
(i.e. with the vertical line passing through $(1,0)$\hspace*{-0.1em}).
The \emph{slope} of $L$ is defined to be $a$\hspace*{-0.2em}.\hspace*{0.2em}\
Note that it depends on the choice of $1$\hspace*{-0.2em}.\hspace*{0.2em}\
By the definition, the parallel lines have the same slope.
Since $d$ contains the point $(1,1)$\hspace*{-0.2em},\hspace*{0.2em}\ the slope of $d$ is equal to $1$\hspace*{-0.2em}.\hspace*{0.2em}\
Clearly, two lines have the same slope if and only if they are parallel.
\myitpar{The ternary operation on $K$\hspace*{-0.2em}.\hspace*{0.2em}\ } Let us define a ternary operation
$(a,x,b)\longmapsto\langle ax+b \rangle$ on $K$ as follows.
Let $L$ be the unique line passing through the point $(0,b)$ and having the slope $a\neq\infty$\hspace*{-0.2em}.\hspace*{0.2em}\
Since $L$ is a non-vertical line, it intersects the line $m$ only at the point $(0,b)$\hspace*{-0.2em}.\hspace*{0.2em}\
For every $x\in K$\hspace*{-0.2em},\hspace*{0.2em}\ the line $L$ intersects the vertical line $\{(x,z) : z\in K\}$ at a unique point.
Let $(x,y)$ be this point and set
\begin{equation}
\label{line}
\langle ax+b \rangle{\hskip.2em} ={\hskip.2em} y\hspace*{0.05em}.
\end{equation}
We consider $(a,x,b)\longmapsto\langle ax+b \rangle$ as a ternary operation in $K$\hspace*{-0.2em}.\hspace*{0.2em}\
In general we do not have separate multiplication and addition operations in $K$\hspace*{-0.1em};\hskip.1em\
the angle brackets are intended to stress this.
Clearly, every non-vertical line is the set of points $(x,y)\in K^2$ satisfying (\ref{line}) for some $a,b\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
Every vertical line is the set of points $(x,y)\in K^2$ satisfying, for some fixed $c\in K$\hspace*{-0.2em},\hspace*{0.2em}\ the equation $x=c$\hspace*{-0.2em}.\hspace*{0.2em}\
\myitpar{The main properties of\hskip.15em\ $(a,x,b)\longmapsto\langle ax+b \rangle$\hspace*{-0.2em}.\hspace*{0.2em}\ } They are the following.
\begin{description}
\item[T1.] $\langle 1x+0 \rangle{\hskip.2em} ={\hskip.2em} \langle x1+0 \rangle{\hskip.2em} ={\hskip.2em} x$\hspace*{-0.2em}.\hspace*{0.2em}\
\item[T2.] $\langle a0+b \rangle{\hskip.2em} ={\hskip.2em} \langle 0a+b \rangle{\hskip.2em} ={\hskip.2em} b$\hspace*{-0.2em}.\hspace*{0.2em}\
\item[T3.] \emph{\hspace*{0.4em}If\hskip.05em\ $a,x,y\in K$\hspace*{-0.2em},\hspace*{0.2em}\
then there is a unique $b\in K$ such that $\langle ax+b \rangle{\hskip.2em} ={\hskip.2em} y$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\item[T4.] \emph{\hspace*{0.4em}If\hskip.05em\ $a,a',b,b'\in K$ and $a\neq a'$\hspace*{-0.2em},\hspace*{0.2em}\
then the equation $\langle ax+b \rangle{\hskip.2em} ={\hskip.2em} \langle a'x+b' \rangle$ has a unique solution $x\in K$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\item[T5.] \emph{\hspace*{0.4em}If\hskip.05em\ $x,y,x',y'\in K$ and $x\neq x'$\hspace*{-0.2em},\hspace*{0.2em}\
then there is a unique pair $a,b\in K$ such that $y{\hskip.2em} ={\hskip.2em} \langle ax+b \rangle$
and $y'{\hskip.2em} ={\hskip.2em} \langle ax'+b \rangle$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\end{description}
Let us explain the geometric meaning of these properties.
This explanation also proves that they indeed hold for $(a,x,b)\longmapsto\langle ax+b \rangle$\hspace*{-0.2em}.\hspace*{0.2em}\
\begin{description}
\item[T1{\hskip.05em}:] The equation $\langle 1x+0 \rangle{\hskip.2em} ={\hskip.2em} x$ means that
$d{\hskip.2em} ={\hskip.2em} \{(x,x) : x\in K\}$ is a line with the slope $1$\hspace*{-0.2em}.\hspace*{0.2em}\
The equation $\langle x1+0 \rangle{\hskip.2em} ={\hskip.2em} x$
means that the slope of the line passing through $(0,0)$ and $(1,x)$ is equal to $x$ (which is true by the
definition of the slope).
\item[T2{\hskip.05em}:] The equation $\langle a0+b \rangle{\hskip.2em} ={\hskip.2em} b$
means that the line defined by the equation (\ref{line}) intersects $m$ at $(0,b)$
(which is true by the definition of $\langle ax+b \rangle$).
The equation $\langle 0a+b \rangle{\hskip.2em} ={\hskip.2em} b$ means that
the horizontal line passing through $(0,b)$ consists of points $(a,b)$, $a\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
\item[T3{\hskip.05em}:] This means that for every slope $\neq \infty$
there is a unique line with this slope passing through $(x,y)$\hspace*{-0.2em}.\hspace*{0.2em}\
\item[T4{\hskip.05em}:] This means that two lines with different slopes $\neq\infty$ intersect at a unique point.
\item[T5{\hskip.05em}:] This means that every two points not on the same vertical line (i.e. not on the same line with slope $\infty$) are contained in a unique line with slope $\neq\infty$\hspace*{-0.2em}.\hspace*{0.2em}\
\end{description}
\myitpar{Ternary rings.} Motivated by these properties, suppose that we have a set $K$ with two distinguished elements $0$ and $1\neq 0$,
and a ternary operation $(a,x,b)\longmapsto \langle ax+b \rangle$
satisfying {\bf T1}{\hskip.025em}--{\hskip.025em}{\bf T5}.
Such a $K$ is called a {\em ternary ring}.
Consider the set of points $\mathbb{A}=K^2$, and introduce the lines in the following manner: for every $x_0\in K$ we have a line $\{(x_0,y) : y\in K\}$
(such lines are called {\em vertical\/}),
and for every $a,b\in K$ we have a line $\{(x,y) : y=\langle ax+b \rangle\}$\hspace*{-0.2em}.\hspace*{0.2em}\
Clearly, this defines a structure of an affine plane on $\mathbb{A}=K^2$
(notice that the instances of the axioms of an affine plane involving vertical lines hold trivially).
\mypar{Proposition.}{prop1} {\em If $K$ is a finite set, then the condition\hskip.1em\ {\bf T5\hskip.15em\ } follows from\hskip.1em\ {\bf T3\hskip.15em\ } and\hskip.1em\ {\bf T4\hskip.15em\ }\hspace*{-0.1em}\hspace*{-0.2em}.}
\proof Given $x,x'\in K$ such that $x\neq x'$\hspace*{-0.2em},\hspace*{0.2em}\ consider the map $f\colon K^2\rightarrow K^2$ defined by
\[
f(a,b){\hskip.2em} ={\hskip.2em} (\langle ax+b \rangle, \langle ax'+b \rangle).
\]
Suppose that $f$ is not injective, i.e. that
\begin{equation}
\label{eq1}
\langle ax+b \rangle{\hskip.2em} ={\hskip.2em} \langle a'x+b' \rangle,
\end{equation}
\begin{equation}
\label{eq2}
\langle ax'+b \rangle{\hskip.2em} ={\hskip.2em} \langle a'x'+b' \rangle
\end{equation}
for some $(a,b)\neq (a',b')$\hspace*{-0.2em}.\hspace*{0.2em}\
If $a=a'$, then (\ref{eq1}) contradicts {\bf T3}.
If $a\neq a'$, then the two equalities (\ref{eq1}), (\ref{eq2}) together contradict {\bf T4}.
Therefore {\bf T3} and {\bf T4} imply that $f$ is injective.
Since $f$ is a map of a finite set to itself, the injectivity of $f$ implies its surjectivity.
So, $f$ is a bijection.
{\bf T5} follows. $\blacksquare$
\mysection{Isomorphisms of ternary rings}{isomorphisms}
\myitpar{The choices involved in the construction of a ternary ring by an affine plane.}
The construction of the ternary ring $K$ associated to an affine plane $\mathbb{A}$ involves several choices.
First, we selected two non-parallel lines $l$ and $m$\hspace*{-0.2em}.\hspace*{0.2em}\
Then we chose a set $K$ together with a bijection $K\,{\to}\, l$\hspace*{-0.2em},\hspace*{0.2em}\ which we may consider as an identification.
This choice is not essential at all.
As we noted, we could simply take $K=l$ and the map $K\,{\to}\, l$ be the identity map.
Then we chose a third line $d$ passing through the intersection point $\bf 0$ of $l$ and $m$\hspace*{-0.2em},\hspace*{0.2em}\ and an element $1\in K$\hspace*{-0.2em},\hspace*{0.2em}\ $1\neq 0$\hspace*{-0.2em}.\hspace*{0.2em}\
The line $d$ and the element $1\in K$ define a point $z\in\mathbb{A}$\hspace*{-0.1em}:\hskip.05em\
the intersection point with $d$ of the line parallel to $m$ and passing through the point of $l$ corresponding to $1$\hspace*{-0.2em}.\hspace*{0.2em}\
This point corresponds to $(1,1)$ under our identification of $\mathbb{A}$ with $K^2$\hspace*{-0.2em}.\hspace*{0.2em}\
Conversely, given a point $z\in\mathbb{A}$ not on $l,m$\hspace*{-0.2em},\hspace*{0.2em}\
we can define $d$ as the line connecting $\bf 0$ with $z$\hspace*{-0.2em},\hspace*{0.2em}\
and define $1$ as the element of $K$ corresponding to the intersection point with $l$ of the line parallel to $m$ and containing $z$.
Therefore, the choice of $d$ and $1$ is equivalent to the choice of a point $z$ not contained in the union $l\cup m$\hspace*{-0.2em}.\hspace*{0.2em}\
\myitpar{The effect of choices.} Let $\mathbb{A} '$ be another affine plane with two lines and a point $l',m',z'$ as above,
and let $K'$ be its coordinate ring.
Clearly, there is an isomorphism $f\colon \mathbb{A} \rightarrow \mathbb{A} '$
such that $f(l)=l'$\hspace*{-0.2em},\hspace*{0.2em}\ $f(m)=m'$\hspace*{-0.2em},\hspace*{0.2em}\ $f(z)=z'$ if and only if
there is a bijection $F\colon K\rightarrow K'$ such that $F(0)=0$\hspace*{-0.2em},\hspace*{0.2em}\ $F(1)=1$\hspace*{-0.2em},\hspace*{0.2em}\ and
\[
F(\langle ax+b \rangle){\hskip.2em} ={\hskip.2em} \langle F(a)F(x)+F(b) \rangle
\]
for all $a,x,b\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
Such a bijection is called an {\em isomorphism} $K\rightarrow K'$\hspace*{-0.2em}.\hspace*{0.2em}\
\mypar{Proposition.}{prop2} {\em The following two conditions are equivalent.
{\em (i)}\hspace*{0.5em} There is an isomorphism $K^2\rightarrow (K')^2$ taking $\bf 0$ to $\bf 0$\hspace*{-0.2em},\hspace*{0.2em}\ $K\times 0$ to $K'\times 0$\hspace*{-0.2em},\hspace*{0.2em}\
$0\times K$ to $0\times K'$\hspace*{-0.2em},\hspace*{0.2em}\ and $(1,1)$ to $(1,1)$\hspace*{-0.2em}.
{\em (ii)}\hspace*{0.5em} There is an isomorphism $K\rightarrow K'$\hspace*{-0.2em}.}
\proof It is sufficient to apply the above observation to $\mathbb{A}=K^2$\hspace*{-0.2em},\hspace*{0.2em}\ $l=K\times 0$\hspace*{-0.2em},\hspace*{0.2em}\ $m=0\times K$\hspace*{-0.2em},\hspace*{0.2em}\ $z=(1,1)$\hspace*{-0.2em},\hspace*{0.2em}\
and $\mathbb{A}'=(K')^2$\hspace*{-0.2em},\hspace*{0.2em}\ $l'=K'\times 0$\hspace*{-0.2em},\hspace*{0.2em}\ $m'=0\times K'$\hspace*{-0.2em},\hspace*{0.2em}\ $z'=(1,1)$\hspace*{-0.2em}.\hspace*{0.2em}\ $\blacksquare$
We see that up to an isomorphism $K$ is determined by the plane $\mathbb{A}$ with a fixed choice of $l,m,z$. We call the
ternary ring $K$ a {\em coordinate ring} of the plane $\mathbb{A}$ with a triple $(l,m,z)$ as above.
\mysection{Isotopisms of ternary rings}{isotopisms}
\emph{The later sections do not depend on this one.}
\myitpar{Isotopism.} Ternary rings corresponding to the same affine plane $\mathbb{A}$ and the same choice of lines $l{\hskip.1em},{\hskip.1em} m$\hspace*{-0.2em},\hspace*{0.2em}\
but to different choices of the point $z$\hspace*{-0.2em},\hspace*{0.2em}\ may lead to non-isomorphic ternary rings.
Still, different choices of $z$ lead to ternary rings which are \emph{isotopic} in the following sense.
A triple $(F,G,H)$ of bijections $K\rightarrow K'$ is called
an \emph{isotopism}, if $H(0)=0$ and
\[
H(\langle ax+b \rangle){\hskip.2em} ={\hskip.2em} \langle F(a)G(x)+H(b) \rangle
\]
for all $a,x,b\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
Such a triple induces a map $\varphi\colon K^2\rightarrow (K')^2$ by the rule $\varphi (x,y)=(G(x),H(y))$\hspace*{-0.2em}.\hspace*{0.2em}\
Clearly, $\varphi$ takes vertical lines to vertical lines.
The equation $y=\langle ax+b \rangle$ implies $H(y)=\langle F(a)G(x)+H(b) \rangle$\hspace*{-0.2em},\hspace*{0.2em}\
which means that $(x',y')=\varphi (x,y)$ satisfies the equation $y'=\langle F(a)x'+H(b) \rangle$\hspace*{-0.2em}.\hspace*{0.2em}\
It follows that $\varphi$ takes the lines with slope $a\neq\infty$ to the lines with slope $F(a)\neq\infty$\hspace*{-0.2em}.\hspace*{0.2em}\
We see that $\varphi\colon K^2\rightarrow (K')^2$ is an isomorphism of affine planes.
\mypar{Lemma.}{lemma1} {\em $\varphi$ takes horizontal lines to horizontal lines \textup{(}{\hskip.05em} i.e. $F(0)=0$\hspace*{-0.2em}\textup{)},\hskip.1em\
and also takes $\bf 0$ to $\bf 0$\hspace*{-0.2em}.}
\proof In order to prove the first statement, note that $\varphi$ takes the line $\{(x,0): x\in K\}$
to the line $\{(G(x),H(0)): x\in K\}=\{(x',0): x'\in K'\}$
(since $H(0)=0$ and $G$ is a bijection).
Since both these lines have slope $0$\hspace*{-0.2em},\hspace*{0.2em}\ we have $F(0)=0$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore, $\varphi$ takes horizontal lines to horizontal lines.
Let $a$, $a'$ be two different slopes.
Then the lines with equations $y=\langle ax+0 \rangle$\hspace*{-0.2em},\hspace*{0.2em}\ $y=\langle a'x+0 \rangle$ intersect at ${\bf 0}=(0,0)$\hspace*{-0.2em}.\hspace*{0.2em}\
Their images have the equations $y=\langle F(a)x+0 \rangle$\hspace*{-0.2em},\hspace*{0.2em}\ $y=\langle F(a')x+0 \rangle$ (recall that $H(0)=0$).
Since $F$ is a bijection, $F(a)\neq F(a')$ and therefore these two lines intersect only at $\bf 0$\hspace*{-0.2em}.\hspace*{0.2em}\
It follows that $\varphi({\bf 0})={\bf 0}$\hspace*{-0.2em}.\hspace*{0.2em}\ $\blacksquare$
\mypar{Corollary.}{cor1} {\em For an isotopism $(F,G,H)$ we have $F(0)=0$ and $G(0)=0$\hspace*{-0.2em},\hspace*{0.2em}\ in addition to $H(0)=0$\hspace*{-0.2em}.}
\proof $F(0)=0$ is already proved. $G(0)=0$ follows from the following two facts: (i) $\varphi({\bf 0})={\bf 0}$\hspace*{-0.1em};\hskip.1em\ (ii)
$\varphi$ takes the vertical line $x=0$ to the vertical line $x=G(0)$. $\blacksquare$
\mypar{Corollary.}{cor2} {\em The isomorphism of affine planes induced by an isotopism of ternary rings
takes the horizontal (respectively, vertical) line containing $\bf 0$
to the horizontal (respectively, vertical) line containing $\bf 0$\hspace*{-0.2em}.} $\blacksquare$
\mypar{Theorem.}{theorem1} {\em Let $K$ be a coordinate ring of the plane $\mathbb{A}$ with a choice of $l{\hskip.1em},{\hskip.1em} m{\hskip.1em},{\hskip.1em} z$ as above, and
let $K'$ be the coordinate ring of the plane $\mathbb{A}'$ with a choice of $l'{\hskip.1em},{\hskip.1em} m'{\hskip.1em},{\hskip.1em} z'$\hspace*{-0.2em}.\hspace*{0.2em}\
There is an isomorphism $\mathbb{A}\rightarrow\mathbb{A}'$ taking $l$ to $l'$ and $m$ to $m'$ \textup{(}{\hskip.05em} but not necessarily $z$ to $z'$\hspace*{-0.1em}\textup{)} if
and only if there is an isotopism $K\rightarrow K'$\hspace*{-0.2em}.}
\proof The {\em ``if''} direction is already proved. Let us prove the {\em ``only if''} direction.
Let us identify $\mathbb{A}$ with $K^2$ and $\mathbb{A}'$ with $(K')^2$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $G\colon K\rightarrow K'$ be the map corresponding to the map $K\times 0\rightarrow K'\times 0$ induced by $\varphi$\hspace*{-0.2em}.\hspace*{0.2em}\
Similarly, let $H\colon K\rightarrow K'$ be the map corresponding to the map $0\times K\rightarrow 0\times K'$ induced by $\varphi$\hspace*{-0.2em}.\hspace*{0.2em}\
Using the fact that every point is the intersection of a unique vertical line with a unique horizontal line,
and the fact that $\varphi$ maps the vertical (respectively, horizontal) lines to the vertical (respectively, horizontal) lines,
we see that $\varphi$ is determined by the maps $G{\hskip.1em},{\hskip.1em} H$, and, in fact, $\varphi (x,y)=(G(x), H(y))$\hspace*{-0.2em}.\hspace*{0.2em}\
Clearly, $G(0)=0$ and $H(0)=0$\hspace*{-0.2em}.\hspace*{0.2em}\
In order to define $F$\hspace*{-0.2em},\hspace*{0.2em}\ consider for each $a\in K$ the line in $\mathbb{A}$ with the slope $a$ passing through $\bf 0$\hspace*{-0.2em}.\hspace*{0.2em}\
The map $\varphi$ takes it to a line in $\mathbb{A}'$ passing through $\bf 0$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $F(a)\in K'$ be its slope.
Let us check that $(F,G,H)$ is an isotopism.
Since $\varphi$ takes parallel lines to parallel lines,
$\varphi$ takes any line with the slope $a$ to a line with the slope $F(a)$\hspace*{-0.2em}.\hspace*{0.2em}\
So, it takes the line with the equation $y=\langle ax+b \rangle$ into a line with the equation of the form $y'=\langle F(a)x'+b' \rangle$\hspace*{-0.2em}.\hspace*{0.2em}\
The first line contains the point $(0,b)$ (since $\langle a0+b \rangle=b$ by {\bf T2}).
Therefore, the second line contains the point $\varphi (0,b)=(0,H(b))$\hspace*{-0.2em}.\hspace*{0.2em}\
This implies that $H(b)=\langle F(a)0+b'\rangle$\hspace*{-0.2em}.\hspace*{0.2em}\
But $\langle F(a)0+b'\rangle=b'$ by {\bf T2}.
Therefore, $b'=H(b)$\hspace*{-0.2em}.\hspace*{0.2em}\
We see that $\varphi$ maps the line with the equation
$y=\langle ax+b \rangle$ into the line with the equation $y'=\langle F(a)x'+H(b) \rangle$\hspace*{-0.2em}.\hspace*{0.2em}\
Since $\varphi (x,y)=(G(x),H(y))$\hspace*{-0.2em},\hspace*{0.2em}\ we see that $y=\langle ax+b \rangle$ implies $H(y)=\langle F(a)G(x)+H(b) \rangle$\hspace*{-0.2em}.\hspace*{0.2em}\
It follows that $(F,G,H)$ is an isotopism. $\blacksquare$
\myitpar{Remark.} If $(F,G,H)$ is an isotopism, then $F$ and $G$ are determined by $H$ and two elements $F^{-1}(1)$\hspace*{-0.2em},\hspace*{0.2em}\ $G^{-1}(1)$\hspace*{-0.2em}.\hspace*{0.2em}\
Indeed,
\[
F(a){\hskip.2em}
={\hskip.2em} \langle F(a)1+0 \rangle{\hskip.2em}
={\hskip.2em} \langle F(a)G(G^{-1}(1))+H(0) \rangle{\hskip.2em}
={\hskip.2em} H(\langle aG^{-1}(1)+0 \rangle)\hspace*{0.05em},
\]
and
\[
G(a){\hskip.2em}
={\hskip.2em} \langle 1G(a)+0 \rangle{\hskip.2em}
={\hskip.2em} \langle F(F^{-1}(1))G(a)+H(0) \rangle{\hskip.2em}
={\hskip.2em} H(\langle F^{-1}(1)a+0 \rangle)\hspace*{0.05em}.
\]
\myitpar{Historical note.} For non-associative algebras, the notion of an equivalence weaker
than an isomorphism was first introduced by A. A. Albert \cite{Al}.
He called two algebras $A$\hspace*{-0.2em},\hspace*{0.2em}\ $A'$ {\em isotopic} if there is a triple of linear maps $P{\hskip.1em},{\hskip.1em} Q{\hskip.1em},{\hskip.1em} R\colon A\,{\to}\, A'$
such that
\[
R(xy){\hskip.2em} ={\hskip.2em} P(x)Q(y)\hspace*{0.05em}.
\]
He called such a triple an {\em isotopy} of $A$ and $A'$\hspace*{-0.2em}.\hspace*{0.2em}\
Albert relates that
\begin{quote}
\emph{The concept of isotopy was suggested to the author by the work of N. Steenrod who,
in his study of homotopy groups in topology, was led to study isotopy of division algebras.}
\end{quote}
Albert noticed that if associativity of the multiplication is not assumed, the notion of isotopy
is more suitable than the obvious notion of isomorphism, which leads to too many non-isomorphic
(but isotopic) algebras.
It is only natural that the notion of an isomorphism is too narrow for the ternary rings also.
The corresponding notion of an {\em isotopism} was introduced by
M. V. D. Burmester \cite{Bu}, and, independently, by D. Knuth \cite{Kn}.
Both Burmester and Knuth proved Theorem \ref{theorem1} above.
D. Knuth \cite{Kn}, moreover, found an affine plane $\mathbb{A}$ such that all ternary rings corresponding
to different choices of $z$ (but the same choice of $l{\hskip.1em},{\hskip.1em} m$\hspace*{-0.1em}) are pairwise non-isomorphic.
His plane is finite, and the corresponding ternary rings have $32$ elements.
See \cite{Kn}, Section 5.
Unfortunately, his plane was found with the help of a computer, and, as Knuth writes,
\emph{``No way to construct this plane, except by trial and error, is known.''}
To the best knowledge of the author, this is still the case.
\mysection{Veblen-Wedderburn systems}{quasi-fields}
\myitpar{The left Veblen-Wedderburn systems.} Let $K$ be a set with two binary operations $(x{\hskip.05em},{\hskip.05em} y)\hspace*{-0.2em}\mapsto x+y$ and $(x{\hskip.05em},{\hskip.05em} y)\mapsto x{\hskip.05em} y$\hspace*{-0.2em},\hskip.1em\
called the \emph{addition} and the \emph{multiplication}, respectively,
and two distinguished elements $0$\hspace*{-0.2em},\hspace*{0.2em}\ $1$\hspace*{-0.2em},\hspace*{0.2em}\ $0\neq 1$\hspace*{-0.2em}.\hspace*{0.2em}\
If the following properties\hskip.15em\ {\bf VW1}{\hskip.05em}--{\hskip.05em}{\bf VW5}\hskip.15em\ hold,
$K$ is called a\hskip.05em\ \emph{left Veblen-Wedderburn system},\hskip.05em\
or, more recently, a\hskip.05em\ \emph{left quasi-field}.
\begin{description}
\item[VW1.] \emph{$K$ is an abelian group with respect to the addition $+$\hspace*{-0.1em}.\hspace*{0.1em}\ }
\item[VW2.] \emph{\hspace*{0.4em}Given $a{\hskip.1em},{\hskip.1em} b\neq 0$\hspace*{-0.2em},\hspace*{0.2em}\ each of the equations $a{\hskip.1em} x=b$ and $x{\hskip.1em} a=b$ has a unique solution $x$\hspace*{-0.1em};\hskip.05em\ \hspace*{1.0em}moreover, this solution is $\neq 0$\hspace*{-0.2em}.\hspace*{0.2em}\ In addition, if $a,b\neq 0$\hspace*{-0.2em},\hspace*{0.2em}\ then $ab\neq 0$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\item[VW3.] \emph{$1{\hskip.1em} x=x{\hskip.1em} 1=x$\hspace*{-0.2em},\hspace*{0.2em}\ $0{\hskip.1em} x=x{\hskip.1em} 0=0$\hspace*{-0.2em},\hspace*{0.2em}\ and $x+0=0+x=x$ for all $x$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\item[VW4.] \emph{\hspace*{0.4em}Left distributivity: $a(x+y)=ax+ay$ for all $a{\hskip.1em},{\hskip.1em} x{\hskip.1em},{\hskip.1em} y$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\item[VW5.] \emph{\hspace*{0.4em}For $a\neq a'$\hspace*{-0.2em},\hspace*{0.2em}\ the equation $a{\hskip.1em} x=a'{\hskip.025em} x+b$ has a unique solution $x$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\end{description}
This notion was introduced by O. Veblen and J. Wedderburn \cite{VW}.
Notice that\hskip.15em\ {\bf VW5}\hskip.15em\ is a weak version of the right distributivity.
Clearly, under conditions\hskip.15em\ {\bf VW1},\hskip.15em\ {\bf VW2}\hskip.15em\ it follows from the right distributivity.
\myitpar{The right Veblen-Wedderburn systems.} In order to define \emph{right Veblen-Wedderburn system}, or \emph{right quasi-fields},
we replace\hskip.15em\ {\bf VW4}\hskip.15em\ and\hskip.15em\ {\bf VW5}\hskip.15em\ by the following two conditions.
\begin{description}
\item[VW4-r{}.] \emph{Right distributivity: $(x+y)a=x{\hskip.05em} a+y{\hskip.05em} a$ for all $a{\hskip.1em},{\hskip.1em} x{\hskip.1em},{\hskip.1em} y$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\item[VW5-r{}.] \emph{For $a\neq a'$\hspace*{-0.2em},\hspace*{0.2em}\ the equation $x{\hskip.05em} a=x{\hskip.05em} a'+b$ has a unique solution $x$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\end{description}
Clearly, $K$ is a right quasi-field if and only if $K$ with the same addition, $0$\hspace*{-0.2em},\hspace*{0.2em}\ $1$\hspace*{-0.2em},\hspace*{0.2em}\ and the opposite multiplication
$a\cdot b=b{\hskip.05em} a$\hspace*{-0.2em},\hspace*{0.2em}\ is a left quasi-field.
\myitpar{Weak quasi-fields.} If $K$ satisfies only conditions\hskip.15em\ {\bf VW1}{\hskip.05em}--{\hskip.05em}{\bf VW4},\hskip.15em\
it is called a \emph{weak left quasi-field}.
Similarly, $K$ is called a \emph{weak right quasi-field}, if it satisfies conditions\hskip.15em\ {\bf VW1}{\hskip.05em}--{\hskip.05em}{\bf VW3}\hskip.15em\ and\hskip.15em\ {\bf VW4-r{}.}
\myitpar{From Veblen-Wedderburn systems to ternary rings.} If $K$ is a left or right quasi-field,
then we can define a ternary operation $(a{\hskip.1em},{\hskip.1em} x{\hskip.1em},{\hskip.1em} b)\mapsto \langle a{\hskip.1em} x+b \rangle$ by the obvious rule $\langle a{\hskip.05em} x+b \rangle=a{\hskip.05em} x+b$\hspace*{-0.2em}.\hspace*{0.2em}\
We claim that $K$ with this ternary operation and the distinguished elements $0$ and $1$ is a ternary ring.
Let us check this first for left quasi-fields.
\begin{description}
\item[T1{\hskip.05em}:] This condition follows from\hskip.15em\ {\bf VW3}.
\item[T2{\hskip.05em}:] This condition also follows from\hskip.15em\ {\bf VW3}.
\item[T3{\hskip.05em}:] This condition follows from\hskip.15em\ {\bf VW1}.
\item[T4{\hskip.05em}:] Let $a{\hskip.1em},{\hskip.1em} a'{\hskip.1em},{\hskip.1em} b{\hskip.1em},{\hskip.1em} b'\in K$ and $a\neq a'$\hspace*{-0.2em}.\hspace*{0.2em}\
The equation $a{\hskip.05em} x+b =a'{\hskip.025em} x+b'$ for $x$ is equivalent to $a{\hskip.05em} x=a'{\hskip.025em} x+(b'-b)$ by\hskip.15em\ {\bf VW1}.
It has a unique solution by\hskip.15em\ {\bf VW5}.
\item[T5{\hskip.05em}:] Let $x{\hskip.1em},{\hskip.1em} y{\hskip.1em},{\hskip.1em} x'{\hskip.1em},{\hskip.1em} y'\in K$ and $x\neq x'$\hspace*{-0.2em}.\hspace*{0.2em}\
The equations $y=a{\hskip.05em} x+b$ and $y'=a{\hskip.05em} x'+b$ for $a{\hskip.1em},{\hskip.1em} b$ imply
\[
y-y'{\hskip.2em} ={\hskip.2em} a{\hskip.05em} x-a{\hskip.05em} x'
\]
by\hskip.15em\ {\bf VW1}, and hence imply
\[
y-y'{\hskip.2em} ={\hskip.2em} a(x-x')
\]
by\hskip.15em\ {\bf VW4}.
If $y\neq y'$\hspace*{-0.2em},\hspace*{0.2em}\ this equation is uniquely solvable for $a$ by\hskip.15em\ {\bf VW2}.
If we know $a$\hspace*{-0.2em},\hspace*{0.2em}\ we can find $b$ from
either of the equations $y=a{\hskip.05em} x+b$\hspace*{-0.2em},\hspace*{0.2em}\ $y'=a{\hskip.05em} x'+b$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore $b$ is unique.
This proves\hskip.15em\ {\bf T5} in the case $y\neq y'$\hspace*{-0.2em}.\hspace*{0.2em}\
If $y=y'$\hspace*{-0.2em},\hspace*{0.2em}\ then $a$ has to be equal to $0$ by\hskip.15em\ {\bf VW2}\hskip.15em\ ({\hskip.05em} since $x-x'\neq 0$\hspace*{-0.1em}).
Therefore $b=y=y'$\hspace*{-0.2em}.\hspace*{0.2em}\
This proves\hskip.15em\ {\bf T5} in the case $y=y'$\hspace*{-0.2em}.\hspace*{0.2em}\
\end{description}
For a right quasi-field $K$ the conditions\hskip.15em\ {\bf T1}{\hskip.05em}--{\hskip.05em}{\bf T3}\hskip.15em\ hold for the same reasons as for the left quasi-fields
(they do not depend on the distributivity).
Let us check\hskip.15em\ {\bf T4}\hskip.15em\ and\hskip.15em\ {\bf T5}.
\begin{description}
\item[T4{\hskip.05em}:] Let $a{\hskip.1em},{\hskip.1em} a'{\hskip.1em},{\hskip.1em} b{\hskip.1em},{\hskip.1em} b'\in K$ and $a\neq a'$\hspace*{-0.2em}.\hspace*{0.2em}\
The equation $ax+b =a'x+b'$ for $x$ is equivalent to $(a-a')x=(b'-b)$ by\hskip.15em\ {\bf VW1} and\hskip.15em\
{\bf VW4-r{}} (the right distributivity).
It has a unique solution by\hskip.15em\ {\bf VW2} and\hskip.15em\ {\bf VW3}\hskip.1em\ (the latter is needed if $b'-b=0$).
\item[T5{\hskip.05em}:] Let $x{\hskip.1em},{\hskip.1em} y{\hskip.1em},{\hskip.1em} x'{\hskip.1em},{\hskip.1em} y'\in K$ and $x\neq x'$\hspace*{-0.2em}.\hspace*{0.2em}\
The equations $y=ax+b$ and $y'=ax'+b$ for $a{\hskip.1em},{\hskip.1em} b$ imply
\[
ax{\hskip.2em} ={\hskip.2em} ax'+(y-y')\hspace*{0.05em}.
\]
Since $x\neq x'$\hspace*{-0.2em},\hspace*{0.2em}\ this equation has a unique solution $a$ by {\bf VW5-r}.
As above, if we know $a$\hspace*{-0.2em},\hspace*{0.2em}\ we can find $b$ from either of the equations $y=ax+b$\hspace*{-0.2em},\hspace*{0.2em}\ $y'=ax'+b$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore $b$ is unique.
This proves \hskip.15em\ {\bf T5}.
\end{description}
Notice that going from left to right quasi-fields switches the roles of\hskip.15em\ {\bf VW4}\hskip.15em\ and\hskip.15em\ {\bf VW5}.
\myitpar{Reconstructing quasi-field from the corresponding ternary ring.} A left quasi-field can be restored
from the corresponding ternary ring in an obvious manner: it has the same $0$ and $1$\hspace*{-0.1em};
the addition and the multiplication are defined by $a+b=\langle 1a+b \rangle$ and $ab=\langle ab+0\rangle$\hspace*{-0.2em}.\hspace*{0.2em}\
Indeed, $1(ax+0)+b=ax+0+b=ax+b$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore, we may consider quasi-fields as a special class of ternary rings.
In particular, a quasi-field $K$ defines an affine plane.
Of course, this plane can be described directly: its set of points is $K^2$\hspace*{-0.2em},\hspace*{0.2em}\
and its lines are given by the equations of the form $x=a$
and of the form $y=ax+b$\hspace*{-0.2em},\hspace*{0.2em}\ where $(x,y)\in K^2$ and $a{\hskip.1em},{\hskip.1em} b$ are fixed elements of $K$\hspace*{-0.2em}.\hspace*{0.2em}\
\mypar{Proposition.}{prop3} \emph{If\hskip.15em\ $K$ is a weak left quasi-field and is finite,
then $K$ is a left quasi-field \textup{(}{\hskip.05em} i.e.\hskip.15em\ {\bf VW5}\hskip.15em\ follows
from\hskip.15em\ {\bf VW1}{\hskip.05em}--{\hskip.05em}{\bf VW4\hskip.15em\ }\hskip.15em\ if\hskip.15em\ $K$\hskip.1em\ is finite{\hskip.05em}\textup{)}.}
\proof For $a\neq a'$\hspace*{-0.2em},\hspace*{0.2em}\ let $f(x)=ax-a'x$\hspace*{-0.2em}.\hspace*{0.2em}\
Suppose that $f$ is not injective, i.e. $ax-a'x=ay-a'y$ for some $x\neq y$\hspace*{-0.2em}.\hspace*{0.2em}\
Then $a(x-y)=a'(x-y)$ by\hskip.15em\ {\bf VW1}\hskip.15em\ and\hskip.15em\ {\bf VW4}\hskip.15em\ (the left distributivity).
Since $a\neq a'$\hspace*{-0.2em},\hspace*{0.2em}\ this contradicts\hskip.15em\ {\bf VW2}.
Therefore, $f$ is injective.
Being an injective map of a finite set to itself, it is bijective (cf. the proof of Proposition \ref{prop1}).
Therefore, for every $b$ there is a unique $x$ such that $ax-a'x=b$\hspace*{-0.2em}.\hspace*{0.2em}\
Hence,\hskip.15em\ {\bf VW5}\hskip.15em\ holds. $\blacksquare$
\myitpar{Finiteness.} Proposition \ref{prop1} shows that in the finite case we can drop\hskip.15em\ {\bf T5}\hskip.15em\ from the axioms of a ternary ring.
By Proposition \ref{prop3} we can also drop\hskip.15em\ {\bf VW5}\hskip.15em\ from the axioms of a quasi-field for finite $K$\hspace*{-0.2em}.\hspace*{0.2em}\
While checking\hskip.15em\ {\bf T4\hskip.15em\ } for the ternary ring associated to a quasi-field above, we referred to\hskip.15em\ {\bf VW5}.
If the quasi-field is finite and we drop the axiom\hskip.15em\ {\bf VW5}, we have to use the Proposition \ref{prop3},
and the role of\hskip.15em\ {\bf VW5}\hskip.15em\ is passed to the left distributivity.\\
In some situations the finiteness can be replaced by the finite dimensionality over an appropriate skew-field.
\mypar{Proposition.}{prop4} \emph{Suppose that a weak left quasi-field $K$ contains a subset $F$
which is a skew-field with respect to the same operations and with the same $0$ and $1$\hspace*{-0.2em}.\hspace*{0.2em}\
Suppose that, in addition,
\begin{equation}
\label{weak-1}
(x{\hskip.1em} y)a{\hskip.2em} ={\hskip.2em} x{\hskip.05em}(y{\hskip.1em} a),
\end{equation}
\begin{equation}
\label{weak-2}
(x+y)a{\hskip.2em} ={\hskip.2em} x{\hskip.1em} a+y{\hskip.1em} a\hspace*{0.05em},
\end{equation}
for all $a\in F$ and $x{\hskip.1em},{\hskip.1em} y\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
Then $K$ is a right vector space over $F$\hspace*{-0.2em}.\hspace*{0.2em}\
If this vector space is finitely dimensional, then $K$ is a left quasi-field \textup{(}i.e. the condition\hskip.15em\ {\bf VW5}\hskip.15em\ holds\textup{)}.}
\proof The first statement is clear.
Let us prove the second one.
For $a\in K$\hspace*{-0.2em},\hspace*{0.2em}\ let $L_a\colon K \rightarrow K$ be the left multiplication by $a$\hspace*{-0.2em},\hspace*{0.2em}\
i.e. $L_a(x)=ax$\hspace*{-0.2em}.\hspace*{0.2em}\
By\hskip.15em\ {\bf VW4}\hskip.15em\ we have $L_a(x+y)=L_a(x)+L_a(y)$ for all $x{\hskip.1em},{\hskip.1em} y\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
Moreover, if $b\in F$\hspace*{-0.2em},\hspace*{0.2em}\ then $L_a(x{\hskip.1em} b)=a(x{\hskip.1em} b)=(a{\hskip.1em} x)b=L_a(x)b$\hspace*{-0.2em}.\hspace*{0.2em}\
It follows that $L_a$ is (right) linear map of the vector space $K$ to itself.
We need to check that for $a\neq a'$ the equation $L_a(x)=L_{a'}(x)+b$ has a unique solution $x$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $L=L_a-L_{a'}$\hspace*{-0.1em}.\hspace*{0.1em}\
It is sufficient to show that the equation $L(x)=b$ has a unique solution $x$\hspace*{-0.2em}.\hspace*{0.2em}\
Clearly, $L$ is a linear map.
If $L(y)=0$\hspace*{-0.2em},\hspace*{0.2em}\ then $a{\hskip.1em} y-a'{\hskip.025em} y=0$ and $a{\hskip.1em} y=a'{\hskip.025em} y$\hspace*{-0.2em}.\hspace*{0.2em}\
Since $a\neq a'$\hspace*{-0.2em},\hspace*{0.2em}\ the condition\hskip.15em\ {\bf VW2}\hskip.15em\ implies that this is possible only if $y=0$\hspace*{-0.2em}.\hspace*{0.2em}\
We see that $L$ is linear self-map of $K$ with trivial kernel.
Since $K$ is assumed to be finitely dimensional, $L$ is an isomorphism.
This implies that $L(x)=b$ has a unique solution.
This proves the second statement of the proposition. $\blacksquare$
Our proof of Proposition \ref{prop4} follows the proof of Theorem 7.3 in \cite{HP}.
\mysection{Near-fields, skew-fields, and isomorphisms}{near-fields}
In general, if affine planes $K^2$ and $(K')^2$ are isomorphic, the ternary rings $K$ and $K'$ do not need to be isomorphic.
The goal of this section is to prove that they will be isomorphic if $K'$ is a skew-field.
See Corollary \ref{cor3} below.
A part of the proof works in a greater generality, namely for near-fields, which we will define in a moment.
A \emph{left near-field} is a left quasi-field with associative multiplication.
Non-zero elements of a left near-field form a group with respect to the multiplication.
The \emph{right near-fields} are defined in an obvious manner.
Clearly, being a skew-field is equivalent to being a left and right near-field simultaneously.
\mypar{Lemma.}{lemma2} \emph{Let\hskip.15em\ $K'$\hskip.1em\ be a left near-field.
Let\hskip.1em\ ${\bf 0}=(0,0)\in (K')^2$\hspace*{-0.2em},\hspace*{0.2em}\ and let\hskip.15em\ $l{\hskip.1em},{\hskip.1em} m$\hskip.1em\ be, respectively,
the horizontal and the vertical lines in\hskip.1em\ $(K')^2$\hskip.05em\ passing through\hskip.1em\ $\bf 0$ \textup{(}i.e. $l=K'\times 0$\hskip.05em\ and\hskip.1em\ $m=0\times K'$\hspace*{-0.1em}\textup{)}{\hskip.025em}.
For every two points\hskip.05em\ $z{\hskip.1em},{\hskip.1em} z'\in (K')^2$\hskip.05em\ not in\hskip.1em\ $l\cup m$\hspace*{-0.2em},\hspace*{0.2em}\
there is an automorphism of the affine plane $(K')^2$\hskip.05em\ preserving\hskip.05em\ $\bf 0$\hspace*{-0.2em},\hspace*{0.2em}\ $l$\hspace*{-0.2em},\hspace*{0.2em}\ and\hskip.05em\ $m$\hspace*{-0.2em},\hspace*{0.2em}\ and taking $z$\hskip.05em\ to\hskip.05em\ $z'$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\proof It is sufficient to consider the case when $z=(1,1)$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $z'=(u{\hskip.1em},{\hskip.1em} v)$\hspace*{-0.2em}.\hspace*{0.2em}\
Since $(u{\hskip.1em},{\hskip.1em} v)$ is not on $l{\hskip.1em},{\hskip.1em} m$\hspace*{-0.2em},\hspace*{0.2em}\ both $u$ and $v$ are non-zero.
Consider the map $f\colon (K')^2\rightarrow (K')^2$ defined by $f(x{\hskip.1em},{\hskip.1em} y)=(ux{\hskip.1em},{\hskip.1em} vy)$\hspace*{-0.2em}.\hspace*{0.2em}\
Clearly, $f(1{\hskip.1em},{\hskip.1em} 1)=(u{\hskip.1em},{\hskip.1em} v)$\hspace*{-0.2em},\hspace*{0.2em}\ and $f$ takes the vertical line $x=a$ to the vertical line $x=au$\hspace*{-0.2em}.\hspace*{0.2em}\
If $y=ax+b$\hspace*{-0.2em},\hspace*{0.2em}\ then $vy=v(ax)+vb=(vau^{-1})ux+vb$ by the left distributivity and the associativity of the multiplication
(here, as usual, $u^{-1}$ is the unique solution of the equation $xu=1$\hspace*{-0.2em}){\hskip.025em}.
It follows that $f$ takes the line $y=ax+b$ to the line $y=(vau^{-1})x+vb$\hspace*{-0.2em}.\hspace*{0.2em}\
Hence $f$ is an automorphism of $(K')^2$\hspace*{-0.2em}.\hspace*{0.2em}\ $\blacksquare$
\mypar{Corollary.}{cor3} {\em Let $K$ be a ternary ring, and let $K'$ be a left near-field.
Suppose that there is an isomorphism of planes $f\colon K^2\rightarrow (K')^2$
taking $\bf 0$ to $\bf 0$ and taking the horizontal \textup{(}{\hskip.05em} respectively, vertical{\hskip.05em}\textup{)} line through $\bf 0$ in $K^2$
to horizontal \textup{(}{\hskip.05em} respectively,
vertical{\hskip.05em}\textup{)} line through $\bf 0$ in $(K')^2$\hspace*{-0.2em}.\hspace*{0.2em}\
Then $K$ is isomorphic to $K'$ as a ternary ring.}
\proof Let $z=(1{\hskip.1em},{\hskip.1em} 1)\in K^2$. By taking the composition of $f$ with an appropriate automorphism $g\colon (K')^2\rightarrow (K')^2$\hspace*{-0.2em},\hspace*{0.2em}\
if necessary, we can assume that $f(1{\hskip.1em},{\hskip.1em} 1)=(1{\hskip.1em},{\hskip.1em} 1)$ (the required $g$ exists by the lemma).
It remains to apply Proposition \ref{prop2}. $\blacksquare$
\mypar{Lemma.}{lemma3} {\em Let $K'$ be a skew-field.\hskip.15em\
Let ${\bf 0}=(0{\hskip.1em},{\hskip.1em} 0)\in (K')^2$\hspace*{-0.2em},\hspace*{0.2em}\ and let\hskip.1em\ $l{\hskip.1em},{\hskip.1em} m$\hskip.1em\ be, respectively,
the horizontal and the vertical lines in\hskip.05em\ $(K')^2$\hskip.05em\ passing through $\bf 0$
\textup{(}{\hskip.05em} i.e. $l=K'\times 0$ and $m=0\times K'$\hspace*{-0.1em}\textup{)}.
Let\hskip.1em\ $l'{\hskip.1em},{\hskip.1em} m'$\hskip.1em\ be any two non-parallel lines in\hskip.05em\ $(K')^2$\hspace*{-0.2em}.\hspace*{0.2em}\
Then there is an automorphism of the affine plane\hskip.05em\ $(K')^2$\hskip.05em\ taking\hskip.05em\ $l$\hskip.05em\ to\hskip.05em\ $l'$\hskip.05em\ and\hskip.05em\ $m$\hskip.05em\ to\hskip.05em\ $m'$\hskip.05em\
\textup{(}{\hskip.1em} and, in particular, taking\hskip.05em\ $\bf 0$\hskip.05em\ to the intersection point of\hskip.1em\ $l'$\hskip.1em\ and\hskip.1em\ $m'$\hspace*{-0.1em}\textup{)}{\hskip.025em}.}
\proof If $K'$ is a field, this is a fact well known from linear algebra.
In general, one needs to check that there is no need to use commutativity of the multiplication.
Let us first check that some natural maps are isomorphisms.
\begin{description}
\item[\rm (i)] The map $D(x{\hskip.1em},{\hskip.1em} y)=(y{\hskip.1em},{\hskip.1em} x)$ is an isomorphism.
Indeed, it takes the line $x=a$ to the line $y=0x+a$\hspace*{-0.2em},\hspace*{0.2em}\ and the line $y=0x+b$ to the line $x=b$\hspace*{-0.2em}.\hspace*{0.2em}\
If $a\neq 0$\hspace*{-0.2em},\hspace*{0.2em}\ it takes the line $y=ax+b$\hspace*{-0.2em},\hspace*{0.2em}\ i.e.
the line $x=a^{-1}y-a^{-1}b$ (where $a^{-1}$ is the unique solution of the equation $xa=1$\hspace*{-0.1em}) to the line $y=a^{-1}x-a^{-1}b$\hspace*{-0.2em}.\hspace*{0.2em}\
Here we used the left distributivity and the associativity of the multiplication.
\item[\rm (ii)] For any $c{\hskip.1em},{\hskip.1em} d\in K'$ the map $f(x{\hskip.1em},{\hskip.1em} y)=(x+c{\hskip.1em},{\hskip.1em} y+d)$ is an isomorphism.
Indeed, it takes the line $x=a$ to the line $x=a+c$, and the line $y=ax+b$, to the line $y=ax-ac+b+d$\hspace*{-0.2em}.\hspace*{0.2em}\
Here we used the left distributivity.
\item[\rm (iii)] For any $c\in K'$ the map $f(x{\hskip.1em},{\hskip.1em} y)=(x{\hskip.1em},{\hskip.1em} y-cx)$ is an isomorphism.
Indeed, it takes every line $x=a$ to itself, and it takes
the line $y=ax+b$ to the line $y=(a-c)x+b$\hspace*{-0.2em}.\hspace*{0.2em}\
Here we used the right distributivity.
\item[\rm (iv)] For any $c\in K'$ the map $g(x{\hskip.1em},{\hskip.1em} y)=(x-cy{\hskip.1em},{\hskip.1em} y)$ is an isomorphism.
Indeed, $g=D\circ f\circ D$\hspace*{-0.2em},\hspace*{0.2em}\ where $D(x{\hskip.1em},{\hskip.1em} y)=(y{\hskip.1em},{\hskip.1em} x)$
and $f(x{\hskip.1em},{\hskip.1em} y)=(x{\hskip.1em},{\hskip.1em} y-cx)$\hspace*{-0.2em}.\hspace*{0.2em}\
\end{description}
By using an isomorphism of type\hskip.1em\ (ii)\hskip.1em\ if necessary, we can assume that $l'{\hskip.1em},{\hskip.1em} m'$ intersect at $\bf 0$\hspace*{-0.2em}.\hspace*{0.2em}\
By using the isomorphism $D$ from\hskip.1em\ (i)\hskip.1em\ if necessary, we can assume that $l'$ is not equal to $m=0\times K'$\hspace*{-0.2em}.\hspace*{0.2em}\
Then $l'$ has the form $y=cx$\hspace*{-0.2em}.\hspace*{0.2em}\
The map $f(x{\hskip.1em},{\hskip.1em} y)=(x{\hskip.1em},{\hskip.1em} y-cx)$ is of type\hskip.1em\ (iii)\hskip.1em\ and takes $l'$ to $l$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore we can assume that $l'=l$ and $m'$ intersects $l'=l$ at $\bf 0$\hspace*{-0.2em}.\hspace*{0.2em}\
Then $m'$ has an equation of the form $x=cy$ and an isomorphism of type\hskip.1em\ (iv)\hskip.1em\ takes $m'$ to $m$\hspace*{-0.2em}.\hspace*{0.2em}\
Since any automorphism of type\hskip.1em\ (iv)\hskip.1em\ takes $l$ to $l$\hspace*{-0.2em},\hspace*{0.2em}\ this completes the proof. $\blacksquare$
\mypar{Theorem.}{theorem2} {\em Let $K$ be a ternary ring, and let $K'$ be a skew-field.
Suppose that there is an isomorphism of planes $f\colon K^2\rightarrow (K')^2$\hspace*{-0.2em}.\hspace*{0.2em}\
Then $K$ is isomorphic to $K'$ as a ternary ring.}
\proof This follows from Lemma \ref{lemma3} and Corollary \ref{cor3}. $\blacksquare$
It follows that in order to construct an affine plane not coming from a skew-field, it is sufficient to construct
a quasi-field which is not a skew-field
(a quasi-field which is isomorphic to a skew-field is a skew-field itself).
In particular, it is sufficient to construct a left quasi-field in which
the right distributivity does not hold or a right quasi-field in which the left distributivity does not hold.
Alternatively, it is sufficient to construct a (left or right) quasi-field in which
the associativity of multiplication does not hold.
We will present a construction of such quasi-field in Section \ref{andre}.
\mysection{Translations}{translations}
The previous section provided us with a method of constructing affine planes not isomorphic to any affine plane defined by a skew-field.
In this section we will present another method, based on an investigation of special automorphisms of affine planes called \emph{translations}.
It allows one to show that some planes are not isomorphic even to any plane defined by a left quasi-field (see Theorem \ref{theorem3}\hskip.15em\ below).
Let $\mathbb{A}$ be an affine plane. An automorphism $f\colon\mathbb{A}\rightarrow\mathbb{A}$ is called a \emph{translation}
if $f(l)$ is parallel to $l$ for every line $l$ (equal lines are considered to be parallel),
and if $f$ preserves every line from a class of parallel lines.
Clearly, for a non-trivial (i.e., not equal to the identity) translation there is exactly one such class of parallel lines.
Every line from this class is called a\hskip.05em\ \emph{trace}\hskip.05em\ of\hskip.05em\ $f$\hspace*{-0.2em}.\hspace*{0.2em}\
If $\mathbb{A}$ is realized as $K^2$ for a ternary ring $K$\hspace*{-0.2em},\hspace*{0.2em}\
then a translation is called\hskip.1em\ \emph{horizontal}\hskip.1em\ if it preserves all horizontal lines,
i.e. if the class of horizontal lines is its trace.
\myitpar{\hspace*{-0.3em}}
\emph{The next two propositions are not used in the rest of the paper.}
\mypar{Proposition.}{prop5} \emph{A non-trivial translation has no fixed points.}
\proof Let $f$ be a translation fixing a point $z$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $l$ be a trace of $f$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $m$ be a line containing $z$ and not parallel to $l$\hspace*{-0.2em}.\hspace*{0.2em}\
Since $f(m)$ is parallel to $m$ and contains $z$\hspace*{-0.2em},\hspace*{0.2em}\ we have $f(m)=m$\hspace*{-0.2em}.\hspace*{0.2em}\
Every point of $m$ is the unique intersection point of $m$ and a line parallel to $l$\hspace*{-0.2em}.\hspace*{0.2em}\
The map $f$ leaves both of these lines invariant.
Therefore $f$ fixes all points of $m$\hspace*{-0.2em}.\hspace*{0.2em}\
We see that $f$ fixes all points except, possibly, the points of the line $l_z$ passing through $z$ and parallel to $l$\hspace*{-0.2em}.\hspace*{0.2em}\
By applying the same argument to any point not on $l_z$ in the role of $z$\hspace*{-0.2em},\hspace*{0.2em}\
we conclude that $f$ fixes the points of $l_z$ also, i.e. that $f={\rm id}$\hspace*{-0.2em}.\hspace*{0.2em}\ $\blacksquare$
\mypar{Proposition.}{prop6} \emph{Let $z,z'$ be two different points, and let $l$ be the line passing through $z{\hskip.1em},{\hskip.1em} z'$\hspace*{-0.2em}.\hspace*{0.2em}\
There is no more than one translation taking $z$ to $z'$\hspace*{-0.2em},\hspace*{0.2em}\
and if such a translation exists, it leaves invariant every line parallel to $l$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\proof If $f_1{\hskip.1em},{\hskip.1em} f_2$ are two different translations taking $z$ to $z'$\hspace*{-0.2em},\hspace*{0.2em}\
then $f_1^{-1}\circ f_2$ is a non-trivial translation fixing $z$\hspace*{-0.2em},\hspace*{0.2em}\
contradicting to Proposition \ref{prop5}.
Now, let $f$ be a translation such that $f(z)=z'$\hspace*{-0.2em},\hspace*{0.2em}\
and let $m$ be a trace of $f$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $m_z$ be the line passing through $z$ and parallel to $m$\hspace*{-0.2em}.\hspace*{0.2em}\
Then $m_z$ is also a trace of $f$\hspace*{-0.2em}.\hspace*{0.2em}\
Clearly, we have $z\in m_z$ and $z'=f(z)\in m_z$\hspace*{-0.2em}.\hspace*{0.2em}\
It follows that $l=m_z$ and, hence, $l$ is a trace of $f$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore, $f$ leaves invariant every line parallel to $l$\hspace*{-0.2em}.\hspace*{0.2em}\
This completes the proof. $\blacksquare$
\mypar{Lemma.}{lemma4} \emph{Let $K$ be a left quasi-field.
For every two points $(c_1{\hskip.1em},{\hskip.1em} d_1)$\hspace*{-0.2em},\hspace*{0.2em}\ $(c_2{\hskip.1em},{\hskip.1em} d_2)$ of the plane $K^2$\hspace*{-0.2em},\hspace*{0.2em}\
there is a translation of $K^2$ taking $(c_1{\hskip.1em},{\hskip.1em} d_1)$ to\hskip.05em\ $(c_2{\hskip.1em},{\hskip.1em} d_2)$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\proof In this case there are obvious maps expected to be translations,
namely the maps of the form $f(x{\hskip.1em},{\hskip.1em} y)=(x+c{\hskip.1em},{\hskip.1em} y+d)$\hspace*{-0.2em},\hspace*{0.2em}\ where $c{\hskip.1em},{\hskip.1em} d\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
Clearly, if $c=c_2-c_1$\hspace*{-0.2em},\hspace*{0.2em}\ $d=d_2-d_1$\hspace*{-0.2em},\hspace*{0.2em}\ then $f(c_1{\hskip.1em},{\hskip.1em} d_1)=(c_2{\hskip.1em},{\hskip.1em} d_2)$\hspace*{-0.2em}.\hspace*{0.2em}\
Let us check that these maps are indeed translations.
The map $f(x{\hskip.1em},{\hskip.1em} y)=(x+c{\hskip.1em},{\hskip.1em} y+d)$ takes the line $x=a$ to the line $x=a+c$\hspace*{-0.2em},\hspace*{0.2em}\
and the line $y=ax+b$ to the line $y=ax-ac+b+d$\hspace*{-0.2em}.\hspace*{0.2em}\
(Cf. (ii) in the proof of Lemma \ref{lemma3} in the previous section.)
In particular, it takes vertical lines to vertical lines,
and the lines with the slope $a$ to the lines with the slope $a$\hspace*{-0.2em}.\hspace*{0.2em}\
If $c=0$\hspace*{-0.2em},\hspace*{0.2em}\ then $f$ preserves all vertical lines, and therefore is a translation.
If $c\neq 0$\hspace*{-0.2em},\hspace*{0.2em}\ then $d=ec$ for some $e$ by {\bf VW2}.
Since $f$ takes the line $y=ex+b$ to the line $y=ex-ec+b+d$ and $ex-ec+b+d=ex-d+b+d=ex+b$\hspace*{-0.2em},\hspace*{0.2em}\
we see that $f$ leaves invariant every line with the slope $e$\hspace*{-0.2em}.\hspace*{0.2em}\
It follows that $f$ is a translation in this case also. $\blacksquare$
\mypar{Lemma.}{lemma5} \emph{Let $K$ be a right quasi-field.
Suppose that for every $v\in K$\hspace*{-0.2em},\hspace*{0.2em}\ $v\neq 0$\hspace*{-0.2em},\hspace*{0.2em}\
the plane $K^2$ admits a translation taking $\bf 0$ to $(v,0)$\hspace*{-0.2em}.\hspace*{0.2em}\
Then the left distributivity law holds in $K$\hspace*{-0.2em}.}
\proof Let $f$ be a translation such that $f(0{\hskip.1em},{\hskip.1em} 0)=(v{\hskip.1em},{\hskip.1em} 0)$\hspace*{-0.2em}.\hspace*{0.2em}\
Since the line passing through $(0{\hskip.1em},{\hskip.1em} 0)$ and $(v{\hskip.1em},{\hskip.1em}0)$ is the horizontal line $K\times 0$\hspace*{-0.2em},\hspace*{0.2em}\
the translation $f$ is a horizontal translation.
Let us show that $f$ has the expected form $f(a{\hskip.1em},{\hskip.1em} b)=(a+v{\hskip.1em},{\hskip.1em} b)$\hspace*{-0.2em}.\hspace*{0.2em}\
This follows from the following four observations.
\begin{description}
\item[\rm 1.] The line $y=x$ with the slope $1$ passing through $(0{\hskip.1em},{\hskip.1em} 0)$ is mapped to the line with the slope $1$ passing through
$(v{\hskip.1em},{\hskip.1em} 0)$\hspace*{-0.2em},\hspace*{0.2em}\ i.e. to the line $y=x-v$\hspace*{-0.2em}.\hspace*{0.2em}\
\item[\rm 2.] For every $a\in K$, the map $f$ preserves the line $y=a$\hspace*{-0.2em}.\hspace*{0.2em}\
It follows that $f$ takes the point of intersection
of the lines $y=a$ and $y=x$ to the point of intersection of the lines $y=a$ and $y=x-v$\hspace*{-0.2em}.\hspace*{0.2em}\
This means that $f(a{\hskip.1em},{\hskip.1em} a)=(a+v{\hskip.1em},{\hskip.1em} a)$\hspace*{-0.2em}.\hspace*{0.2em}\
\item[\rm 3.] The vertical line $x=a$ containing $(a{\hskip.1em},{\hskip.1em} a)$ is mapped to the vertical line containing $(a+v{\hskip.1em},{\hskip.1em} a)$\hspace*{-0.2em},\hspace*{0.2em}\
i.e. to the line $x=a+v$\hspace*{-0.2em}.\hspace*{0.2em}\
\item[\rm 4.] The point of intersection of the lines $y=b$ and $x=a$ is mapped to the point of intersection of the lines
$y=b$ and $x=a+v$\hspace*{-0.2em}.\hspace*{0.2em}\
In other terms, $f(a{\hskip.1em},{\hskip.1em} b)=(a+v{\hskip.1em},{\hskip.1em} b)$\hspace*{-0.2em}.\hspace*{0.2em}\
\end{description}
Now, $f$ takes the line $y=ax$ containing $(0{\hskip.1em},{\hskip.1em} 0)$ to another line with the slope $a$\hspace*{-0.2em},\hspace*{0.2em}\
i.e. to a line of the form $y=ax-c$\hspace*{-0.2em}.\hspace*{0.2em}\
Since it contains $(v,0)$, we have $c=av$\hspace*{-0.2em}.\hspace*{0.2em}\
So, the line $y=ax$ is mapped to the line $y=ax-av$\hspace*{-0.2em}.\hspace*{0.2em}\
For every $u\in K$ the point $(u,au)$ belongs to the line $y=ax$ and is mapped to the point $(u+v{\hskip.1em},{\hskip.1em} au)$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore, $(u+v{\hskip.1em},{\hskip.1em} au)$ belongs to the line $y=ax-av$\hspace*{-0.2em},\hspace*{0.2em}\ i.e. $au=a(u+v)-av$\hspace*{-0.2em},\hspace*{0.2em}\ or $a(u+v)=au+av$\hspace*{-0.2em}.\hspace*{0.2em}\
Since this is true for all $a{\hskip.1em},{\hskip.1em} u{\hskip.1em},{\hskip.1em} v\in K$\hspace*{-0.2em},\hspace*{0.2em}\ the left distributivity holds. $\blacksquare$
\mypar{Theorem.}{theorem3} \emph{Let $K$ be a right quasi-field for which the left distributivity does not hold,
and let $K'$ be a left quasi-field.
Then the planes $K^2$ and $(K')^2$ are not isomorphic.}
\proof By Lemma \ref{lemma5} there is a point $(c{\hskip.1em},{\hskip.1em} d)\in K^2$
such that no translation takes $(0{\hskip.1em},{\hskip.1em} 0)$ to $(c,d)$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore, $K^2$ is not isomorphic to any plane constructed from a left quasi-field by Lemma \ref{lemma4}. $\blacksquare$
\mysection{Andr\'e quasi-fields}{andre}
\myitpar{The norm map.} Let $K$ be a field, and let $G$ be a finite group of automorphisms of $K$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $F$ be the subfield of $K$ consisting of all elements fixed by $G$\hspace*{-0.2em}.\hspace*{0.2em}\
By the Galois theory, the dimension of $K$ as a vector space over $F$ is equal to the order of $G$\hspace*{-0.1em};
in particular, it is finite.
The \emph{norm map} $N$ is defined as follows:
\[
N(x){\hskip.2em} ={\hskip.2em} \prod_{g\in G} g(x)\hspace*{0.05em}.
\]
Clearly, $N(x)\in F$ for all $x\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
Moreover, $N$ defines a homomorphism $K^*\rightarrow F^*$ from the multiplicative group $K^*$ to the multiplicative group $F^*$ of the
fields $K$\hspace*{-0.2em},\hspace*{0.2em}\ $F$ respectively.
Obviously, $N(g(a))=N(a)$ for any $g\in G$\hspace*{-0.2em}.\hspace*{0.2em}\
\myitpar{Modifying the multiplication.} Note that $N(1)=1$\hspace*{-0.2em},\hspace*{0.2em}\ and, therefore, $1\in N(K^*)$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $\varphi\colon N(K^*)\rightarrow G$ be a map subject to the only condition $\varphi(1)=1$\hspace*{-0.2em}.\hspace*{0.2em}\
In particular, $\varphi$ does not need to be a homomorphism.
Given such a map $\varphi$\hspace*{-0.2em},\hspace*{0.2em}\ we construct a new multiplication $\odot$ in $K$ as follows.
Of course, the new multiplication $\odot$ will depend on $\varphi$\hspace*{-0.2em},\hspace*{0.2em}\ but we will omit this dependence from the notations.
Let $\alpha$ be equal to $\varphi\circ N$ on $K^*$\hspace*{-0.2em},\hspace*{0.2em}\ and let $\alpha(0)=1={\rm id}_K$\hspace*{-0.2em}.\hspace*{0.2em}\
So, $\alpha$ is a map $K\rightarrow G$\hspace*{-0.2em}.\hspace*{0.2em}\
We will often denote $\alpha(x)$ by $\alpha_x$\hspace*{-0.1em}; it is an automorphism $K\rightarrow K$ belonging to the group $G$\hspace*{-0.2em}.\hspace*{0.2em}\
The multiplication $\odot$ is defined by the formula
\[
x\odot y{\hskip.2em} ={\hskip.2em} x{\hskip.1em}\alpha_x(y)\hspace*{0.05em},
\]
for all $x{\hskip.1em},{\hskip.1em} y\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
Let $K_{\varphi}$ be the set $K$ endowed with the same addition and the same elements $0$\hspace*{-0.2em},\hspace*{0.2em}\ $1$ as $K$\hspace*{-0.2em},\hspace*{0.2em}\ and with the multiplication $\odot$\hspace*{-0.2em}.\hspace*{0.2em}\
\mypar{Theorem.}{theorem4} \emph{$K_{\varphi}$ is a left quasi-field.}
\proof {\bf VW1{\hskip.05em}:} This property holds for $K_{\varphi}$ because it holds for $K$\hspace*{-0.2em}.\hspace*{0.2em}\
{\bf VW3{\hskip.05em}:} Note that $\alpha_1=\varphi (N(1))=\varphi(1)=1$\hspace*{-0.2em}.\hspace*{0.2em}\
This implies $1\odot x=x$ for all $x$\hspace*{-0.2em}.\hspace*{0.2em}\
Also, since $\alpha_x$ is an automorphism of $K$\hspace*{-0.2em},\hspace*{0.2em}\
we have $\alpha_x(1)=1$\hspace*{-0.2em},\hspace*{0.2em}\ $\alpha_x(0)=0$\hspace*{-0.2em},\hspace*{0.2em}\ and, therefore, $x\odot 1=x$\hspace*{-0.2em},\hspace*{0.2em}\ $x\odot 0=0$ for all $x\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
In addition, $0\odot y=0{\hskip.1em}\alpha_0(y)=0{\hskip.1em} y=0$\hspace*{-0.2em}.\hspace*{0.2em}\
These observations imply the multiplicative part of {\bf VW3} for $K_{\varphi}$\hspace*{-0.1em};\hskip.15em\
the additive part holds for $K_{\varphi}$ because it holds for $K$\hspace*{-0.2em}.\hspace*{0.2em}\
{\bf VW4{\hskip.05em}:} Since $\alpha_x$ is an automorphism of $K$\hspace*{-0.2em},\hspace*{0.2em}\
we have $\alpha_x(y+z)=\alpha_x(y)+\alpha_x(z)$\hspace*{-0.2em},\hspace*{0.2em}\
and therefore $x\odot (y+z)=x\odot y + x\odot z$\hspace*{-0.2em}.\hspace*{0.2em}\
So, the left distributivity law {\bf VW4} holds for $K_{\varphi}$\hspace*{-0.2em}.\hspace*{0.2em}\
{\bf VW2{\hskip.05em}:} Suppose that $a{\hskip.1em},{\hskip.1em} b\neq 0$\hspace*{-0.2em}.\hspace*{0.2em}\
First of all, notice that $\alpha_a(b)\neq 0$
(because $\alpha_a$ is an automorphism of $K$\hspace*{-0.1em}), and, therefore, $a\odot b \neq 0$\hspace*{-0.2em}.\hspace*{0.2em}\
Next, consider the equation $a\odot x=b$\hspace*{-0.2em}.\hspace*{0.2em}\
It is equivalent to $a{\hskip.1em}\alpha_a(x)=b$\hspace*{-0.2em},\hspace*{0.2em}\ which, in turn,
is equivalent to $\alpha_a(x)=a^{{\minus}1}b$\hspace*{-0.2em}.\hspace*{0.2em}\
It follows that $x=\alpha_a^{{\minus}1}(a^{{\minus}1}b)$ is the unique solution.
It remains to consider the equation $x\odot a=b$\hspace*{-0.2em}.\hspace*{0.2em}\
Notice that
\[
N(x\odot a){\hskip.2em} ={\hskip.2em} N(x{\hskip.05em}\alpha_x(a)){\hskip.2em} ={\hskip.2em} N(x)N(\alpha_x(a)){\hskip.2em} ={\hskip.2em} N(x){\hskip.1em} N(a)\hspace*{0.05em},
\]
since $N(g(a)){\hskip.2em} ={\hskip.2em} N(a)$ for any $g\in G$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore,\hskip.02em\ $x\odot a{\hskip.2em} ={\hskip.2em} b$ implies that $N(x){\hskip.1em} N(a){\hskip.2em} ={\hskip.2em} N(b)$\hspace*{-0.2em}.\hspace*{0.2em}\
This, in turn, implies that
\[
N(x){\hskip.2em} ={\hskip.2em} N(a)^{{\minus}1}N(b){\hskip.2em} ={\hskip.2em} N(a^{{\minus}1}b)\hspace*{0.05em},
\]
and
$\displaystyle
\alpha(x){\hskip.2em} ={\hskip.2em} \varphi(N(x)){\hskip.2em} ={\hskip.2em} \varphi(N(a^{-1}b)){\hskip.2em} ={\hskip.2em} \alpha(a^{{\minus}1}b)$.
In other terms, $\alpha_x{\hskip.2em} ={\hskip.2em} \alpha_{a^{{\minus}1}b}$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore,\hskip.02em\ if\hskip.02em\ $x{\hskip.1em}\alpha_x(a){\hskip.2em} ={\hskip.2em} x\odot a{\hskip.2em} ={\hskip.2em} b$\hspace*{-0.2em},\hspace*{0.2em}\
then $x{\hskip.1em}\alpha_{a^{{\minus}1}b}(a){\hskip.2em} ={\hskip.2em} b$ and
\begin{equation}
\label{solution}
x{\hskip.2em} ={\hskip.2em} b(\alpha_{a^{{\minus}1}b}(a))^{{\minus}1}.
\end{equation}
It follows that the equation $x\odot a{\hskip.2em} ={\hskip.2em} b$ has no more than one solution.
Let us check that (\ref{solution}) is, indeed, a solution.
Let $g=\alpha_{a^{-1}b}$\hspace*{-0.2em}.\hspace*{0.2em}\
If $x$ is defined by (\ref{solution}), then
\[
\alpha_x{\hskip.2em} ={\hskip.2em} \alpha(x){\hskip.2em} ={\hskip.2em} \varphi(N(b(\alpha_{a^{-1}b}(a))^{-1})){\hskip.2em}
={\hskip.2em} \varphi(N(b{\hskip.05em} (g(a)^{{\minus}1})))\hspace*{0.05em}.
\]
At the same time,
\begin{align*}
N(b{\hskip.05em} (g(a)^{{\minus}1}))
&= N(b){\hskip.1em} N(g(a)^{{\minus}1})\hspace*{1em}\mbox{ ({\hskip.05em} because $N$ is a homomorphism) } \\
&= N(b){\hskip.1em} N(g(a^{{\minus}1}))\hspace*{1em}\mbox{ ({\hskip.05em} because $g\in G$ and hence $g(a)^{-1}=g(a^{-1})${\hskip.1em}) } \\
&= N(b){\hskip.1em} N(a^{{\minus}1})\hspace*{2.4em}\mbox{ ({\hskip.05em} because $g\in G$ and hence $N(g(a^{{\minus}1}))=N(a^{{\minus}1})${\hskip.1em}) } \\
&= N(ba^{{\minus}1}){\hskip.1em}\hspace*{4.0em}\mbox{ ({\hskip.05em} because $N$ is a homomorphism) } \\
&= N(a^{{\minus}1}b){\hskip.1em}\hspace*{4.0em}\mbox{ ({\hskip.05em} because the multiplication in $K$ is commutative)\hspace*{0.05em}. }
\end{align*}
It follows that
$\displaystyle
\alpha_x=\varphi(N(b{\hskip.05em} (g(a)^{{\minus}1})))=\varphi(N(a^{{\minus}1}b))=\alpha_{a^{{\minus}1}b}\hspace*{0.05em}.
$
Therefore
\[
x\odot a=x{\hskip.1em}\alpha_x(a)=x{\hskip.1em}\alpha_{a^{{\minus}1}b}(a)=b{\hskip.1em} (\alpha_{a^{{\minus}1}b}(a))^{{\minus}1}{\hskip.1em}\alpha_{a^{{\minus}1}b}(a)=b
\]
This proves that (\ref{solution}) is a solution of $x\odot a=b$ and completes our verification of {\bf VW2}.
{\bf VW5{\hskip.05em}:} It remains to check {\bf VW5}.
To this end, we will apply Proposition \ref{prop4} (see Section \ref{quasi-fields}).
We already established that $K_{\varphi}$ is a weak left quasi-field.
Notice that
\begin{equation}
\label{alphaxa}
\alpha_x(a)=a\hspace*{1.9em}\mbox{{\hskip.05em} for\hspace*{0.5em} any\hspace*{0.5em} $x\in K$\hspace*{0.5em} and\hspace*{0.5em} $a\in F$}\hspace*{0.05em},
\end{equation}
because $F$ is fixed by all elements of $G$\hspace*{-0.2em}.\hspace*{0.2em}\
Therefore,
\begin{equation}
\label{odot}
x\odot a{\hskip.2em} ={\hskip.2em} x{\hskip.1em} a\hspace*{1.5em}
\mbox{{\hskip.05em} for\hspace*{0.5em} all\hspace*{0.5em} $x\in K$,\hspace*{0.5em} $a\in F$}\hspace*{0.05em}.
\end{equation}
This immediately implies the condition (\ref{weak-2}) of Proposition \ref{prop4}.
Now, let $x,y\in K$\hspace*{-0.2em},\hspace*{0.2em}\ and $a\in F$\hspace*{-0.2em}.\hspace*{0.2em}\
The following calculation shows that the condition (\ref{weak-1}) of Proposition \ref{prop4} also holds:
\begin{align*}
(x\odot y) \odot a{\hskip.2em} & ={\hskip.2em} (x\odot y){\hskip.1em} a\hspace*{2.3em}\mbox{ ({\hskip.05em} by (\ref{odot}){\hskip.1em}) } \\
& ={\hskip.2em} x{\hskip.1em}\alpha_x(y){\hskip.1em} a\hspace*{2.3em}\mbox{ ({\hskip.05em} by the definition of $\odot${\hskip.05em}) } \\
& ={\hskip.2em} x{\hskip.1em}\alpha_x(y){\hskip.05em}\alpha_x(a)\hspace*{0.5em}\mbox{ ({\hskip.05em} by (\ref{alphaxa}){\hskip.1em}) } \\
& ={\hskip.2em} x{\hskip.1em}\alpha_x(y a)\hspace*{2.3em}\mbox{ ({\hskip.05em} because $\alpha_x$ is a homomorphism) } \\
& ={\hskip.2em} x\odot (y a) \hspace*{2.3em}\mbox{ ({\hskip.05em} by the definition of $\odot${\hskip.05em}) } \\
& ={\hskip.2em} x\odot (y\odot a)\hspace*{1.15em}\mbox{ ({\hskip.05em} by (\ref{odot}){\hskip.1em}). }
\end{align*}
Since $K$ is a finite-dimensional vector space over $F$\hspace*{-0.2em},\hspace*{0.2em}\ Proposition \ref{prop4} applies.
It implies that\hskip.1em\ {\bf VW5}\hskip.15em\ holds and hence $K_{\varphi}$ is a left quasi-field.
This completes the proof. $\blacksquare$
The left quasi-fields $K_{\varphi}$ are called {\em left Andr\'e quasi-fields}.
The {\em right Andr\'e quasi-fields} are
constructed in a similar manner, with the multiplication given by the formula $x\odot y = \alpha_y(x){\hskip.1em} y$\hspace*{-0.2em}.\hspace*{0.2em}\
As the next two theorems show, in a left Andr\'e quasi-field
the multiplication is almost never associative (Theorem \ref{theorem5}), and the right distributivity holds only if $\varphi$ is the
\emph{trivial map}, i.e. the map taking every element to $1\in G$ (Theorem \ref{theorem6}).
Of course, the corresponding results hold for the right Andr\'e quasi-fields.
We will call an Andr\'e quasi-field {\em non-trivial} if $\varphi$ is a non-trivial map.
\mypar{Theorem.}{theorem5} {\em The multiplication in\hskip.1em\ $K_{\varphi}$ is associative
if and only if $\varphi$ is a homomorphism $N(K^*)\rightarrow G$\hspace*{-0.2em},\hspace*{0.2em}\
i.e. if and only if $\varphi(u{\hskip.1em} v)=\varphi(u){\hskip.1em}\varphi(v)$ for all $u{\hskip.1em},{\hskip.1em} v\in N(K^*)$\hspace*{-0.2em}.\hspace*{0.2em}\ }
\proof Let $x{\hskip.1em},{\hskip.1em} y\in K^*$ and $g\in G$\hspace*{-0.2em}.\hspace*{0.2em}\
Then
\begin{align*}
\alpha(x{\hskip.1em} g(y)){\hskip.2em} & ={\hskip.2em} \varphi(N(x{\hskip.1em} g(y)))\hspace*{4.5em}\mbox{{\hskip.05em} (by the definition of $\alpha$) } \\
& ={\hskip.2em} \varphi(N(x){\hskip.1em} N(g(y)))\hspace*{2.5em}\mbox{{\hskip.05em} (because $N$ is a homomorphism) } \\
& ={\hskip.2em} \varphi(N(x){\hskip.1em} N(y))\hspace*{3.9em}\mbox{{\hskip.05em} (because $N(y){\hskip.2em} ={\hskip.2em} N(g(y))$) } \\
& ={\hskip.2em} \varphi(N(x{\hskip.1em} y)){\hskip.2em} ={\hskip.2em} \alpha(x{\hskip.1em} y).
\end{align*}
Therefore, $\alpha(x{\hskip.1em} g(y)){\hskip.2em} ={\hskip.2em} \alpha(x{\hskip.1em} y)$\hspace*{-0.2em},\hspace*{0.2em}\ or, equivalently,
$\alpha_{x{\hskip.1em} g(y)}{\hskip.2em} ={\hskip.2em} \alpha_{x{\hskip.1em} y}$ for all $x{\hskip.1em},{\hskip.1em} y\in K$ and $g\in G$\hspace*{-0.2em}.\hspace*{0.2em}\
{{\hskip.2em}}It follows that
\begin{equation}
\label{xyz}
\alpha_{x{\hskip.1em} g(y)}{\hskip.1em} (z){\hskip.2em} ={\hskip.2em} \alpha_{x{\hskip.1em} y}{\hskip.1em} (z)
\end{equation}
for all $x{\hskip.1em},{\hskip.1em} y\in K^*$\hspace*{-0.2em},\hspace*{0.2em}\ $z\in K$\hspace*{-0.2em},\hspace*{0.2em}\ and $g\in G$\hspace*{-0.2em}.\hspace*{0.2em}\
If $g=\alpha_x$\hspace*{-0.2em},\hspace*{0.2em}\ then $x{\hskip.1em} g(y){\hskip.2em} ={\hskip.2em} x{\hskip.1em}\alpha_x(y){\hskip.2em} ={\hskip.2em} x\odot y$
and hence (\ref{xyz}) turns into
\begin{equation}
\label{axy}
\alpha_{x\odot y}{\hskip.1em} (z){\hskip.2em} ={\hskip.2em} \alpha_{x{\hskip.1em} y}{\hskip.1em} (z)\hspace*{0.05em}.
\end{equation}
By applying (\ref{axy}) we can compute $(x\odot y)\odot z$ as follows:
\[
(x\odot y)\odot z{\hskip.2em}
={\hskip.2em} (x\odot y){\hskip.1em}\alpha_{x\odot y}(z){\hskip.2em}
={\hskip.2em} (x\odot y){\hskip.1em}\alpha_{x{\hskip.1em} y}(z){\hskip.2em}
={\hskip.2em} x{\hskip.1em}\alpha_x(y){\hskip.1em}\alpha_{x{\hskip.1em} y}(z).
\]
Next, let us compute $x\odot (y\odot z)$:
\[
x\odot (y\odot z){\hskip.2em}
={\hskip.2em} x\odot (y{\hskip.1em}\alpha_y(z))
={\hskip.2em} x{\hskip.1em}\alpha_x(y{\hskip.1em}\alpha_y(z))
={\hskip.2em} x{\hskip.1em}\alpha_x(y){\hskip.1em}\alpha_x(\alpha_y(z)).
\]
By comparing the results of these computations, we see that for $x{\hskip.1em},{\hskip.1em} y\in K^*$
the associativity law $(x\odot y)\odot z = x\odot (y\odot z)$ holds if and only if
$\alpha_{x{\hskip.1em} y}(z) = \alpha_x(\alpha_y(z))$\hspace*{-0.2em}.\hspace*{0.2em}\
Since the associativity law trivially holds when $x=0$ or $y=0$\hspace*{-0.2em},\hspace*{0.2em}\
the associativity law for $\odot$ holds if and only if $\alpha_{x{\hskip.1em} y}(z) = \alpha_x(\alpha_y(z))$
for all $x{\hskip.1em},{\hskip.1em} y{\hskip.1em},{\hskip.1em} z\in K$\hspace*{-0.2em},\hspace*{0.2em}\
or, equivalently, if and only if $\alpha_{x{\hskip.1em} y} =\alpha_x\circ\alpha_y$ for all $x{\hskip.1em},{\hskip.1em} y\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
Recalling the definition of $\alpha$\hspace*{-0.2em},\hspace*{0.2em}\ we see that $\alpha_{x{\hskip.1em} y}{\hskip.2em} ={\hskip.2em} \alpha_x\circ\alpha_y$ is equivalent to
\[
\varphi(N(x)){\hskip.1em}\varphi(N(y)){\hskip.2em} ={\hskip.2em} \varphi(N(x{\hskip.1em} y))\hspace*{0.05em},
\]
and since $N(x{\hskip.1em} y){\hskip.2em} ={\hskip.2em} N(x){\hskip.1em} N(y)$\hspace*{-0.2em},\hspace*{0.2em}\ is equivalent to
\begin{equation}
\label{phi}
\varphi(N(x)){\hskip.1em}\varphi(N(y)){\hskip.2em} ={\hskip.2em} \varphi(N(x){\hskip.1em} N(y))\hspace*{0.05em}.
\end{equation}
Clearly, (\ref{phi}) holds for all $x{\hskip.1em},{\hskip.1em} y\in K$ if and only if $\varphi\colon N(K^*)\,{\to}\, G$ is a homomorphism.
It follows that the associativity is equivalent to $\varphi$ being a homomorphism. $\blacksquare$
\mypar{Theorem.}{theorem6} {\em The right distributivity law holds for $K_{\varphi}$ if and only if $\varphi$ maps all
elements of $N(K^*)$ to $1\in G$ \textup{(}{\hskip.05em} and therefore $K_{\varphi}=K$\hspace*{-0.1em}\textup{)}.}
\proof Clearly, if $\varphi$ maps $N(K^*)$ to $1$, then $K_{\varphi}=K$ and the right distributivity holds.
Suppose now that the right distributivity holds. Then for all $x{\hskip.1em} ,{\hskip.1em} y{\hskip.1em} ,{\hskip.1em} a\in K$ we have
\[
x\odot a + y\odot a{\hskip.2em} ={\hskip.2em} (x+y)\odot a\hspace*{0.05em}.
\]
We can rewrite this as
\begin{equation}
\label{dist-1}
x{\hskip.1em} X(a)+y{\hskip.1em} Y(a){\hskip.2em} ={\hskip.2em} (x+y){\hskip.1em} Z(a)\hspace*{0.05em},
\end{equation}
where $X=\alpha_x$\hspace*{-0.2em},\hspace*{0.2em}\ $Y=\alpha_y$\hspace*{-0.2em},\hspace*{0.2em}\ $Z=\alpha_{x+y}$\hspace*{-0.2em}.\hspace*{0.2em}\
By using the fact that $X{\hskip.1em} ,{\hskip.1em} Y{\hskip.1em} ,{\hskip.1em} Z$ are automorphisms of $K$
and applying (\ref{dist-1}) to $a{\hskip.025em} b$ in the role of $a$\hspace*{-0.2em},\hspace*{0.2em}\ we get
\begin{equation}
\label{dist-2}
x{\hskip.1em} X(a){\hskip.1em} X(b)+y{\hskip.1em} Y(a){\hskip.025em} Y(b){\hskip.2em} ={\hskip.2em} x{\hskip.1em} X(a{\hskip.025em} b)+y{\hskip.1em} Y(a{\hskip.025em} b)
\end{equation}
\[
\phantom{x{\hskip.1em} X(a){\hskip.1em} X(b)+y{\hskip.1em} Y(a){\hskip.025em} Y(b){\hskip.2em} }
={\hskip.2em} (x+y){\hskip.1em} Z(a{\hskip.025em} b){\hskip.2em} ={\hskip.2em} (x+y){\hskip.1em} Z(a){\hskip.1em} Z(b)\hspace*{0.05em}.
\]
By combining (\ref{dist-2}) with (\ref{dist-1}), we get
\[
x{\hskip.1em} X(a){\hskip.1em} X(b)+y{\hskip.025em} Y(a){\hskip.1em} Y(b){\hskip.2em} ={\hskip.2em} (x{\hskip.1em} X(a)+y{\hskip.025em} Y(a)){\hskip.1em} Z(b)\hspace*{0.05em}.
\]
Let us multiply this identity by $x+y$, and then apply (\ref{dist-1}) to $b$ in the role of $a$\hspace*{-0.1em}:
\begin{equation*}
(x+y)(x{\hskip.1em} X(a){\hskip.1em} X(b)+y{\hskip.025em} Y(a){\hskip.025em} Y(b)){\hskip.2em}
\end{equation*}
\begin{equation*}
={\hskip.2em} (x+y)(x{\hskip.1em} X(a)+y{\hskip.025em} Y(a)){\hskip.1em} Z(b){\hskip.2em}
\end{equation*}
\vspace*{-2.7\bigskipamount}
\begin{equation*}
\hspace*{0em}
={\hskip.2em} (x+y){\hskip.1em} Z(b){\hskip.1em} (x{\hskip.1em} X(a)+y{\hskip.025em} Y(a)){\hskip.2em}
={\hskip.2em} (x{\hskip.1em} X(b)+y{\hskip.025em} Y(b))(x{\hskip.1em} X(a)+y{\hskip.025em} Y(a))\hspace*{0.05em}.
\end{equation*}
By opening the parentheses and canceling the equal terms, we get
\[
y{\hskip.1em} x{\hskip.1em} X(a){\hskip.1em} X(b)+x{\hskip.1em} y{\hskip.025em} Y(a){\hskip.025em} Y(b){\hskip.2em} ={\hskip.2em} x{\hskip.1em} y{\hskip.1em} X(b){\hskip.025em} Y(a)+y{\hskip.1em} x{\hskip.025em} Y(b){\hskip.1em} X(a)\hspace*{0.05em}.
\]
Suppose that $x{\hskip.1em} ,{\hskip.1em} y\neq 0$\hspace*{-0.2em}.\hspace*{0.2em}\
Then we can divide the last equation by $x{\hskip.1em} y\neq 0$ and get
\[
Y(a){\hskip.025em} Y(b)+X(a){\hskip.1em} X(b){\hskip.2em} ={\hskip.2em} X(b){\hskip.025em} Y(a)+Y(b){\hskip.1em} X(a)\hspace*{0.05em}.
\]
The last identity is equivalent to
\[
Y(a){\hskip.025em} Y(b)-Y(a){\hskip.1em} X(b)+X(a){\hskip.1em} X(b)-X(a){\hskip.025em} Y(b){\hskip.2em} ={\hskip.2em} 0\hspace*{0.05em},
\]
and, therefore, to
\begin{equation}
\label{yxab}
(Y(a)-X(a)){\hskip.025em} (Y(b)-X(b)){\hskip.2em} ={\hskip.2em} 0\hspace*{0.05em}.
\end{equation}
This holds for all $a{\hskip.1em} ,{\hskip.1em} b\in K$\hspace*{-0.2em}.\hspace*{0.2em}\
If $Y(a)-X(a)\neq 0$ for some $a$\hspace*{-0.2em},\hspace*{0.2em}\
then (\ref{yxab}) implies that $Y(b)-X(b)=0$ for all $b$\hspace*{-0.2em},\hspace*{0.2em}\
and, in particular, for $b=a$ in contradiction with $Y(a)-X(a)\neq 0$\hspace*{-0.2em}.\hspace*{0.2em}\
It follows that $X(a)=Y(a)$ for all $a$\hspace*{-0.2em}.\hspace*{0.2em}\
In other terms, $X=Y$\hspace*{-0.2em}.\hspace*{0.2em}\
By recalling that $X=\alpha_x$\hspace*{-0.2em},\hspace*{0.2em}\ $Y=\alpha_y$\hspace*{-0.2em},\hspace*{0.2em}\
and that $x{\hskip.1em} ,{\hskip.1em} y$ are arbitrary non-zero elements of $K$\hspace*{-0.2em},\hspace*{0.2em}\
we conclude that all automorphisms $\alpha_x$ with $x\neq 0$ are equal,
and, in particular, are equal to $\alpha_1$\hspace*{-0.2em}.\hspace*{0.2em}\
But $\alpha_1=\varphi(N(1))=\varphi(1)=1$ by the assumption.
It follows that $\varphi(N(x))=\alpha_x=1$ for all $x\in K^*$\hspace*{-0.2em},\hspace*{0.2em}\
and hence $\varphi(z)=1$ for all $z\in N(K^*)$\hspace*{-0.2em}.\hspace*{0.2em}\
This completes the proof. $\blacksquare$
The Galois theory provides many explicit examples of fields $K$ with a finite group of automorphisms $G$. The freedom
of choice of the map $\varphi$ allows one to construct left Andr\'e quasi-fields with non-associative multiplication
(by using Theorem \ref{theorem5}), and left Andr\'e quasi-fields in which the right distributivity does not hold (by using Theorem \ref{theorem6}).
One can also construct a left Andr\'e quasi-field with associative multiplication in which the right distributivity does not hold.
We leave this as an exercise for the readers moderately familiar with Galois theory.
\myitpar{Remark.} In this section we followed, to a large extent, the exposition in \cite{HP}, Section IX.3.
\mysection{Conclusion: non-Desarguesian planes}{conclusion}
If $K$ is a left quasi-field with non-associative multiplication (say, a left Andr\'e quasi-field), then $K$ is not isomorphic to any skew-field.
By Theorem \ref{theorem2}, $K^2$ is not isomorphic to any plane defined over a skew-field, and,
therefore, is a non-Desarguesian plane.
If $K$ is a left quasi-field in which the right distributivity does not hold, then, again, $K$ is not isomorphic to any skew-field.
By Theorem \ref{theorem2}, $K^2$ is a non-Desarguesian plane.
Let $K$ be a right quasi-field in which the left distributivity does not hold.
For example, one can take as $K$ any nontrivial right Andr\'e quasi-field
(we can take as $K$ a left Andr\'e quasi-field with the opposite multiplication,
or use the right Andr\'e quasi-field version of Theorem \ref{theorem6}).
Then, by Theorem \ref{theorem3}, $K^2$ is not isomorphic to any plane defined over a left quasi-field.
In particular, $K^2$ is not isomorphic to any plane defined over a skew-field, and, therefore, is a non-Desarguesian plane.
\begin{flushright}
October 8, 2014
April 15, 2016 (minor edits)
\vspace*{\bigskipamount}
http:/\!/\hspace*{-0.07em}nikolaivivanov.com
\end{flushright}
\end{document} |
\begin{document}
\def\nab#1#2#3{\nabla^{\hbox{$\scriptstyle{#1}$}}_{\hbox{$\scriptstyle{#2}$}}{\hbox{$#3$}}}
\def\nabl#1#2{\nabla_{\hbox{$\scriptstyle{#1}$}}{\hbox{$#2$}}}
\def\tnabl#1#2{\hat{\nabla}_{\hbox{$\scriptstyle{#1}$}}{\hbox{$#2$}}}
\def\nnab#1{\nabla_{\hbox{$\scriptstyle{#1}$}}}
\def\tnab#1#2{\widetilde{\nabla}_{\hbox{$\scriptstyle{#1}$}}\hbox{$#2$}}
\def\R{\mathbb{R}}
\def\C{\mathbb{C}}
\def\Z{\mathbb{Z}}
\def\K{\mathbb{K}}
\def\ci{\mathcal{I}}
\def\cl{\mathcal{L}}
\def\v{\mathfrak{v}}
\def\b{\mathfrak{b}}
\def\fr{\mathfrak{r}}
\def\z{\mathfrak{z}}
\def\g{\mathfrak{g}}
\def\gl{\mathfrak{gl}}
\def\sl{\mathfrak{sl}}
\def\k{\mathfrak{k}}
\def\p{\mathfrak{p}}
\def\h{\mathfrak{h}}
\def\s{\mathfrak{s}}
\def\so{\mathfrak{so}}
\def\n{\mathfrak{n}}
\def\m{\mathfrak{m}}
\def\a{\mathfrak{a}}
\def\V{\mathcal{V}}
\def\H{\mathcal{H}}
\def\L{\mathcal{L}}
\def\B{\mathfrak{B}}
\def\Hi{\mathfrak{H}}
\def\U{\mathfrak{U}}
\def\F{\mathcal{F}}
\def\trace{\operatorname{trace}}
\def\sp{\operatorname{span}}
\def\grad{\operatorname{grad}}
\def\div{\operatorname{div}}
\def\ad{\operatorname{ad}}
\def\Ad{\operatorname{Ad}}
\def\Aut{\operatorname{Aut}}
\def\Ric{\operatorname{Ric}}
\def\dim{\operatorname{dim}}
\def\End{\operatorname{End}}
\def\pd#1{\frac{\partial}{\partial #1}}
\def\dd#1{\frac{\operatorname{d}}{\operatorname{d}#1}}
\def\dop#1{\operatorname{d}#1}
\def\SPE#1#2{\langle #1,#2\rangle}
\def\pro{\textsc{Proof}}
\title{Curvature conditions for complex-valued\\
harmonic morphisms}
\date{}
\author{Jonas Nordstr\" om}
\keywords{harmonic morphisms, totally geodesic, holomorphic}
\subjclass[2000]{58E20, 53C43, 53C12}
\address
{Department of Mathematics, Faculty of Science, Lund University,
Box 118, S-221 00 Lund, Sweden}
\email{Jonas.Nordstrom@math.lu.se}
\begin{abstract}
We study the curvature of a manifold on which there can be defined a complex-valued
submersive harmonic morphism with either totally geodesic fibers, or which is holomorphic with
respect to a complex structure that is compatible with the second fundamental form.
We also give a necessary curvature condition for the existence of complex-valued harmonic
morphisms with totally geodesic fibers on Einstein manifolds.
\end{abstract}
\maketitle
\section{Introduction}
A harmonic morphism is a map between two Riemannian manifolds
that pulls back local harmonic functions to local harmonic functions.
The simplest examples of harmonic morphisms are constant maps,
real-valued harmonic functions and isometries.
A characterization of harmonic morphisms was given by Fuglede and Ishihara;
they showed in \cite{Fuglede} and \cite{Ishihara}, respectively, that harmonic
morphisms are exactly the horizontally weakly conformal harmonic maps.
If we restrict our attention to the maps where the codomain is a surface then
the harmonic morphisms are the horizontally weakly conformal maps with minimal fibers
at regular points.
Between two surfaces the harmonic morphisms are exactly the weakly conformal maps. Since the composition
of two harmonic morphisms is again a harmonic morphism, we get that,
locally any harmonic morphism to a surface can be turned into a harmonic morphism to the
complex plane by composing with a weakly conformal map.
Local existence of harmonic morphisms can be characterized in terms of foliations.
If the codomain is a surface then the existence of a local harmonic morphism is equivalent to
the existence of a local conformal foliation with minimal fibers at regular points, see \cite{Wood86} by Wood.
Baird and Wood found a necessary condition, see \cite{BaiWoo} Corollary 4.4, on the curvature
for local existence of complex-valued harmonic morphisms on three-manifolds.
In this case the fibers are geodesics and there is an orthonormal basis
$\{X,Y\}$ for the horizontal space such that the \textbf{Ricci curvature condition}
\[\operatorname{Ric}(X,X)=\operatorname{Ric}(Y,Y)\textrm{ and }\operatorname{Ric}(X,Y)=0,\]
is satisfied. In three dimensions this is equivalent to
\[\SPE{R(X,U)U}{X}=\SPE{R(Y,U)U}{Y}\textrm{ and }\SPE{R(X,U)U}{Y}=0\]
for any vertical unit vector $U$, which in turn is equivalent to the fact that the
sectional curvature $K(X_{\theta}\wedge U)$ is
independent of $\theta$ where $X_{\theta}=\cos(\theta)X+\sin(\theta)Y$.
We show in this paper that the last condition is true for any complex-valued submersive harmonic morphism with
totally geodesic fibers.
\begin{theorem}\label{Jon-Curv}
Let $(M,g)$ and $(N^{2},h)$ be Riemannian manifolds, let $\phi:(M,g)\to (N^{2},h)$ be a submersive
harmonic morphism with totally geodesic fibers and $p\in M$.
Given any $U,V\in\mathcal{V}_{p}=\ker(\dop\phi)$ and any orthonormal basis $\{X,Y\}$ for $\mathcal{H}_{p}=\mathcal{V}_{p}^{\bot}$,
set $X_{\theta}=\cos(\theta)X+\sin(\theta)Y$. Then
\[\SPE{R(X_{\theta}\wedge U)}{X_{\theta}\wedge V}\]
is independent of $\theta$.
\end{theorem}
In four dimensions or more this is stronger than the Ricci curvature condition.
Note that Examples 6.1 and 6.2 of \cite{Gud-Sven-2013} by Gudmundsson and Svensson
do not have totally geodesic fibers. So they are counterexamples to the Ricci curvature condition
only in the case of minimal but not totally geodesic fibers.
We present these two examples in Example \ref{GudSvenEx1} and \ref{GudSvenEx2}.
If we assume that the domain $(M,g)$ is an Einstein manifold, then the curvature operator,
in a suitably chosen basis,
splits into two blocks and we find that there are at least $\operatorname{dim}(M)-2$
double eigenvalues for the curvature operator.
We use this to give an example of a five dimensional homogeneous Einstein
manifold that does not have any submersive harmonic morphism with totally geodesic
fibers.
Harmonic morphisms with totally geodesic fibers have been studied in different ways before. Baird and Wood,
Section 6.8 \cite{BW-book}, classify them in the constant curvature case. Later Pantilie generalized
to the case where the domain is conformally equivalent to constant curvature, \cite{Pantilie08}.
Mustafa \cite{Mustafa} gave a Bochner type curvature formula and applied it to foliations
with large codimension.
We end this paper by showing that the Ricci curvature condition is satisfied by harmonic morphisms
that are holomorphic with respect to a complex structure and where the second fundamental
form is compatible with the complex structure. The result is similar
to Proposition 6.3 from \cite{LouPan}, where Loubeau and Pantilie describe twistorial
harmonic morphisms, but only in $4$ dimensions.
\section{The curvature condition}
Let $(M,g)$ and $(N,h)$ be Riemannian manifolds and let $\phi:(M,g)\to(N,h)$
be a smooth submersion. Denote the vertical distribution associated with $\phi$ by $\mathcal{V}=\ker(\dop\phi)$
and the horizontal distribution by $\mathcal{H}=\mathcal{V}^{\bot}$.
For two vector fields $E,F$ on $M$ define the tensors $A$ and $B$, introduced in \cite{ONeill}, by
\[A_{E}F=\mathcal{V}(\nabla_{\mathcal{H} E}\mathcal{H} F)\textrm{ and }B_{E}F=\mathcal{H}(\nabla_{\mathcal{V} E}\mathcal{V} F).\]
$B$ is called the second fundamental form and the fibers of $\phi$ are said to be totally geodesic if $B=0$.
The dual $A_{X}^{*}$ of $A_{X}$ satisfies $A^{*}_{X}F=-\mathcal{H}(\nabla_{X}\mathcal{V} F)$ for $X\in \mathcal{H}$ and the dual $B_{U}^{*}$
of $B_{U}$ satisfies $B^{*}_{U}F=-\mathcal{V}(\nabla_{U}\mathcal{H} F)$ for $U\in\mathcal{V}$.
Gudmundsson calculated the curvature for a horizontally conformal submersion in \cite{Gud-thesis}; we state the
results from Proposition 2.1.2, and Theorem 2.2.3 (2) and (3) below.
\begin{proposition}\label{Gud-curv}
Let $(M,g)$ and $(N,h)$ be Riemannian manifolds and let $\phi:(M,g)\to(N,h)$ be a
horizontally conformal submersion with dilation $\lambda:M\to(0,\infty)$.
Let $U,V,W$ be vertical vectors and $X,Y$ be horizontal vectors, then
\begin{align*}
(i)&\,A_{X}Y=\frac{1}{2}\mathcal{V}([X,Y])+\SPE{X}{Y}\mathcal{V}(\operatorname{grad}\ln\lambda)\\
(ii)&\,\SPE{R(U\wedge V)}{W\wedge X}=\SPE{(\nabla_{U}B)_{V}W}{X}-\SPE{(\nabla_{V}B)_{U}W}{X}\\
(iii)&\,\SPE{R(U\wedge X)}{Y\wedge V}=\SPE{(\nabla_{U}A)_{X}Y}{V}+\SPE{A^{*}_{X}U}{A^{*}_{Y}V}+\SPE{(\nabla_{X}B^{*})_{U}Y}{V}\\
&-\SPE{B^{*}_{V}Y}{B^{*}_{U}X}-2 V(\ln \lambda)\SPE{A_{X}Y}{U}.
\end{align*}
\end{proposition}
It is well-known that $A_{X}Y+A_{Y}X=2\SPE{X}{Y}\mathcal{V}(\operatorname{grad}\ln\lambda)$ and $A_{X}Y-A_{Y}X=\mathcal{V}([X,Y])$.
Suppose that $(N,h)$ is a surface and $\{X,Y\}$ is an orthonormal basis for $\mathcal{H}$ and $U\in \mathcal{V}$, then
\[A^{*}_{X}U=\SPE{A^{*}_{X}U}{X}X+\SPE{A^{*}_{X}U}{Y}Y=\SPE{U}{A_{X}X}X+\SPE{U}{A_{X}Y}Y.\]
From this we see that $\SPE{A^{*}_{X}U}{A^{*}_{X}U}$ does not depend on the direction of $X$. First
\begin{align*}
\SPE{A^{*}_{X}U}{A^{*}_{X}U}=&\SPE{U}{A_{X}X}^{2}+\SPE{U}{A_{X}Y}^{2}\\
=&\SPE{U}{\mathcal{V}(\operatorname{grad}\ln\lambda)}^{2}+\SPE{U}{\frac{1}{2}[X,Y]}^{2}\\
=&U(\ln\lambda)^2+\frac{1}{4}\SPE{U}{[X,Y]}^{2}.
\end{align*}
Now, since the vertical part of the Lie bracket of horizontal vector fields is a tensor, the term
$\frac{1}{4}\SPE{U}{[X,Y]}^{2}$ is in fact independent of our choice of orthonormal basis
$\{X,Y\}$ for the horizontal space. To see this, suppose $a^2+b^2=1$, then
\begin{align*}
\SPE{U}{[aX+bY,bX-aY]}^{2}=&\SPE{U}{-a^2[X,Y]+b^2[Y,X]}^{2}\\
=&(-1)^{2}\SPE{U}{[X,Y]}^{2}.
\end{align*}
The proof of Theorem \ref{Jon-Curv} follows from the calculation above by polarizing twice, once in $X$
and once in $U$, but we give a direct proof below.
\begin{proof}
From Proposition \ref{Gud-curv} (iii) the curvature of a horizontally conformal
submersion with totally geodesic fibers is
\[\SPE{R(U\wedge X)}{Y\wedge V}=\SPE{(\nabla_{U}A)_{X}Y}{V}+\SPE{A^{*}_{X}U}{A^{*}_{Y}V}-2 V(\ln \lambda)\SPE{A_{X}Y}{U}\]
for any $U,V\in\mathcal{V}$ and any $X,Y\in\mathcal{H}$.
Both sides of the expression are tensors, so we may extend the vectors to vector fields in any way we choose.
\begin{align*}
\SPE{R(X_{\theta}\wedge U)}{X_{\theta}\wedge V}
=&\cos^{2}(\theta)\SPE{R(X\wedge U)}{X\wedge V}+\sin^{2}(\theta)\SPE{R(Y\wedge U)}{Y\wedge V}\\
&+\cos(\theta)\sin(\theta)\left(\SPE{R(X\wedge U)}{Y\wedge V}+\SPE{R(Y\wedge U)}{X\wedge V}\right).
\end{align*}
Extend $X,Y,U,V$ to unit vector fields, then $2\SPE{\nabla_{U}X}{X}=U\SPE{X}{X}=0$ and
\begin{align*}
\SPE{R(X\wedge U)}{X\wedge V}=&\SPE{(\nabla_{U}A)_{X}X}{V}+\SPE{A^{*}_{X}U}{A^{*}_{X}V}-2V(\ln\lambda)\SPE{A_{X}X}{U}\\
=&\SPE{\nabla_{U}(A_{X}X)}{V}-\SPE{A_{\nabla_{U}X}X}{V}-\SPE{A_{X}(\nabla_{U}X)}{V}\\
&+\SPE{\SPE{A^{*}_{X}U}{X}X+\SPE{A^{*}_{X}U}{Y}Y}{\SPE{A^{*}_{X}V}{X}X+\SPE{A^{*}_{X}V}{Y}Y}\\
&-2V(\ln\lambda)\SPE{\mathcal{V}(\operatorname{grad}\ln\lambda)}{U}\\
=&\SPE{\nabla_{U}\mathcal{V}(\operatorname{grad}\ln\lambda)}{V}-\SPE{A_{\nabla_{U}X}X+A_{X}(\nabla_{U}X)}{V}\\
&+\SPE{A_{X}X}{U}\SPE{A_{X}X}{V}+\SPE{A_{X}Y}{U}\SPE{A_{X}Y}{V}-2V(\ln\lambda)U(\ln\lambda)\\
=&\SPE{\nabla_{U}\mathcal{V}(\operatorname{grad}\ln\lambda)}{V}-\SPE{\SPE{\nabla_{U}X}{X}\mathcal{V}(\operatorname{grad} \ln\lambda)}{V}\\
&+U(\ln\lambda)V(\ln\lambda)+\frac{1}{4}\SPE{[X,Y]}{U}\SPE{[X,Y]}{V}-2U(\ln\lambda)V(\ln\lambda)\\
=&\SPE{\nabla_{U}\mathcal{V}(\operatorname{grad}\ln\lambda)}{V}+U(\ln\lambda)V(\ln\lambda)\\
&+\frac{1}{4}\SPE{[X,Y]}{U}\SPE{[X,Y]}{V}-2U(\ln\lambda)V(\ln\lambda).
\end{align*}
A similar calculation shows that this equals $\SPE{R(Y\wedge U)}{Y\wedge V}$.
Now, since we extended to unit vector fields, $\SPE{\nabla_{U}X}{Y}=-\SPE{X}{\nabla_{U}Y}$, so
\begin{align*}
\SPE{R(X\wedge U)}{Y\wedge V}+\SPE{R(Y\wedge U)}{X\wedge V}=&
\SPE{\nabla_{U}(A_{X}Y)}{V}-\SPE{A_{\nabla_{U}X}Y}{V}-\SPE{A_{X}(\nabla_{U}Y)}{V}\\
&+\SPE{\nabla_{U}(A_{Y}X)}{V}-\SPE{A_{\nabla_{U}Y}X}{V}-\SPE{A_{Y}(\nabla_{U}X)}{V}\\
&+\SPE{A_{X}X}{U}\SPE{A_{Y}X}{V}+\SPE{A_{X}Y}{U}\SPE{A_{Y}Y}{V}\\
&+\SPE{A_{Y}Y}{U}\SPE{A_{X}Y}{V}+\SPE{A_{Y}X}{U}\SPE{A_{X}X}{V}\\
&-2V(\ln\lambda)\SPE{A_{X}Y}{U}-2V(\ln\lambda)\SPE{A_{Y}X}{U}\\
=&\SPE{\nabla_{U}(A_{X}Y)}{V}+\SPE{\nabla_{U}(A_{Y}X)}{V}\\
&-\SPE{A_{\nabla_{U}X}Y}{V}-\SPE{A_{Y}(\nabla_{U}X)}{V}\\
&-\SPE{A_{X}(\nabla_{U}Y)}{V}-\SPE{A_{\nabla_{U}Y}X}{V}\\
&+\SPE{\mathcal{V}(\operatorname{grad}\ln\lambda)}{U}\SPE{A_{Y}X+A_{X}Y}{V}\\
&+\SPE{A_{X}Y+A_{Y}X}{U}\SPE{\mathcal{V}(\operatorname{grad}\ln\lambda)}{V}\\
&-2V(\ln\lambda)\SPE{A_{X}Y+A_{Y}X}{U}\\
=&\SPE{\nabla_{U}\mathcal{V}([X,Y])}{V}+\SPE{\nabla_{U}\mathcal{V}([Y,X])}{V}\\
&-\SPE{\SPE{X}{\nabla_{U}Y}\mathcal{V}(\operatorname{grad}\ln\lambda)}{V}\\
&-\SPE{\SPE{Y}{\nabla_{U}X}\mathcal{V}(\operatorname{grad}\ln\lambda)}{V}\\
=&0.
\end{align*}
Thus, the value of $\SPE{R(X_{\theta}\wedge U)}{X_{\theta}\wedge V}$ does not depend on $\theta$.
\end{proof}
\section{Implications for Einstein manifolds}
Proposition \ref{Gud-curv} (ii) says that for a horizontally conformal submersion $\phi:(M,g)\to(N,h)$ with
totally geodesic fibers the curvature operator of $M$ satisfies
\[\SPE{R(U\wedge V)}{W\wedge X}=0\]
for all $U,V,W\in\mathcal{V}$ and all $X\in\mathcal{H}$.
Let $\{U_{k}\}$ be an orthonormal basis for $\mathcal{V}$ and $\{X,Y\}$ be an orthonormal basis for $\mathcal{H}$.
If we assume that the domain $M$ is an Einstein manifold then
\begin{align*}
0=\operatorname{Ric}(X,U)&=\SPE{R(X\wedge Y)}{Y\wedge U}+\sum_{k}\SPE{R(X\wedge U_{k})}{U_{k}\wedge U}\\
&=\SPE{R(X\wedge Y)}{Y\wedge U}
\end{align*}
for all $U\in\mathcal{V}$. This means that the curvature operator $R$ splits into invariant components
\[\wedge^{2}T_{p}M=(\wedge^{2}\mathcal{V}\oplus\wedge^{2}\mathcal{H})\oplus W,\]
where $W$ is generated by the mixed vectors, that is,
\[R(\wedge^{2}\mathcal{V}\oplus\wedge^{2}\mathcal{H})\subseteq\wedge^{2}\mathcal{V}\oplus\wedge^{2}\mathcal{H}\textrm{ and }R(W)\subseteq W.\]
Thus, the eigenvalues of $R$ are the union of the eigenvalues
of $R|_{\wedge^{2}\mathcal{V}\oplus\wedge^{2}\mathcal{H}}$ and $R|_{W}$.
We can define a complex structure $J$ on $W$ by $J(X\wedge U)=Y\wedge U$ and $J(Y\wedge U)=-X\wedge U$.
The curvature tensor $R|_{W}$ is, due to Theorem \ref{Jon-Curv},
represented by a Hermitian matrix $H$ with respect to this complex structure.
Let $e_{j}$ be an eigenvector of the Hermitian matrix $H$; then $e_{j}$ and $J e_{j}$ represent different real
eigenvectors for $R|_{W}$ with the same eigenvalue. Thus $R|_{W}$, and therefore $R$, has at least
$\operatorname{dim}(M)-2$ double eigenvalues. We get
\begin{proposition}
Let $(M,g)$ be an Einstein manifold and $(N^{2},h)$ be a Riemannian surface.
Let $R$ be the curvature operator of $(M,g)$ at $p\in M$. If there is a submersive harmonic
morphism $\phi:(M,g)\to(N^{2},h)$ with totally geodesic fibers then $R$ has at least
$\operatorname{dim}(M)-2$ pairs of eigenvalues.
\end{proposition}
In particular, the relationship between the determinants of $R|_{W}$ and $H$ is $\det(R|_{W})=\det(H)^{2}$.
So if $F$ is the characteristic polynomial of $H$ and $f$ the characteristic polynomial of $R|_{W}$,
then $f=F^{2}$ and $F$ is a factor of $\gcd(f,f')$. Thus $\gcd(f,f')$ is a polynomial of degree at least
$\deg(F)=\operatorname{dim}(M)-2$.
\section{Examples}
We give an example of a five dimensional manifold that does not have any
conformal foliations with totally geodesic fibers, not even locally. The two homogeneous
Einstein manifolds below were found by Alekseevsky in \cite{Alek}, but we use the notation of \cite{Nikon}.
\begin{example}\label{NoTot}
Let $S$ be the five dimensional homogeneous Einstein manifold given in Theorem 1(5) of \cite{Nikon}.
This is a solvable simply connected Lie group corresponding to the Lie algebra $\mathfrak{s}$ given by an orthonormal basis
$\{A,X_{1},X_{2},X_{3},X_{4}\}$ with Lie brackets
\begin{align*}
&[X_{1},X_{2}]=\sqrt{\frac{2}{3}} X_{3},\,[X_{1},X_{3}]=\sqrt{\frac{2}{3}} X_{4},\\
&[A,X_{j}]=\frac{j}{\sqrt{30}}X_{j},\, j=1,2,3,4.
\end{align*}
A long but straightforward calculation shows that the curvature operator is given by
\[\frac{1}{30}\left[\begin{array}{cccccccccc}
13 & -2\sqrt{5} & -4\sqrt{5} & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
-2\sqrt{5} & 4 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
-4\sqrt{5} & 0 & 16 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 8 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 1 & 0 & 0 & \sqrt{5} & 0 & \sqrt{5}\\
0 & 0 & 0 & 0 & 0 & 9 & -3\sqrt{5} & 0 & -3\sqrt{5} & 0\\
0 & 0 & 0 & 0 & 0 & -3\sqrt{5} & 17 & 0 & 5 & 0\\
0 & 0 & 0 & 0 & \sqrt{5} & 0 & 0 & 1 & 0 & 5\\
0 & 0 & 0 & 0 & 0 & -3\sqrt{5} & 5 & 0 & -1 & 0\\
0 & 0 & 0 & 0 & \sqrt{5} & 0 & 0 & 5 & 0 & 7
\end{array}\right]\]
with respect to the basis
\[\{X_{1}\wedge X_{3},X_{2}\wedge A,X_{4}\wedge A,X_{2}\wedge X_{4},
X_{1}\wedge A,X_{3}\wedge A,X_{1}\wedge X_{2},X_{2}\wedge X_{3},X_{1}\wedge X_{4},X_{3}\wedge X_{4}\}.\]
Let $f$ be the characteristic polynomial, then $\gcd(f(x),f'(x))=-\frac{4}{15}+x$, which is a
polynomial of degree $1<3$, so there are no conformal foliations with totally geodesic fibers.
\end{example}
One way to produce foliations on a Lie group $G$ is to find a subalgebra $\mathfrak{v}$
of the Lie algebra $\mathfrak{g}$ of $G$. The Riemannian metric on $G$ is the left translation of the
scalar product on $\mathfrak{g}$. If $\mathfrak{v}$ corresponds to a closed subgroup $K$ we foliate by left translating
this subgroup, $\mathcal{F}=\{L_{g}K\}_{g\in G}$.
The foliation has totally geodesic fibers if and only if
\begin{align*}
\SPE{B_{U}V}{X}&=-\frac{1}{2}(\SPE{[X,U]}{V}+\SPE{[X,V]}{U})=0
\end{align*}
for all $U,V\in\mathfrak{v}$ and all $X\in\mathfrak{h}=\mathfrak{v}^{\bot}$ and is conformal if
\begin{align*}
(\mathcal{L}_{V}g)(X,Y)&=-\frac{1}{2}(\SPE{[V,X]}{Y}+\SPE{[V,Y]}{X})=\nu(V)\SPE{X}{Y}
\end{align*}
for all $V\in\mathfrak{v}$ and all $X,Y\in\mathfrak{h}$, where $\nu$ is a linear functional on $\mathfrak{v}$.
Consider again Example \ref{NoTot}.
If we define a foliation by setting $\mathfrak{v}=\{A,X_{2},X_{4}\}$ and
$\mathfrak{h}=\{X_{1},X_{3}\}$ in the procedure above, then we get a foliation
with totally geodesic fibers, but it is not conformal.
If instead we define a foliation by setting $\mathfrak{v}=\{X_{2},X_{3},X_{4}\}$ and
$\mathfrak{h}=\{A,X_{1}\}$, then we get a conformal foliation, but
this does not have totally geodesic fibers, in fact, not even minimal fibers.
We also give an example of a five dimensional manifold with a conformal foliation with totally geodesic fibers,
and see how the curvature operator behaves.
\begin{example}
Let $S$ be the five dimensional homogeneous Einstein manifold given in Theorem 1(4) of \cite{Nikon}.
This is a solvable simply connected Lie group corresponding to the Lie algebra $\mathfrak{s}$ given by an orthonormal basis
$\{A,X_{1},X_{2},X_{3},X_{4}\}$ with Lie brackets
\begin{align*}
&[X_{1},X_{2}]=\sqrt{\frac{2}{3}} X_{3},\\
&[A,X_{1}]=\frac{2}{\sqrt{33}}X_{1},\,[A,X_{2}]=\frac{2}{\sqrt{33}}X_{2},\,
[A,X_{3}]=\frac{4}{\sqrt{33}}X_{3},\,
[A,X_{4}]=\frac{3}{\sqrt{33}}X_{4}.
\end{align*}
If we left translate $\mathfrak{v}=\{A,X_{3},X_{4}\}$ and $\mathfrak{h}=\{X_{1},X_{2}\}$ we get a conformal
foliation with totally geodesic fibers.
The curvature operator is given by
\[\frac{1}{66}\left[\begin{array}{cccccccccc}
41 & -4\sqrt{22} & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
-4\sqrt{22} & 32 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 18 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 24 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 8 & 0 & 0 & 2\sqrt{22} & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 8 & -2\sqrt{22} & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & -2\sqrt{22} & 5 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 2\sqrt{22} & 0 & 0 & 5 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 12 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 12
\end{array}\right]\]
with respect to the basis
\[\{X_{1}\wedge X_{2},X_{3}\wedge A,X_{4}\wedge A,X_{3}\wedge X_{4},X_{1}\wedge A,X_{2}\wedge A,
X_{1}\wedge X_{3},X_{2}\wedge X_{3},X_{1}\wedge X_{4},X_{2}\wedge X_{4}\}.\]
We see that the curvature operator
satisfies the conclusions of Theorem \ref{Jon-Curv}.
\end{example}
We now give some details about Examples 6.1 and 6.2 of \cite{Gud-Sven-2013}.
\begin{example}\label{GudSvenEx1}
For Example 6.1 we let $\mathfrak{g}_{1}$ be the Lie algebra generated by the orthonormal vectors
$\{W,X_{1},\ldots,X_{n+1}\}$ with Lie brackets
\[[W,X_{k}]=X_{k+1}\textrm{ for }k=1,\ldots,n.\]
Let $\mathfrak{v}=\operatorname{span}\{X_{2},\ldots,X_{n+1}\}$; this is a subalgebra of codimension $2$. The Ricci curvature of $G_{1}$, the
simply connected Lie group related to $\mathfrak{g}_{1}$,
satisfies $\operatorname{Ric}(W,W)-\operatorname{Ric}(X_{1},X_{1})=\frac{1-n}{2}$.
In this case
\[\SPE{B_{X_{2}}X_{3}}{W}=\frac{1}{2},\]
and thus the foliation defined by $\mathfrak{v}$ is not totally geodesic.
\end{example}
\begin{example}\label{GudSvenEx2}
For Example 6.2, let $\mathfrak{g}_{2}$ be generated by the orthonormal vectors
$\{W,X_{1},\ldots,X_{n}\}$ with Lie brackets
\[[W,X_{k}]=\alpha_{k}X_{k}\textrm{ where }\alpha_{k}\in\mathbb{R}\textrm{ for }k=1,\ldots,n.\]
Let $\mathfrak{v}=\operatorname{span}\{X_{2},\ldots,X_{n}\}$; this is a subalgebra of codimension $2$. The Ricci curvature of $G_{2}$, the
simply connected Lie group related to $\mathfrak{g}_{2}$, satisfies
\[\operatorname{Ric}(X_{1})=-\alpha_{1}(\alpha_{1}+\ldots+\alpha_{n})X_{1}\textrm{ and }
\operatorname{Ric}(W)=-(\alpha_{1}^{2}+\ldots+\alpha_{n}^{2})W.\]
The fibers of the foliation given by $\mathfrak{v}$ are totally geodesic if and only if
$\alpha_{2}=\ldots=\alpha_{n}=0$, in which case the Ricci curvature condition is satisfied.
\end{example}
\section{Holomorphic harmonic morphisms}
In this section we will show that the Ricci curvature condition still holds under weaker conditions than
totally geodesic fibers.
\begin{definition}
Let $\phi:(M^{2m},g,J)\to (N^{2},h,J^{N})$ be a horizontally conformal map between almost
Hermitian manifolds. We say that $J$ is \textbf{adapted} to $\phi$ if $\phi$ is holomorphic with respect to $J$.
\end{definition}
If $M$ is $4$-dimensional then locally there exist exactly two adapted almost complex structures
(up to sign); in higher dimensions there are several such structures.
If $J$ is adapted then $J\mathcal{V}\subseteq\mathcal{V}$, $J\mathcal{H}\subseteq\mathcal{H}$ and $J$ commutes with the
orthogonal projections of $TM$ onto $\mathcal{V}$ and $\mathcal{H}$.
An almost complex structure $J$ is integrable if and only if the \textbf{Nijenhuis tensor},
\[N_{J}(Z,W)=[Z,W]+J[JZ,W]+J[Z,JW]-[JZ,JW],\]
is zero, in which case we say that $J$ is a complex structure.
\begin{definition}
Let $\mathcal{F}$ be a foliation on an almost Hermitian manifold $(M^{2m},g,J)$ with vertical distribution $\mathcal{V}$.
We say that the almost complex structure is compatible with the second fundamental form
$B$ of $\mathcal{F}$ if $J B_{U}V=B_{JU}V=B_{U}JV$ for all $U,V\in\mathcal{V}$.
\end{definition}
\begin{definition}
Let $\mathcal{F}$ be a foliation on an almost Hermitian manifold $(M,g,J)$ with vertical distribution $\mathcal{V}$.
$\mathcal{F}$ is said to have \textbf{superminimal} fibers if $\nabla_{U}J=0$ for all $U\in\mathcal{V}$.
\end{definition}
It is known that if a conformal foliation on an almost Hermitian manifold has superminimal fibers then
the almost complex structure is compatible with the second fundamental form, Section 7.8 in \cite{BW-book},
and is integrable, Proposition 7.9.1 of \cite{BW-book}.
\begin{lemma}
Let $(M,g,J)$ be an almost Hermitian manifold. If $J$ is compatible with the second fundamental
form $B$, then
\[B^{*}_{U}JX=-B^{*}_{JU}X=JB^{*}_{U}X,\]
for all $U\in\mathcal{V}$ and $X\in\mathcal{H}$.
\end{lemma}
\begin{proof}
The proof is a simple calculation. Let $V\in\mathcal{V}$, then
\begin{align*}
\SPE{B^{*}_{U}JX}{V}=&\SPE{JX}{B_{U}V}\\
=&-\SPE{X}{J B_{U}V}\\
=&-\SPE{X}{B_{JU}V}=-\SPE{B^{*}_{JU}X}{V}\\
=&-\SPE{X}{B_{U}JV}=-\SPE{B^{*}_{U}X}{JV}=\SPE{JB^{*}_{U}X}{V};
\end{align*}
since $V$ is arbitrary the lemma follows.
\end{proof}
We will show that the Ricci curvature condition holds in any even dimension if one of the adapted almost
complex structures is integrable and compatible with the second fundamental form.
The result is similar to Proposition 6.3 in \cite{LouPan} that deals with the $4$-dimensional case.
Wood showed, see Proposition 3.9 of \cite{Wood92},
that in four dimensions the adapted almost complex structure is integrable if and only if
the fibers of the foliation are superminimal. Thus in four dimensions we only have to assume
that the adapted almost complex structure is integrable.
\begin{theorem}
Let $\phi:M^{2m}\to N^{2}$ be a harmonic morphism between Hermitian manifolds $(M^{2m},g,J)$ and
$(N^{2},h,J^{N})$. Suppose that $J$ is adapted to $\phi$ and compatible with the second
fundamental form $B$. Then
\[\operatorname{Ric}(X,X)=\operatorname{Ric}(Y,Y)\textrm{ and }\operatorname{Ric}(X,Y)=0\]
for $X,Y\in\mathcal{H}$ orthonormal.
\end{theorem}
\begin{proof}
Let $\{X,Y\}$ be an orthonormal basis for $\mathcal{H}$ and $\{U_{i},V_{i}\}_{i=1}^{m}$ be an orthonormal basis for $\mathcal{V}$
chosen in such a way that $JX=Y$ and $JU_{i}=V_{i}$. We have
\begin{align*}
\operatorname{Ric}(X,X)&=\sum_{i}(R(X,U_{i},U_{i},X)+R(X,V_{i},V_{i},X))+R(X,Y,Y,X),\\
\operatorname{Ric}(Y,Y)&=\sum_{i}(R(Y,U_{i},U_{i},Y)+R(Y,V_{i},V_{i},Y))+R(Y,X,X,Y).
\end{align*}
From the symmetries of the curvature operator $R(X,Y,Y,X)=R(Y,X,X,Y)$.
The curvature is given by Proposition \ref{Gud-curv} (iii). That the terms not including $B^{*}$, that is,
\[\SPE{(\nabla_{U}A)_{X}Y}{V}+\SPE{A^{*}_{X}U}{A^{*}_{Y}V}-2 V(\ln \lambda)\SPE{A_{X}Y}{U},\]
satisfy the Ricci curvature condition is clear from Theorem \ref{Jon-Curv}. We denote the terms that contain
$B^{*}$ by $\tilde{R}$,
\[\SPE{\tilde{R}(U\wedge X)}{Y\wedge V}=\SPE{(\nabla_{X}B^{*})_{U}Y}{V}-\SPE{B^{*}_{V}Y}{B^{*}_{U}X},\]
where by definition
\[\SPE{(\nabla_{X}B^{*})_{U}X}{U}=\SPE{\nabla_{X}(B^{*}_{U}X)}{U}-\SPE{B^{*}_{\nabla_{X}U}X}{U}-\SPE{B^{*}_{U}\nabla_{X}X}{U}.\]
Thus to prove $\operatorname{Ric}(X,X)=\operatorname{Ric}(Y,Y)$, we want to show that
\begin{align*}
\tilde{R}(X,U_{i},U_{i},X)+\tilde{R}(X,V_{i},V_{i},X)=\tilde{R}(Y,U_{i},U_{i},Y)+\tilde{R}(Y,V_{i},V_{i},Y)
\end{align*}
for each $i$. Since we prove it for each $i$ we will suppress the index and assume $JU=V$.
Now define $F_{1},F_{2},F_{3}$ and $F_{4}$ by
\begin{align*}
F_{1}(X)&=\SPE{\nabla_{X}(B^{*}_{U}X)}{U}+\SPE{\nabla_{X}(B^{*}_{V}X)}{V}\\
F_{2}(X)&=\SPE{B^{*}_{\nabla_{X}U}X}{U}+\SPE{B^{*}_{\nabla_{X}V}X}{V}\\
F_{3}(X)&=\SPE{B^{*}_{U}\nabla_{X}X}{U}+\SPE{B^{*}_{V}\nabla_{X}X}{V}\\
F_{4}(X)&=|B^{*}_{U}X|^{2}+|B^{*}_{V}X|^{2}.
\end{align*}
Then
\[\tilde{R}(X,U,U,X)+\tilde{R}(X,V,V,X)=F_{1}(X)-F_{2}(X)-F_{3}(X)-F_{4}(X).\]
We will show that $F_{j}(X)=F_{j}(Y)$ for $j=1,\ldots,4$, which implies $\operatorname{Ric}(X,X)=\operatorname{Ric}(Y,Y)$.
We start with $F_{1}$:
\begin{align*}
F_{1}(Y)=&\SPE{\nabla_{JX}(B^{*}_{U}JX)}{U}+\SPE{\nabla_{JX}(B^{*}_{V}JX)}{V}\\
=&-\SPE{\nabla_{JX}(B^{*}_{JU}X)}{U}-\SPE{\nabla_{JX}(B^{*}_{JV}X)}{V}\\
=&-\SPE{\nabla_{B^{*}_{JU}X}JX+[JX,B^{*}_{JU}X]}{U}-\SPE{\nabla_{B^{*}_{JV}X}JX+[JX,B^{*}_{JV}X]}{V}\\
=&-\SPE{J\nabla_{B^{*}_{JU}X}X-[JX,JB^{*}_{U}X]}{U}-\SPE{J\nabla_{B^{*}_{JV}X}X-[JX,JB^{*}_{V}X]}{V}\\
=&\SPE{\nabla_{B^{*}_{JU}X}X}{JU}+\SPE{[JX,JB^{*}_{U}X]}{U}+\SPE{\nabla_{B^{*}_{JV}X}X}{JV}+\SPE{[JX,JB^{*}_{JU}X]}{V}\\
=&\SPE{\nabla_{B^{*}_{V}X}X}{V}+\SPE{\nabla_{B^{*}_{U}X}X}{U}+\SPE{[JX,JB^{*}_{U}X]-J[JX,B^{*}_{U}X]}{U}\\
=&\SPE{\nabla_{X}B^{*}_{V}X-[X,B^{*}_{V}X]}{V}+\SPE{\nabla_{X}B^{*}_{U}X-[X,B^{*}_{U}X]}{U}\\
&+\SPE{[JX,JB^{*}_{U}X]-J[JX,B^{*}_{U}X]}{U}\\
=&\SPE{\nabla_{X}(B^{*}_{U}X)}{U}+\SPE{\nabla_{X}(B^{*}_{V}X)}{V}\\
&+\SPE{[JX,JB^{*}_{U}X]-J[JX,B^{*}_{U}X]-[X,B^{*}_{U}X]-J[X,JB^{*}_{U}X]}{U}\\
=&\SPE{\nabla_{X}(B^{*}_{U}X)}{U}+\SPE{\nabla_{X}(B^{*}_{V}X)}{V}-\SPE{N_{J}(X,B^{*}_{U}X)}{U}\\
=&\SPE{\nabla_{X}(B^{*}_{U}X)}{U}+\SPE{\nabla_{X}(B^{*}_{V}X)}{V}\\
=&F_{1}(X).
\end{align*}
Next is $F_{2}$:
\begin{align*}
F_{2}(Y)=&\SPE{B^{*}_{\nabla_{JX}U}JX}{U}+\SPE{B^{*}_{\nabla_{JX}V}JX}{V}\\
=&\SPE{JX}{B_{\nabla_{JX}U}U}+\SPE{JX}{B_{\nabla_{JX}V}V}\\
=&\SPE{JX}{B_{U}(\nabla_{JX}U)}+\SPE{JX}{B_{V}(\nabla_{JX}V)}\\
=&\SPE{JX}{B_{U}(\nabla_{U}JX+[JX,U])}+\SPE{JX}{B_{V}(\nabla_{V}JX+[JX,V])}\\
=&\SPE{JX}{B_{U}(\nabla_{U}JX)}+\SPE{JX}{B_{V}(\nabla_{V}JX)}\\
&+\SPE{JX}{B_{U}([JX,U])}+\SPE{JX}{B_{JU}([JX,JU])}\\
=&\SPE{X}{B_{U}(\nabla_{U}X)}+\SPE{X}{B_{V}(\nabla_{V}X)}+\SPE{X}{B_{U}([JX,JU]-J[JX,U])}\\
=&\SPE{X}{B_{U}(\nabla_{X}U-[X,U])}+\SPE{X}{B_{V}(\nabla_{X}V-[X,V])}\\
&+\SPE{X}{B_{U}([JX,JU]-J[JX,U])}\\
=&\SPE{B^{*}_{\nabla_{X}U}X}{U}+\SPE{B^{*}_{\nabla_{X}V}X}{V}\\
&+\SPE{X}{B_{U}(-[X,U]-J[X,JU]+[JX,JU]-J[JX,U])}\\
=&\SPE{B^{*}_{\nabla_{X}U}X}{U}+\SPE{B^{*}_{\nabla_{X}V}X}{V}-\SPE{X}{B_{U}(N_{J}(X,U))}\\
=&F_{2}(X).
\end{align*}
Now we show that $F_{3}(X)=0$; the same is true for $F_{3}(Y)$:
\begin{align*}
F_{3}(X)=&\SPE{B^{*}_{U}\nabla_{X}X}{U}+\SPE{B^{*}_{V}\nabla_{X}X}{V}\\
=&\SPE{\nabla_{X}X}{B_{U}U+B_{V}V}\\
=&0.
\end{align*}
The last one, $F_{4}$, follows from
\begin{align*}
F_{4}(X)=&|B^{*}_{U}X|^{2}+|B^{*}_{V}X|^{2}\\
=&|B^{*}_{V}JX|^{2}+|B^{*}_{U}JX|^{2}\\
=&|B^{*}_{U}Y|^{2}+|B^{*}_{V}Y|^{2}\\
=&F_{4}(Y).
\end{align*}
We have shown that $\operatorname{Ric}(X,X)=\operatorname{Ric}(Y,Y)$ for any orthonormal basis; since
$\{\frac{1}{\sqrt{2}}(X+Y),\frac{1}{\sqrt{2}}(X-Y)\}$ also is an orthonormal basis we have
\[\operatorname{Ric}(X,Y)=\frac{1}{2}\left(\operatorname{Ric}\left(\frac{X+Y}{\sqrt{2}},\frac{X+Y}{\sqrt{2}}\right)
-\operatorname{Ric}\left(\frac{X-Y}{\sqrt{2}},\frac{X-Y}{\sqrt{2}}\right)\right)=0.\]
\end{proof}
We take another look at Example \ref{GudSvenEx1}. Any adapted
almost complex structure $J$ must satisfy $JW=X_{1}$ and $J(\mathfrak{v})\subseteq\mathfrak{v}$. Thus
\begin{align*}
N_{J}(W,X_{n+1})=&[W,X_{n+1}]+J[W,J X_{n+1}]+J[JW,X_{n+1}]-[J W,J X_{n+1}]\\
=&J[W,J X_{n+1}]\neq 0,
\end{align*}
and none of the adapted almost complex structures are integrable.
\begin{thebibliography}{99}
\bibitem{Alek}
D.V. Alekseevsky,
{\it Homogeneous Riemannian spaces of negative curvature},
Mat. Sb. {\bf 96}, (1975), 93-117; English translation: Math. USSR-Sb. {\bf 25}, (1975), 87-109.
\bibitem{BaiWoo}
P. Baird, J. C. Wood,
{\it Harmonic morphisms, Seifert fibre spaces and conformal foliations},
Proc. London Math. Soc. {\bf 64}, (1992), 170-196.
\bibitem{BW-book}
P. Baird, J. C. Wood,
{\it Harmonic morphisms between Riemannian manifolds},
London Math. Soc. Monogr. {\bf 29},
Oxford Univ. Press (2003).
\bibitem{Fuglede}
B. Fuglede,
{\it Harmonic morphisms between Riemannian manifolds},
Ann. Inst. Fourier(Grenoble) {\bf 28}, (1978), 107-144.
\bibitem{Gud-bib}
S. Gudmundsson, {\it The Bibliography of Harmonic
Morphisms}, {\tt http://www.matematik.lu.se/\\
matematiklu/personal/sigma/harmonic/bibliography.html}
\bibitem{Gud-thesis}
S. Gudmundsson, {\it On the Geometry of Harmonic Morphisms},
Ph.D. Thesis, University of Leeds, 1992.
\bibitem{Gud-Sven-2013}
S. Gudmundsson, M. Svensson,
{\it Harmonic morphisms from four dimensional Lie groups},
arXiv:1310.5113v3.
\bibitem{Ishihara}
T. Ishihara,
{\it A mapping of Riemannian manifolds which preserves harmonic functions},
J. Math. Kyoto Univ. {\bf 19}, (1979), 215-229.
\bibitem{LouPan}
E. Loubeau, R. Pantilie, {\it Harmonic morphisms between Weyl spaces and
twistorial maps},
Comm. Anal. Geom. {\bf 14}, (2006), 847-881.
\bibitem{Mustafa}
M.T. Mustafa,
{\it The structure of harmonic morphisms with totally geodesic fibres},
Commun. Contemp. Math. {\bf 6}, (2004), 419-430.
\bibitem{Nikon}
Y.G. Nikonorov,
{\it Noncompact homogeneous Einstein 5-manifolds},
Geom. Dedic. {\bf 113}, (2005), 107-143.
\bibitem{ONeill}
B. O'Neill,
{\it The fundamental equations of a submersion},
Michigan Math. J. {\bf 13}, 1966, 459-469.
\bibitem{Pantilie08}
R. Pantilie, {\it On a class of twistorial maps},
Differential Geom. App. {\bf 26}, 2008, 366-376.
\bibitem{Wood86}
J. C. Wood,
{\it Harmonic morphisms, foliations and Gauss maps},
Contemp. Math. {\bf 49}, (1986), 145-84.
\bibitem{Wood92}
J. C. Wood,
{\it Harmonic morphisms and Hermitian structures on Einstein $4$-manifolds},
Internat. J. Math. {\bf 3}, (1992), 415-439.
\end{thebibliography}
\end{document} |
\begin{document}
\begin{center}
{\bf Approximate integrals of motion and the quantum chaoticity problem\\
V. E. Bunakov, I. B. Ivanov\\{\it Peterburg Nuclear Physics Institute}}
{\bf Abstract}
\end{center}
{\small
The problem of existence and constructing of integrals of motion in stationary
quantum mechanics and its connection with quantum chaoticity is discussed.
It is shown that the earlier suggested quantum chaoticity criterion
characterizes destruction of initial symmetry of regular system
and of basis quantum numbers under influence of perturbation.
The convergent procedure allowing to construct approximate integrals of motion
in the form of non-trivial combinations depending on operators $(q,p)$ is suggested.
Properties of the obtained integrals with complicated structure
and the consequences of their existence for system's dynamics are discussed.
The method is used for explicit construction and investigation of the
approximate integrals in Henon-Heiles problem.
}
\section{Introduction}
For last decades the investigations of quantum chaos have been extensively
carried out but this field remains under hot discussions. One part
of researchers believe that quantum chaos doesn't exist and at best should
be studied in the semiclassical approximation. It is obvious from correspondence
principle that quantum counterpart of classical system should have properties
reflecting regularity or chaoticity of classical trajectories. The law of
level spacing distribution is considered to be one of such properties.
It is believed that the quantum analogue of a chaotic classical system obeys
the Wigner level spacing law, while the Poissonian law holds for regular systems.
However many authors (including us [1]), pointed out the
incompleteness and crudeness of this criterion of quantum chaoticity.
In this paper we continue to develop our approach [2-4] to the chaotic
properties of the quantum Hamiltonian systems. Our main point is connection
between the symmetry properties of a system and its regularity or chaoticity.
We show that the earlier suggested chaoticity criterion characterizes the
initial symmetry breaking and destruction of the corresponding integrals of
motion in a perturbed system, which leads to chaotisation. We compare our
approach with known criterion of existence of the approximate
quantum numbers by Hose and Taylor [5, 6] which is based on the analysis
of effective Hamiltonians.
One may ask if the new integrals of motion might appear in the
perturbed system and how can they influence the system's dynamics.
The problem of construction of the approximate integrals of motion has
always attracted great attention due to its practical and
philosophical importance. In classical mechanics we have KAM-theory
which guarantees existence of invariant tori under perturbation;
as to quantum mechanics the situation is somewhat
tangled [5, 6]. The normal form method [7, 8] is well known for construction
of the new approximate integrals of
motion and integrable approximations to Hamiltonian.
Its generalization on quantum systems was done in refs. [9-11]. After
analyzing the reasons of divergence in the normal form method we propose a
rather simple way for construction of approximate quantum numbers based on the
unitary transformation of the basis integrals of motion.
In the final part of the paper we discuss the possibility of the exact
integrals
existence in the perturbed system and formulate the hypothesis
of formal integrability in stationary quantum mechanics. We define formal
integrability as the existence (in mathematical sense) of complete set of
independent mutually commuting operators which have some characteristic
differences from the usual first integrals of motion. It is impossible to
write such operators in closed form (they have extremely complicated
structure) and we can't find them before the solution of Schr\"odinger
equation is obtained;
these integrals don't correspond to known symmetry groups and are
non-separable. We discuss why such a formal (mathematical) integrability
doesn't contradict the system's quantum chaoticity. The conception
of chaos means that the system has algorithmic complexity (it is very
difficult to solve equations with high accuracy) and statistical hypothesis works well
in the system. The existence of non-separable integrals of motion with
very complicated structure can't help us to solve equations and doesn't
influence the validity of statistical hypothesis.
\section{Destruction of quantum numbers and the chaoticity criterion}
Presently we consider stationary quantum system with Hamiltonian $H$
as a sum of Hamiltonian $H_0$ of an integrable system and
perturbation $\lambda V$:
\begin{equation}
\label{H}
H=H_0+\lambda V.
\end{equation}
Eigenfunctions $\{\phi_\alpha\}$ of the unperturbed integrable
Hamiltonian $H_0$
are common for some complete set of independent mutually commuting
operators $\{J_\rho\}_{\rho=1}^N$ ($N$ - the number of degrees of
freedom of the Hamiltonian $H_0$). Eigenstates $\psi_i$ of the full
Hamiltonian may be expanded in eigenfunctions $\phi_\alpha$
of the unperturbed Hamiltonian $H_0$:
\begin{equation}
\label{expansion}
\psi_i=\sum_\alpha \phi^*_\alpha\psi_i\phi_\alpha =
\sum_\alpha c^{\alpha}_i\phi_\alpha.
\end{equation}
Let us consider the probability $P_\alpha(E_i)$ to find the basis state
$\phi_\alpha$ in the state $\psi_i$ with energy $E_i$,
which is equal to the squared
absolute value of the corresponding coefficient in (\ref{expansion})
and define the energy width $\Gamma_{spr}^\alpha$ of $P_\alpha(E_i)$ distribution:
the minimal energy interval for which the sum of probabilities
$P_\alpha(E_i)$ is larger or equal to $0.5$.
Thus defined $\Gamma_{spr}^\alpha$ is the energy spreading width of basis
state $\phi_\alpha$. The spectrum of $H_0$ may be degenerate
and then the irreducible representations of symmetry group of $H_0$
consist of several basis functions which belong to one energy level (shell).
We want to find a parameter characterizing the measure
of initial symmetry breaking of $H_0$ under the influence of
the perturbation $V$. It's clear that
such symmetry breaking results only due to significant mixing between
functions from different irreducible representations. The mixing of
states within one shell doesn't change their symmetry. As long as the
spreading width $\Gamma_{spr}^\alpha$ is smaller than the distance $D_0$
between the neighboring levels in the spectrum of $H_0$ we can distinguish
the ``localization domain'' (in energy) of one set of basis states from the
``localization domain'' of another one. When the spreading width exceeds
$D_0$ we start losing the ``signatures'' of basis functions in the spectrum of
$H$ and can't even approximately compare states $\psi_i$ with irreducible
representation of symmetry group $H_0$. Thus parameter
\begin{equation}
\label{kappa}
\ae^\alpha = \Gamma_{spr}^\alpha /D_0
\end{equation}
is the natural measure of symmetry breaking.
When the parameter $\ae^\alpha$ exceeds unity the symmetry of the
Hamiltonian $H_0$ disappears. Such a value of perturbation is accompanied by
disappearance of the initial selection rules, the levels are distributed
approximately uniformly (level repulsion) and the level spacing
distribution approaches Wigner's law.
One can say that the transition from regularity to chaoticity
has taken place in quantum system and $\ae^\alpha$
may be considered as the parameter of chaoticity.
The spreading width $\Gamma_{spr}^\alpha$ depends on the number $\alpha$
of basis state of Hamiltonian $H_0$, i.e. on its quantum numbers.
The classical analogy to this is the dependence of the
invariant torus stability on the corresponding values of integrals of motion.
It is clear that in order to obtain the global chaoticity
characteristic in quantum
case it is necessary to average $\Gamma_{spr}^\alpha$ over the basis states
$\phi_\alpha$ belonging to the same irreducible representation (shell).
The averaged chaoticity parameter $\Gamma_{spr}$
unlike local $\Gamma_{spr}^\alpha$
has an important feature of invariance with respect to the choice of the basis
for integrable Hamiltonian $H_0$ [12]. From the theoretical point of view the
above chaoticity parameter has one more useful property. As it was shown in
[3], in the semiclassical limit $\Gamma_{spr}^\alpha/\hbar$ transforms into
Lyapunov's exponent of the corresponding classical motion.
Thus we see that criterion $\Gamma_{spr}$
"measures" destruction (fragmentation) of the irreducible representations of the
basis and, hence of the Casimir operator (the main quantum number), which is
the approximate integral of motion for small perturbations. The other quantum
numbers of the basis states suffer destruction in general under a
smaller perturbation due to the strong mixing within one shell.
To define directly the degree of destruction of approximate integrals
of motion one may use an ordinary mean-square-root deviation from the mean
value. This deviation of some operator $A$ in the state
$\psi_\alpha$ is calculated as follows:
\begin{equation}
\label{dA}
\Delta A = \sqrt{\psi_\alpha^*(A - \psi_\alpha^*A\psi_\alpha)^2\psi_\alpha}.
\end{equation}
The operator $A$ is the approximate integral of motion if the ratio
of $\Delta A$ to
the difference between its neighboring eigenvalues is less than $1$.
Another way to study and construct the approximate integrals of motion is the
well known method of effective Hamiltonians. In the series of works Hose
and Taylor [5, 6] suggested the criterion of existence of the effective
Hamiltonian and of connected with it integrals of motion. According to this
criterion we may build the convergent sequence of approximations to the
effective
Hamiltonian under the condition that projection of perturbed Hamiltonian
wave function to the model
space is greater than 0.5. Thus if the projection of some states $\psi_\alpha$
of Hamiltonian $H$ to the shell space exceeds 0.5, then the main quantum
number has to be the approximate integral of motion for these states. It
is obvious that this criterion practically coincides with our spreading
width one.
In order to compare the above criteria of destruction of the integrals of
motion we have analysed the quantum Henon-Heiles system:
\begin{equation}
\label{hhh}
H(q,p) = \frac{1}{2}(p^2_1+q^2_1)+\frac{1}{2}(p^2_2+q^2_2) +
\lambda (q^{2}_1 q_2 - q^3_2/3)
\end{equation}
The eigenfunctions were obtained using the oscillator basis of $496$
states ($30$ shells). Fig.~1 shows the dependence on the perturbation
intensity (energy $E$) of the parameter $\ae$ (\ref{kappa}), the averaged spreading
width of operator $N=n_1+n_2$ (see Eq.~(\ref{dA})) and of the
averaged projection ($P_s$) of the exact wave functions to the shell.
For the sake of easier comparison with other quantities in Fig.1 we
plotted the value $Pr=2(1-P_s)$.
We see that destruction of the initial $SU(2)$-symmetry according
to all these three criteria takes place approximately at $E = 0.10$.
Thus $\Gamma_{spr}$ measures the degree of mixing between the irreducible
representations and the destruction of the corresponding Casimir operators.
The question arises if some new integrals of motion might appear instead of
the destroyed basis integrals.
The method of normal forms is the most known way to construct the
approximate integrals and the integrable
approximations to Hamiltonian in classical mechanics [7, 8]. It has been
generalized
on quantum systems in refs. [9-11]. In this method the perturbed wave
functions
are constructed as certain superposition of the basis functions belonging
to a single irreducible representation. Therefore the symmetry of the wave
functions is not changed and perturbation $V$ with lower symmetry
leads only to the splitting of the degenerated level.
This approach gives rather good results when the
perturbation is small.
However Siegel (1941) proved the divergence of classical
normal forms in the case of nonintegrable initial system.
In quantum mechanics the question about
convergence hasn't got a final solution, though the authors [10, 11]
stress the asymptotic character of the series arising
and confirm it by numerical calculations.
Two reasons for the divergence in the method of normal forms may be
pointed out. The first one is well known --- it is the non-analyticity
of solutions at the point $\lambda=0$
(the replacement $\lambda \to -\lambda$ in Hamiltonian may lead to significant
changes of spectrum properties) and, as a consequence, to the divergence of
the expansion
into the powers of $\lambda$. The second reason is as follows.
In the quantum Birkhoff-Gustavson method the
integrable approximations are constructed in the
form of power series of operators which mix basis states only inside one
irreducible representation of the basis symmetry group (one shell).
If the interaction effectively mixes states from different irreducible
representations the Birkhoff-Gustavson
method obviously can't generate good integrable
approximations and convergent integrals in principle. This is
why the quantum numbers given by normal form might be good only when mixing
between the different shells is weak ($\Gamma_{spr}<1$).
The difficulties described don't mean the principal absence of
approximate integrals of motion in the perturbed system, they only reveal the
shortcomings of the methods. To get a convergent method of constructing the
integrals one should use combinations of
operators which mix states in any given finite-dimensional subspace.
In this case we can improve the integrable approximation to $H$
(in the sense of operator norm) by simply increasing the dimensions of
this subspace.
In the next section we'll describe the convergent procedure allowing
to construct approximate integrals in a rather trivial way.
\section{The convergent method for integrals of motion}
Performing a unitary transformation $U$ of some basis with a set of quantum
numbers we can always easily find a new set of mutually commuting
operators for which a transformed basis functions are eigenfunctions.
One can see that $J'_\rho = UJ_\rho U^\dagger$
($J_\rho$ -- operators of initial basis) are the desirable operators
and such bases are equivalent in the sense of the quantity of quantum numbers.
If we assume the completeness of eigenstates $\psi_\alpha$ of Hamiltonian $H$
in the Hilbert space $\cal H$ then $\psi_\alpha=U\phi_\alpha$
($\phi_\alpha$ is the eigenfunction of $H_0$), because any two complete
orthonormal bases are connected via some unitary transformation $U$.
Operators $J'_\rho$ commute with $H$ in the full Hilbert space $\cal H$.
Actually, for any function from complete basis $\psi_\alpha$
$$
[H, J'_\rho]\psi_\alpha = HUJ_\rho U^\dagger\psi_\alpha -
UJ_\rho U^\dagger H\psi_\alpha = HUJ_\rho\phi_\alpha -
E_\alpha UJ_\rho\phi_\alpha =
$$
$$
j_{\rho\alpha}HU\phi_\alpha -
E_\alpha j_{\rho\alpha} U\phi_\alpha =
j_{\rho\alpha}E_\alpha\psi_\alpha - E_\alpha j_{\rho\alpha}\psi_\alpha = 0.
$$
$J'_\rho$ are derived from $J_\rho$ with the aid of unitary transformation,
hence they also form a complete set of independent commuting operators
and their eigenvalues $j_{\rho\alpha}$ uniquely define every eigenstate
$\psi_\alpha$ of Hamiltonian $H$. Therefore $H$ is a function depending
on operators $J'_\rho$: $H=H(J'_\rho )$, and $E_\alpha = H(j_{\rho\alpha})$.
Having determined the approximate wave functions of perturbed Hamiltonian we
can construct the unitary transformation $U$ and the
approximate integrals of motion $J'_\rho$. The question about convergence
of the procedure suggested is reduced to investigating whether the
corresponding method for solving of Schr\"odinger equation converges or not.
For example it has been proved that Ritz's method converges
(with increasing basis) in the case of Hermitian operators
with lower-bounded discrete spectrum [13].
The introduced operators $J'_\rho$
seem to be formal unless we construct them explicitly as functions
of dynamic variables $(q,p)$. This, however is not difficult to do
with the help of methods taken from the theory of continuous groups'
representations. In the remaining part of this section the realization
of this method is described in details and properties of the integrals
obtained are discussed.
Let us consider Schr\"odinger equation $H\psi=E\psi$ with discrete spectrum
$E_\alpha$ and $\psi_\alpha$ and write the Hamiltonian $H$ in the form of
spectral decomposition
\begin{equation}
\label{Hexp}
H=\sum_\alpha E_\alpha\psi_\alpha\psi_\alpha^*.
\end{equation}
We represent the complete Hilbert space $\cal H$ as a sum of the
finite-dimensional model space $\cal P$ and its orthogonal
adjunct $\cal Q$:
$\cal {H = P + Q}$. It is convenient to construct the space $\cal P$
of eigenfunctions $\{\phi_\mu\}_{\mu=1}^{dim\cal P}$ by using
some complete set of mutually commuting operators $\{J_\rho\}_{\rho=1}^N$
($N$ - the number of degrees of freedom $H$). We'll find the
approximate wave functions of $H$ in the $\cal P$-space
as a combination of basis states $\phi_\mu$ (as, for example, in Ritz's
variational method or in different versions of perturbation theory)
\begin{equation}
\label{stexp}
\psi_{p \alpha}=\sum_{\mu\in\cal P}c_\alpha^\mu\phi_\mu.
\end{equation}
Orthonormal states $\psi_{p \alpha}$ are derived from minimum condition for
the energy
functional in the $\cal P$-space and they form a
subspace in the $\cal P$-space (obviously,
only a small number of combinations (\ref{stexp}) will satisfy Schr\"odinger
equation with sufficient accuracy). We'll denote this subspace of solutions
by $\cal S$ and the energy of states by $E_{p \alpha}$.
The rest $dim{\cal P} - dim{\cal S}$ of the basis functions in the
$\cal P$-space may be chosen arbitrarily, and we denote the new basis in
the $\cal P$-space by $\{\phi'_\mu\}_{\mu =1}^{\dim\cal P}$
$(\phi'_\mu = \psi_{p\mu},\ \mu =1,\dots,\dim{\cal S})$.
Let us show that the operator
\begin{equation}
\label{Hs}
H_s=\sum_{\alpha\in\cal S} E_{p\alpha}\psi_{p \alpha}\psi_{p \alpha}^*
\end{equation}
(i) commutes with operators forming a complete set in the full Hilbert
space $\cal H$, i.e. it is integrable, (ii) $H_s$ is approximating
$H$ in the sense of operator norm in the $\cal S$-space,
(iii) $H_s$ may be expressed in terms of dynamic variables $(q,p)$ as well
as the initial Hamiltonian $H$.
By calculation of wave functions $\psi_{p \alpha}$
we constructed simultaneously the unitary transformation
$\phi'_\mu = U\phi_\mu$ of space $\cal H$, which is defined
by coefficients $c_\alpha^\mu$ in the $\cal P$-space and
is the identical transformation in the $\cal Q$-space. Operators
$J'_\rho = UJ_\rho U^\dagger$ are known to form the complete set with
the same quantum numbers $j_{\rho\alpha}$ and eigenfunctions $\phi'_\mu$.
As far as eigenfunctions of operators $J'_\rho$ and $H_s$ in the
$\cal S$-space coincide, the operators commute in this space. Outside the
$\cal S$-space $H_s\equiv 0$ and hence it also commute with $J'_\rho$.
Therefore $[H_s, J'_\rho]=0$ in the full space $\cal H$.
Now we are going to check that Hamiltonian $H_s$ is close to $H$ in the sense
of the operator norm in $\cal H$, i.e.\
$||H-H_s||_{\cal S} < \epsilon$, under the condition that the residual
of the approximate solutions
$(H-E_{p\alpha})\psi_{p\alpha} = \delta\psi_\alpha$
doesn't exceed $\epsilon$: $||\delta\psi_\alpha|| < \epsilon$.
Really, for an arbitrary function $\chi = \sum a^\delta\psi_{p\delta},\;
||\chi||=1,\;\chi\in\cal S$
$$
||H\chi - H_s\chi|| = ||(\sum_{\alpha\in\cal H}
E_\alpha\psi_\alpha\psi_\alpha^* - \sum_{\alpha\in\cal S}
E_{p\alpha}\psi_{p\alpha}\psi_{p\alpha}^*)\sum_{\delta\in\cal S}a^\delta
\psi_{p\delta}||=
$$
$$
||\sum_{\delta\in\cal S}a^\delta (\sum_{\alpha\in\cal H}
E_\alpha\psi_\alpha\psi_\alpha^*\psi_{p\delta} - E_{p\delta}\psi_{p\delta})||
\le\epsilon\sum_{\delta\in\cal S}|a^\delta|\le\epsilon\sqrt{dim\cal S}.
$$
In the last estimate we used the fact that $\sum|a^\delta|$
under the condition $\sum|a^\delta|^2 = 1$ reaches its maximum value when
all $a^\delta$ are identical and equal to $1/\sqrt{\dim\cal S}$.
The accuracy $\epsilon$ depends on dimensionality of $\cal P$-space;
we may fix $dim\cal S$ and decrease $\epsilon$ in such way that the
norm $||H-H_s||_{\cal S}$ should be as small as we need.
The introduced operators $H_s$, $U$ and $J'_\rho$ seem to be formal unless we
construct them explicitly as functions of dynamic variables $(q,p)$.
Writing operator $\psi_{p \alpha}\psi_{p \alpha}^*$ in terms
of expansion (\ref{stexp}) we have:
\begin{equation}
\label{Hsb}
H_s=\sum_{\mu,\nu\in\cal P}\Bigl\{\sum_{\alpha\in\cal S}
E_{p\alpha}c_\alpha^\mu {c_\alpha^{\nu}}^*\Bigl\}\phi_\mu\phi_{\nu}^*.
\end{equation}
The operator $U$ may be also represented as a combination of basis
operators $\phi_\mu\phi_{\nu}^*$:
\begin{equation}
\label{U}
U= 1+\sum_{\alpha\ne\beta\in\cal P}U_{\alpha\beta}
\phi_{\beta}\phi_{\alpha}^{*}+\sum_{\gamma\in\cal P}
(U_{\gamma\gamma}-1)\phi_{\gamma}\phi_{\gamma}^{*}.
\end{equation}
The first $dim\cal S$ rows of the unitary matrix $U_{\alpha\beta}$
coincide with matrix $c_\alpha^\beta$, the rest
$dim{\cal P} - dim{\cal S}$ may be chosen in arbitrary way. It is easy
to verify that components $\chi$ don't change outside $\cal P$ when
$U$ acts on arbitrary state $\chi=\sum a^\delta\phi_\delta$, while inside
$\cal P$ they are transformed by unitary matrix:
$$
U\chi= \sum_{\delta\notin\cal P}a^\delta\phi_\delta +
\sum_{\alpha\in\cal P}\Bigl(\sum_{\beta\in\cal P}
U_{\beta\alpha}a^\beta\Bigr)\phi_\alpha.
$$
Now we'll construct the basis operators $\phi_\mu\phi_{\nu}^*$ as ordinary
operators in the form of combinations depending on variables $(q,p)$
and acting on states of Hilbert space in co-ordinate representation.
If $\cal G$ is the group of transformations corresponding
to the complete operator set $J_\rho(q,p)$, then $\cal P$-space
is in general a direct sum of irreducible representations ${\cal T}_s$ of
the group $\cal G$:
$$
{\cal P} = \sum_s\oplus {\cal T}_s.
$$
Operator $\phi_\mu\phi_{\nu}^*$ transforms the function $\phi_\nu$
into $\phi_\mu$. If the group $\cal G$ is Abelian one, the irreducible
representations ${\cal T}_s$ are one-dimensional and consist of the
function $\phi_s$. For non-Abelian group $\phi_\mu$ and $\phi_\nu$ may
belong to one irreducible representation.
Our aim is to write the operators which generate all possible
transitions between different ${\cal T}_s$ and within some ${\cal T}_s$
as well. It's not difficult to solve this problem by methods of group theory
and actually the problem is equivalent to the realization of the basis
$\phi_\mu$.
To perform transformations inside the irreducible representations
${\cal T}_s$ it's sufficient to use combinations of generators of the basis
symmetry group; to connect different ${\cal T}_s$ we need generators
of special non-invariance group of basis. Its infinite-dimensional
irreducible representation is spanned on our basis. We know
non-invariance groups and corresponding algebras for various bases, for
example,
$so(4,2)$ --- for Coulomb's basis and $so(3,2)$--- for isotropic
two-dimensional oscillator basis [14].
After realization of operators of non-invariance algebra $A_\alpha$ in the
form of combinations of dynamic variables $(q,p)$ we look for vacuum
state $\phi_0$ for which the decreasing operators from the set
$A_\alpha (q,p)$ give zero. The vacuum state forms one-dimensional
irreducible representation of symmetry group $\cal G$, and we shall
naturally obtain states from other irreducible representations
$\cal G$ acting on it with creation operators from the
set $A_\alpha$. Notation $S^\dagger _\mu(A_\alpha)$ defines the
operator composed of generators $A_\alpha$ which produces
basis function $\phi_\mu$:
$\phi_\mu = S^\dagger _\mu \phi_0$ and $S_\mu\phi_\mu = \phi_0$.
We don't present the general formula for $S^\dagger _\mu =S^\dagger _\mu
(A_\alpha)$ because it is not difficult to do it in any specific case
(see section 4); usually $S^\dagger _\mu$ are
polynomials composed of generators
$A_\alpha$ the power of which increases with state's number $\mu$.
Then operator $\phi_\mu\phi_{\nu}^*$ on the
Hilbert space $\cal H$ may be written as follows:
\begin{equation}
\label{baop}
\phi_\mu\phi_{\nu}^* = S^\dagger _\mu S_\nu P_\nu,
\end{equation}
where $P_\nu$ is the projector on the state $\phi_\nu$.
The projector $P_\nu$ may also be expressed in terms of dynamic variables
$(q,p)$ in the following way. Let $T(x)$ be the operators of unitary
representation $\cal G$ in Hilbert space,
$D^{s}_{\alpha\beta}(x)$ --- matrix elements of
irreducible representation ${\cal T}_s$, $dx$ --- invariant Haar's measure
on $G$. Then the projector $P_\nu$ on the basis state $\phi_\nu \in
{\cal T}_s$ may be presented as [15]:
\begin{equation}
\label{proekt}
P_\nu = dim{\cal T}_s\int_G dx{D^{s *}_{\nu\nu}}(x)T(x).
\end{equation}
Operators (\ref{proekt}) are bounded, and since $T(x)$ is the exponent to the
power of generators $\cal G$ which form sub-algebra with respect to
operator algebra $A_\alpha (q,p)$, we have achieved our goal ---
expressed the basis operators $\phi_\mu\phi_{\nu}^*$,
$H_s$ (\ref{Hsb}), $U$ (\ref{U}) and integrals $J'_\rho$,
in terms of variables $(q,p)$.
Thus constructed integrable approximations $H_s(q,p)$ and
integrals of motion $J'_\rho(q,p)$, apart from their approximate character
(the commutators with $H$ are not exactly equal to zero) are local.
The Hamiltonian $H_s$ is close to $H$ in the sense
of operator norm only in the finite-dimensional
subspace $\cal S$, while the operators $J'_\rho$ are good
invariants also only in the $\cal S$-space ( outside the
$\cal P$-space they coincide with the old operators $J_\rho$).
In the following section we shall demonstrate how the method works in
Henon-Heiles problem and then continue to discuss the
properties of the integrals obtained.
\section{Approximate integrals in Henon-Heiles problem}
Here we apply the method of the integral construction developed in the
previous
sections to the well known Henon-Heiles problem with Hamiltonian (\ref{hhh}).
Introducing operators of creation and annihilation
$a^\dagger _k = \frac{1}{\sqrt{2}}(q_k + ip_k),
a_k = \frac{1}{\sqrt{2}}(q_k - ip_k), k = 1,2$ we construct, as usual,
the Cartesian oscillator basis
\begin{equation}
\label{cob}
\phi_\mu = \phi_{n_1n_2} = \frac{1}{\sqrt{n_1!n_2!}}
(a^\dagger _1)^{n_1}(a^\dagger _2)^{n_2}\phi_0
\end{equation}
We present the projector
$P_{n_1n_2}$ on the state $\phi_{n_1n_2}(q_1, q_2)
=\phi_{n_1}(q_1)\otimes\phi_{n_2}(q_2)$
as a product of projectors
on the states $\phi_{n_1}(q_1)$ and $\phi_{n_2}(q_2)$ of the corresponding
one-dimensional oscillator. For one-dimensional oscillator
${\cal G} = U(1),\;T(x) = e^{ia^\dagger ax}, \;
x \in [0, 2\pi ], \; D^s_{\nu\nu} = e^{i\nu x}$, where
the number of state $\nu$ equals the number of quanta in this state.
Then according to (\ref{proekt})
$$
P_n = \int_0^{2\pi}\frac{dx}{2\pi}e^{-inx}e^{ia^\dagger ax}=
-\frac{i}{2\pi}(a^\dagger a - n)^{-1}\{e^{2\pi i(a^\dagger a - n)} - 1\}=
$$
$$
\frac{1}{\pi}(a^\dagger a - n)^{-1}e^{i\pi (a^\dagger a - n)}
\sin{\pi (a^\dagger a - n)}=
\frac{1}{\pi}(a^\dagger a - n)^{-1}\sin{\pi (a^\dagger a - n)}.
$$
We neglect the phase in the last expression because it does not affect the
action of $P_n$. It's easy to check that $P_n$
acts in the necessary way despite its slightly exotic form:
$$
P_n\phi_\mu = \delta _{n\mu}\phi_\mu.
$$
The total projector $P_{n_1n_2}$ takes the form
\begin{equation}
\label{SU(2)prkt}
P_{n_1n_2} = \frac{1}{\pi^2}(a^\dagger _1 a_1 - n_1)^{-1}
(a^\dagger _2 a_2 - n_2)^{-1}\sin{\pi (a^\dagger _1a_1 - n_1)}
\sin{\pi (a^\dagger _2a_2 - n_2)}.
\end{equation}
As a result the formulae (\ref{baop}) together with (\ref{cob}) gives us the
operator $\phi_\mu\phi_{\nu}^*$:
\begin{equation}
\label{hhbo}
\phi_\mu\phi_{\nu}^* =
\frac{(a^\dagger _1)^{n_1(\mu )}(a^\dagger _2)^{n_2(\mu
)}}
{\sqrt{n_1(\mu )!n_2(\mu )!}}\frac{(a_1)^{n_1(\nu )}(a_2)^{n_2(\nu )}}
{\sqrt{n_1(\nu )!n_2(\nu )!}}P_{n_1(\nu )n_2(\nu )},
\end{equation}
where $n_1$ and $n_2$ are the quantum numbers of states. Using (\ref{Hs}),
(\ref{U}) and determining coefficients $c^\mu _\alpha$ and $E_{p\alpha}$ we
get integrable approximation $H_s$ and approximate integrals
$J'_\rho = UJ_\rho U^\dagger$. We may take $n_1$ and $n_2$ or $n_1$
and $n = n_1 + n_2$ as independent integrals $J_\rho (\rho = 1,2)$.
The coefficients of expansion $c^\mu _\alpha$ and the
energies $E_{p\alpha}$ were calculated by Ritz's method with the aid of
diagonalization of matrix $H$ on the basis (\ref{cob}).
We shan't write explicit expressions for $J'_\rho$ for they are
very cumbersome: if $D$ is the basis dimensionality,
then operators $J'_\rho$ consist of $D^4$ terms of the type
(\ref{hhbo}). To determine the degree of destruction of
approximate integrals of motion $J'_\rho$ in the states $\psi_\alpha$ we
calculate the mean-square-root deviation $J'_\rho$ with the help of
(\ref{dA}). The solutions obtained with the basis
of $496$ states ($30$ shells) were considered to be true wave functions.
Fig.2 shows the dependence of averaged measure of destruction
(\ref{dA}) of operators $J'_\rho$
($J_1 = l$ and $J_2 = n$) on the perturbation intensity (energy $E$) in the
subspaces with different symmetries. Henon-Heiles Hamiltonian has a symmetry
$C_{3v}$. Therefore the eigenfunctions' space can be divided into 4 subspaces
$A, B, C, D$ (the states belonging to $C$ and $D$ subspaces have the same
energy and thus produce the sequences of degenerate levels). The approximate
integrals $J'_\rho$ were calculated for the $\cal P$-space of different
dimensionalities (1, 10, 15 and 20 shells).
One can see that
increasing of $\cal P$-space dimensionality is accompanied
by the decrease of fragmentation of the approximate integrals in the
$\cal S$-space; outside the $\cal S$-space operators
$J'_\rho$ lose their advantages in comparison with $J_\rho$.
This example shows that the approximate integrals $J'_\rho$
really have smaller spreading (fragmentation) in the $\cal S$-space
than the basis integrals of motion $J_\rho$. We may get very small
values $\Delta J'_\rho\approx 0$ for the bounded states
by increasing of $dim\cal P$. As a result the analytical structure of
$J'_\rho$ becomes very complicated.
\section{Are integrability and chaos compatible?}
Now we consider the question about the convergence of the suggested
procedure and the question about existence of exact integrals of motion
of Hamiltonian $H$ in the full Hilbert space (integrability of $H$).
With the help of Ritz's method we can find in principle any finite number of
states with any finite accuracy and thus construct integrable approximation
$H_s$ to $H$ and integrals $J'_\rho$ in any finite subspace with any
desirable accuracy. The question is: can we obtain the full infinite
spectrum of $H$ by tending $\cal P \to \cal H$, because
there is an effect of systematic "delay" of $\cal S$-space
dimensionality with respect to dimensionality of
the model $\cal P$-space (only states far from the boundary of the
approximate spectrum are reasonably accurate in diagonalization).
In other words, does the sequence of Hamiltonians $PHP$ converge to the
initial
Hamiltonian: $PHP \to H$ while $P\to 1$. Physically it seems to be so
but we are in a difficulty to give rigorous mathematical proof,
because the sequence $PHP$ is not a Cauchy sequence in the sense of
operator norm in $\cal H$.
We can formulate the following hypothesis about integrability of $H$.
If there exist a good (in mathematical sense)
unitary operator $U$ connecting two complete
orthonormal bases: the initial $\phi_\alpha$ and the basis of eigenstates
$\psi_\alpha$, then the Hamiltonian $H$, as we have seen above, commutes with
the complete set of independent operators $J'_\rho$ and it may be expressed
in terms of only these variables $H=H(J'_\rho )$. (Moreover, according to
Dirac [16] any functions of arbitrary complete orthonormal basis are
eigenfunctions of some complete set of commuting observables. This allows to
extend our conclusions for any observable, including the case of continuum
spectra). Therefore the Hamiltonian $H$ seems to be formally integrable.
However, in the above considered
example (the Henon-Heiles Hamiltonian) the system is chaotic according
to all the criteria of Section 2. Moreover, we know for sure that in the
classical limit this system is one of the textbook examples of chaoticity.
The problem is how to remove the contradiction between the seeming formal
integrability and the chaoticity of the system.
One possible answer is connected with the properties of the new integrals
$J'_\rho$.
These integrals of motion
are independent and global (provided the convergence of $PHP$ discussed above
would be proved). However they have extremely complicated structure
and can't be expressed in closed form. Therefore they are useless
to separate variables (non-separable) and to solve a problem.
We restore them after the approximate numerical solution has been found.
These integrals don't give selection rules for transitions between levels.
Therefore they are definitely not the quantum analogs of the classical first
(isolating) integrals, which define the classical regular integrable system.
\section{Conclusions}
The problem of the existence and construction of integrals of motion in
stationary quantum mechanics and its connection with the notion of
quantum chaoticity has been investigated. It has been shown that the
previously suggested quantum chaoticity criterion characterises
destruction of initial symmetry of regular system and basis
integrals of motion under the influence of perturbation. Our approach
conforms with known probability criterion of Hose and Taylor [5-6]
and direct estimate of fragmentation (\ref{dA}).
We use variational Ritz's method for explicit construction
of approximate integrals of motion in the form of combinations
depending on operators $(q,p)$ though in principle
another method for solving Schr\"odinger equation may be used.
As a result we obtained finite large-dimensional sums consisting of
non-invariance algebra operators in various powers and projectors
nontrivially expressed in terms of invariance algebra generators.
The quality of approximate integrals of motion is simply controlled
by dimensionality of model space in use.
These integrals of motion are independent and global (provided the
convergence of $PHP\to H$ discussed above would be proved).
However they have extremely complicated structure
and can't be expressed in closed form, therefore they are useless
to separate variables (nonseparable). This also explains why the
existence of these integrals doesn't create obstacles to statistical
description of quantum system. That's why such formal integrability
(even if we'll prove the existence of global integrals rigorously)
doesn't make system to be regular in the sense of absence of chaotical
properties. Therefore they are definitely not the quantum
analogs of the classical first
(isolating) integrals, which define the classical regular integrable system.
One of the authors (IBI) is indebted to Prof.~Zikiki and to the Organizing
Committee of V.~Gribov's Foundation for their support.
\end{document} |
\begin{document}
\begin{frontmatter}
\title {Cluster size in bond percolation \\ on the Platonic solids}
\runtitle {Cluster size in bond percolation on the Platonic solids}
\author {Nicolas Lanchier\thanks{Nicolas Lanchier was partially supported by NSF grant CNS-2000792.} and Axel La Salle}
\runauthor {Nicolas Lanchier and Axel La Salle}
\address {School of Mathematical and Statistical Sciences \\ Arizona State University \\ Tempe, AZ 85287, USA.}
\maketitle
\begin{abstract} \ \
The main objective of this paper is to study the size of a typical cluster of bond percolation on each of the five Platonic solids:
the tetrahedron, the cube, the octahedron, the dodecahedron and the icosahedron.
Looking at the clusters from a dynamical point of view, i.e., comparing the clusters with birth processes, we first prove that the first and second
moments of the cluster size are bounded by their counterparts in a certain branching process, which results in explicit upper bounds that are accurate
when the density of open edges is small.
Using that vertices surrounded by closed edges cannot be reached by an open path, we also derive upper bounds that, on the contrary, are accurate when
the density of open edges is large.
These upper bounds hold in fact for all regular graphs.
Specializing in the five~Platonic solids, the exact value of (or lower bounds for) the first and second moments are obtained from the inclusion-exclusion
principle and a computer program.
The goal of our program is not to simulate the stochastic process but to compute exactly sums of integers that are too large to be computed by hand so
these results are analytical, not numerical.
\end{abstract}
\begin{keyword}[class=AMS]
\kwd[Primary ]{60K35}
\end{keyword}
\begin{keyword}
\kwd{Bond percolation, Platonic solids, branching processes, inclusion-exclusion identity.}
\end{keyword}
\end{frontmatter}
\section{Introduction}
\label{sec:intro}
Bond percolation on a simple undirected graph is a collection of independent Bernoulli random variables with the same success probability~$p$ indexed by the
set of edges, with the edges associated to a success being referred to as open edges, and the ones associated to a failure being referred to as closed edges.
The open cluster containing a vertex~$x$ is the random subset of vertices that are connected to vertex~$x$ by a path of open edges.
This stochastic model was introduced in~\cite{broadbent_hammersley_1957} to study the random spread of a fluid through a medium. \\
\indent Bond percolation is traditionally studied on infinite graphs such as the~$d$-dimensional integer lattice in which case the quantity of interest is
the percolation probability, the probability that the cluster of open edges containing the origin is infinite.
For bond percolation on integer lattices, it follows from Kolmogorov's zero-one law that the existence of an infinite cluster of open edges is an event that
has probability either zero or one.
This, together with a basic coupling argument, implies that there is a phase transition at a critical value~$p_c$ for the density of open edges from a
subcritical phase where all the open clusters are almost surely finite to a supercritical phase where there is at least one infinite cluster of open edges.
It is known that the cluster size decays exponentially in the subcritical phase~\cite{menshikov_1986} and that there is a unique infinite cluster of open
edges called the infinite percolation cluster in the supercritical phase~\cite{aizenman_kesten_newman_1987a, aizenman_kesten_newman_1987b}.
Using planar duality, coupling arguments and the uniqueness of the infinite percolation cluster, it can also be proved that the critical value in two
dimensions is equal to one-half~\cite{kesten_1980}.
We refer the interested reader to~\cite{grimmett_1999} for additional results about bond percolation on integer lattices,
and to~\cite[chapter~13]{Lanchier_2017} for a brief overview. \\
\indent Bond percolation has also been studied on fairly general finite connected graphs~\cite{alon_benjamini_stacey_2004}.
Important particular cases are the complete graph, in which case the set of open edges form the very popular~Erd\H{o}s-R\'{e}nyi random
graph~\cite{erdos_renyi_1959}, as well as the hypercube~\cite{ajtai_komlos_szemeredi_1982}.
Due to the finiteness of the underlying graph, all the open clusters are finite so whether there exists an infinite percolation cluster or not becomes
irrelevant.
Such processes, however, still exhibit a phase transition in the sense that, in the limit as the number of vertices goes to infinity, there is a giant
component of open edges (an open cluster whose size scales like the size of the graph) if and only if~$p$ exceeds a certain critical value.
In particular, most of the work about bond percolation on finite graphs is concerned with asymptotics in the large graph limit. \\
\indent In contrast, the objective of this paper is to study~(the first and second moments of) the size distribution of a typical cluster of bond
percolation on each of the five Platonic solids: the tetrahedron, the cube, the octahedron, the dodecahedron and the icosahedron.
The motivation originates from our previous works~\cite{jevtic_lanchier_2020, jevtic_lanchier_lasalle_2020} that introduce a mathematical framework based
on Poisson processes, random graphs equipped with a cost topology and bond percolation to model the aggregate loss resulting from cyber risks.
Insurance premiums are based on the mean and variance of the aggregate loss which, in turn, can be easily expressed using the first and second
moments of the size of the percolation clusters.
Estimates for the size of the clusters are given in~\cite{jevtic_lanchier_2020} for the process on finite random trees
and in~\cite{jevtic_lanchier_lasalle_2020} for the process on path, ring and star graphs.
Even though our present work does not have any applications in the field of cyber insurance (because the Platonic solids are not realistic models
of insurable networks), studying the size of percolation clusters on the Platonic solids is a very natural question in probability theory.
\section{Main results}
\label{sec:results}
\begin{figure}
\caption{\upshape{Picture of the five Platonic solids.
The numbers between parentheses refer to the number of vertices, the number of edges, and the number of faces, respectively.
Note that the tetrahedron is dual to itself, the cube and the octahedron are dual to each other, and the dodecahedron and icosahedron
are dual to each other.}
\label{fig:graphs}
\end{figure}
\begin{figure}
\caption{\upshape{First moment on the left and second moment on the right of the size distribution of bond percolation clusters on the tetrahedron (top),
cube (middle) and octahedron (bottom) as functions of the probability~$p$.
The thick solid lines show the exact expressions in~TH~\ref{th:04}
\label{fig:moment-04-06-08}
\end{figure}
\begin{figure}
\caption{\upshape{First moment on the left and second moment on the right of the size distribution of bond percolation clusters on the dodecahedron (top)
and icosahedron (bottom) as functions of the probability~$p$.
The thick dashed lines show the second moment obtained from the average of one hundred thousand independent realizations of the process for various
values of~$p$ while the other curves show the upper bounds in TH~\ref{th:branching}
\label{fig:moment-12-20}
\end{figure}
Having a simple undirected graph~$\mathscr{G} = (\mathscr{V}, \mathscr{E})$, let
$$ x = \uniform (\mathscr{V}) \quad \hbox{and} \quad \zeta (e) = \bernoulli (p), e \in \mathscr{E} $$
be a vertex chosen uniformly at random and a collection of Bernoulli random variables with the same success probability~$p$ on the set of edges.
The edges with~$\zeta (e) = 1$ are said to be open while the edges with~$\zeta (e) = 0$ are said to be closed, and we let
$$ \mathscr{C}_x = \{y \in \mathscr{V} : \hbox{there is a path of open edges connecting~$x$ and~$y$} \} $$
be the percolation cluster containing~$x$.
The main objective of this paper is to study the first and second moments of~$S = \card (\mathscr{C}_x)$ = the size of this percolation cluster when the graph~$\mathscr{G}$
consists of each of the five Platonic solids depicted in Figure~\ref{fig:graphs}.
Our first result gives upper bounds for the first and second moments of the cluster size that apply to all finite regular graphs and are not
restricted to the Platonic solids.
The idea is to think of the cluster~$\mathscr{C}_x$ as a dynamical object described by a birth process starting with one particle at~$x$ and in which particles
give birth with probability~$p$ onto vacant adjacent vertices.
The size of the cluster is equal to the ultimate number of particles in the birth process which, in turn, is dominated stochastically by the number of
individuals up to generation~$\card (\mathscr{V}) - 1$ in a certain branching process.
Computing the first and second moments of the number of individuals in the branching process gives the following upper bounds.
\begin{theorem} --
\label{th:branching}
For every~$D$-regular graph with~$N$ vertices,
$$ \begin{array}{rcl}
E (S) & \hspace*{-6pt} \leq \hspace*{-6pt} & \displaystyle 1 + Dp \bigg(\frac{1 - \nu^R}{1 - \nu} \bigg) \vspace*{8pt} \\
E (S^2) & \hspace*{-6pt} \leq \hspace*{-6pt} & \displaystyle \bigg(1 + Dp \bigg(\frac{1 - \nu^R}{1 - \nu} \bigg) \bigg)^2 + \frac{Dp (1 - p)}{(1 - \nu)^2} \bigg(\frac{(1 - \nu^R)(1 + \nu^{R + 1})}{1 - \nu} - 2R \nu^R \bigg) \end{array} $$
where~$\nu = (D - 1) p$ and~$R = N - 1$.
\end{theorem}
Taking~$D$ and~$N$ in the theorem to be the degree and the number of vertices in each of the Platonic solids, we get the solid curves
in Figures~\ref{fig:moment-04-06-08} and~\ref{fig:moment-12-20}.
Note that these upper bounds are only accurate for~$p$ small.
To have upper bounds that are accurate for~$p$ large, we simply use that a vertex~$y \neq x$ cannot be in the percolation cluster~$\mathscr{C}_x$ when all the edges
incident to~$y$ are closed.
This gives the following result that again applies to all finite regular graphs.
\begin{theorem} --
\label{th:plarge}
For every~$D$-regular graph with~$N$ vertices,
$$ \begin{array}{rcl}
E (S) & \hspace*{-6pt} \leq \hspace*{-6pt} & N - (N - 1)(1 - p)^D \vspace*{4pt} \\
E (S^2) & \hspace*{-6pt} \leq \hspace*{-6pt} & N^2 - (N - 1)(2N - 1)(1 - p)^D + (N - 1)(N - 2)(1 - p)^{2D - 1}. \end{array} $$
\end{theorem}
Taking~$D$ and~$N$ in the theorem to be the degree and the number of vertices in each of the Platonic solids, we get the dashed curves
in Figures~\ref{fig:moment-04-06-08} and~\ref{fig:moment-12-20}. \\
\indent Our last results are specific to the five Platonic solids and we denote by~$S_f$ the size of a percolation cluster on the solid with~$f$ faces.
To explain these results, we first observe that the mean cluster size can be easily expressed using the probability that each vertex belongs to the open
cluster~$\mathscr{C}_x$ which, in turn, is equal to the probability that at least one of the self-avoiding paths connecting~$x$ to this vertex is open.
In particular, identifying all the self-avoiding paths connecting~$x$ to any other vertex and using the inclusion-exclusion identity give an exact
expression for the first moment.
The same holds for the second moment looking instead at all the pairs of paths connecting~$x$ to two other vertices.
This approach also shows that the first and second moments of the cluster size are polynomials in~$p$ with integer coefficients and degree (at most) the
total number of edges so, to state our next results and shorten the notation, we let
$$ P_k = (p^0, p^1, p^2, \ldots, p^k)^T \quad \hbox{for all} \quad k \in \mathbb{N}. $$
The main difficulty in following this strategy is to identify all the self-avoiding paths and to compute the probability that any sub-collection of
paths is simultaneously open.
Recall that, when dealing with~$n$ events, the inclusion-exclusion identity consists of a sum of~$2^n - 1$ terms.
For the tetrahedron, the moments of the cluster size are polynomials with degree six, and there are five self-avoiding paths connecting any two vertices,
and ten pairs of self-avoiding paths connecting any three vertices, therefore the number of terms in the inclusion-exclusion identity are
$$ 2^5 - 1 = 31 \ \ \hbox{for the first moment} \quad \hbox{and} \quad 2^{10} - 1 = 1,023 \ \ \hbox{for the second moment}. $$
In particular, we compute the first moment by hand whereas for the second moment we rely on a computer program that returns the exact value of
the (seven) coefficients.
\begin{theorem}[tetrahedron] --
\label{th:04}
For all~$p \in (0, 1)$,
$$ E (S_4) = (1, 3, 6, 0, -21, 21, -6) \cdot P_6 \quad \hbox{and} \quad E (S_4^2) = (1, 9, 36, 30, -171, 153, -42) \cdot P_6. $$
\end{theorem}
The cube and the octahedron both have twelve edges.
There are respectively
\begin{itemize}
\item 15, 16, 18 self-avoiding paths connecting two vertices at distance 1, 2, 3 on the cube, \vspace*{4pt}
\item 26, 28 self-avoiding paths connecting two vertices at distance 1, 2 on the octahedron.
\end{itemize}
In particular, the first moment of the cluster size cannot be computed by hand for the cube and the octahedron because the number of terms in the
inclusion-exclusion identity ranges from tens of thousands to hundreds of millions.
Identifying all these paths and using the same computer program as before, we get the following theorem.
\begin{theorem}[cube and octahedron] --
\label{th:06-08}
For all~$p \in (0, 1)$,
$$ \begin{array}{rcl}
E (S_6) & \hspace*{-6pt} = \hspace*{-6pt} & (1, 3, 6, 12, 9, 12, -81, - 75, 69, 473, - 777, 447, -91) \cdot P_{12} \vspace*{4pt} \\
E (S_8) & \hspace*{-6pt} = \hspace*{-6pt} & (1, 4, 12, 20, -14, -196, 12, 1316, -2815, 2824, -1564, 464, -58) \cdot P_{12}. \end{array} $$
\end{theorem}
The dodecahedron and the icosahedron both have thirty edges.
For these two solids, even writing down all the self-avoiding paths connecting two vertices is beyond human capability so we only focus on the paths
of length at most five for the dodecahedron and of length at most three for the icosahedron.
Using that two vertices are in the same open cluster if (but not only if) at least one of the paths is open, together with the inclusion-exclusion
identity and our computer program, we get the following lower bounds for the mean cluster size.
\begin{theorem}[dodecahedron and icosahedron] --
\label{th:12-20}
For all~$p \in (0, 1)$,
$$ \begin{array}{rcl}
E (S_{12}) & \hspace*{-6pt} \geq \hspace*{-6pt} & (1, 3, 6, 12, 24, 30, -24, -30, -36, 3, -6, 42, \vspace*{2pt} \\ && \hspace*{20pt}
-6, 18, -21, 14, 0, -6, -9, 0, 0, 6, 0, 0, -1, 0, 0, 0, 0, 0, 0) \cdot P_{30} \vspace*{4pt} \\
E (S_{20}) & \hspace*{-6pt} \geq \hspace*{-6pt} & (1, 5, 20, 60, -90, -75, 0, 190, -10, -80, -60, 10, \vspace*{2pt} \\ && \hspace*{20pt}
-5, 120, -35, -88, 35, 40, -35, 10, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) \cdot P_{30}. \end{array} $$
\end{theorem}
The first and second moments in Theorem~\ref{th:04} and the first moments in Theorem~\ref{th:06-08} are represented
by the thick solid curves in Figure~\ref{fig:moment-04-06-08}.
These curves fit perfectly with numerical solutions obtained from one hundred thousand independent realizations of the percolation process.
The lower bounds for the first moments in Theorem~\ref{th:12-20} are represented by the dotted curves in Figure~\ref{fig:moment-12-20}.
\section{Proof of Theorem~\ref{th:branching} (branching processes)}
\label{sec:branching}
This section is devoted to the proof of Theorem~\ref{th:branching}.
Though our focus is on the Platonic solids, we recall that the theorem applies to every finite~$D$-regular graph~$\mathscr{G} = (\mathscr{V}, \mathscr{E})$.
The basic idea of the proof is to use a coupling argument to compare the size of the percolation cluster starting at a given vertex with the number of
individuals in a certain branching process. \vspace*{4pt}
\noindent{\bf Birth process.}
Having a vertex~$x \in \mathscr{V}$ and a realization of bond percolation with parameter~$p$ on the graph, we consider the following discrete-time
birth process~$(\xi_n)$.
The state at time~$n$ is a spatial configuration of particles on the vertices:
$$ \xi_n \subset \mathscr{V} \quad \hbox{where} \quad \xi_n = \hbox{set of vertices occupied by a particle at time~$n$}. $$
The process starts at generation~0 with a particle at~$x$, i.e., $\xi_0 = \{x \}$.
\begin{itemize}
\item For each vertex~$y$ adjacent to vertex~$x$, the particle at~$x$ gives birth to a particle sent to vertex~$y$ if and only if edge~$(x, y)$ is open.
\end{itemize}
The children of the particle at~$x$ are called the particles of generation~1.
Assume that the process has been defined up to generation~$n > 0$, and let
$$ Y_n = \card (\xi_n \setminus \xi_{n - 1}) $$
be the number of particles of that generation.
Label arbitrarily~$1, 2, \ldots, Y_n$ the particles of generation~$n$ and let~$x_{n, 1}, x_{n, 2}, \ldots, x_{n, Y_n}$ be their locations so that
$$ \xi_n \setminus \xi_{n - 1} = \{x_{n, 1}, x_{n, 2}, \ldots, x_{n, Y_n} \}. $$
Then, generation~$n + 1$ is defined as follows:
\begin{itemize}
\item For each vertex~$y$ adjacent to~$x_{n, 1}$, the first particle of generation~$n$ gives birth to a particle sent to~$y$ if and only if~$y$
is empty and edge~$(x_{n, 1}, y)$ is open. \vspace*{4pt}
\item For each vertex~$y$ adjacent to~$x_{n, 2}$, the second particle of generation~$n$ gives birth to a particle sent to~$y$ if and only if~$y$
is empty and edge~$(x_{n, 2}, y)$ is open. \vspace*{4pt}
\item $\cdots$ \vspace*{4pt}
\item For each vertex~$y$ adjacent to~$x_{n, Y_n}$, the~$Y_n$th particle of generation~$n$ gives birth to a particle sent to~$y$ if and only if~$y$
is empty and edge~$(x_{n, Y_n}, y)$ is open.
\end{itemize}
Note that two particles~$i$ and~$j$ with~$i < j$ might share a common neighbor~$y$ in which case a child of particle~$i$ sent to~$y$ prevents
particle~$j$ from giving birth onto~$y$.
For a construction of the birth process from a realization of bond percolation on the dodecahedron, we refer to Figure~\ref{fig:birth}.
The process is designed so that particles ultimately occupy the open cluster starting at~$x$.
In particular, the total number of particles equals the cluster size, as proved in the next lemma.
\begin{figure}
\caption{\upshape{Example of a construction of the birth process from a realization of bond percolation (top left picture) on the dodecahedron.
The thick lines represent the open edges, the black dots represent the vertices occupied by a particle at each generation, and the
arrows represent the birth events, from parent to children.}
\label{fig:birth}
\end{figure}
\begin{lemma} --
\label{lem:wet-particle}
The cluster size is given by
$$ S = \card (\mathscr{C}_x) = \card (\xi_{N - 1}) = Y_0 + Y_1 + \cdots + Y_{N - 1} \quad \hbox{where} \quad N = \card (\mathscr{V}). $$
\end{lemma}
\begin{proof}
To begin with, we observe that
\begin{itemize}
\item Because particles can only give birth to another particle sent to an empty vertex, each vertex is ultimately occupied by at most one particle. \vspace*{4pt}
\item The open cluster containing~$x$ can be written as
$$ \begin{array}{rcl}
\mathscr{C}_x = \{y \in \mathscr{V} & \hspace*{-6pt} : \hspace*{-6pt} & \hbox{there is a self-avoiding path of} \\ & \hspace*{-6pt} \hspace*{-6pt} & \hbox{open edges connecting vertex~$x$ and vertex~$y$} \}. \end{array} $$
\item The set of vertices occupied by a particle of generation~$n$ is
$$ \begin{array}{rcl}
\xi_n \setminus \xi_{n - 1} = \{y \in \mathscr{C}_x & \hspace*{-6pt} : \hspace*{-6pt} & \hbox{the shortest self-avoiding path of} \\ & \hspace*{-6pt} \hspace*{-6pt} & \hbox{open edges connecting~$x$ and~$y$ has length~$n$} \}. \end{array} $$
\end{itemize}
These three properties imply that all the vertices in the open cluster~$\mathscr{C}_x$ are ultimately occupied by exactly one particle whereas the vertices
outside the cluster remain empty therefore
\begin{equation}
\label{eq:wet-particle-1}
\begin{array}{rcl}
S = \card (\mathscr{C}_x) & \hspace*{-6pt} = \hspace*{-6pt} & \displaystyle \card (\xi_0) + \card \bigg(\bigcup_{n = 1}^{\infty} \,(\xi_n \setminus \xi_{n - 1}) \bigg) \\
& \hspace*{-6pt} = \hspace*{-6pt} & \displaystyle \card (\xi_0) + \sum_{n = 1}^{\infty} \,\card (\xi_n \setminus \xi_{n - 1}) = \sum_{n = 0}^{\infty} \,Y_n. \end{array}
\end{equation}
In addition, because the graph has~$N$ vertices, a self-avoiding path on this graph must have at most~$N - 1$ edges, from which it follows that
\begin{equation}
\label{eq:wet-particle-2}
\xi_n = \xi_{n - 1} \quad \hbox{and} \quad Y_n = \card (\xi_n \setminus \xi_{n - 1}) = 0 \quad \hbox{for all} \quad n \geq N.
\end{equation}
Combining~\eqref{eq:wet-particle-1} and~\eqref{eq:wet-particle-2} gives the result.
\end{proof} \\
\noindent{\bf Coupling with a branching process.}
The next step is to compare the number of particles in the birth process with the number of individuals in a branching process~$(X_n)$.
The process coincides with the birth process when the graph is a tree and is defined by
$$ X_0 = 1 \quad \hbox{and} \quad X_{n + 1} = X_{n, 1} + X_{n, 2} + \cdots + X_{n, X_n} \quad \hbox{for all} \quad n \geq 0 $$
where the random variables~$X_{n, i}$ representing the offspring distribution (number of offspring of individual~$i$ at time~$n$) are independent
and have probability mass function
$$ X_{0, 1} = \binomial (D, p) \quad \hbox{and} \quad X_{n, i} = \binomial (D - 1, p) \quad \hbox{for all} \quad n, i \geq 1. $$
This branching process can be visualized as the number of particles in the birth process above modified so that births onto already occupied vertices are allowed.
In particular, the branching process dominates stochastically the birth process.
\begin{lemma} --
\label{lem:branching-particle}
For all~$n \geq 0$, we have the stochastic domination~$Y_n \preceq X_n$.
\end{lemma}
\begin{proof}
As for the branching process, for all~$n \geq 0$ and~$i \leq Y_n$, we let
$$ Y_{n, i} = \hbox{\# offspring of the~$i$th particle of generation~$n$ in the birth process}. $$
Because the edges are independently open with the same probability~$p$ and there are exactly~$D$ edges starting from each vertex, the number
of offspring of the first particle is
\begin{equation}
\label{eq:branching-particle-1}
Y_1 = Y_{0, 1} = \binomial (D, p).
\end{equation}
For each subsequent particle, say the particle located at~$z$, we distinguish two types of edges starting from~$z$ just before the particle gives birth.
\begin{itemize}
\item There are~$m$ edges~$(z, y)$ that are connected to an occupied vertex~$y$.
Because parent and offspring are located on adjacent vertices, we must have~$m \geq 1$. \vspace*{4pt}
\item There are~$D - m$ edges~$(z, y)$ that are connected to an empty vertex~$y$.
These edges have not been used yet in the construction of the birth process, i.e., there has been no previous attempt to give birth through these
edges, therefore each of these edges is open with probability~$p$ independently of the past of the process.
\end{itemize}
From the previous two properties, we deduce that, for all~$n > 0$ and~$i \leq Y_n$,
\begin{equation}
\label{eq:branching-particle-2}
\begin{array}{rcl}
P (Y_{n, i} \geq k) & \hspace*{-6pt} = \hspace*{-6pt} &
E (P (Y_{n, i} \geq k \,| \,Y_{0, 1}, Y_{1, 1}, \ldots, Y_{n, i - 1})) \vspace*{4pt} \\ & \hspace*{-6pt} \leq \hspace*{-6pt} &
P (\binomial (D - 1, p) \geq k) = P (X_{n, i} \geq k). \end{array}
\end{equation}
The stochastic domination follows from~\eqref{eq:branching-particle-1} and~\eqref{eq:branching-particle-2}.
\end{proof} \\
\noindent{\bf Number of individuals.}
It directly follows from Lemmas~\ref{lem:wet-particle} and~\ref{lem:branching-particle} that
\begin{equation}
\label{eq:wet-branching}
E (S^k) = E ((Y_0 + Y_1 + \cdots + Y_{N - 1})^k) \leq E ((X_0 + X_1 + \cdots + X_{N - 1})^k)
\end{equation}
for all~$k > 0$.
In view of~\eqref{eq:wet-branching}, the last step to complete the proof of Theorem~\ref{th:branching} is to show that the upper bounds in the theorem
are in fact the first and second moments of the total number of individuals up to generation~$R = N - 1$ in the branching process:
$$ E (\bar X_R) \quad \hbox{and} \quad E (\bar X_R^2) \quad \hbox{where} \quad \bar X_R = X_0 + X_1 + \cdots + X_R. $$
The rest of this section is devoted to computing these moments.
\begin{lemma} --
\label{lem:branching-first}
Let~$\nu = (D - 1) p$. Then,
$$ E (\bar X_R) = 1 + Dp \bigg(\frac{1 - \nu^R}{1 - \nu} \bigg) \quad \hbox{for all} \quad R > 0. $$
\end{lemma}
\begin{proof}
For~$i = 1, 2, \ldots, X_1$, let
$$ \begin{array}{rcl}
\bar Z_i & \hspace*{-6pt} = \hspace*{-6pt} & \hbox{number of descendants of the~$i$th offspring of the first individual} \vspace*{2pt} \\ &&
\hbox{up to generation~$R$, including the offspring}. \end{array} $$
Then~$\bar X_R = 1 + \bar Z_1 + \cdots + \bar Z_{X_1}$ and the~$\bar Z_i$ are independent of~$X_1$ so
$$ \begin{array}{rcl}
E (\bar X_R) & \hspace*{-6pt} = \hspace*{-6pt} & E (E (\bar X_R \,| \,X_1)) = E (E (1 + \bar Z_1 + \cdots + \bar Z_{X_1} \,| \,X_1)) \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & E (1 + X_1 E (\bar Z_i)) = 1 + E (X_1) E (\bar Z_i) = 1 + Dp E (\bar Z_i). \end{array} $$
Because~$\bar Z_i$ is the number of individuals up to generation~$R - 1$ in a branching process with offspring
distribution~$\binomial (D - 1, p)$, we deduce from~\cite[Theorem~2]{jevtic_lanchier_2020} that
$$ E (\bar X_R) = 1 + Dp \bigg(\frac{1 - (\mu p)^R}{1 - \mu p} \bigg)
= 1 + Dp \bigg(\frac{1 - \nu^R}{1 - \nu} \bigg) \quad \hbox{where} \quad \nu = \mu p = (D - 1) p. $$
This completes the proof.
\end{proof} \\ \\
Using the same decomposition as in the previous lemma, we now compute the second moment of the number of individuals up to generation~$R = N - 1$.
\begin{lemma} --
\label{lem:branching-second}
Let~$\nu = (D - 1) p$. Then, for all~$R > 0$,
$$ E (\bar X_R^2) = \bigg(1 + Dp \bigg(\frac{1 - \nu^R}{1 - \nu} \bigg) \bigg)^2 +
\frac{Dp (1 - p)}{(1 - \nu)^2} \bigg(\frac{(1 - \nu^R)(1 + \nu^{R + 1})}{1 - \nu} - 2R \nu^R \bigg). $$
\end{lemma}
\begin{proof}
Using again~$\bar X_R = 1 + \bar Z_1 + \cdots + \bar Z_{X_1}$ and independence, we get
\begin{equation}
\label{eq:branching-second-1}
\begin{array}{rcl}
E (\bar X_R^2) & \hspace*{-6pt} = \hspace*{-6pt} & E (E ((1 + \bar Z_1 + \cdots + \bar Z_{X_1})^2 \,| \,X_1)) \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & E (E (1 + 2 (\bar Z_1 + \cdots + \bar Z_{X_1}) + (\bar Z_1 + \cdots + \bar Z_{X_1})^2 \,| \,X_1)) \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & E (1 + 2 X_1 E (\bar Z_i) + X_1 E (\bar Z_i^2) + X_1 (X_1 - 1)(E (Z_i))^2) \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & 1 + 2 E (X_1) E (\bar Z_i) + E (X_1) E (\bar Z_i^2) + E (X_1 (X_1 - 1))(E (Z_i))^2. \end{array}
\end{equation}
In addition, using that~$X_1 = \binomial (D, p)$, we get
\begin{equation}
\label{eq:branching-second-2}
\begin{array}{rcl}
E (X_1 (X_1 - 1)) & \hspace*{-6pt} = \hspace*{-6pt} & \var (X_1) + (E (X_1))^2 - E (X_1) \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & Dp (1 - p) + D^2 p^2 - Dp = D (D - 1) p^2. \end{array}
\end{equation}
Combining~\eqref{eq:branching-second-1} and~\eqref{eq:branching-second-2} gives
$$ \begin{array}{rcl}
E (\bar X_R^2) & \hspace*{-6pt} = \hspace*{-6pt} & 1 + 2 Dp E (\bar Z_i) + Dp E (\bar Z_i^2) + D (D - 1) p^2 (E (\bar Z_i))^2 \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & 1 + 2 Dp E (\bar Z_i) + Dp (\var (\bar Z_i) + (E (\bar Z_i))^2) + D (D - 1) p^2 (E (\bar Z_i))^2 \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & 1 + 2 Dp E (\bar Z_i) + Dp (Dp + 1 - p)(E (\bar Z_i))^2 + Dp \var (\bar Z_i) \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & (1 + Dp E (\bar Z_i))^2 + Dp (1 - p)(E (\bar Z_i))^2 + Dp \var (\bar Z_i). \end{array} $$
Then, applying~\cite[Theorem~2]{jevtic_lanchier_2020} with~$\mu = D - 1$ and~$\sigma^2 = 0$, we get
$$ \begin{array}{rcl}
E (\bar X_R^2) = \bigg(1 & \hspace*{-6pt} + \hspace*{-6pt} & \displaystyle Dp \bigg(\frac{1 - \nu^R}{1 - \nu} \bigg) \bigg)^2 + Dp (1 - p) \bigg(\frac{1 - \nu^R}{1 - \nu} \bigg)^2 \vspace*{8pt} \\
& \hspace*{-6pt} + \hspace*{-6pt} & \displaystyle Dp \ \frac{\nu (1 - p)}{(1 - \nu)^2} \bigg(\frac{1 - \nu^{2R - 1}}{1 - \nu} - (2R - 1) \nu^{R - 1} \bigg). \end{array} $$
Observing also that
$$ \begin{array}{rcl}
\displaystyle Dp (1 - p) \bigg(\frac{1 - \nu^R}{1 - \nu} \bigg)^2 & \hspace*{-6pt} + \hspace*{-6pt} &
\displaystyle Dp \ \frac{\nu (1 - p)}{(1 - \nu)^2} \bigg(\frac{1 - \nu^{2R - 1}}{1 - \nu} - (2R - 1) \nu^{R - 1} \bigg) \vspace*{8pt} \\ & \hspace*{-6pt} = \hspace*{-6pt} &
\displaystyle \frac{Dp (1 - p)}{(1 - \nu)^2} \bigg(\frac{(1 - \nu)(1 - \nu^R)^2 + \nu (1 - \nu^{2R - 1})}{1 - \nu} - (2R - 1) \nu^R \bigg) \vspace*{8pt} \\ & \hspace*{-6pt} = \hspace*{-6pt} &
\displaystyle \frac{Dp (1 - p)}{(1 - \nu)^2} \bigg(\frac{1 - 2 \nu^R + 2 \nu^{R + 1} - \nu^{2R + 1}}{1 - \nu} - (2R - 1) \nu^R \bigg) \vspace*{8pt} \\ & \hspace*{-6pt} = \hspace*{-6pt} &
\displaystyle \frac{Dp (1 - p)}{(1 - \nu)^2} \bigg(\frac{1 - 2 \nu^R + 2 \nu^{R + 1} - \nu^{2R + 1} + (1 - \nu) \nu^R}{1 - \nu} - 2R \nu^R \bigg) \vspace*{8pt} \\ & \hspace*{-6pt} = \hspace*{-6pt} &
\displaystyle \frac{Dp (1 - p)}{(1 - \nu)^2} \bigg(\frac{(1 - \nu^R)(1 + \nu^{R + 1})}{1 - \nu} - 2R \nu^R \bigg) \end{array} $$
completes the proof.
\end{proof} \\ \\
Theorem~\ref{th:branching} directly follows from~\eqref{eq:wet-branching}, and from Lemmas~\ref{lem:branching-first} and~\ref{lem:branching-second}.
\section{Proof of Theorem~\ref{th:plarge}}
\label{sec:plarge}
Theorem~\ref{th:plarge} relies on the following simple observation:
vertex~$y \neq x$ cannot be in the percolation cluster starting at~$x$ when all the edges incident to~$y$ are closed.
In contrast with the comparison with branching processes, this result leads to a good approximation of the moments of the size distribution when
the probability~$p$ approaches one.
To prove the theorem, note that
\begin{equation}
\label{eq:plarge-1}
\begin{array}{rcl}
E (S^k) = \displaystyle E \bigg(\sum_{y \in \mathscr{V}} \,\mathbf{1} \{y \in \mathscr{C}_x \} \bigg)^k & \hspace*{-6pt} = \hspace*{-6pt} &
\displaystyle \sum_{y_1, \ldots, y_k \in \mathscr{V}} E (\mathbf{1} \{y_1 \in \mathscr{C}_x \} \ \cdots \ \mathbf{1} \{y_k \in \mathscr{C}_x \}) \vspace*{4pt} \\ & \hspace*{-6pt} = \hspace*{-6pt} &
\displaystyle \sum_{y_1, \ldots, y_k \in \mathscr{V}} P (x \leftrightarrow y_1, \ldots, x \leftrightarrow y_k) \end{array}
\end{equation}
for all integers~$k$.
To estimate the last sum, we let~$B_y$ be the event that all the edges incident to~$y$ are closed.
Using that there are exactly~$D$ edges incident to each vertex, and that there is at most one edge connecting any two different vertices, say~$y \neq z$, we get
\begin{equation}
\label{eq:plarge-2}
\begin{array}{rcl}
P (B_y) & \hspace*{-6pt} = \hspace*{-6pt} & (1 - p)^D \vspace*{4pt} \\
P (B_y \cup B_z) & \hspace*{-6pt} = \hspace*{-6pt} & P (B_y) + P (B_z) - P (B_y \cap B_z) \geq 2 (1 - p)^D - (1 - p)^{2D - 1}. \end{array}
\end{equation}
In addition, we have the inclusion of events
\begin{equation}
\label{eq:plarge-3}
B_y \subset \{x \not\leftrightarrow y \} \quad \hbox{for all} \quad y \neq x.
\end{equation}
Combining~\eqref{eq:plarge-2} and~\eqref{eq:plarge-3}, we get
\begin{equation}
\label{eq:plarge-4}
P (x \not\leftrightarrow y \ \hbox{or} \ x \not\leftrightarrow z) \geq \left\{\begin{array}{lcl}
(1 - p)^D & \hbox{when} & \card \{x, y, z \} = 2 \vspace*{4pt} \\
2 (1 - p)^D - (1 - p)^{2D - 1} & \hbox{when} & \card \{x, y, z \} = 3. \end{array} \right.
\end{equation}
Using~\eqref{eq:plarge-1} with~$k = 1$ and~\eqref{eq:plarge-4}, we deduce that
$$ \begin{array}{rcl}
E (S) & \hspace*{-6pt} = \hspace*{-6pt} & \displaystyle 1 + \sum_{y \neq x} \,P (x \leftrightarrow y) = 1 + \sum_{y \neq x} \,(1 - P (x \not\leftrightarrow y)) \vspace*{4pt} \\
& \hspace*{-6pt} \leq \hspace*{-6pt} & \displaystyle 1 + \sum_{y \neq x} \,(1 - (1 - p)^D) = 1 + (N - 1)(1 - (1 - p)^D) = N - (N - 1)(1 - p)^D. \end{array} $$
Similarly, applying~\eqref{eq:plarge-1} with~$k = 2$, observing that
$$ \begin{array}{rcl}
\card \{(y, z) \in \mathscr{V}^2 : \card \{x, y, z \} = 2 \} & \hspace*{-6pt} = \hspace*{-6pt} & 3 (N - 1) \vspace*{4pt} \\
\card \{(y, z) \in \mathscr{V}^2 : \card \{x, y, z \} = 3 \} & \hspace*{-6pt} = \hspace*{-6pt} & (N - 1)(N - 2), \end{array} $$
and using~\eqref{eq:plarge-4}, we deduce that
$$ \begin{array}{rcl}
E (S^2) & \hspace*{-6pt} \leq & \hspace*{-6pt} 1 + 3 (N - 1)(1 - (1 - p)^D) + (N - 1)(N - 2)(1 - 2 (1 - p)^D + (1 - p)^{2D - 1}) \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & N^2 - 3 (N - 1)(1 - p)^D - (N - 1)(N - 2)(2 (1 - p)^D - (1 - p)^{2D - 1}) \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & N^2 - (N - 1)(2N - 1)(1 - p)^D + (N - 1)(N - 2)(1 - p)^{2D - 1}. \end{array} $$
This completes the proof of Theorem~\ref{th:plarge}.
\section{Proof of Theorems~\ref{th:04}--\ref{th:12-20} (inclusion-exclusion identity)}
\label{sec:inclusion-exclusion}
Theorems~\ref{th:04}--\ref{th:12-20} follow from an application of the inclusion-exclusion identity.
To begin with, we prove a result (see~\eqref{eq:inclusion-exclusion-4} below) that holds not only for all five Platonic solids but also a larger class of finite
regular graphs.
Fix a vertex~$x \in \mathscr{V}$, let~$r$ be the radius of the graph, and define
$$ \Lambda_s = \{y \in \mathscr{V} : d (x, y) = s \} \quad \hbox{and} \quad N_s = \card (\Lambda_s) \quad \hbox{for} \quad s = 0, 1, \ldots, r. $$
At least for the Platonic solids, $N_s$ does not depend on the choice of~$x$. Fixing
$$ y_s \in \Lambda_s \quad \hbox{for all} \quad s = 0, 1, \ldots, r, $$
and applying~\eqref{eq:plarge-1} with~$k = 1$, we get
\begin{equation}
\label{eq:inclusion-exclusion-1}
E (S) = \sum_{y \in \mathscr{V}} \,P (x \leftrightarrow y) = \sum_{s = 0}^r \ \sum_{y \in \Lambda_s} P (x \leftrightarrow y) = \sum_{s = 0}^r \,N_s P (x \leftrightarrow y_s).
\end{equation}
To compute the probabilities~$p_s = P (x \leftrightarrow y_s)$, we label the edges~$0, 1, \ldots, n - 1$, think of each self-avoiding path~$\pi$ as the
collection of its edges, and let
$$ \begin{array}{rcl}
\pi_1 (y_s), \ldots, \pi_{K_s} (y_s) & \hspace*{-6pt} = \hspace*{-6pt} & \hbox{all the self-avoiding paths~$x \to y_s$} \vspace*{4pt} \\
A_i & \hspace*{-6pt} = \hspace*{-6pt} & \hbox{the event that~$\pi_i (y_s)$ is an open path for} \ i = 1, 2, \ldots, K_s. \end{array} $$
Because the edges are independently open with the same probability~$p$,
$$ \begin{array}{rcl}
P (A_{i_1} \cap \cdots \cap A_{i_j}) & \hspace*{-6pt} = \hspace*{-6pt} & P (\pi_{i_1} (y_s), \ldots, \pi_{i_j} (y_s) \ \hbox{are open paths}) \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & P (e \ \hbox{is open for all} \ e \in \pi_{i_1} (y_s) \cup \cdots \cup \pi_{i_j} (y_s)) \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & p^{\card (\pi_{i_1} (y_s) \,\cup \,\cdots \,\cup \,\pi_{i_j} (y_s))} \end{array} $$
for all~$0 < i_1 < \cdots < i_j \leq K_s$.
Here~$\card$ refers to the number of edges in the subgraph that consists of the union of the self-avoiding paths.
Using that~$x \leftrightarrow y_s$ if and only if at least one of the paths connecting the two vertices is open, and the inclusion-exclusion
identity, we deduce that
\begin{equation}
\label{eq:inclusion-exclusion-2}
\begin{array}{rcl}
\displaystyle P (x \leftrightarrow y_s) & \hspace*{-6pt} = \hspace*{-6pt} &
\displaystyle P \bigg(\bigcup_{j = 1}^{K_s} \,A_j \bigg) =
\displaystyle \sum_{j = 1}^{K_s} \ (- 1)^{j + 1} \sum_{0 < i_1 < \cdots < i_j \leq K_s} P (A_{i_1} \cap \cdots \cap A_{i_j}) \vspace*{4pt} \\ & \hspace*{-6pt} = \hspace*{-6pt} &
\displaystyle \sum_{j = 1}^{K_s} \ (- 1)^{j + 1} \sum_{0 < i_1 < \cdots < i_j \leq K_s} p^{\card (\pi_{i_1} (y_s) \,\cup \,\cdots \,\cup \,\pi_{i_j} (y_s))}. \end{array}
\end{equation}
Note that, in the previous expression, the index~$j$ corresponds to the number of self-avoiding paths while the second sum is over all possible
choices of~$j$ paths.
In particular, the double sum consists in looking at all the possible nonempty sub-collections of the~$K_s$ self-avoiding paths, therefore the right-hand
side of~\eqref{eq:inclusion-exclusion-2} can be rewritten as
\begin{equation}
\label{eq:inclusion-exclusion-3}
P (x \leftrightarrow y_s) =
\sum_{B \subset [K_s] : B \neq \varnothing} \ (- 1)^{\card (B) + 1} \ p^{\card \!\! \big(\bigcup_{i \in B} \pi_i (y_s) \big)}
\end{equation}
where~$[K_s] = \{1, 2, \ldots, K_s \}$.
Combining~\eqref{eq:inclusion-exclusion-1} and~\eqref{eq:inclusion-exclusion-3} gives
\begin{equation}
\label{eq:inclusion-exclusion-4}
E (S) = \sum_{s = 0}^r \ N_s \ \bigg(\sum_{B \subset [K_s] : B \neq \varnothing} \ (- 1)^{\card (B) + 1} \ p^{\card \!\! \big(\bigcup_{i \in B} \pi_i (y_s) \big)} \bigg).
\end{equation}
The previous equation shows that, at least in theory, computing the mean cluster size reduces to finding the self-avoiding paths that connect any two
vertices of the graph.
We now apply~\eqref{eq:inclusion-exclusion-4} to each of the five Platonic solids in order to prove Theorems~\ref{th:04}--\ref{th:12-20}. \\ \\
\begin{proofof}{Theorem~\ref{th:04}}
\begin{figure}
\caption{\upshape{The three pictures on the left show planar representations of the tetrahedron, the cube and the octahedron, along with an arbitrary
labeling of their edges.
The tables on the right give the list of the self-avoiding paths connecting the two vertices (or pairs of self-avoiding paths connecting
the three vertices) represented by the black, dark grey, light grey and/or white dots in the pictures.
Each path is represented by the collection of its edges using the labels shown in the pictures.
The numbers in the first column of each table indicate the length of the paths.}
\label{fig:path-468}
\end{figure}
For the tetrahedron, all the vertices are distance one apart and there are exactly five self-avoiding paths connecting any two vertices (see
first table in Figure~\ref{fig:path-468}).
Calling these paths~$\pi_1, \ldots, \pi_5$ in the order they are listed in the table, and writing
$$ \card (\pi_{i_1} \cup \pi_{i_2} \cup \cdots \cup \pi_{i_j}) = |\pi_{i_1, i_2, \ldots, i_j}| $$
for short, one can easily check that
$$ \begin{array}{rclrclrclrclrcl}
|\pi_1| & \hspace*{-6pt} = \hspace*{-6pt} & 1 \quad & |\pi_{1, 2}| & \hspace*{-6pt} = \hspace*{-6pt} & 3 \quad & |\pi_{1, 2, 3}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \quad & |\pi_{1, 2, 3, 4}| & \hspace*{-6pt} = \hspace*{-6pt} & 6 \quad & |\pi_{1, 2, 3, 4, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 6 \vspace*{2pt} \\
|\pi_2| & \hspace*{-6pt} = \hspace*{-6pt} & 2 \quad & |\pi_{1, 3}| & \hspace*{-6pt} = \hspace*{-6pt} & 3 \quad & |\pi_{1, 2, 4}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \quad & |\pi_{1, 2, 3, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 6 \vspace*{2pt} \\
|\pi_3| & \hspace*{-6pt} = \hspace*{-6pt} & 2 \quad & |\pi_{1, 4}| & \hspace*{-6pt} = \hspace*{-6pt} & 4 \quad & |\pi_{1, 2, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \quad & |\pi_{1, 2, 4, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 6 \vspace*{2pt} \\
|\pi_4| & \hspace*{-6pt} = \hspace*{-6pt} & 3 \quad & |\pi_{1, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 4 \quad & |\pi_{1, 3, 4}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \quad & |\pi_{1, 3, 4, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 6 \vspace*{2pt} \\
|\pi_5| & \hspace*{-6pt} = \hspace*{-6pt} & 3 \quad & |\pi_{2, 3}| & \hspace*{-6pt} = \hspace*{-6pt} & 4 \quad & |\pi_{1, 3, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \quad & |\pi_{2, 3, 4, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \vspace*{2pt} \\
& & \quad & |\pi_{2, 4}| & \hspace*{-6pt} = \hspace*{-6pt} & 4 \quad & |\pi_{1, 4, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 6 \vspace*{2pt} \\
& & \quad & |\pi_{2, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 4 \quad & |\pi_{2, 3, 4}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \vspace*{2pt} \\
& & \quad & |\pi_{3, 4}| & \hspace*{-6pt} = \hspace*{-6pt} & 4 \quad & |\pi_{2, 3, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \vspace*{2pt} \\
& & \quad & |\pi_{3, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 4 \quad & |\pi_{2, 4, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \vspace*{2pt} \\
& & \quad & |\pi_{4, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \quad & |\pi_{3, 4, 5}| & \hspace*{-6pt} = \hspace*{-6pt} & 5 \end{array} $$
This, together with~\eqref{eq:inclusion-exclusion-3}, implies that, for all~$x \neq y$,
\begin{equation}
\label{eq:tetrahedron-1}
\begin{array}{rcl}
P (x \leftrightarrow y) & \hspace*{-6pt} = \hspace*{-6pt} & (p + 2p^2 + 2p^3) - (2p^3 + 7p^4 + p^5) + (9p^5 + p^6) - (p^5 + 4p^6) + p^6 \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & p + 2p^2 - 7p^4 + 7p^5 - 2p^6 = (0, 1, 2, 0, -7, 7, -2) \cdot P_6. \end{array}
\end{equation}
Using also~\eqref{eq:inclusion-exclusion-4} and that~$N_1 = 3$ for the tetrahedron, we conclude that
$$ E (S_4) = 1 + 3 \,(0, 1, 2, 0, -7, 7, -2) \cdot P_6 = (1, 3, 6, 0, -21, 21, -6) \cdot P_6 $$
which proves the first part of Theorem~\ref{th:04}. \\
\indent To compute the second moment, we observe that any three distinct vertices of the tetrahedron always form a triangle (regardless of the choice
of the vertices) and, for all~$x \in \mathscr{V}$,
$$ \begin{array}{rcl}
\card \{(y, z) \in \mathscr{V}^2 : \card \{x, y, z \} = 2 \} & \hspace*{-6pt} = \hspace*{-6pt} & 3 \times 3 = 9 \vspace*{4pt} \\
\card \{(y, z) \in \mathscr{V}^2 : \card \{x, y, z \} = 3 \} & \hspace*{-6pt} = \hspace*{-6pt} & 3 \times 2 = 6. \end{array} $$
Using also~\eqref{eq:plarge-1} with~$k = 2$, we get
\begin{equation}
\label{eq:tetrahedron-2}
E (S_4^2) = P (x \leftrightarrow x) + 9 P (x \leftrightarrow y) + 6 P (x \leftrightarrow y, x \leftrightarrow z)
\end{equation}
where vertices~$x, y, z$ are arbitrary but all three distinct.
In addition, letting~$\gamma_1, \gamma_2, \ldots, \gamma_K$ be the pairs of self-avoiding paths connecting all three vertices, and using the same argument
as before based on the inclusion-exclusion identity, we get
\begin{equation}
\label{eq:inclusion-exclusion-5}
P (x \leftrightarrow y, x \leftrightarrow z) =
\sum_{B \subset [K] : B \neq \varnothing} \ (- 1)^{\card (B) + 1} \ p^{\card \!\! \big(\bigcup_{i \in B} \gamma_i \big)}
\end{equation}
which can be viewed as the analog of~\eqref{eq:inclusion-exclusion-3}.
For the tetrahedron, there are~$K = 10$ such paths (see the second table in Figure~\ref{fig:path-468}).
As previously, computing
$$ \card \bigg(\bigcup_{i \in B} \gamma_i \bigg) \quad \hbox{for every} \ B \subset [10] = \{1, 2, \ldots, 10 \} $$
is straightforward in the sense that it does not require any logical thinking.
However, having ten self-avoiding paths, the sum in~\eqref{eq:inclusion-exclusion-5} is now over
$$ 2^{10} - 1 = 1,023 \ \ \hbox{terms} $$
and is therefore unrealistic to compute by hand.
Therefore, to compute~\eqref{eq:inclusion-exclusion-5}, we designed a computer program that goes through all the possible subsets~$B \subset [10]$
and returns seven coefficients~$a_0, a_1, \ldots, a_6$, one more than the number of edges of the tetrahedron.
These seven coefficients are initially set to zero and increase or decrease by one according to the following algorithm:
\begin{equation}
\label{eq:algorithm}
\begin{array}{rcl}
\hbox{replace~$a_j \to a_j + 1$} & \hspace*{-6pt} \hbox{each time} \hspace*{-6pt} & \card \Big(\bigcup_{i \in B} \gamma_i \Big) = j \ \hbox{and} \ \card (B) \ \hbox{is odd} \vspace*{4pt} \\
\hbox{replace~$a_j \to a_j - 1$} & \hspace*{-6pt} \hbox{each time} \hspace*{-6pt} & \card \Big(\bigcup_{i \in B} \gamma_i \Big) = j \ \hbox{and} \ \card (B) \ \hbox{is even}. \end{array}
\end{equation}
In other words, because the tetrahedron contains six edges, the right-hand side of~\eqref{eq:inclusion-exclusion-5} is a polynomial with degree at most six,
and the algorithm returns the value of the seven coefficients of this polynomial.
We point out that the values we obtain are exact because the computer is used to add a large number of integers rather than to simulate the
percolation process.
Therefore, the expression of the second moment in the theorem is indeed exact even though we rely on the use of a computer.
The input of the program is the ten self-avoiding paths represented by the subsets of edges in the second table of Figure~\ref{fig:path-468}, and the output
of the program is
$$ a_0 = 0, \quad a_1 = 0, \quad a_2 = 3, \quad a_3 = 5, \quad a_4 = - 18, \quad a_5 = 15, \quad a_6 = - 4. $$
This, together with~\eqref{eq:tetrahedron-1} and~\eqref{eq:tetrahedron-2}, implies that
$$ \begin{array}{rcl}
E (S_4^2) & \hspace*{-6pt} = \hspace*{-6pt} & 1 + 9 \,(0, 1, 2, 0, -7, 7, -2) \cdot P_6 + 6 \,(a_0, a_1, a_2, a_3, a_4, a_5, a_6) \cdot P_6 \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & 1 + 9 \,(0, 1, 2, 0, -7, 7, -2) \cdot P_6 + 6 \,(0, 0, 3, 5, -18, 15, -4) \cdot P_6 \vspace*{4pt} \\
& \hspace*{-6pt} = \hspace*{-6pt} & (1, 9, 36, 30, -171, 153, -42) \cdot P_6. \end{array} $$
This completes the proof of Theorem~\ref{th:04}.
\end{proofof} \\ \\
\begin{proofof}{Theorem~\ref{th:06-08}}
\begin{figure}
\caption{\upshape{Coefficients returned by algorithm~\eqref{eq:algorithm}.}}
\label{fig:poly-06-08}
\end{figure}
The idea is again to compute the sum~\eqref{eq:inclusion-exclusion-4} explicitly by first collecting the self-avoiding paths connecting two vertices and then
using the computer program mentioned above to obtain the exact value of the coefficients of the polynomial. \vspace*{5pt} \\
\noindent {\bf Cube.}
For the cube, there are respectively fifteen, sixteen and eighteen self-avoiding paths connecting any two vertices at distance one, two, and three
from each other, as shown in~Figure~\ref{fig:path-468}.
Because the cube has twelve edges, the sum consists of a polynomial with degree~12.
The first four columns in the first table of Figure~\ref{fig:poly-06-08} show the coefficients computed by our program from the list of all the
self-avoiding paths.
The first column simply means that, with probability one, a vertex is in the open cluster starting from itself while the second column means that a vertex
of the cube at distance one of vertex~$x$ is in the open cluster starting at~$x$ with probability
$$ \begin{array}{l}
(0, 1, 0, 2, -2, 8, -15, -5, 0, 67, -99, 55, -11) \cdot P_{12} \vspace*{4pt} \\ \hspace*{20pt} = \
p + 2p^3 -2p^4 + 8p^5 - 15p^6 - 5p^7 + 67p^9 - 99p^{10} + 55p^{11} - 11p^{12}. \end{array} $$
The second row in the first table of Figure~\ref{fig:poly-06-08} shows the value of~$N_s$ for the cube.
The last column is simply the linear combination of the first four columns where column~$s$ has weight~$N_s$.
By~\eqref{eq:inclusion-exclusion-4}, this is the expected value of the cluster size so the proof for the cube is complete. \vspace*{5pt} \\
\noindent {\bf Octahedron.}
Because the radius of the octahedron is two, two distinct vertices can only be at distance one or two apart.
There are respectively twenty-six and twenty-eight self-avoiding paths connecting any two vertices at distance one and two from each
other (see Figure~\ref{fig:path-468}).
Note that the sum in~\eqref{eq:inclusion-exclusion-3} for two vertices of the octahedron at distance two apart now contains
$$ 2^{28} - 1 = 268,435,455 \ \ \hbox{terms} $$
so the use of a computer is absolutely necessary to compute this sum explicitly.
The sum again consists of a polynomial with degree~12, the common number of edges in the cube and the octahedron,
and the program gives the coefficients reported in the second table of Figure~\ref{fig:poly-06-08}.
The rest of the proof is exactly the same as for the cube.
\end{proofof} \\ \\
\begin{proofof}{Theorem~\ref{th:12-20}}
\begin{figure}
\caption{\upshape{Picture of the self-avoiding paths with length at most five connecting two vertices of the dodecahedron at respectively
distance~1, 2, 3, 4, and 5, of each other, and picture of the self-avoiding paths with length at most three connecting two
vertices of the icosahedron at respectively distance~1, 2, and 3, of each other.
The label~(2) next to some pictures means that the mirror image of the path is another path connecting the same two vertices.}
\label{fig:path-12-20}
\end{figure}
\begin{figure}
\caption{\upshape{Coefficients returned by algorithm~\eqref{eq:algorithm}.}}
\label{fig:poly-12-20}
\end{figure}
For the dodecahedron and the icosahedron, not only the sum~\eqref{eq:inclusion-exclusion-4} cannot be computed by hand, but also the number
of self-avoiding paths connecting two vertices is beyond human capability.
However, we can find lower bounds for the mean cluster size by only taking into account a subset of paths.
More precisely, given~$x \neq y$, and letting
\begin{itemize}
\item $\pi_1, \pi_2, \ldots, \pi_J$ be the self-avoiding paths of length~$\leq c$ connecting~$x$ and~$y$, \vspace*{2pt}
\item $\pi_{J + 1}, \pi_{J + 2}, \ldots, \pi_K$ be the self-avoiding paths of length~$> c$ connecting~$x$ and~$y$,
\end{itemize}
we deduce from~\eqref{eq:inclusion-exclusion-3} that
\begin{equation}
\label{eq:inclusion-exclusion-6}
\begin{array}{l}
P (x \leftrightarrow y) =
\displaystyle \sum_{B \subset [K] : B \neq \varnothing} \ (- 1)^{\card (B) + 1} \ p^{\card \!\! \big(\bigcup_{i \in B} \pi_i \big)} \vspace*{0pt} \\ \hspace*{100pt} \geq
\displaystyle \sum_{B \subset [J] : B \neq \varnothing} \ (- 1)^{\card (B) + 1} \ p^{\card \!\! \big(\bigcup_{i \in B} \pi_i \big)}. \end{array}
\end{equation}
The inequality follows from an inclusion of events:
if at least one of the first~$J$ paths is open then at least one of the~$K$ paths is open.
For both the dodecahedron and the icosahedron, we choose the cutoff~$c$ to be the radius of the graph, meaning that we only consider self-avoiding paths
with length at most five for the dodecahedron and self-avoiding paths with length at most three for the icosahedron.
These paths are drawn in Figure~\ref{fig:path-12-20}.
Because both graphs have thirty edges, the right-hand side of~\eqref{eq:inclusion-exclusion-6} is a polynomial with degree at most~30.
Fixing a labeling of the edges for both graphs to turn the self-avoiding paths into subsets of~$\{0, 1, \ldots, 29 \}$, and using these
subsets as inputs, our program returns the values shown in the first table
of Figure~\ref{fig:poly-12-20} for the dodecahedron and the values shown in the second table for the icosahedron.
As previously, multiplying each column by the appropriate~$N_s$ listed in the first row of each table gives the coefficients of the polynomial
on the right-hand side of~\eqref{eq:inclusion-exclusion-6}, which completes the proof of Theorem~\ref{th:12-20}.
These polynomials have degree less than~30 because we only take into account the shortest self-avoiding paths.
\end{proofof} \\ \\
In conclusion, using the inclusion-exclusion identity and independence, we proved that computing the expected value of the size of an open cluster
reduces to finding all the self-avoiding paths connecting two vertices at distance~$1, 2, \ldots, r$ apart.
Whenever finding all these paths is possible like for the tetrahedron, the cube and the octahedron, our program returns the exact value of the
coefficients of the polynomial representing the expected value.
When finding all the paths is not possible like for the dodecahedron and the icosahedron, one can still obtain lower bounds by only looking at
a subset of self-avoiding paths.
\end{document} |
\begin{document}
\title{Whenever a quantum environment emerges as a classical
system,
it behaves like a measuring apparatus}
\author{Caterina Foti}
\email{caterina.foti@unifi.it}
\address{Dipartimento di Fisica e Astronomia, Universit\`a di Firenze, I-50019, Sesto Fiorentino (FI), Italy}
\address{INFN, Sezione di Firenze, I-50019, Sesto Fiorentino (FI), Italy}
\author{Teiko Heinosaari}
\address{QTF Centre of Excellence, Turku Centre for Quantum Physics, Department of Physics and Astronomy, University of Turku,
FIN-20014, Turku, Finland}
\author{Sabrina Maniscalco}
\address{QTF Centre of Excellence, Turku Centre for Quantum Physics, Department of Physics and Astronomy, University of Turku,
FIN-20014, Turku, Finland}
\author{Paola Verrucchi}
\address{ISC-CNR, at Dipartimento di Fisica e Astronomia, Universit\`a di Firenze, I-50019, Sesto Fiorentino (FI), Italy}
\address{Dipartimento di Fisica e Astronomia, Universit\`a di Firenze, I-50019, Sesto Fiorentino (FI), Italy}
\address{INFN, Sezione di Firenze, I-50019, Sesto Fiorentino (FI), Italy}
\date{\today}
\begin{abstract}
We study the dynamics of a quantum system $\Gamma$
with an environment $\Xi$ made of $N$ elementary quantum
components. We aim at answering the following questions:
can the evolution of $\Gamma$ be characterized by some general features
when $N$ becomes very large,
regardless of the specific form of its interaction with each and every
component of $\Xi$? In other terms: should
we expect all quantum systems with a macroscopic environment to undergo
a somehow similar evolution? And if yes, of what type?
In order to answer these questions we use well established results
from large-$N$ quantum field theories, particularly referring to the
conditions ensuring a large-$N$ quantum model to be effectively
described by a classical theory.
We demonstrate that the fulfillment of these conditions, when properly
imported into the framework of the open quantum systems dynamics,
guarantees that the evolution of $\Gamma$ is always of the same
type of that expected if $\Xi$ were a measuring apparatus,
no matter the details of the actual interaction. On the other
hand, such details are found to determine the specific basis w.r.t. which
$\Gamma$ undergoes the decoherence dictated by the dynamical
description of the quantum measurement process.
This result wears two hats: on the one hand it clarifies the physical
origin of the formal statement that, under certain conditions, any
channel from $\rho_\Gamma$ to $\rho_\Xi$ takes the form of a
measure-and-prepare map, as recently
shown in Ref.~\cite{BrandaoPH15};
on the other hand, it formalizes the qualitative argument that
the reason why we do not observe state superpositions is the continual
measurement performed by the environment.
\end{abstract}
\maketitle
\section{Introduction}
\label{s.Introduction}
There exist two closely-related questions about the quantum mechanical
nature of our universe that keep being intriguing after decades of
thought processing: how is it that we do not experience state
superpositions, and why we cannot even see them when observing
quantum systems.
As for the latter question, it is somehow assumed that this is due to
the continual measurement process acted upon by the environment.
However, despite often being considered as an acceptable answer, this
argument is not a formal result, and attempts to make it such have been only
recently proposed~\cite{BrandaoPH15,Zurek09,HorodeckiKH15}. In fact, the current
analysis of the quantum measurement process~\cite{BuschEtal16}, its
Hamiltonian description~\cite{Ozawa1984,LiuzzoScorpoCV15epl}, as well as its
characterization in the framework of the open quantum systems (OQS)
dynamics~\cite{HeinosaariZ12} has revealed the qualitative nature of the
above argument, thus making it ever more urgent to develop a rigorous
approach to the
original question. This is the main goal of our work.
Getting back to the first question, the answer offered by the statement
that microscopic systems obey quantum rules while macroscopic objects
follow the classical ones, is by now considered unsatisfactory.
Macroscopic objects, indeed, may exhibit a distinctive quantum
behaviour (as seen for instance in superconductivity, Bose-Einstein
condensation, magnetic properties of large molecules with $S=1/2$),
meaning that the
large-$N$ condition is not sufficient per~se for a system made of $N$
quantum particles to behave classically. In fact, there exist assumptions which
single out the minimal structure any quantum theory should possess if it is
to have a classical limit~\cite{Yaffe82}. Although variously expressed
depending on the approach adopted by different authors (see the thorough
discussion on the relation between large-$N$ limits and classical
theories developed in Sec.VII of Ref.~\cite{Yaffe82}), these assumptions
imply precise physical constraints on the quantum theory that describes
a macroscopic quantum system if this has to behave classically.
In what follows, these assumptions will formally characterize the
quantum environment, in order to guarantee that the environment, and it alone, behaves
classically. The relevance of the sentence ``and it alone'' must be
stressed: indeed, the work done in the second half of the last century
on the $N\to\infty$ limit of quantum theories is quite comprehensive but it
neglects the case when the large-$N$ system is the big partner of a
principal quantum system, that only indirectly experiences such limit.
This is, however, an exemplary situation in quantum
technologies and OQS, hence the questions asked at the beginning of this
Introduction have recently been formulated in the corresponding framework
~\cite{BrandaoPH15,Zurek09,HorodeckiKH15,BraunHS01,ChiribellaDA06,GalveZM16,GiorgiGZ15,
RigovaccaEtal15,KnottEtal18,KorbiczEtal17,PleasanceG17}.
In this work, we develop an original approach which uses results for the
large-$N$ limit of quantum theories in the framework of OQS dynamics.
This allows us to show that details of the interaction between a quantum
principal system $\Gamma$ and its environment $\Xi$ are irrelevant in
determining the main features of the state of $\Xi$ at any time $\tau$
in the large-$N$ limit, as long as such limit implies a classical
behaviour for $\Xi$ itself. If this is the case, indeed, such state can
always be recognized as that of an apparatus that measures some
observable of the principal system. The relation between our findings
and the two questions that open this section is evident.
The paper is structured as follows. In the first section we define the dynamical maps
characterizing the two evolutions that we aim at comparing. We do so through
a parametric representation introduced in Sec.~\ref{s.coherent}.
In Sec.~\ref{s.largeN}, we focus on a peculiar property of generalized coherent states,
particularly relevant when the large-$N$ limit is considered. As the environment is
doomed to be macroscopic and behave classically, we then implement such limit in
Sec.~\ref{s.macroclassic_env}, being finally able to show what we were looking for.
In Sec.~\ref{s.discussion} we comment on the assumptions made,
while the results obtained are summed up in the concluding section.
\section{Schmidt decomposition and dynamical maps}
\label{s.Schmidt}
We consider the unitary evolution of an isolated bipartite system
$\Psi=\Gamma+\Xi$, with Hilbert space ${\cal H}_{_\Gamma}\otimes{\cal H}_{_\Xi}$; being
$\Psi$ isolated, it is
\begin{equation} \ket{\Psi(t)}=e^{-i\hat H t}\ket{\Psi}\;,
\label{e.true_evo}
\end{equation}
where $\hbar=1$ and $\hat H$ is any Hamiltonian, describing whatever
interaction between $\Gamma$ and $\Xi$.
The state $\ket{\Psi}$ is assumed separable
\begin{equation}
\ket{\Psi}=\ket{\Gamma}\otimes\ket{\Xi}~,
\label{e.initial_state}
\end{equation}
meaning that we begin
studying the evolution at a time $t=0$ when both $\Gamma$ and $\Xi$ are
in pure states. This is not a neutral assumption, and we will
get back to it in Sec.~\ref{s.discussion}.
At any fixed time $\tau$, there exists a
Schmidt decomposition of the state \eqref{e.true_evo},
\begin{equation}
\ket{\Psi(\tau)}=\sum_{\gamma}c_\gamma
\ket{\gamma}\ket{\xi_{\gamma}},
\label{e.schmidt_dec}
\end{equation}
with $\gamma=1,...,{\rm dim}{\cal{H}}_\Gamma$,
$c_\gamma\in\mathbb{R}^{+}$ for $\gamma\le\gamma_{\rm max}\le {\rm
dim}{\cal{H}}_\Gamma$, $c_\gamma=0$ for $\gamma>\gamma_{\rm max}$,
$\sum_{\gamma} c_{\gamma}^2=1$, and the symbol $\otimes$ understood (as
hereafter done whenever convenient).
The states $\lbrace \ket{\gamma}\rbrace_{{\cal H}_\Gamma}$, and
$\lbrace \ket{\xi_j}\rbrace_{{\cal H}_\Xi}$ with
$j=1,\dots,\dim{\cal H}_\Xi$, form what we will hereafter call the
$\tau$-Schmidt bases, to remind that the Schmidt decomposition is
state-specific and therefore depends on the time $\tau$ appearing
in the LHS of Eq.\eqref{e.schmidt_dec}, in whose RHS we have instead
understood the $\tau$-dependence of $c_\gamma$, $\ket{\gamma}$, and
$\ket{\xi_\gamma}$, for the sake of a lighter notation.
Consistently with the idea that $\Xi$ is a macroscopic system, we take
$\gamma_{\rm max}<{\rm dim}{\cal H}_\Xi$: therefore, the states
$\lbrace\ket{\xi_{\gamma}}\rbrace_{{\cal H}_{\Xi}}$
entering Eq.\eqref{e.schmidt_dec} are a subset of the pertaining
$\tau$-Schmidt basis.
Given that $\ket{\Gamma}$ is fully generic, the unitary evolution
\eqref{e.true_evo} defines, via $\rho_{_\Xi}={\rm Tr}_{_\Gamma} \rho_{_\Psi}$, the
CPTP linear map (from $\Gamma$- to $\Xi$-states)
\begin{equation}
{\cal{E}}{:}\proj{\Gamma}\rightarrow\rho_{_\Xi}=\sum_\gamma
c^2_\gamma\proj{\xi_\gamma}~.
\label{e.true_map}
\end{equation}
Since the output $\rho_\Xi$ is a convex sum of orthogonal
projectors, Eq.\eqref{e.true_map} might describe a projective
measurement performed by $\Xi$ on the principal system $\Gamma$,
through what is often referred to as a measure-and-prepare (m\&p) map.
However, for this being the case, the {\it probability reproducibility
condition}~\cite{BuschLM96} must also hold, meaning that, given
\begin{equation}
\ket{\Gamma}=\sum_\gamma a_\gamma\ket{\gamma}~,
\label{e.initial_Gamma}
\end{equation}
it should also be $c^2_\gamma=|a_\gamma|^2, \forall \gamma$. This condition, however,
cannot be generally true, if only for the $\tau$-dependence of the
Schmidt coefficients $\{c_\gamma\}$ which is not featured by the set
$\{a_\gamma\}$. In fact, there
exists a dynamical model (the Ozawa's model~\cite{Ozawa1984} for
projective von Neumann measurement described in Appendix \ref{a.Ozawa})
for which $c_\gamma^2=|a_\gamma|^2, \forall\gamma$ and $\forall\tau$.
Such model is defined by a
Hamiltonian where the operators acting on $\Gamma$ must commute with
each other, a condition that identifies what we will hereafter dub a
measure-like Hamiltonian, $\hat H^{\mbox{\tiny M}}$, with the apex M
hinting at the corresponding measurement process. The
evolution defined by $\exp\{-it\hat H^{\mbox{\tiny M}}\}$
will be consistently dubbed measure-like dynamics
\footnote{Giving a Hamiltonian description of more general quantum
measurement processes, i.e., identifying the appropriate propagator for
the dynamics of such processes up to the output production, is a very
relevant problem that has recently attracted the interest of several
authors, including some of us.}.
Once established that Eq.\eqref{e.true_map} does not define a
m\&p map, we can nonetheless use the elements provided by the
Schmidt decomposition as ingredients to construct a measure-like
Hamiltonian $\hat H^{\mbox{\tiny M}}$ whose corresponding m\&p map, ${\cal
E}^{\mbox{\tiny M}}:\proj{\Gamma}\to\rho_\Xi^{\mbox{\tiny M}}$ is the ``nearest'' possible to
the actual ${\cal E}$, Eq.\eqref{e.true_map}.
To this aim, we first use the $\tau$-Schmidt bases $\{\ket{\gamma}\}_{{\cal H}_\Gamma}$
and $\{\ket{\xi_j}\}_{{\cal H}_\Xi}$ to define the hermitian
operators
\begin{equation}
\hat O_{_\Gamma}=\sum_{\gamma}\varepsilon_{\gamma}\proj{\gamma}~~~,~~~
\hat O_{_\Xi}=\sum_jE_j\proj{\xi_j}~,~
\label{e.Schmidt-operators}
\end{equation}
with $\varepsilon_\gamma,E_j$ arbitrary real numbers; we then write the
interaction Hamiltonian
\begin{equation}
\hat H^{\mbox{\tiny M}}=g\hat O_{_\Gamma}\otimes\hat O_{_\Xi}\,,
\label{e.meas_H}
\end{equation}
with $g$ some coupling constant, which has the form
prescribed by the Ozawa's model (see Appendix \ref{a.Ozawa} for more
details).
Further using the Schmidt coefficients, we construct the separable state
\begin{equation}\label{e.meas_state_in}
\ket{\Psi^{\mbox{\tiny M}}}=\ket{\Gamma}\otimes\ket{\Xi^{\mbox{\tiny
M}}}\;,
\end{equation}
where $\ket{\Gamma}$ is the same as in Eq.\eqref{e.initial_state}, while
$\ket{\Xi^{\mbox{\tiny M}}}
=\sum_\gamma c_\gamma\ket{\xi_\gamma}~,$ with
$c_\gamma$ and $\ket{\xi_\gamma}$ as in Eq.\eqref{e.schmidt_dec}.
Finally we define
\begin{equation}
\ket{\Psi_\tau^{\mbox{\tiny M}}}= e^{-i\hat H^{\mbox{\tiny M}}\tau}
\ket{\Psi^{\mbox{\tiny M}}},
\label{e.meas_evo_def}
\end{equation}
that reads, using $\hat O_{_\Gamma}\ket{\gamma}=\varepsilon_\gamma\ket{\gamma}$,
$\hat O_{_\Xi}\ket{\xi_{\gamma}}=E_{\gamma}\ket{\xi_{\gamma}}$, and
$\ket{\Gamma}=\sum_\gamma a_\gamma\ket{\gamma}$,
\begin{align}
\ket{\Psi_\tau^{\mbox{\tiny M}}}=
& e^{-i\hat H^{\mbox{\tiny M}}\tau}
\sum_{\gamma} a_{\gamma}\ket{\gamma}
\sum_{\gamma'}c_{\gamma '}\ket{\xi_{\gamma '}}
\nonumber\\
=& \sum_{\gamma,\gamma'}a_{\gamma}\ket{\gamma}c_{\gamma'}
e^{-i\varphi_{\gamma\gamma'}}
\ket{\xi_{\gamma'}}\;,
\label{e.meas_evo}
\end{align}
with $\varphi_{\gamma\gamma'}\equiv\tau g\varepsilon_\gamma
E_{\gamma'}\in\mathbb{R}$. Do notice the different notation for the
time-dependence in Eqs.~\eqref{e.schmidt_dec} and
\eqref{e.meas_evo_def}: this is to underline that while the former
indicates how the state $\ket{\Psi}$ of a system with Hamiltonian
$\hat{H}$ evolves into $\ket{\Psi(t)}$ at any time $t$,
the latter represents a state whose dependence on
$\tau$ not only enters as a proper time in the propagator, but also, as
a parameter, in the definition of $\hat{H}^{\mbox{\tiny M}}$ and $\ket{\Xi^{\mbox{\tiny M}}}$,
via the $\tau$-dependence of the Schmidt decomposition
\eqref{e.schmidt_dec}. Nonetheless, the state $\ket{\Psi^{\mbox{\tiny M}}_\tau}$
can still be recognized as that in which $\Psi$ would be at time $\tau$,
were its initial state $\ket{\Psi^{\mbox{\tiny M}}}$ and its evolution ruled by the
measure-like interaction Eq.~\eqref{e.meas_H}.
Given that $\ket{\Gamma}$ is fully generic,
Equation \eqref{e.meas_evo_def} defines, via
$\rho_{_\Xi}={\rm Tr}_{_\Gamma} \rho_{_\Psi}$, the CPTP map from
$\Gamma$- to $\Xi$-states
\begin{align}
{\cal{E}^{\mbox{\tiny M}}}&:\proj{\Gamma}\rightarrow\rho_{_\Xi}^{\mbox{\tiny M}}=
\nonumber\\
&=\sum_{\gamma \gamma'\gamma''}|a_\gamma|^2 c_{\gamma'} c_{\gamma''}
e^{i(\varphi_{\gamma\gamma''}-\varphi_{\gamma\gamma'})}
\ket{\xi_{\gamma'}}\!\!\bra{\xi_{\gamma''}}~.
\label{e.meas_map}
\end{align}
Notice that ${\cal E}^{\rm M}$ depends on $\tau$ directly,
via $\varphi_{\gamma\gamma'}\propto\tau$, and indirectly, via the $\tau$-dependence
of the Schmidt decomposition, that is of the
coefficients $c_\gamma$ and the states $\ket{\xi_\gamma}$.
Comparing Eqs.\eqref{e.true_map} and \eqref{e.meas_map} we
see that ${\cal E}^{\mbox{\tiny M}}$ has the right coefficients
$\{|a_\gamma|^2\}$ but the wrong form, i.e., it is not a sum of
orthogonal projectors, while ${\cal E}$ has the correct form but with
the wrong coefficients, $\{c_\gamma^2\}$.
In fact, were these two maps equal in some limit, it
would mean the following: for each time $\tau$, there exists an observable for $\Gamma$,
(depending on $\tau$ itself) such that the state into which $\Xi$ has
evolved due to its true interaction with $\Gamma$ is the same,
in such limit, as if $\Xi$ itself were
some measuring apparatus proper to that observable, which is quite a statement.
Since ${\cal{E}}$ and ${\cal{E}^{\mbox{\tiny M}}}$ are linear,
they are the same map iff the output states $\rho_{_\Xi}$ and
$\rho_{_\Xi}^{\mbox{\tiny M}}$ are equal for whatever input $\ket{\Gamma}$. We can
therefore concentrate upon the structure of such output states, which we will
do in the next section by introducing a proper parametric representation.
\section{Parametric representation with environmental coherent states}
\label{s.coherent}
The parametric representation with environmental coherent states (PRECS)
is a theoretical tool that has been recently
introduced~\cite{CCGV13pnas,TesiDario} to specifically address those
bipartite quantum systems where one part, on its own made by $N$
elementary components, shows an emerging classical behaviour
in the large-$N$ limit
~\cite{LiuzzoScorpoCV15epl,CalvaniEtal13,LiuzzoScorpoCV15ijtp,FotiCV16,RossiEtal17}.
The method makes use of generalized coherent states (GCS) for the system
intended to become macroscopic.
The construction of GCS, sometimes referred to as group-theoretic, goes
as follows~\cite{ZhangFG90}.
Associated to any quantum system there is a Hilbert space ${\cal H}$ and a
dynamical group ${\cal G}$, which is
the group containing all the propagators that describe possible
evolutions of the system (quite equivalently, ${\cal G}$ is the group
corresponding to the Lie algebra $\mathfrak{g}$ to which all
the physical Hamiltonians of the system belong).
Once these ingredients are known, a reference
state $\ket{0}$ is arbitrarily chosen in $\cal{H}$ and the subgroup
${\cal F}$ of the propagators that leave such state unchanged (apart
from an irrelevant overall phase) is determined. This is
usually referred to as the stability subgroup.
Elements $\hat\omega$ of ${\cal G}$ that do not belong to such
subgroup,
$\hat{\omega}\in{\cal G}/{\cal F}$, generate the GCS
upon acting on the reference state, $\hat\omega\ket{0}=\ket{\omega}$,
and are usually dubbed ``displacement'' operators.
The GCS construction further entails the definition of an
invariant\footnote{The measure $d\mu(\hat\omega)$ is called \emph{invariant}
because it is left unchanged by the action of ${\cal
G}$.}
measure $d\mu(\hat\omega)$ on ${\cal G}/{\cal F}$ such that a resolution
of the identity on ${\cal{H}}$ is provided in the form
\begin{equation}
\int_{{\cal G}/{\cal F}}d\mu(\hat\omega)\proj{\omega}=\hat{\mathbb{I}}_{\cal{H}}~.
\label{e.ECS_resolid}
\end{equation}
One of the most relevant byproducts of the GCS construction is the
definition of a differentiable manifold ${\cal{M}}$ via the chain of
one-to-one correspondences
\begin{equation}
\hat{\omega}\subset{\cal G}/{\cal F}\Leftrightarrow\ket{\omega}\in{\cal
H}\Leftrightarrow\omega\subset{\cal M}~,
\label{e.GCScorrespondence}
\end{equation}
so that to any GCS is univocally associated a point on ${\cal M}$, and
vice versa. A measure $d\mu(\omega)$ on ${\cal
M}$ is consistently associated to the above
introduced $d\mu(\hat\omega)$, so that requiring GCS to be normalized,
$\exval{\omega|\omega}=1$, implies
\begin{align}
\exval{\omega|\omega}&=\exval{\omega|\left[ \int_{{\cal G}/{\cal F}}
d\mu(\hat\omega)\proj{\omega}\right]|\omega}\nonumber\\
&=\int_{\cal
M}d\mu(\omega)|\exval{\omega|\omega}|^2=1~;
\label{e.GCS_non-orthogonal}
\end{align}
notice that GCS are not necessarily orthogonal.
One important aspect of the GCS construction is that it
ensures the function $\exval{\omega|\rho|\omega}$ for whatever state
$\rho$ (often called Husimi
function in the literature\footnote{In fact, a "Husimi function" is in
principle defined on a classical phase-space, while ${\cal M}$ is a
differential manifold with a symplectic structure that should not be
considered a phase-space, yet, i.e., before the large-$N$ limit is taken;
however, it is quite conventional to extend the term to the expectation
value of $\rho$ on GCS.})
is a well-behaved probability
distribution on $\cal M$ that uniquely
identifies $\rho$ itself. As a consequence,
studying
$\exval{\omega|\rho|\omega}$ on $\cal M$ is fully equivalent to perform
a state-tomography of $\rho$ on the Hilbert space, and
once GCS are available one can analyze any state $\rho$
of the system by studying its Husimi function on $\cal M$, which is what
we will do in the following.
We refer the reader to Refs.~\cite{ZhangFG90,Perelomov72} for
more details.
When GCS are relative to a system $\Xi$ which is
the environment of a principal system $\Gamma$, we call them
Environmental Coherent States (ECS).
\begin{figure}
\caption{$|\exval{\alpha|n}|^2$ as a function of $|\alpha|^2$, for $n=1,2$ and different values of $N$.}
\label{f.bosonoverlap1}
\end{figure}
\begin{figure*}
\caption{Sum $|\exval{\alpha|n'}|^2+|\exval{\alpha|n''}|^2$ for $n'=1$ and $n''=2$, contour-plotted on ${\cal M}$ for different values of $N$.}
\label{f.bosonoverlap2}
\end{figure*}
Getting back to the setting of section~\ref{s.Schmidt}, we first
recognize that, if they were to represent different evolutions of the
same physical system, the propagators $\exp\{-i\hat H\tau\}$ and
$\exp\{-i\hat{H}^{\mbox{\tiny M}}\tau\}$ must belong to the same dynamical group,
as far as their action on ${\cal H}_{_\Xi}$ is concerned.
More explicitly, this group is identified as follows:
$i)$ consider all the operators acting on ${\cal H}_\Xi$ in the total
Hamiltonians $\hat H$ and $\hat H^M$;
$ii)$ find the algebra to which they all belong
(notice that, as both Hamiltonians refer to the same physical system,
the above operators must belong to the same algebra $\mathfrak{g}$);
$iii)$ recognize the dynamical group as that associated to the above
algebra $\mathfrak{g}$ via the usual exponential Lie map (for several examples see
for instance Refs.~\cite{LiuzzoScorpoCV15epl,CCGV13pnas,FotiCV16,RossiEtal17}).
This is the
group to be used for constructing the ECS, according to the procedure
briefly sketched above. Once ECS are constructed, the PRECS of any
pure state $\ket{\psi}$ of
$\Psi$ is obtained by inserting an identity resolution
in the form \eqref{e.ECS_resolid} into any decomposition of
$\ket{\psi}$ as linear combination of separable (w.r.t. the
partition $\Psi=\Gamma+\Xi$) states.
Explicitly, one has
\begin{equation}
\ket{\psi}=\int_{\cal M}
d\mu(\omega)\chi(\omega)\ket{\omega}\ket{\Gamma(\omega)}~,
\label{e.PRECS_general}
\end{equation}
where $\ket{\Gamma(\omega)}$ is a normalized state for $\Gamma$ that
parametrically depends on $\omega$, while $\chi(\omega)$ is a real
function on $\cal M$ whose square
\begin{equation}
\chi(\omega)^2=\exval{\omega|\rho_{_\Xi}|\omega}~,
\label{e.Husimi}
\end{equation}
is the environmental Husimi function relative to
$\rho_{_\Xi}={\rm Tr}_{_\Gamma}\proj{\psi}$,
i.e., the normalized distribution on ${\cal M}$
that here represents the probability for the environment $\Xi$ to be in
the GCS $\ket{\omega}$ when $\Psi$ is in the pure state
$\ket{\psi}$.
The explicit form of $\chi(\omega)$ and $\ket{\Gamma(\omega)}$ is
obtained from any decomposition of $\ket{\psi}$ into a linear
combination of separable (w.r.t. the partition $\Gamma+\Xi$) states.
In particular,
for the states
\eqref{e.schmidt_dec} and \eqref{e.meas_evo}, it is
\begin{equation}
\chi(\omega)^2=
{\sum_{\gamma}c_\gamma^2
\left| \bra{\omega}\xi_\gamma\rangle \right|^2}~,
\label{e.chi2_true}
\end{equation}
and
\begin{align}
&\chi^{\mbox{\tiny M}}(\omega)^2=\nonumber\\
&=\sum_{\gamma\gamma'\gamma''}|a_{\gamma}|^2
c_{\gamma'}
c_{\gamma''}e^{i(\varphi_{\gamma\gamma''}-\varphi_{\gamma\gamma'})}
\bra{\omega}\xi_{\gamma'}\rangle
\bra{\xi_{\gamma''}}\omega\rangle~,\nonumber\\
&~
\label{e.chi2_meas}
\end{align}
respectively.
Comparing $\chi(\omega)^2$ and $\chi^{\mbox{\tiny M}} (\omega)^2$ is equivalent to
compare
$\rho_\Xi$ and $\rho_\Xi^{\mbox{\tiny M}}$, and hence the maps
\eqref{e.true_map} and \eqref{e.meas_map}.
However, despite the very specific construction leading
to $\ket{\Psi^{\mbox{\tiny M}}_\tau}$, we cannot yet make any meaningful specific
comparison between
$\chi(\omega)^2$ and $\chi^{\mbox{\tiny M}}(\omega)^2$ at this stage.
Indeed, we still have to exploit the fact that the
environment is doomed to be big and behave classically, which is why ECS
turn out to be so relevant to the final result, as shown in the next section.
\section{Large-$N$ and classical limit}
\label{s.largeN}
As mentioned in the Introduction, a physical system which is made by a
large number $N$ of quantum constituents does not necessarily obey the
rules of classical physics. However, several authors
~\cite{Yaffe82,ZhangFG90,Lieb73,GnutzmannK98} have shown
that if GCS exist and feature some
specific properties, then the structure of a classical theory ${\cal C}$ emerges
from that of a quantum theory ${\cal Q}$.
In particular, the existence of GCS establishes a relation between the
Hilbert space of ${\cal Q}$ and the manifold ${\cal M}$ that their
construction implies, which turns out to be the phase-space of the
classical theory that emerges as the large-$N$ limit of ${\cal Q}$.
In fact, one should rather speak about the $k\to 0$ limit
of ${\cal Q}$, with $k$ the real positive number, referred to
as ``quanticity parameter'', such that all the commutators of
the theory (or anticommutators, in the fermionic case)
vanish with $k$. However, all known quantum theories for systems
made by $N$ components have $k\sim \frac{1}{N^p}$
with $p$ a positive number: therefore, for the sake of clarity, we
will not hereafter use the vanishing of the quanticity parameters but
rather refer to the large-$N$ limit (see Appendix \ref{a.large_N}
for more details).
Amongst the above properties of GCS, that are thoroughly explained and
discussed in Ref.~\cite{Yaffe82} as the {\it assumptions} guaranteeing the large-$N$
limit to define a classical theory, one that plays a key role in
this work regards the overlaps $\exval{\omega|\xi}$, whose square
modulus represents the probability that a system in some generic pure
state $\ket{\xi}$ be observed in the coherent state $\ket{\omega}$.
These overlaps never vanish for finite $N$, due to the overcompleteness
of GCS: as a consequence, if one considers two orthonormal states, say
$\ket{\xi'}$ and $\ket{\xi{''}}$, there might be a finite
probability for a system in a GCS $\ket{\omega}$ to be observed either
in $\ket{\xi'}$ or in $\ket{\xi{''}}$.
This formally implies that, defined $S_{\xi}$ the set of points on
$\cal{M}$ where
$|\exval{\omega|\xi}|>0$, it generally
is $S_{\xi '}\cap S_{\xi{''}}\neq\emptyset$.
\begin{figure*}
\caption{Sum $|\exval{\alpha|+}|^2+|\exval{\alpha|-}|^2$, with $\ket{\pm}\equiv(\ket{1}\pm\ket{2})/\sqrt{2}$, contour-plotted for different values of $N$.}
\label{f.bosonoverlap3}
\end{figure*}
On the other hand, the quantity
\begin{equation}
\lim_{N\to\infty}|\exval{\omega|\xi}|^2
\label{e.clas_lim_generic}
\end{equation}
features some very relevant properties.
First of all, if $\ket{\xi}$ is another GCS, say
$\ket{\omega'}$, the square modulus $|\exval{\omega|\omega'}|^2$ exponentially
vanishes with $|\omega-\omega'|^2$ in such a way that the limit
\eqref{e.clas_lim_generic} converges to the Dirac distribution
$\delta(\omega-\omega')$, thus restoring a notion of distinguishability
between different GCS in the large-$N$ limit.
Moreover, in Appendix \ref{a.large-N_Overlap} we demonstrate that
\begin{equation}
\exval{\xi'|\xi{''}}=\delta_{\xi'\xi{''}}\Leftrightarrow
\lim_{N\to\infty} S_{\xi'}\cap S_{\xi''}=\emptyset~,
\label{e.supports-dont-share}
\end{equation}
meaning that orthonormal states are put together by
distinguishable sets of GCS. In other terms, the large-$N$ limit
enforces the emergence of a one-to-one correspondence between elements of any
orthonormal basis $\{\ket{\xi}\}$ and disjoint sets of GCS,
in such a way that the distinguishability of the former is reflected
into the disjunction of the latter.
Given the relevance of Eq.\eqref{e.supports-dont-share} to this work,
let us discuss its meaning with two explicit examples.
\subsection{Field Coherent States}
Consider a system $\Xi$ whose Lie algebra is $\mathfrak{h}_4$, i.e., the vector
space spanned by
$\{\hat a,\hat a^\dagger, \hat n\equiv\hat a^\dagger \hat a, \hat{\mathbb{I}}\}$, with
Lie brackets $[\hat a,\hat a^\dagger]=1$, and
$[\hat a^{(\dagger)},\hat n]=(-)\hat a^{(\dagger)}$.
In order to identify the quanticity parameter $k$, i.e., the parameter
whose vanishing makes the Lie brackets of the theory go to zero, one can
restore dimensionful ladder operators,
$\hat a^{(\dagger)}\to\sqrt{\frac{2\hbar}{M\omega}}\hat
a^{(\dagger)}$, and observe that all the commutators vanish in the
large-$M$ limit. Further taking $M\propto N$, meaning that
the total mass of $\Xi$ is the sum of the masses of the elementary
components, which are assumed to have the same mass for the sake of
simplicity, it is easily found
that $k\sim 1/N$.
As for the GCS , they are the well known field coherent
states
$\{\ket{\alpha}\}$, with $\ket{0}:\hat a\ket{0}=0$ the
reference state, and ${\cal M}$ the complex plane. The eigenstates of
$\hat n$ are the Fock states $\{\ket{n}\}$, and
$\exp\{\alpha \hat a - \alpha^* \hat a^\dagger\}\equiv \hat \alpha$
is the displacement operator such that
$\ket{\alpha}=\hat\alpha\ket{0}$.
As for the overlaps entering Eq.\eqref{e.chi2_meas}, let us
first consider the case when the states $\{\ket{\xi_\gamma}\}$ are Fock
states. In Fig.~\ref{f.bosonoverlap1} we show $|\exval{\alpha|n}|^2$ as
a function of $|\alpha|^2$, for
$n=1,2$ and different values of $N$.
It is clearly seen that $S_{n'}\cap S_{n''}\to\emptyset$ as
$N\to\infty$, meaning that the product of overlaps in Eq.\eqref{e.chi2_meas}
vanishes unless $\gamma'=\gamma{''}$, i.e., $n'=n{''}$ in this specific example.
In order to better visualize $S_{n'}$ and $S_{n{''}}$ on ${\cal M}$, in
Fig.~\ref{f.bosonoverlap2} we contour-plot the sum
$|\exval{\alpha|1}|^2+|\exval{\alpha|2}|^2$:
indeed we see that, as $N$ increases, $S_1$ and
$S_2$ do not intersect. Notice that increasing $N$ does not squeeze
$S_n$ to the neighbourhood of some point on $\cal M$, as is the case for
$\lim_{N\to\infty}|\exval{\alpha|\alpha'}|^2=\delta(\alpha-\alpha')$,
but rather to that of the circle $|\alpha|^2=n$.
In other terms, more field coherent states overlap with the same Fock
state, but different Fock states overlap with distinct sets of field
coherent states, in the large-$N$ limit.
This picture holds not only for Fock states but, as expressed by
Eq.\eqref{e.supports-dont-share}, for any pair of orthonormal states.
In Fig.~\ref{f.bosonoverlap3}, for instance, we contour-plot the sum
$|\exval{\alpha|+}|^2+|\exval{\alpha|-}|^2$ with
$\ket{\pm}\equiv(\ket{1}\pm\ket{2})/\sqrt{2}$: in this case $S_+$ and
$S_-$ are disjoint already for $N=1$, and keep shrinking as $N$
increases.
\subsection{Spin Coherent States}
A very similar scenario appears when studying
a system $\Xi$ whose Lie algebra is $\mathfrak{su}(2)$, i.e., the vector space
spanned by $\{\hat S^+,\hat S^-, \hat S^z\}$, with Lie brackets
$[\hat S^+,\hat S^-]=2\hat S^z, [\hat S^z,\hat S^\pm]=\pm\hat S^\pm$,
and $|\hat{\mathbf S}|^2=S(S+1)$, with $S$ fixed and constant;
in this case the quanticity parameter is
identified by noticing that the normalized operators
$\hat s^*\equiv \frac{1}{S}\hat S^*$, $*=z,\pm$, have vanishing commutators in the
large-$S$ limit. Further taking $S\propto N$, meaning that the
total spin of $\Xi$ is a conserved quantity, whose value is the sum of the
spins of each individual component, it is easily found that
$k\sim 1/N$.
As for the GCS , they are the so-called spin (or atomic) coherent
states $\{\ket{\Omega}\}$, with the reference state
$\ket{0}:\hat S^z\ket{0}=-S\ket{0}$,
and ${\cal M}$ the unit sphere.
The eigenstates of $\hat S^z$ are
$\{\ket{m}\}:\hat S^z\ket{m}=(-S+m)\ket{m}$, and the
displacement operators are
$\hat\Omega=\exp\{\eta \hat S^- - \eta^* \hat S^+\}$,
with $\eta=\frac{\theta}{2}e^{i\phi}$, and
$\theta\in[0,\pi],\phi\in[0,2\pi)$ the spherical coordinates.
\begin{figure}
\caption{$|\exval{\Omega|m}|^2$ for $m'/S=0.8$ and $m''/S=0.4$, for different values of $N$.}
\label{f.spinoverlap1}
\end{figure}
As for the overlaps entering Eq.\eqref{e.chi2_meas},
the analytical expression for $\exval{\Omega|m}$ is available (see for
instance Ref.~\cite{ZhangFG90}), which allows us to show,
in Fig.~\ref{f.spinoverlap1}, the square modulus
$|\exval{\Omega|m}|^2$ for $m'/S=0.8$ and
$m{''}/S=0.4$,
for different values of $N$. Again we see that
$S_{m'}\cap S_{m{''}}\to\emptyset$ as $N\to\infty$, implying that the
product in Eq.\eqref{e.chi2_meas}
vanishes unless
$\gamma'=\gamma{''}$, i.e., $m'=m{''}$ in this specific example. In
Fig.~\ref{f.spinoverlap2} we show the sum
$|\exval{\Omega|m'}|^2+|\exval{\Omega|m{''}}|^2$ as density-plot on part
of the unit sphere: besides the expected shrinking of the regions
where the overlaps are finite, we notice that, as seen in the bosonic
case, the support of $\lim_{N\to\infty}|\exval{\Omega|m}|^2$ does not
shrink into the neighbourhood of a point on the sphere, as is the case for
$\lim_{N\to\infty}|\exval{\Omega|\Omega'}|^2=\delta(\Omega-\Omega')$,
but rather into that of the parallel $\cos\theta=m/S$.
\begin{figure*}
\caption{Sum $|\exval{\Omega|m'}|^2+|\exval{\Omega|m''}|^2$ shown as density-plot on part of the unit sphere, for different values of $N$.}
\label{f.spinoverlap2}
\end{figure*}
\section{A macroscopic environment that behaves classically}
\label{s.macroclassic_env}
Let us now get back to the general case and to Eq.\eqref{e.chi2_meas}: the
states $\ket{\xi_{\gamma'}}$ and
$\ket{\xi_{\gamma{''}}}$ are orthonormal by definition, being elements of the
$\tau$-Schmidt basis $\{\ket{\xi_j}\}_{{\cal H}_\Xi}$ introduced in
Sec.\ref{s.Schmidt}.
Therefore Eq.\eqref{e.supports-dont-share} holds, meaning
\begin{equation}
\lim_{N\to\infty}\exval{\omega|\xi_{\gamma'}}\exval{\xi_{\gamma{''}}|\omega}
=
\lim_{N\to\infty}|\exval{\omega|\xi_{\gamma'}}|^2\delta_{\gamma'\gamma{''}}~,
\label{e.product-of-overlaps_limit}
\end{equation}
and hence
\begin{equation}
\lim_{N\to\infty}\chi^{\mbox{\tiny M}}(\omega)^2=
\sum_{\gamma\gamma'}|a_\gamma|^2c^2_{\gamma'}\lim_{N\to\infty}
|\exval{\omega|\xi_{\gamma'}}|^2~.
\label{e.lim_chi2_meas}
\end{equation}
Using $\sum_\gamma|a_\gamma|^2=1$, and the swap
$\gamma'\leftrightarrow\gamma$, we finally obtain
\begin{equation}
\lim_{N\to\infty}\chi^{\mbox{\tiny M}}(\omega)^2=\lim_{N\to\infty}\chi(\omega)^2~,
\label{e.lim_chi2}
\end{equation}
which is what we wanted to prove, namely that
the dynamical maps \eqref{e.true_map} and \eqref{e.meas_map}
are equal when $\Xi$ is a quantum macroscopic system whose
behaviour can be effectively described classically.
\section{Discussion}
\label{s.discussion}
Aim of this section is to comment upon some specific aspects of our
results, with possible reference to the way other authors have
recently tackled the same subject.
Let us first consider the assumption that the initial state
\eqref{e.initial_state} of the total system $\Psi=\Gamma+\Xi$ be
separable. If this is not the case, as it may happen, one must look for the
different partition $\Psi=A+B$, such that
$\ket{\Psi}=\ket{A}\otimes\ket{B}$. If this partition is still such that
the subsystem $B$ is macroscopic and behaves classically, the change is
harmless and the whole construction can be repeated with $A$ the quantum
system being observed and $B$ its observing environment.
On the other hand, if the new partition is such that neither $A$ nor $B$
meet the conditions for being a classical environment, then the problem
reduces to the usual one of studying the dynamics of two interacting
quantum systems, for which any approach based on effective descriptions
is incongruous, as details of the true Hamiltonian will always be
relevant. Notice that this analysis is fully consistent with the results
presented in Ref.~\cite{BrandaoPH15}, which are embodied into
inequalities whose meaning wears off as dim${\cal H}_B$ diminishes.
The case when $\Psi$ is not initially in a pure state is similarly
tackled by enlarging $\Psi\to\widetilde{\Psi}$ as much as necessary for
$\widetilde{\Psi}$ to be in a pure state: a proper choice of a new
partition of $\widetilde{\Psi}$ will follow.
We then want to clarify in what sense the Hamiltonian
\eqref{e.meas_H} is said to induce a ``measure-like dynamics'' or, which is
quite equivalent, the channel \eqref{e.meas_map} to define a
m\&p map: the quotes indicate that the actual output
production,
which happens at a certain time according to some process whose nature
we do not discuss, is not considered and it only enters the description
via the requirement that the probability for each output is the one
predicted by Born's rule. To this respect, one might also ask what is
the property of $\Gamma$ which is observed by $\Xi$: this is
the one represented, in the Ozawa's model, by the operator
$\hat{O}_\Gamma$,
and it therefore depends on the true evolution via the Schmidt
decomposition of the evolved state. To put it another way, details of the
interaction do not modify the measure-like nature of the dynamics in the
large-$N$ limit, but they do affect what actual measurement is performed
by the environment.
Let us now discuss possible connections between our results
and Quantum Darwinism~\cite{Zurek09,BrandaoPH15}.
As mentioned at the end of Appendix \ref{a.large_N},
a sufficient condition for a quantum theory to
have a large-$N$ limit which is a classical theory is the existence of
a global symmetry, i.e., such that its
group-elements act non-trivially upon the
Hilbert space of each and every component of the total system $\Xi$ that
the theory describes.
In fact, few simple examples show
that quantum theories with different global symmetries
can flow into the same classical theory in the large-$N$
limit: in other words, echoing L.~G.~Yaffe in Ref.~\cite{Yaffe82},
different quantum theories can be ``classically equivalent''.
If one further argues that amongst classically equivalent quantum theories
there always exists a free theory, describing $N$ non-interacting
subsystems, it is possible to show that each macroscopic
fragment of $\Xi$ can be effectively described as if it were the same
measurement apparatus. Work on this point is in progress,
based on the quantum de Finetti theorem, results from
Refs.~\cite{ChiribellaDA06,BrandaoPH15}, and the
preliminary analysis reported in Refs.~\cite{Querini_Master16,Foti_phd2019}.
We close this section by mentioning the possible connection between
our description and the way the notion of ``objective information'' is
seen to emerge in Ref.~\cite{HorodeckiKH15}: in fact, the idea that there may
be no quantum-to-classical transition involved in the perception of the
world around us, that might rather emerge just as a reflection of some
specific properties of the underlying quantum states, seems to be
consistent with the discussion reported above, and we believe that
further investigation on this point might be enlightening.
\section{Conclusions}
\label{s.conclusions}
The idea that the interaction with macroscopic environments causes the
continual state-reduction of any quantum system is crucial for making
sense of our everyday experience w.r.t. the quantum description of
nature. However, the formal analysis of this idea has been
unsatisfactory for decades, due to several reasons, amongst which we
underline the following.
Firstly the generality of the above idea implies that assumptions on the
initial state of the quantum system, and the specific form of the
interaction with its environment, should not be made. Secondly, formal
tools must be devised to allow the study of the system-plus-environment
dynamics in a way that guarantees a genuinely quantum description of the
system throughout the crossover of the environment towards a classical
behaviour. Finally, a clean procedure is required to ensure that the
above crossover takes place when the environment becomes macroscopic,
i.e., in the large-$N$ limit of the quantum theory that describes it.
In this work, reminding that principal system and environment are dubbed
$\Gamma$ and $\Xi$, respectively, we have addressed the above three
issues as follows. As for the first point, the analysis is developed by
comparing CPTP linear maps from $\Gamma$- to $\Xi$-states, that do not
depend on the initial state of $\Gamma$ by definition. The considered
maps, Eqs.~\eqref{e.true_map} and \eqref{e.meas_map}, are defined using
ingredients provided by the Schmidt decomposition of the
system-plus-environment evolved state, Eq.~\eqref{e.true_evo}, that
exists at any time, and whatever the form of the interaction between
$\Gamma$ and $\Xi$ is. Regarding the second issue, we have used a
parametric representation of the overall system state,
Eq.~\eqref{e.PRECS_general}, that resorts to generalized coherent
states (i.e., coherent states as defined via the group-theoretical
approach) for describing $\Xi$. This representation, both for its
parametric nature and the peculiar properties of coherent states when
the quantum-to-classical crossover is considered, allows us to implement
the large-$N$ limit for $\Xi$ without making assumptions on $\Gamma$
or affecting its quantum character.
The third point has been tackled by using results from large-$N$ quantum
field theories: these results provided us with formal conditions that
generalized coherent states must fulfill, particularly
Eq.~\eqref{e.Yaffe-assumption_3}, in order to describe a macroscopic
system that behaves classically.
After this elaboration, we have managed to compare the map defined by
the true evolution of $\Gamma+\Xi$, Eq.~\eqref{e.true_map}, with that
corresponding to a measure-and-prepare dynamical process,
Eq.~\eqref{e.meas_map}, in terms of the difference between probability
functions entering the parametric representation,
Eqs.~\eqref{e.chi2_true} and \eqref{e.chi2_meas}. These functions have been
demonstrated to become equal when the large-$N$ limit defines a
classical dynamics for $\Xi$.
Overall, our approach allows one to tackle the so-called quantum to
classical crossover \cite{Schlosshauer07} by a rigorous mathematical
formulation that provides a physically intuitive picture of the
underlying dynamical process. In fact, exploiting the most relevant fact that
not every theory has a classical limit, we
have shown that any dynamics of whatever OQS
defines a Hamiltonian model that characterizes its environment
as a measuring apparatus
\textit{if} the conditions ensuring that the above classical
limit exists and corresponds to a large-$N$ condition upon the
environment itself are fulfilled.
In other words, \textit{if} some dynamics
emerges in the classical world, it \textit{necessarily} is a
measure-like one.
Let us conclude by briefly commenting upon the already mentioned
phenomenon known as Quantum Darwinism, introduced in \cite{Zurek09} and
recently considered in \cite{BrandaoPH15} from an information theoretic
viewpoint. Our work suggests that Quantum Darwinism might emerge as a
dynamical process, with its generality due to the versatility of the
Hamiltonian model for the quantum measurement process, and the loss of
resolution inherent in the classical description.
\begin{acknowledgments} CF acknowledges M. Piani and M. Ziman for useful
and stimulating discussions. SM and TH acknowledge financial support
from the Academy of Finland via the Centre of Excellence program
(Project no. 312058) as well as Project no. 287750. CF and PV acknowledge
financial support from the University of Florence in the framework of the University
Strategic Project Program 2015 (project BRS00215).
PV acknowledges
financial support from the Italian National Research Council (CNR) via
the ``Short term mobility'' program STM-2015, and declares to have worked
in the framework of the Convenzione Operativa between the Institute for
Complex Systems of CNR and the Department of Physics and Astronomy of
the University of Florence. Finally, CF and PV warmly thank the Turku Centre
for Quantum Physics for the kind hospitality. \\
\end{acknowledgments}
\onecolumn
\appendix
\section{From Ozawa's model to the measure-and-prepare map}
\label{a.Ozawa}
Given a projective measurement with measurement operators
$\{\proj{\pi}\}$ acting on ${\cal{H}}_\Gamma$, its dynamical
description according to the Ozawa's model is defined by the propagator
$\exp\{-it\hat H^{\mbox{\tiny M}}\}$, with
\begin{equation}
\hat H^{\mbox{\tiny M}}=g\hat O_\Gamma\otimes \hat O_\Xi~,
\label{e.H_measure-like}
\end{equation}
where $\hat O_\Gamma=\sum_\pi\omega_\pi\proj{\pi}$ is the
measured observable, while $\hat O_\Xi$ is the operator on ${\cal H}_\Xi$
conjugate to the pointer observable~\cite{Schlosshauer07}.
The resulting, measure-like, dynamics is such that decoherence of
$\rho_\Gamma(t)$ w.r.t. the basis $\{\ket{\pi}\}$ implies
$\rho_\Xi(t)=\sum_\pi|a_\pi|^2\proj{\Xi^\pi_t}$ with
$\exval{\Xi^\pi_t|\Xi^{\pi'}_t}=\delta_{\pi\pi'}$ and $a_\pi$ such that
$\ket{\Gamma(0)}=\sum_\pi a_\pi\ket{\pi}$, and vice versa. Here $t$
indicates any time prior to the output production, when decoherence has
already occurred.
This dynamics defines a CPTP map ${\cal E}^{\mbox{\tiny M}}$ via
\begin{equation}
\proj{\Gamma}=\sum_{\pi\pi'}a_\pi a^*_{\pi'}\ket{\pi}\!\!\bra{\pi'}
\underset{{\cal E}^{\mbox{\tiny M}}}{\longrightarrow}
\rho_\Xi=\sum_\pi|a_\pi|^2\proj{\Xi^\pi}~,
\label{e.measure-like_map}
\end{equation}
referred to as measure-and-prepare (m\&p) map in the literature.
Notice that what characterizes ${\cal E}^{\mbox{\tiny M}}$ as a m\&p map is
not the diagonal form of the output state $\rho_\Xi$, but rather the
fact that its eigenvalues are constant and exclusively depend on the
input state $\proj{\Gamma}$.
\section{Large-$N$ as classical limit}
\label{a.large_N}
In order to define the classical limit of a quantum theory ${\cal Q}$ it
is first necessary to identify a parameter $k$, usually dubbed
``quanticity parameter'', such that ${\cal Q}$ transforms into a classical theory
${\cal C}$ as $k$ vanishes. By ``transform'' it is meant that a formal
relation is set between Hilbert and phase spaces, Lie and Poisson
brackets, Hamiltonian operators and functions.
Consequently, the large-$N$ limit of ${\cal Q}$ implies a classical
behaviour of the macroscopic system it describes IF $N\to\infty$
implies $k\to 0$. On the other hand, in order for this to be the case
it proves sufficient that GCS $\{\ket{\omega}\}$ for ${\cal Q}$ exist and
feature some specific properties~\cite{Berezin1978,Yaffe82}.
Amongst these, particularly relevant to this work is that
\begin{equation}
\lim_{k\to 0}
\,k\Big[\ln|\exval{\omega'|\omega}|\Big]\le 0~,
\label{e.Yaffe-assumption_3}
\end{equation}
where the equality holds iff $\omega=\omega'$, and the property implies
the limit exists.
From the above property it follows\footnote{We use the Dirac-$\delta$
representation
$\delta(x-y)=\lim_{\epsilon\to 0}(\pi\epsilon)^{-1/2}\exp\{-(x-y)^2/\epsilon\}$.}
\begin{equation}
\lim_{k\to 0}
\frac{1}{k}|\exval{\omega|\omega'}|^2=\delta(\omega-\omega')~,
\label{e.GCS_orthogonal}
\end{equation}
which expresses one of the most relevant properties of GCS, namely that
they become orthogonal in the classical limit.
It is worth mentioning that if ${\cal Q}$ features a global symmetry
(also dubbed ``supersymmetry'' in the literature), GCS can be explicitly
constructed and shown to feature the properties ensuring that the
large-$N$ limit is indeed a classical one~\cite{Yaffe82}. However,
whether the existence of one such symmetry be a necessary condition for
a system to behave classically in the large-$N$ limit is not proven,
although all of the known physical theories, be they vector-, matrix-,
or gauge-theories, confirm the statement (see Sec.VII of
Ref.~\cite{Yaffe82} for a thorough discussion about this point).
Incidentally, we believe the above supersymmetry be essential in
defining what a macroscopic observer should actually be in order
for Quantum Darwinism to occur, in a way similar to that discussed in
Ref.~\cite{ChiribellaDA06} in the specific case of a quantum theory for
$N$ distinguishable particles with permutation global symmetry.
\section{Overlap between GCS and elements of an orthonormal basis in
the large-$N$ limit}
\label{a.large-N_Overlap}
One of the outputs of the GCS construction, and a key ingredient for their
use, is the invariant measure $d\mu(\hat\omega)$ entering the identity
resolution Eq.\eqref{e.ECS_resolid}. It is demonstrated~\cite{Yaffe82}
that in order for such resolution to keep holding for whatever value of the
quanticity parameter $k$ it must be $d\mu(\omega)=c_k dm(\omega)$,
with $c_k$ a constant on ${\cal M}$ that depends on the normalization
of the group-measure $d\mu(\hat\omega)$ and should be computed
on a case-by-case basis. However, normalization of GCS is guaranteed by
construction, and hence, via Eq.\eqref{e.ECS_resolid},
\begin{equation}
\exval{\omega|\omega}=\int_{\cal M}
c_k dm(\omega')|\exval{\omega|\omega'}|^2=1~,~\forall\ket{\omega}~;
\label{e.ECS_norm}
\end{equation}
Furthermore, from Eq.\eqref{e.GCS_orthogonal} it follows
$|\exval{\omega|\omega'}|^2\to k\delta(\omega-\omega')$ as $k$ vanishes,
and hence
\begin{equation}
\lim_{k\to 0}c_k k\int_{\cal M}dm(\omega')\delta(\omega-\omega')=1~,
\label{e.ECS_norm_kto0}
\end{equation}
which implies $c_k=\frac{1}{k}$,
as readily verified in those cases where an explicit form of GCS is
available.
The fact that $c_k$
is independent of $\omega$ and goes like $\frac{1}{k}$ for vanishing $k$,
enforces
\begin{equation}
\lim_{k\to 0} \int_{\cal M} \frac{1}{k}dm(\omega)
\exval{\xi'|\omega}\exval{\omega|\xi''}
=\delta_{\xi'\xi''}
\label{e.orthonormality_limit}
\end{equation}
to hold for whatever pair $(\ket{\xi'},\ket{\xi''})$ of orthonormal
states: as neither $dm(\omega)$ nor ${\cal M}$ depend on $k$, this is
only possible if the two overlaps entering the integral are never
simultaneously finite on ${\cal M}$ or, more precisely, on a set of
finite measure. In other terms, Eq.\eqref{e.orthonormality_limit}
implies Eq.\eqref{e.supports-dont-share}, and vice versa (which is
trivial).
\end{document} |
\begin{document}
\title{Spectral stability, spectral flow and circular relative equilibria for the Newtonian $n$-body problem}
\begin{abstract}
For the Newtonian (gravitational) $n$-body problem in the Euclidean $d$-dimensional space, $d\ge 2$, the simplest possible periodic solutions are provided by circular relative equilibria, (RE) for short, namely solutions in which each body rigidly rotates about the center of mass and the configuration of the whole system is constant in time and is a central (or, more generally, balanced) configuration.
For $d\le 3$, the only possible (RE) are planar, but in dimension four it is possible to get truly four dimensional (RE).
A classical problem in celestial mechanics aims at relating the (in-)stability properties of a (RE) to the index properties of the central (or, more generally, balanced) configuration generating it. In this paper, we provide sufficient
conditions that imply the spectral instability of planar and non-planar (RE) in $\mathds{R}^4$ generated by a central configuration,
thus answering some of the questions raised in \cite[Page 63]{Moe14}. As a corollary, we retrieve
a classical result of Hu and Sun \cite{HS09} on the linear instability of planar (RE) whose generating central configuration is non-degenerate and has odd Morse index, and fix a gap in the
statement of \cite[Theorem 1]{BJP14} about the spectral instability of planar (RE) whose (possibly degenerate) generating central configuration has odd Morse index.
The key ingredients are a new formula of independent interest that allows to compute the spectral flow of a path of symmetric matrices having degenerate starting point, and
a symplectic decomposition of the phase space of the linearized Hamiltonian system along a given (RE) which is inspired by Meyer and Schmidt's planar decomposition \cite{MS05} and which allows us
to rule out the uninteresting part of the dynamics corresponding to the translational and (partially) to the rotational symmetry of the problem.
\vspace{2mm}
{\bf Keywords:\/} $n$-body problem, Central Configurations, Spectral (linear) instability, Spectral flow.
\end{abstract}
\tableofcontents
\section{Introduction and description of the problem}
The Newtonian $n$-body problem concerns the study of the dynamics of $n$ point particles with positions $q_i \in \mathds{R}^d$, $d\mathfrak{g}e 2$, and masses $m_i>0$, moving under the influence of their mutual gravitational attraction. Newton's law of motion for the gravitational $n$-body problem reads
\begin{equation}\label{eq:Newton-law}
m_i\ddot q_i = \sum_{j \neq i} \dfrac{m_im_j(q_j-q_i)}{|q_i-q_j|^3},
\end{equation}
where $|q_i-q_j|$ is the Euclidean distance between the $i$-th and $j$-th particle (the gravitational constant being normalized to $G=1$). Let $q= (q_1, \ldots, q_n) \in \mathds{R}^{nd}$ be the {\sc configuration vector\/} and let
\begin{equation}\label{eq:potential}
U(q)=\sum_{i <j}\mathrm{d}frac{m_im_j}{|q_i-q_j|}
\simbolovettore{a}repsilonnd{equation}
be the {\sc Newtonian potential.\/} One readily sees that the RHS of~\simbolovettore{a}repsilonqqcolonref{eq:Newton-law}, that is the total force acting on the $i$-th particle, can be written as $F_i=\nablabla_i U$,
where $\nablabla_i$ denotes the gradient with respect to the $d$ components of the $i$-th particle. Then Equation~\simbolovettore{a}repsilonqqcolonref{eq:Newton-law} can be written equivalently as
\begin{equation}\label{eq:Newton-cpt}
M \mathrm{d}dot q= \nablabla U(q) ,
\simbolovettore{a}repsilonnd{equation}
where $\nablabla $ is the $dn$-dimensional gradient and $M:=\mathrm{d}iag(m_1 I_d, \ldots, m_n I_d)$ is the {\sc mass matrix\/}. Equation~\simbolovettore{a}repsilonqqcolonref{eq:Newton-cpt} defines a system of second-order ordinary differential equations on $\mathbb X:= \mathds{R}^{nd} \setminus\mathcal{D}elta$, where
$$\mathcal{D}elta:=\mathscr{B}ig \{q \in \mathds{R}^{nd}\ \mathscr{B}ig |\ U(q)=+\infty\mathscr{B}ig \}= \mathscr{B}ig \{q\in \mathds{R}^{nd}\ \mathscr{B}ig |\ q_i=q_j, \mathfrak{t}ext{for some}\ i\neq j\mathscr{B}ig \}$$
is the {\sc collision set\/}. Also, Equation~\simbolovettore{a}repsilonqqcolonref{eq:Newton-cpt} is invariant under translations and rotations. Symmetries under translations yield via Noether's theorem to the conservation of the {\sc total momentum\/}. This implies that the center of mass has an inertial motion, and for this reason one can always assume that the center of mass be fixed at the origin. Invariance under rotations yields instead to the conservation
of the {\sc angular momentum} (more about this can be found in the appendix).
Among all configurations of the system, a special role is played by the so-called {\sc central configurations\/}, that is configuration vectors $q\in\mathbb X$ satisfying
\begin{equation}\label{eq:cc}
\nabla U(q)+ \frac{U(q)}{\langle Mq,q\rangle} Mq=0.
\end{equation}
In other words, central configurations are special arrangements of the particles in which the acceleration of each mass points towards the center of mass and is proportional to the distance to the center of mass. As it is nowadays well-known, central configurations play a key role in the understanding of the dynamics of the $n$-body problem for many reasons: they generate explicit periodic solutions of~\simbolovettore{a}repsilonqqcolonref{eq:Newton-cpt}, they govern the behavior of colliding solutions near collisions, they mark changes in the topology of the integral manifolds, etc.
The first solutions of~\simbolovettore{a}repsilonqqcolonref{eq:cc} were discovered by Euler, who determined all collinear solutions for the $3$-body problem, and Lagrange, who showed that up to symmetries the only non-collinear
central configuration for the $3$-body problem is given by the equilateral triangle. For larger values of $n$ it is impossible to solve~\simbolovettore{a}repsilonqqcolonref{eq:cc} explicitly.
Nevertheless, one can still say many things about central configurations by exploiting their variational characterization as critical points of the restriction $\mathbf{w}idehat U$ of the Newtonian potential $U$ to the inertia ellipsoid
\[
\mathbb{S}= \mathbb{S}et{q \in \mathds{R}^{nd}| \sum_{i=1}^n m_i q_i=0,\ \langle Mq,q \rangle =1}.
\]
Indeed, since~\simbolovettore{a}repsilonqqcolonref{eq:cc} is invariant under scalings, it is not restrictive to assume that $\langle Mq,q\rangle =1$, that is that the central configuration be {\sc normalized} (hereafter, whenever talking about central configurations we implicitly assume that the condition $\langle Mq,q\rangle =1$ hold).
Since $\mathbf{w}idehat U$ is $\mathrm{SO}(d)$-invariant (actually, $\mathrm{O}(d)$-invariant), central configurations are never isolated as critical points of $\mathbf{w}idehat U$ but rather come in $\mathrm{SO}(d)$-families. In particular, they are
always degenerate as critical points of $\mathbf{w}idehat U$. We shall notice that the $\mathrm{SO}(d)$-action is not free unless $d=2$, hence quotienting out $\mathbb S$ by the $\mathrm{SO}(d)$ does not lead to a quotient manifold
(actually, not even to a quotient orbifold) whenever $d\mathfrak{g}e 3$.
As already mentioned, central configurations give rise to simple, explicit solutions of the $n$-body problem with the property that the configuration of the particles is at any time
similar to the initial (central) configuration. In other words, the configuration of the particles at any time is up to rotations, translations, and dilations, identical to the initial configuration.
Such explicit solutions are usually called {\sc homographic\/} or {\sc self-similar\/} solutions. In particular, every planar central configuration gives rise to a family of periodic solutions of the $n$-body problem in which each of the bodies moves on a Keplerian elliptical orbit. As a special case, the circular Keplerian orbits give rise to homographic solutions for which the configuration rigidly rotates at constant angular speed $k:=\sqrt{U(q)}$ about the center of mass. Such periodic orbits are usually called
{\sc (circular) relative equilibria\/}, (RE) for short, the reason being that such solutions become true equilibrium solutions in a uniformly rotating coordinate system.
For the $n$-body problem in the physically relevant dimensions $d \le 3$ all possible homographic solutions of the $n$-body problem are those defined by central configurations.
In higher dimension instead new interesting phenomena appear due to the greater complexity of rotations: for instance, in $\mathds{R}^4$ planar and spatial central configurations can give rise to four dimensional (RE), whereas planar resp. spatial non-planar central configuration in $\mathds{R}^3$ only give rise to planar (RE) resp. to homothetic collapse motions. Even more striking, the fact that in $\mathds{R}^4$ it is possible to rotate in two mutually orthogonal planes with different angular speeds leads to new ways of balancing the gravitational forces with centrifugal forces in order to get new (RE) (see the notion of {\sc balanced configuration} introduced in \cite{AC98} and the further developments in \cite{AD20,AP20,AFP20}).
The nice feature of (RE) is that they become true equilibria in rotating coordinates, and as such it is natural and dynamically interesting to investigate their stability properties.
In this paper we will focus only on 2D and 4D (RE) in $\mathds{R}^4$ defined by central configurations, leaving the ``balanced case'' to future work, and perform the study
of the stability properties of such (RE) by linearizing Newton's equation~\simbolovettore{a}repsilonqqcolonref{eq:Newton-cpt} in a uniformly rotating frame about a given (RE) and analyzing the spectral properties of the associated (linear) Hamiltonian $8n\mathfrak{t}imes 8n$-matrix. This is in general a very difficult task because of several reasons: first, the dimension of the Hamiltonian matrix becomes larger as $n$ increases. Second, central configurations are not known explicitly besides in very few particular cases. Finally, the symmetries of Newton's equation~\simbolovettore{a}repsilonqqcolonref{eq:Newton-cpt} imply that the phase space of the linear Hamiltonian system at a given (RE)
decomposes into the direct sum $E_1\mathfrak{o}plus E_2\mathfrak{o}plus E_3$ of three invariant symplectic subspaces, the first two of which reflect the translational resp. rotational
invariance of the problem. The subspaces $E_1$ and $E_2$ would lead to instability if not ruled out a priori. For this reason, with slight abuse of terminology we will call a (RE)
\begin{itemize}
\item {\sc spectrally stable\/} if the eigenvalues of the restriction of the associated Hamiltonian matrix to $E_3$ are all purely imaginary, and
\item {\sc linearly stable\/} if in addition the associated Hamiltonian matrix restricted to $E_3$ is diagonalizable.
\simbolovettore{a}repsilonnd{itemize}
A long standing open problem aims at relating the linear and spectral stability properties of a (RE) with the inertia indices (as a critical point of $\mathbf{w}idehat U$) of the central configuration generating it.
In this respect we shall mention the following conjecture which is due to Moeckel:
\vspace{2mm}
\noindent
{\bf Conjecture (Moeckel).\/} {\em If a (RE) in the planar $n$-body problem is linearly stable, then the corresponding central configuration is a non-degenerate minimum of $\widehat U$.\/}
\vspace{2mm}
It is well-known that the converse of Moeckel's conjecture is false. For example, the (RE) corresponding to the Lagrange equilateral triangle with three equal masses is linearly unstable, even though the equilateral triangle is a non-degenerate minimum of $\mathbf{w}idehat U$, see \cite{BJP14, BJP16} and references therein.
Even if Moeckel's conjecture still remains widely open, several progresses towards its solution have been made in the last decades. A major breakthrough has been made in 2009 by X. Hu and S. Sun \cite{HS09}. There, the authors proved that a (RE) generated by a central configuration $q$ with odd Morse index as critical point of $\mathbf{w}idehat U$ is always linearly unstable. In particular, a non-degenerate central configuration generating a linearly stable (RE) must have even Morse index as a critical point of $\mathbf{w}idehat U$.
Some years later, authors in \cite{BJP14} claimed that Hu and Sun's result could be upgraded to show spectral instability of (RE) whose generating central configuration
has even nullity and odd Morse index as a critical point of $\mathbf{w}idehat U$. Unfortunately, the proof of their result contains a gap
because of a missing condition about the eigenvalue 0. Namely, the statement
\begin{center}
\textit{``If the Hamiltonian matrix $JA$, with $A$ a symmetric matrix, has purely imaginary spectrum, then $\iMor{A}$ is even''},
\end{center}
which is claimed in \cite[Theorem 3.11]{BJP14}, is in full generality false (see \cite{DZ21} for a counterexample). However, the proof given in \cite{BJP14} is correct under the additional
assumption that $JA$ do not have non-trivial Jordan blocks corresponding to the eigenvalue zero. As a consequence, the main result of \cite{BJP14} still remains true in a large variety of situations,
for instance whenever the central configuration is non-degenerate.
In the present paper we push the study of the stability properties of (RE) further by investigating the relationship between spectral and linear instability of (RE) in $\mathds{R}^4$
and the inertia indices of the corresponding central configurations. As a byproduct, we will fix the gap in the proof of the main result of \cite{BJP14}.
As we shall see in Section 2, the aforementioned decomposition of the phase space of the linearized Hamiltonian system into symplectic invariant subspaces depends on the fact that the considered (RE) be planar or not (in particular, the dimension of $E_3$ is different in the two cases). The following theorem is the main result of the present paper.
In what follows $B_3$ denotes the symmetric matrix obtained by multiplying the restriction of the Hamiltonian matrix to $E_3$ with the standard complex structure $J$.
\begin{thm}\label{thm:main-intro}
Let $\mathfrak{g}amma $ be a (RE) in $\mathds{R}^4$. If
\begin{itemize}
\item $\mathfrak{g}amma$ is non-planar, or planar but generated by a collinear central configuration, and
\[
\iMor{B_3\big|_{\ker (JB_3)^{8n-16}}}
-\iMor{B_3}\simbolovettore{a}repsilonqqcolonuiv 1
{\mathbf{q}}uad \mod 2,
\]
or
\item $\mathfrak{g}amma$ is planar and generated by a (planar) non-collinear central configuration, and
\[
\iMor{B_3\big|_{\ker (JB_3)^{8n-20}}}
-\iMor{B_3}\simbolovettore{a}repsilonqqcolonuiv 1
{\mathbf{q}}uad \mod 2,
\]
\simbolovettore{a}repsilonnd{itemize}
then $\mathfrak{g}amma$ is spectrally unstable.
\simbolovettore{a}repsilonnd{thm}
As in \cite{HS09}, one of the main ingredients of the proof of Theorem~\mathrm{(RE)}f{thm:main-intro} is the use of the mod 2 spectral flow for affine paths
of Hermitian matrices. This allows us to deduce information
about the parity of the inertia indices of $B_3$ under the assumption that the spectrum of $JB_3$ be purely imaginary. However, the proof is here considerably more
complicated than in \cite{HS09}. Indeed, there one can use simple and classical formulas (based on the so-called crossing forms)
for computing the spectral flow in case of regular crossings, whereas in our case one has to use refined formulas that allows to compute the spectral flow
even in cases where the crossings are not regular (because of the possibly non-trivial Jordan blocks structure). Also, in most of the cases the starting point of the
considered affine path will be degenerate (even after restriction to $E_3$), and this fact forces us to develop a new formula of independent interest that allows
to compute the spectral flow in case of degenerate starting points, see Section 3.
As an application of Theorem~\mathrm{(RE)}f{thm:main-intro} and the discussion about the nullity of the matrix $B_3$ resp. of the Hessian $H(q)$ (see Subsection~\mathrm{(RE)}f{subsec:Morse-Bott}),
we get the second main result of the paper relating the linear and spectral stability properties of a (RE) with the inertia indices of the corresponding central configuration.
\begin{thm} \label{thm:main-2-intro}
Let $q$ be a central configuration, and let $\mathfrak{g}amma$ be the corresponding (RE). Then the following hold:
\begin{enumerate}
\item If $q$ is non-planar and Morse-Bott non-degenerate, or arbitrary and Morse-Bott degenerate with $n^0(q)$ even, then $\mathfrak{g}amma$ is linearly unstable.
\item Suppose that
$$n^-\mathscr{B}ig (B_3 \mathscr{B}ig |_{\ker (JB_3)^{8n-8-2k}}\mathscr{B}ig )\simbolovettore{a}repsilonqqcolonuiv 0 {\mathbf{q}}uad \mathfrak{t}ext{mod}\ 2,$$
where $k=4$ in case of a non-planar, or planar but generated by a collinear central configuration, (RE), and $k=6$ otherwise. If
$$n^-(q) + n^0(q) \simbolovettore{a}repsilonqqcolonuiv 0 {\mathbf{q}}uad \mathfrak{t}ext{mod}\ 2,$$
then $\mathfrak{g}amma$ is spectrally unstable. In particular, when $q$ is Morse-Bott non-degenerate we have the following two statements:
\begin{enumerate}
\item If $q$ is non-planar and $n^-(q)$ is even, then $\mathfrak{g}amma$ is spectrally unstable.
\item If $q$ is planar and $n^-(q)$ is odd, then $\mathfrak{g}amma$ is spectrally unstable.
\simbolovettore{a}repsilonnd{enumerate}
\simbolovettore{a}repsilonnd{enumerate}
\simbolovettore{a}repsilonnd{thm}
The first assumption in Item 2 of Theorem~\ref{thm:main-2-intro} is satisfied for instance if
$q$ is a Morse-Bott non-degenerate planar central configuration generating a planar (RE) $\gamma$.
Our last result concerns the spectral and linear instability of a planar relative equilibrium in the plane.
Such a result generalizes the main results of \cite{HS09} and \cite{BJP14} while at the same time fixing the aforementioned gap in the proof of the latter. In this case, the decomposition of the phase space is found in \cite{MS05}. For sake of simplicity, we adopt the same notation as above.
\begin{thm} \label{thm:main-intro-planarcase}
Let $\mathfrak{g}amma$ be a (RE) for the planar $n$-body problem. If
\[
\iMor{B_3\big|_{\ker (JB_3)^{4n-8}}}
-\iMor{B_3}\simbolovettore{a}repsilonqqcolonuiv 1
{\mathbf{q}}uad \mod 2,
\]
then $\mathfrak{g}amma$ is spectrally unstable. In particular, if $JB_3$ has no non-trivial Jordan blocks corresponding to the zero eigenvalue and
\[
\iMor{B_3}\simbolovettore{a}repsilonqqcolonuiv 1
{\mathbf{q}}uad \mod 2,
\]
then $\mathfrak{g}amma$ is spectrally unstable.
\simbolovettore{a}repsilonnd{thm}
It is worth noticing that the techniques developed in the present paper can be used for many other highly symmetric problems, for instance to prove instability results for relative equilibria
for $\mathrm{SO}(2p)$-invariant\footnote{For $p \mathfrak{g}e 3$ the situation is clearly more involved because of the higher complexity of the special orthogonal group $\mathrm{SO}(2p)$,
and a corresponding symplectic decomposition has to be found.} simple Lagrangian systems in $\mathds{R}^{2p}$ (the cases considered in the present paper corresponding to $p=1,2$)
for various interesting classes of potentials. In fact, the abstract techniques behind the main results of this paper rely only on the rotational invariance of the mechanical system, and indeed our main results offer a unified viewpoint for studying the stability of relative equilibria e.g. in the case of:
\begin{itemize}
\item[-]$\alpha$-homogeneous potentials (the Newtonian potential corresponding to the case $\alpha=1$), which are employed in different atomic models, and of the
\item[-] Lennard-Jones intermolecular potential, which is important in computational chemistry as well as in molecular modeling.
\simbolovettore{a}repsilonnd{itemize}
Another problem which could be dealt with using analogous methods is the so-called $n$\textit{-vortex problem}. Also in this case, among all periodic orbits of particular importance are relative equilibria,
which are rigidly rotating vortex configurations sometimes called vortex crystals. Such configurations can be characterized as critical points of the Hamiltonian function restricted to the constant angular impulse hypersurface in the phase space (topologically a pseudo-sphere whose coefficients are the circulation strengths of the vortices). From the perspective of the stability of relative equilibria, one difficulty is here
represented by the fact that the $n$-vortex problem does not admit a Lagrangian formulation, and it is highly non-trivial to characterize the stability properties of such relative equilibria in terms of the inertia indices of the possibly indefinite circulation matrix and of a suitable stability matrix. For further details, we refer to \cite{HPX20}.
We finish this introduction with a brief summary of the content of the paper: in Section 2 we introduce the rotating coordinates frame from (RE) in $\mathds{R}^4$ and prove the symplectic decomposition of the phase space into invariant subspaces for the linearized Hamiltonian dynamics along a given (RE). In Section 3 we quickly recall the definition and basic
properties of the spectral flow for paths of Hermitian matrices and prove a formula that allows to compute the spectral flow in case of affine paths with possibly degenerate
starting point. In Section 4 we show how to apply the content of Section 3 to deduce information about the inertia indices of a symmetric matrix $A$ under the assumption that
the spectrum of the Hamiltonian matrix $JA$ be purely imaginary, and in Section 5 we apply this to prove the main theorems of the paper. Finally, in the appendix we study in detail the integral
manifolds for the Hamiltonian dynamics of the $n$-body problem.
\simbolovettore{s}pace{3mm}
\noindent \mathfrak{t}extbf{Acknowledgments.} Luca Asselle is partially supported by the DFG-grant 380257369 ``Morse theoretical methods in Hamiltonian dynamics''. Li Wu is partially supported by the NSFC N. 12071255.
\section{Circular relative equilibria in $\mathds{R}^4$}
In this section we describe the dynamical and geometrical
framework of the problem and introduce a
symplectic decomposition of the phase space which will enable us to rule out
the trivial Floquet multipliers produced by the integrals of motion.
Our main reference are the beautiful lecture notes \cite{Moe14}.
\subsection{A rotating frame for circular (RE)}
\label{subsec:preliminaries-n-corpi}
Consider the Euclidean four dimensional space $\mathds{R}^4$ endowed with the Euclidean scalar product $\langle \cdot , \cdot \rangle$ and let $m_1, \ldots, m_n$, $n \mathfrak{g}e 3$, be positive real numbers which can be thought of as point masses of $n$ particles.
For any position vector $q\coloneqq \mathfrak{t}rasp{(q_1, \ldots, q_n)}\in (\mathds{R}^4)^n$, $q_i \in \mathds{R}^4$ for every $i \in \{1, \ldots, n\}$, we can define the {\sc mass scalar product\/} and the {\sc mass norm\/} in $\mathds{R}^{4n}$ as follows
\[
\mscal{\cdot}{\cdot}\coloneqq \langle M \cdot, \cdot \rangle {\mathbf{q}}uad \mathfrak{t}extrm{ and } {\mathbf{q}}uad \mnorm{\cdot}\coloneqq \langle M \cdot, \cdot \rangle^{1/2}
\]
where $M \in \mathcal{M}at(4n,\mathds{R})$ is the diagonal {\sc mass matrix\/} $\mathrm{d}iag(m_1 I_4, \ldots, m_n I_4)$, $I_4$ being the $4 \mathfrak{t}imes 4$ identity matrix.
The invariance under translations of Newton's equation~\simbolovettore{a}repsilonqqcolonref{eq:Newton-cpt} implies that the center of mass of the $n$ particles has an inertial motion. For this reason there is no loss of generality
in assuming that the center of mass lie at the origin. We define the {\sc configuration space with center of mass at the origin\/} as
\[
\mathbb{X} \coloneqq \mathbb{S}et{(q_1, \ldots, q_n) \in \mathds{R}^{4n}| \sum_{i=1}^n m_i q_i=0}.
\]
It is readily seen that $\mathbb{X} $ is a $N$-dimensional (real) vector space, with $N\coloneqq 4(n-1)$. We define the space of {\sc collision free configurations\/} as
\[
\mathbf{w}idehat {\mathbb{X} }\coloneqq\mathscr{B}ig \{q=(q_1, \ldots, q_n) \in \mathbb{X} \ \mathscr{B}ig | \ q_i \neq q_j \ \mathfrak{t}extrm{ for } i \neq j\mathscr{B}ig \}= \mathbb{X} \setminus \mathcal{D}elta,
\]
where
\[
\mathcal{D}elta \coloneqq \mathscr{B}ig \{q=(q_1, \ldots, q_n) \in \mathds{R}^{4n}\ |\ q_i = q_j \ \mathfrak{t}extrm{ for } i \neq j\mathscr{B}ig \}
\]
is the {\sc collision set\/}. In what follows, we denote the unit sphere (resp. with the collision set removed) in $\mathds{R}^{4n}$ with respect to the mass scalar product
by $\mathbb{S}$ (resp. by $\mathbf{w}idehat\mathbb{S}$) and refer to it as the {\sc inertia ellipsoid \/} (resp. the {\sc collision free inertia ellipsoid\/}).
As it is well known, Newton's equation~\simbolovettore{a}repsilonqqcolonref{eq:Newton-cpt} admits a Lagrangian (thus, an Hamiltonian) formulation.
Denoting by $T\mathbb{X} $ (resp. $T^*\mathbb{X} $) the tangent (resp. cotangent) bundle of $\mathbb{X} $, the {\sc Lagrangian (function)\/} of the problem $L: T \mathbf{w}idehat{\mathbb{X} } \mathfrak{t}o \mathds{R}$ is defined by
\begin{equation}\label{eq:Lagrangian}
L(q,v)= \mathrm{d}frac12 \langle M v, v\rangle + U(q),{\mathbf{q}}quad \forall (q,v)\in T\mathbf{w}idehat{\mathbb{X} }
\simbolovettore{a}repsilonnd{equation}
The {\sc Hamiltonian function\/} $H: T^*\mathbf{w}idehat{\mathbb{X} }\mathfrak{t}o \mathds{R}$ is given as follows
\begin{equation}\label{eq:Hamiltonian}
H(q,p)= \langle p, v\rangle -L(q,v)\big{\mathrm{v}}t_{v=M^{-1}p}=\mathrm{d}frac12 \langle M^{-1}p, p\rangle - U(q),{\mathbf{q}}quad \forall (q,p)\in T^*\mathbb{X},
\simbolovettore{a}repsilonnd{equation}
where $p=Mv$ is the {\sc conjugate momentum\/}\footnote{Throughout in the paper we think of covectors as (column) vectors.}.
Hamilton equations of the $n$-body problem thus read
\begin{equation}\label{eq:Ham-system}
\begin{cases}
\mathrm{d}ot q = \mathbf{p}artial_p H(q,p) = M^{-1}p\\
\mathrm{d}ot p= -\mathbf{p}artial_q H(q,p) = \nablabla U(q)
\simbolovettore{a}repsilonnd{cases}
\simbolovettore{a}repsilonnd{equation}
Among all possible periodic motions of Newton's equation~\simbolovettore{a}repsilonqqcolonref{eq:Newton-cpt}, the simplest are provided by those motions in which certain configurations (among which are central configurations) are rigidly rotated (up to possibly changing the size of the configuration) with constant angular velocity about the center of mass.
Roughly speaking, a {\sc central configuration\/} $q$ is a configuration of the whole system at which there is a perfect balance between the gravitational interacting forces and the position vector $q$.
More precisely, a configuration vector $q$ is a central configuration if it solves the algebraic equation
\begin{equation}\label{eq:cc-eq-2}
M^{-1}\nablabla U(q) + \lambda q=0, {\mathbf{q}}quad \lambda= U(q)/\mnorm{q}^2.
\simbolovettore{a}repsilonnd{equation}
Even though Equation~\simbolovettore{a}repsilonqqcolonref{eq:cc-eq-2} is most likely impossible to solve explicitly, much can be said about its solutions. Indeed, a
key feature of central configurations is the fact that they admit a variational characterization as critical points of the restriction $\mathbf{w}idehat U$ of the Newtonian potential $U$ to the collision free inertia ellipsoid $\mathbf{w}idehat{\mathbb S}$, thus allowing us e.g. to apply Morse theoretical methods in the study of central configurations. However, in doing so one has to keep in mind that central configurations
are never isolated but rather always come in $\mathrm{SO}(4)$-families because of the $\mathrm{SO}(4)$-invariance of the potential $\mathbf{w}idehat U$.
\begin{dfn}\label{def:inertia-indices-cc}
The {\sc Morse index\/} $\iMor{q}$ (resp. the {\sc Morse coindex\/} $\coiMor{q}$) of a central configuration $q$ is the dimension of the negative (resp. positive) spectral space of $H(q)$, the Hessian of $\mathbf{w}idehat U$ at $q$. The non-negative integer $\mathrm{nul}lity{q}\coloneqq \mathrm{d}im \ker H(q)$ is referred to as the {\sc nullity\/} of the central configuration $q$. We say that $q$ is {\sc (Morse-Bott) nondegenerate \/} if
\[
\mathrm{nul}lity{q}=\mathrm{d}im T_q\big(\mathrm{SO}(4)\cdot q\big),
\]
i.e. if the nullity of $q$ is the least possible (namely, if the kernel of $H(q)$ does not contain anything which does not come from the symmetries of the problem).
\simbolovettore{a}repsilonnd{dfn}
\begin{rem}
A
straightforward computation shows that
\[
H(q):=[D^2 U(q )+ U(q)M]\big{\mathrm{v}}t_{T_q\mathbf{w}idehat{\mathbb{S} }}.
\]
In the literature, in order to rule out the nullity due to the symmetries of the problem one usually works on the {\sc shape sphere\/}, namely the quotient of the collision free inertial ellipsoid by the group action.
In the planar case, namely the $n$-body problem in $\mathds{R}^2$, this does not pose any additional difficulty as the $\mathrm{SO}(2)$-action is free, but things change in higher dimension. Indeed, as the action
is not free (in fact, not even locally free) the quotient space is not a manifold but rather an Alexandrov space. {\mathbf{q}}ed
\simbolovettore{a}repsilonnd{rem}
Among all periodic solutions of~\simbolovettore{a}repsilonqqcolonref{eq:Newton-cpt} which are generated by central configurations, in this paper we will be interested in the so-called circular relative equilibria, (RE) for short, which have the nice feature
of becoming true equilibrium solutions after introducing rotating coordinates.
Thus, let $q$ be a central configuration and let $k:=\sqrt{\lambda}$ where $\lambda$ is given by Equation~\simbolovettore{a}repsilonqqcolonref{eq:cc-eq-2}. We set
$$ \mathds{T}heta (t)=\begin{bmatrix} \cos (kt) & -\sin (kt) \\ \sin (kt) & \cos (kt) \simbolovettore{a}repsilonnd{bmatrix}= \simbolovettore{a}repsilonxp(ikt),$$
and define (real) $4 \mathfrak{t}imes 4$-matrices in $\mathrm{SO}(4)$ by
\begin{equation}\label{eq:R-S}
r_s(t):= \mathrm{d}iag\big(\mathds{T}heta (t), I\big), {\mathbf{q}}uad {\mathbf{q}}uad r_d(t):= \mathrm{d}iag\big(\mathds{T}heta (t), \mathds{T}heta(-t)\big).
\simbolovettore{a}repsilonnd{equation}
Clearly, $r_s(t)=\simbolovettore{a}repsilonxp(k_s t)$ and $r_d(t)=\simbolovettore{a}repsilonxp(k_d t)$, where $k_s$ and $k_d$ are the $4\mathfrak{t}imes 4$ skew-symmetric matrices defined respectively by
\begin{equation}\label{eq:K-S}
k_s= \mathrm{d}iag(ki, 0), {\mathbf{q}}uad {\mathbf{q}}uad k_d= \mathrm{d}iag(ki, -ki).
\simbolovettore{a}repsilonnd{equation}
We denote by $R_s(t)$ and $R_d(t)$ the $4n \mathfrak{t}imes 4n$-block diagonal matrices whose (diagonal) entries
are the $4 \mathfrak{t}imes 4$ matrices $r_s(t)$ and $r_d(t)$, and by $K_s$ and $K_d$ the $4n \mathfrak{t}imes 4n$-block diagonal matrices whose (diagonal) entries
are the $4 \mathfrak{t}imes 4$ matrices $k_s$ and $k_d$.
Given a central configuration $q$ we define the associated circular relative equilibrium (RE) by
\begin{equation}\label{eq:re}
q(t) \coloneqq R_d(t)\, q, {\mathbf{q}}quad t \in \mathds{R}.
\simbolovettore{a}repsilonnd{equation}
In rotating coordinates
\[
\begin{cases}
Q:= R_d(t) \,q\\
P:= R_d(t)\, p,
\simbolovettore{a}repsilonnd{cases}
\]
such a (RE) becomes a true stationary solution of Hamilton's equations. The Hamiltonian function in these coordinates reads
\[
H(Q,P)=\mathrm{d}frac12 \langle M^{-1}P, P\rangle + \langle K_dQ,P\rangle - U(Q),
\]
and thus Hamilton's equations are given by
\begin{equation}\label{eq:Ham-system-new}
\begin{cases}
\mathrm{d}ot Q= M^{-1} P + K_d Q\\
\mathrm{d}ot{P}= \nablabla U(Q)+ K_d P
\simbolovettore{a}repsilonnd{cases}
\simbolovettore{a}repsilonnd{equation}
By linearizing the Hamiltonian system \simbolovettore{a}repsilonqqcolonref{eq:Ham-system-new} we obtain the linear Hamiltonian system
\begin{equation}
\mathrm{d}ot z = L_d \, z = - J B_d \, z,
\label{eq:Ham-lin}
\simbolovettore{a}repsilonnd{equation}
where
\begin{equation}
\label{eq:Ham-lin-2}
{\mathbf{q}}uad L_d\coloneqq \begin{pmatrix}
K_d & M^{-1}\\
D^2 U(Q) & K_d
\simbolovettore{a}repsilonnd{pmatrix}, {\mathbf{q}}quad B_d\,\coloneqq \begin{pmatrix}
-D^2 U(Q) & \mathfrak{t}rasp{K_d}\\
K_d & M^{-1}
\simbolovettore{a}repsilonnd{pmatrix}.
\simbolovettore{a}repsilonnd{equation}
\begin{rem}
If the central configuration $q$ satisfies $q_j\in \mathds{R}^2\mathfrak{t}imes \{0\}\subset \mathds{R}^4$ for all $j=1,...,n$, namely if $q$ is planar and contained in the $(x,y)$-plane, then the corresponding (RE) can be equivalently
written as $q(t)=R_s(t)\, q$. We take advantage of this fact by introducing a different rotating frame for the corresponding (RE) in which we consider rotations
only in the $(x,y)$-plane. More precisely, introducing rotating coordinates
$$\begin{cases}
Q:= R_s(t) \,q\\
P:= R_s(t)\, p,
\simbolovettore{a}repsilonnd{cases}$$
yields after linearization again to a linear Hamiltonian system as in~\simbolovettore{a}repsilonqqcolonref{eq:Ham-lin} where the matrices
\begin{equation}\label{eq:Ham-lin-pl-2}
{\mathbf{q}}uad L_s\coloneqq \begin{pmatrix}
K_s & M^{-1}\\
D^2 U(Q) & K_s
\simbolovettore{a}repsilonnd{pmatrix}, {\mathbf{q}}quad B_s\,\coloneqq \begin{pmatrix}
-D^2 U(Q) & \mathfrak{t}rasp{K_s}\\
K_s & M^{-1}
\simbolovettore{a}repsilonnd{pmatrix}
\simbolovettore{a}repsilonnd{equation}
replace the matrices $L_d$ and $B_d$ given by~\simbolovettore{a}repsilonqqcolonref{eq:Ham-lin-2} respectively. {\mathbf{q}}ed
\simbolovettore{a}repsilonnd{rem}
\begin{rem}
The disadvantage of introducing rotating coordinates is that in the new coordinates the Hamiltonian is not natural (i.e. of the form kinetic plus potential energy) but rather we have an additional term appearing
(usually referred to as a \mathfrak{t}extit{Coriolis term}) which roughly speaking measures how fast the frame is rotating. However, in the rotating frame the Hamiltonian system becomes autonomous and hence, at least in principle, the monodromy matrix (thus the Floquet multipliers) can be explicitly computed. {\mathbf{q}}ed
\simbolovettore{a}repsilonnd{rem}
\subsection{A symplectic decomposition into invariant subspaces}\label{subsec:decomposition}
The goal of this section is to provide a symplectic decomposition of the phase space into subspaces which are invariant by the phase flow of the linear Hamiltonian system~\simbolovettore{a}repsilonqqcolonref{eq:Ham-lin}.
In such a way we will be able to rule out the uninteresting part of the dynamics corresponding to the symmetries of the $n$-body problem.
More precisely, we will construct a decomposition $T^*(\mathds{R}^{4n})\cong \mathds{R}^{8n}$ as the direct sum of three invariant linear symplectic subspaces $E_1, E_2$ and $E_3$: The subspace $E_1$ will reflect the translational invariance of the problem, whereas the subspace $E_2$ the invariance under rotations. Finally, the subspace $E_3$ will be obtained as the symplectic orthogonal complement of $E_1\mathfrak{o}plus E_2$.
Such a decomposition is inspired by Meyer and Schmidt's work \cite{MS05}, but unlike in the Meyer and Schmidt's case will depend on the fact that the (RE) be planar or not as well as on the
fact that the central configuration generating the (RE) be planar or not.
Without loss of generality we suppose hereafter that
$$\sum_{i=1}^n m_i = 1.$$
\noindent \mathfrak{t}extbf{The subspace $E_1$.} We start to define the linear subspace $E_1 \subset \mathds{R}^{8n}$ corresponding to the conservation of the center of mass and linear momentum and thus reflecting the translational invariance of the $n$-body problem.
The construction of $E_1$ is independent of whether the (RE) is planar or not, as well as of whether the generating central configuration is planar or not. In what follows the matrices $L_d$ and $L_s$ defined respectively in~\simbolovettore{a}repsilonqqcolonref{eq:Ham-lin-2} and in~\simbolovettore{a}repsilonqqcolonref{eq:Ham-lin-pl-2} (according to the fact that the (RE) be planar or not) will be denoted with $L_*$ whenever we don't need to distinguish them.
We thus set $E_1\subset \mathds{R}^{8n}$ to be the $8$-dimensional subspace given by
\[
E_1\coloneqq\mathscr{B}ig \{(v, \ldots, v, m_1 w, \ldots, m_n w)\in T^*\mathds{R}^{4n}\ \mathscr{B}ig |\ v,w \in \mathds{R}^4\mathscr{B}ig \}
\]
We denote by $\{e_l\}$ the standard basis of $\mathds{R}^4$ and set $\simbolovettore{e}_l=(e_l, \ldots, e_l)\in \mathds{R}^{4n}$, $l=1,...,4$. A basis of $E_1$ is then given by
\begin{equation}
u_l=\mathfrak{t}rasp{(e_l, \ldots , e_l, 0, \ldots ,0)}= \mathfrak{t}rasp{(\simbolovettore{e}_l, \mathfrak{z}ero)}
{\mathbf{q}}uad \mathfrak{t}extrm{ and } {\mathbf{q}}uad
v_l =\mathfrak{t}rasp{(0,\ldots ,0, m_1 e_l, \ldots, m_n e_l)}=\mathfrak{t}rasp{(\mathfrak{z}ero , M \simbolovettore{e}_l)}
\simbolovettore{a}repsilonnd{equation}
\begin{lem}\label{thm:lemma1}
$E_1$ is an $L_*$-invariant symplectic subspace of $(\mathds{R}^{8n}, \mathfrak{o}mega_0)$. Furthermore, the restriction $L_1^d$ of $L_d$ to $E_1$ is represented with respect to the basis $\{u_1,u_2,v_1,v_2,u_3,u_4,v_3,v_4\}$ by
\begin{equation}\label{eq:L1}
L_1^d=\mathrm{d}isplaystyle L_d|_{E_1}
= \begin{pmatrix}
k i & I & 0 & 0 \\
0 & -ki & 0 & 0\\
0 & 0 & ki& I\\
0 & 0 & 0 & -ki
\simbolovettore{a}repsilonnd{pmatrix},
\simbolovettore{a}repsilonnd{equation}
whereas the restriction $L_1^s$ of $L_s$ to $E_1$ is represented by
\begin{equation}\label{eq:L1s}
L_1^s=\mathrm{d}isplaystyle L_s|_{E_1}
= \begin{pmatrix}
k i & I & 0 & 0 \\
0 & -ki & 0 & 0\\
0 & 0 & 0& I\\
0 & 0 & 0 & 0
\simbolovettore{a}repsilonnd{pmatrix}.
\simbolovettore{a}repsilonnd{equation}
\simbolovettore{a}repsilonnd{lem}
\begin{proof}
By a direct computation, we get that
\begin{align}
\mathfrak{o}mega_0(u_l, u_m) &=\mathfrak{o}mega_0(v_l,v_m)=0, {\mathbf{q}}uad \forall l,m,\\
\mathfrak{o}mega_0(u_l, v_l) &=\langle J u_l, v_l\rangle= \langle M \simbolovettore{e}_l, \simbolovettore{e}_l\rangle=1, {\mathbf{q}}uad \forall l, \\
\mathfrak{o}mega_0(u_l, v_m) &=0, {\mathbf{q}}uad \forall l \neq m,
\simbolovettore{a}repsilonnd{align}
thus showing that $\{u_1,...,u_4,v_1,...,v_4\}$ is a symplectic basis of $E_1$.
Now, since
\begin{equation}
L_d=\begin{pmatrix}
K_d & M^{-1}\\
D^2 U(q ) & K_d
\simbolovettore{a}repsilonnd{pmatrix}, {\mathbf{q}}quad K_d= k \mathrm{d}iag \big ((i,-i),...,(i,-i)\big ) = k \mathrm{d}iag(\simbolovettore{i}, \ldots, \simbolovettore{i})
\simbolovettore{a}repsilonnd{equation}
we easily compute
\begin{equation}\label{eq:restrizione-L-E1}
\begin{aligned}
L_d\begin{pmatrix}\simbolovettore{e}_l \\ 0
\simbolovettore{a}repsilonnd{pmatrix} &= \begin{pmatrix}
K_d \simbolovettore{e}_l \\ D^2 U(q ) \simbolovettore{e}_l
\simbolovettore{a}repsilonnd{pmatrix}= \begin{pmatrix}
K_d \simbolovettore{e}_l \\ 0
\simbolovettore{a}repsilonnd{pmatrix}
\\
L_d\begin{pmatrix}
0\\ M \simbolovettore{e}_l
\simbolovettore{a}repsilonnd{pmatrix} &= \begin{pmatrix}
\simbolovettore{e}_l \\ K_d M \simbolovettore{e}_l
\simbolovettore{a}repsilonnd{pmatrix}= \begin{pmatrix}
\simbolovettore{e}_l \\ 0
\simbolovettore{a}repsilonnd{pmatrix}+ \begin{pmatrix} 0\\ M K_d \simbolovettore{e}_l
\simbolovettore{a}repsilonnd{pmatrix} = u_l + \begin{pmatrix} 0\\ M K_d \simbolovettore{e}_l
\simbolovettore{a}repsilonnd{pmatrix}
\simbolovettore{a}repsilonnd{aligned}
\simbolovettore{a}repsilonnd{equation}
where $D^2 U(q ) \simbolovettore{e}_l=0$ follows by differentiating and evaluating at $t=0$ the identity
$$\nablabla U(q+t\simbolovettore{e}_l)=\nablabla U(q).$$
The $L_d$-invariance of $E_1$ follows observing that $K_d \simbolovettore{e}_l$ is again a vector of the form $\mathbf{p}m k \simbolovettore{e}_j$.
The $L_s$-invariance of $E_1$ is proven in a completely analogous fashion, and to obtain the corresponding matrix representation one just needs to notice that $K_s \simbolovettore{e}_l=0$ for $l=3,4$.
\simbolovettore{a}repsilonnd{proof}
\begin{rem}
As we readily see from the lemma above, $L_1^*$ has a non-trivial Jordan block structure (roughly speaking, such Jordan blocks are associated with the drift that one gets when allowing non-zero total momentum)
and this leads to linear instability. The idea is therefore to exclude the subspace $E_1$ when looking at the stability properties of (RE), thus ruling out an uninteresting part of the dynamics. {\mathbf{q}}ed
\simbolovettore{a}repsilonnd{rem}
\simbolovettore{s}pace{3mm}
\noindent \mathfrak{t}extbf{The subspace $E_2$.} The second invariant subspace $E_2$ reflects the rotational and scaling invariance of the $n$-body problem, but unlike the subspace $E_1$ it will strongly depend on the fact that
the (RE) be planar or not.
Before defining $E_2$ we shall recall some basic facts about the special orthogonal group $\mathrm{SO}(4)$.
From an algebraic viewpoint $\mathrm{SO}(4)$ is a non-commutative compact $6$-dimensional Lie group which is commonly identified with the group
of real $4\mathfrak{t}imes 4$ orthogonal matrices with determinant $1$, namely with the group of rotations in $\mathds{R}^4$. In $\mathds{R}^4$ we have two types of rotations:
\begin{itemize}
\item[(s)] {\sc simple rotations}, which leave a linear plane $\mathcal{P}i$ fixed. If we identify $\mathcal{P}i$ with the $\{0\}\mathfrak{t}imes \mathds{R}^2$-plane, then a simple rotation of angle $\simbolovettore{a}rphi$, takes the form $\mathrm{d}iag(\mathcal{P}hi, I)$ where
$$\mathcal{P}hi=\begin{bmatrix}
\cos \simbolovettore{a}rphi & -\sin \simbolovettore{a}rphi\\
\sin \simbolovettore{a}rphi & \cos \simbolovettore{a}rphi
\simbolovettore{a}repsilonnd{bmatrix}.$$
\item[(d)] {\sc double rotations}, which fix only the origin. In this case there exists a pair of orthogonal planes $\mathcal{P}i_1$ and $\mathcal{P}i_2$ each of which is invariant. Hence, such a double rotation produces an usual planar rotation on each of the planes $\mathcal{P}i_1$ and $\mathcal{P}i_2$. If the rotation angles of such a double rotation on $\mathcal{P}i_1$ and $\mathcal{P}i_2$ coincide, then there are
infinitely many invariant planes. In this case we say that the double rotation is {\sc isoclinic\/}.
\simbolovettore{a}repsilonnd{itemize}
We have two types of isoclinic rotations in $\mathds{R}^4$ which are specified by the same angle $\simbolovettore{a}rphi$, namely:
\begin{itemize}
\item {\sc left-isoclinic rotations\/}, if the signs of the rotations are opposite. Such a class of rotations contains $\mathrm{d}iag(\mathcal{P}hi,- \mathcal{P}hi)$ and $\mathrm{d}iag(-\mathcal{P}hi,\mathcal{P}hi)$,
\item {\sc right-isoclinic rotations\/}, if the signs of the rotations are equal. Such a class of rotations contains $\mathrm{d}iag(\mathcal{P}hi, \mathcal{P}hi)$ and $\mathrm{d}iag(-\mathcal{P}hi,-\mathcal{P}hi)$.
\simbolovettore{a}repsilonnd{itemize}
The well-known {\sc isoclinic decomposition\/}, also known as {\sc Van Elfrinkhof's formula\/}, states that, up to a central inversion, every
4D rotation is the product of a left-isoclinic and a right-isoclinic rotation.
Left- (resp right-)isoclinic rotations form a non-commutative subgroup $S^3_L$ (resp. $S^3_R$) of $\mathrm{SO}(4)$ which is isomorphic to the multiplicative group $\mathbb{S}^3$ of unit quaternions. It can be shown that $S^3_L \mathfrak{t}imes S^3_R$ is the universal covering of $\mathrm{SO}(4)$, and that $S^3_L$ and $S^3_R$ are normal subgroups of $\mathrm{SO}(4)$.
Moreover, left- and right-isoclinic rotations can be described in terms of quaternions as we now recall.
Quaternions are generally represented in the form
\[
Q= a+ b \simbolovettore{i} + c \simbolovettore{j}+ d \simbolovettore{k}
\]
where $a,b,c$ and $ d$ are real numbers and $\simbolovettore{i}, \simbolovettore{j}$ and $\simbolovettore{k}$ are the {\sc quaternion units\/}. We have
\[
\simbolovettore{i}^2=\simbolovettore{j}^2=\simbolovettore{k}^2=\simbolovettore{i}\simbolovettore{j}\simbolovettore{k}=-1.
\]
The quaternion units can be represented as $2 \mathfrak{t}imes 2$-complex matrices as follows
\begin{equation}
\simbolovettore{i}= \begin{pmatrix}
i & 0 \\ 0 & -i
\simbolovettore{a}repsilonnd{pmatrix}, {\mathbf{q}}quad \simbolovettore{j}= \begin{pmatrix}
0 & 1 \\ -1 & 0
\simbolovettore{a}repsilonnd{pmatrix},{\mathbf{q}}quad
\simbolovettore{k}= \begin{pmatrix}
0& i \\ i&0
\simbolovettore{a}repsilonnd{pmatrix}
\simbolovettore{a}repsilonnd{equation}
In real notation, $\simbolovettore{i}, \simbolovettore{j}, \simbolovettore{k}$ can be written, respectively, as the following real matrices
\begin{equation}
\simbolovettore{i}= \begin{pmatrix}
0&-1 & 0 &0\\ 1 & 0& 0 & 0 \\
0 & 0 & 0 & 1 \\ 0 & 0& -1 & 0
\simbolovettore{a}repsilonnd{pmatrix}, {\mathbf{q}}quad
\simbolovettore{j}= \begin{pmatrix}
0&0 & 1 &0\\ 0 & 0& 0 & 1 \\
-1 & 0 &0 & 0 \\ 0 & -1 & 0&0 \
\simbolovettore{a}repsilonnd{pmatrix}, {\mathbf{q}}quad
\simbolovettore{k}= \begin{pmatrix}
0&0 & 0 &-1\\ 0 & 0& 1 & 0 \\
0 & -1 & 0 & 0 \\ 1 & 0& 0 & 0
\simbolovettore{a}repsilonnd{pmatrix}
\simbolovettore{a}repsilonnd{equation}
A point $(x,y,z,t)\in \mathds{R}^4$ can be represented by the quaternion $P=x+ y\simbolovettore{i}+z\simbolovettore{j}+t\simbolovettore{k}$, and it is straightforward to check that left-isoclinic (resp. right-isoclinic) rotations correspond to the left (resp. right) multiplication by a unit quaternion.
We can now finally proceed with the definition of the subspace $E_2$.
We start considering the case in which the (RE) is non-planar (independently of the fact that the central configuration generating it be planar or not). Thus, let $q$ be a central configuration
generating a non-planar (RE). We define $8$ linearly independent vectors as follows
\begin{align*}
z_1& = \mathfrak{t}rasp{(q, \mathfrak{z}ero)}, {\mathbf{q}}quad
z_2=\mathfrak{t}rasp{(\simbolovettore{i}\, q , \mathfrak{z}ero)}, {\mathbf{q}}quad
z_3= \mathfrak{t}rasp{(\simbolovettore{j} \,q ,\mathfrak{z}ero)}, {\mathbf{q}}quad
z_4= \mathfrak{t}rasp{(\simbolovettore{k} \,q, \mathfrak{z}ero)}{\mathbf{q}}quad \\
w_1& =\mathfrak{t}rasp{(\mathfrak{z}ero, Mq )}, {\mathbf{q}}quad
w_2= \mathfrak{t}rasp{(\mathfrak{z}ero, \simbolovettore{i}\, Mq )}, {\mathbf{q}}quad
w_3= \mathfrak{t}rasp{(\mathfrak{z}ero, \simbolovettore{j}\, M q)}, {\mathbf{q}}quad
w_4= \mathfrak{t}rasp{(\mathfrak{z}ero, \simbolovettore{k}\, Mq)},
\simbolovettore{a}repsilonnd{align*}
where with slight abuse of notation $\simbolovettore{i} \cdot $, $\simbolovettore{j} \cdot$, $\simbolovettore{k} \cdot$ denotes the diagonal left-multiplication in $\mathds{R}^{4n}$ of the quaternion $\simbolovettore{i},\simbolovettore{j},\simbolovettore{k}$ respectively, and set
\[
E_2^{d}=\mathrm{Span}\, \big \{ z_1,z_2, w_1, w_2, z_3, z_4, w_3, w_4\big\}.
\]
\begin{lem}\label{thm:lemma2}
Let $q$ be a central configuration generating a non-planar (RE). Then $E_2^{d}$ is an $L_d$-invariant symplectic subspace of $(\mathds{R}^{8n}, \mathfrak{o}mega_0)$. Furthermore, the restriction of $L_d$ to $E_2^d$ is represented by the $8\mathfrak{t}imes 8$-matrix
\begin{equation}\label{eq:E2n}
L_2^d\coloneqq L_d|_{E_2^d}
= \begin{pmatrix}
k i & I & 0 & 0 \\
A & ki& 0 & 0\\
0 & 0 & ki& I\\
0 & 0 & B & ki
\simbolovettore{a}repsilonnd{pmatrix},\simbolovettore{a}repsilonnd{equation}
where
$$A=\begin{pmatrix}
2k^2 & 0\\ 0 & -k^2
\simbolovettore{a}repsilonnd{pmatrix}, {\mathbf{q}}uad
B=\begin{pmatrix}
-k^2 & 0\\ 0 & -k^2
\simbolovettore{a}repsilonnd{pmatrix}.
$$
\simbolovettore{a}repsilonnd{lem}
\begin{proof}
It is straightforward to check that $E_2^d$ is a symplectic subspace.
To show that $E_2^d$ is $L_d$-invariant
we start computing
$$L_d z_1=\binom{k \simbolovettore{i} q}{D^2 U(q ) [q]} =k \binom{\simbolovettore{i} q}{\mathfrak{z}ero} + \binom{\mathfrak{z}ero}{D^2 U(q )[q]} = k z_2+2k^2 w_1,$$
where we used the fact that $\nablabla U$ is $(-2)$-homogeneous and the fact that $q$ is a central configuration to infer that $D^2U(q)[q]=-2\nablabla U(q) = 2k^2 Mq$. Similarly, we compute
$$L_d z_2=\binom{k \simbolovettore{i}\simbolovettore{i} q}{D^2 U(q )[\simbolovettore{i} q ]}= -k \binom{q}{\mathfrak{z}ero}+ \binom{\mathfrak{z}ero}{-k^2 M\simbolovettore{i} q}= -k z_1-k^2w_2,$$
where to obtain the last equality we have differentiated the identity\footnote{This follows from the fact that $\left (\begin{matrix} e^{it} & 0 \\ 0 & e^{-it}\simbolovettore{a}repsilonnd{matrix}\right ) \cdot q$ is a central configuration for every $t\in \mathds{R}$.}
$$\nablabla U \left ( \left (\begin{matrix} e^{it} & 0 \\ 0 & e^{-it}\simbolovettore{a}repsilonnd{matrix}\right ) \cdot q \right ) = - k^2 M \left (\begin{matrix} e^{it} & 0 \\ 0 & e^{-it}\simbolovettore{a}repsilonnd{matrix}\right ) \cdot q$$
and evaluated it at $t=0$. In a similar way we compute
\begin{equation}
\begin{aligned}
&L_d z_3=\begin{pmatrix}
k \simbolovettore{i}\simbolovettore{j} q \\
D^2 U(q )[\simbolovettore{j} q ]
\simbolovettore{a}repsilonnd{pmatrix}= k \begin{pmatrix}
\simbolovettore{k} q \\
\mathfrak{z}ero
\simbolovettore{a}repsilonnd{pmatrix}-k^2 \begin{pmatrix}
\mathfrak{z}ero \\
M\simbolovettore{j} q
\simbolovettore{a}repsilonnd{pmatrix}= k z_4-k^2 w_3\\
&L_d z_4=\begin{pmatrix}
k \simbolovettore{i}\simbolovettore{k} q \\
D^2 U(q )[\simbolovettore{k} q ]
\simbolovettore{a}repsilonnd{pmatrix}= - k z_3-k^2 w_4\\
&L_d w_1=
\begin{pmatrix}
q \\
kM\simbolovettore{i} q
\simbolovettore{a}repsilonnd{pmatrix}= z_1+k w_2\\
&L_d w_2=
\begin{pmatrix}
\simbolovettore{i} q \\
kM\simbolovettore{i}^2 q
\simbolovettore{a}repsilonnd{pmatrix}= z_2-k w_1\\
&L_d w_3=\begin{pmatrix}
\simbolovettore{j} q \\
kM\simbolovettore{i}\simbolovettore{j} q
\simbolovettore{a}repsilonnd{pmatrix}= z_3+k w_4\\
&L_d w_4=\begin{pmatrix}
\simbolovettore{k} q \\
kM\simbolovettore{i}\simbolovettore{k} q
\simbolovettore{a}repsilonnd{pmatrix}= z_4-k w_3\\
\simbolovettore{a}repsilonnd{aligned}
\simbolovettore{a}repsilonnd{equation}
This completes the proof.
\simbolovettore{a}repsilonnd{proof}
\begin{rem} An equivalent definition of $E_2^d$ is given as follows:
$$E_2^d := \mathscr{B}ig \{ \big ( \alpha \cdot (\mathfrak A q,\ldots,\mathfrak A q), \beta \cdot(\mathfrak B Mq, \ldots, \mathfrak B Mq)\big ) \ \mathscr{B}ig |\ \alpha,\beta \in \mathds{R}, \ \mathfrak A,\mathfrak B \in SU(2)\mathscr{B}ig \}.$$
This should be compared with the definition given in \cite[Page 259]{MS05} for the planar $n$-body problem. In fact, if one wanted to naively generalize Meyer and Schmidt's construction to $\mathds{R}^4$ then
one would be tempted to define the subspace $E_2^d$ as follows
$$\mathscr{B}ig \{ \big ( \alpha \cdot (\mathfrak A q,\ldots,\mathfrak A q), \beta \cdot(\mathfrak B Mq, \ldots, \mathfrak B Mq)\big ) \ \mathscr{B}ig |\ \alpha,\beta \in \mathds{R}, \ \mathfrak A,\mathfrak B \in SO(4)\mathscr{B}ig \}.$$
However, this unfortunately does not lead to an $L_d$-invariant subspace. Consider for instance the vector
$$z_5:=\mathfrak{t}rasp{ (q \simbolovettore{i},\mathfrak{z}ero)},$$
where as usual one identifies right-isoclinic rotations in $\mathrm{SO}(4)$ with the right-multiplication by quaternions. Computing
\begin{align*}
L_d z_5 &= \binom{K_d q \simbolovettore{i}}{\mathfrak{z}ero} + \binom{\mathfrak{z}ero}{D^2U(q)[q \simbolovettore{i}]} = k \binom{\simbolovettore{i} q \simbolovettore{i}}{\mathfrak{z}ero} - k^2 \binom{\mathfrak{z}ero}{M q \simbolovettore{i}},\\
L_d \binom{\simbolovettore{i} q \simbolovettore{i}}{\mathfrak{z}ero} &=-k \binom{q \simbolovettore{i}}{\mathfrak{z}ero} + \binom{\mathfrak{z}ero}{D^2U(q)[\simbolovettore{i} q \simbolovettore{i}]},
\simbolovettore{a}repsilonnd{align*}
we see that $D^2U(q)[\simbolovettore{i} q \simbolovettore{i}]$ cannot be computed. Indeed, one easily checks that $\simbolovettore{i} q \simbolovettore{i}$ cannot be written as $\mathfrak a q$ for some $\mathfrak a \in \mathfrak s \mathfrak o(4)$ (we leave
the easy proof of this fact to the reader).
In the planar case, Meyer and Schmidt's definition still gives rise to an invariant subspace since $\mathrm{SO}(2) \cong \mathrm{U}(1)$. Another reason why one cannot use $\mathrm{SO}(4)$ instead of $SU(2)$ in our
setting is the following: if this was the case, then the space $E_2^d$ would have dimension 14, and this would not allow us to find the symplectic change of coordinates given in Proposition~\mathrm{(RE)}f{thm:new-coordinates}
since 14 is not an integer multiple of 4.
{\mathbf{q}}ed
\simbolovettore{a}repsilonnd{rem}
We now move on to investigate the case of a planar (RE) $\mathfrak{g}amma$. In this case, the central configuration generating $\mathfrak{g}amma$ might be collinear or planar non-collinear.
We start by considering the latter case. Thus, let $q=\mathfrak{t}rasp{(x,y,0,0)}$ be a non-collinear central configuration contained in the plane $\mathds{R}^2\mathfrak{t}imes \{0\}\subset \mathds{R}^4$.
The corresponding (RE) $\mathfrak{g}amma$ is pointwise defined by $\mathfrak{g}amma(t)= R_s(t) q$ where $R_s(t)$ is as defined in Section~\mathrm{(RE)}f{subsec:preliminaries-n-corpi}, and the Hamiltonian matrix $L_s$ is given by
\[
L_s= \begin{bmatrix}
K_s & M^{-1}\\
D^2 U(q) & K_s
\simbolovettore{a}repsilonnd{bmatrix}
\]
where
$$ K_s=k \mathrm{d}iag \big ((i,0),...,(i,0)\big ).$$
For $1 \le l < r \le 4$, we set $k_{lr} \in \mathcal{M}at(4 \mathfrak{t}imes 4, \mathds{R})$ by
\[
(k_{lr})_{ij}= \begin{cases}
-1 & (i,j)=(l,r)\\
1 & (i,j)= (r,l)\\
0 & \mathfrak{t}extrm{ otherwise}
\simbolovettore{a}repsilonnd{cases}
\]
By using this notation, the six simple rotations of $\mathrm{SO}(4)$ are the following $k_{12}, k_{13}, k_{14}, k_{23}, k_{24}, k_{34}$, and it is straightforward to check that
\begin{align}
& \simbolovettore{i}= k_{12}-k_{34} && \simbolovettore{j}= -k_{13}-k_{24} && \simbolovettore{k}= k_{14}- k_{23}
\simbolovettore{a}repsilonnd{align}
We denote with $K_{lr}$ the $4n\mathfrak{t}imes 4n$-dimensional block diagonal matrix defined by
\[
K_{lr}= \mathrm{d}iag\big(k_{lr}, \ldots, k_{lr}\big).
\]
Under the notation above (and noticing that $K_{34} q=0$) we set
\begin{align}
& z_1= \mathfrak{t}rasp{(q,\mathfrak{z}ero)},&& z_2= \mathfrak{t}rasp{(K_{12}q,\mathfrak{z}ero)}, && z_3= \mathfrak{t}rasp{(K_{13}q,\mathfrak{z}ero)}, &&z_4= \mathfrak{t}rasp{(K_{14}q,\mathfrak{z}ero)}, \\
& z_5=\mathfrak{t}rasp{(K_{23}q,\mathfrak{z}ero)}, && z_6=\mathfrak{t}rasp{(K_{24}q,\mathfrak{z}ero)},\\
& w_1= \mathfrak{t}rasp{(\mathfrak{z}ero,Mq)},&& w_2= \mathfrak{t}rasp{(\mathfrak{z}ero, MK_{12}q)}, && w_3= \mathfrak{t}rasp{(\mathfrak{z}ero,MK_{13}q)}, &&w_4= \mathfrak{t}rasp{(\mathfrak{z}ero, MK_{14}q)}, \\
& w_5= \mathfrak{t}rasp{(\mathfrak{z}ero,MK_{23}q)},&& w_6= \mathfrak{t}rasp{(\mathfrak{z}ero, MK_{24}q)}.
\simbolovettore{a}repsilonnd{align}
It is straightforward to check that, since $q$ is not collinear, the vectors $z_1,...,z_6,w_1,...,w_6$ are linearly independent in $\mathds{R}^{8n}$. We can thus define the $12$-dimensional linear subspace
\begin{equation}\label{eq:E2p}
E_2^s:=\mathrm{Span}\big\{ z_1,z_2, w_1, w_2, z_3, z_4, w_3,w_4, z_5, z_6, w_5, w_6\big \}.
\simbolovettore{a}repsilonnd{equation}
\begin{lem}\label{thm:lemma3}
Let $\mathfrak{g}amma$ be a planar (RE) generated by a non collinear central configuration $q$. Then the subspace $E_2^s$ is a $L_s$-invariant symplectic subspace of $(\mathds{R}^{8n},\mathfrak{o}mega_0)$, and the restriction of $L_s$ to $E_2^s$ is represented by
\begin{equation}\label{eq:L2s}
L_2^s\coloneqq L_s|_{E_2^s}
= \begin{pmatrix}
k J & I & 0 & 0 & 0 & 0 \\
A & kJ & 0 & 0& 0 & 0 \\
0 & 0 & 0& I& 0 & 0 \\
0 & 0 & B & 0& 0 & 0 \\
0 & 0 & 0 & 0 & 0 & I\\
0 & 0 & 0 & 0 & B & 0
\simbolovettore{a}repsilonnd{pmatrix},\simbolovettore{a}repsilonnd{equation}
where $A$ and $B$ are as in Lemma~\mathrm{(RE)}f{thm:lemma2}.
\simbolovettore{a}repsilonnd{lem}
\begin{proof}
We start observing that
\begin{align*}
\mathfrak{o}mega_0(z_i, z_j)& =\mathfrak{o}mega_0(w_i, w_j)=0 {\mathbf{q}}uad \forall\, i, j,\\
\mathfrak{o}mega_0(z_1, w_1)& = \langle \binom{\mathfrak{z}ero}{q},\binom{\mathfrak{z}ero}{Mq}\rangle =1,\\
\mathfrak{o}mega_0(z_2, w_2)&= \langle \binom{\mathfrak{z}ero}{K_{12}q},\binom{\mathfrak{z}ero}{MK_{12}q}\rangle= \sum_{i=1}^n m_i |q_i|^2=1,\\
\mathfrak{o}mega_0(z_3,w_3) &= \langle \binom{\mathfrak{z}ero}{K_{13}q},\binom{\mathfrak{z}ero}{MK_{13}q}\rangle = \sum_{i=1}^n m_i x_i^2 >0,\\
\mathfrak{o}mega_0(z_4,w_4) &= \langle \binom{\mathfrak{z}ero}{K_{14}q},\binom{\mathfrak{z}ero}{MK_{14}q}\rangle = \sum_{i=1}^n m_i x_i^2 >0,\\
\mathfrak{o}mega_0(z_5,w_5) &= \langle \binom{\mathfrak{z}ero}{K_{23}q},\binom{\mathfrak{z}ero}{MK_{23}q}\rangle = \sum_{i=1}^n m_i y_i^2 >0,\\
\mathfrak{o}mega_0(z_6,w_6) &= \langle \binom{\mathfrak{z}ero}{K_{24}q},\binom{\mathfrak{z}ero}{MK_{24}q}\rangle = \sum_{i=1}^n m_i y_i^2 >0,
\simbolovettore{a}repsilonnd{align*}
where we used the fact that $q$ is not collinear and hence at least one of the $x_i$ respectively $y_i$ does not vanish.
This shows that the restriction of $\mathfrak{o}mega_0$ to $E_2^s$ is non-degenerate and hence $E_2^s$ is a symplectic subspace of $(\mathds{R}^{8n},\mathfrak{o}mega_0)$. Notice however that, since $\mathfrak{o}mega_0(z_i,w_j)\neq 0$ in
general, the basis $\{z_1,...,z_6,w_1,...,w_6\}$ is not a symplectic basis of $E_2^s$, not even after renormalization.
By a direct computation, noticing that $K_s=k K_{12}$ and that
$$D^2U(q)[K_{ij}q]=-k^2 K_{ij}q, \quad \forall (i,j)\neq (3,4),$$
we get
\begin{align}
L_sz_1& = \binom{K_s q}{\mathbf{0}} + \binom{\mathbf{0}}{D^2U(q)[q]}= k \binom{K_{12} q}{\mathbf{0}} + 2k^2 \binom{\mathbf{0}}{M q} = k z_2 + 2k^2 w_1,\\
L_sz_2&= k \binom{K_{12} K_{12} q}{\mathbf{0}} + \binom{\mathbf{0}}{D^2 U(q)[K_{12} q]} = - k z_1 - k^2 w_2,\\
L_sw_1&= z_1+kw_2,\\
L_sw_2&= z_2-kw_1,\\
L_sz_j&= -k^2 w_j, \ \forall j \ge 3,\\
L_s w_j&= z_j, \ \forall j \ge 3.
\end{align}
This shows at once that $E_2^s$ is $L_s$-invariant and that the restriction of $L_s$ to $E_2^s$ has the desired matrix representation with respect to the basis
$\{z_1,z_2,w_1,w_2,z_3,z_4,w_3,w_4,z_5,z_6,w_5,w_6\}.$
\end{proof}
In case the planar (RE) $\gamma$ is generated by a collinear central configuration it is easy to check that the subspace $E_2^s$ has dimension 8 and coincides with the subspace $E_2^d$ defined
in the case of non-planar (RE). Moreover, one can use the same basis as for $E_2^d$. We leave the easy proof of this fact to the reader.
\begin{rem}
As we have seen above, the invariant subspace $E_2$ depends both on the (RE) and on the central configuration generating it. Nevertheless,
$L_2^*$ leads in either case to linear instability (exactly as $L_1^*$) because of its non-trivial Jordan blocks structure. \qed
\end{rem}
\vspace{3mm}
\noindent \textbf{The invariant subspace $E_3$.} We define the linear subspace $E_3^d$ (resp. $E_3^s$) to be the symplectic orthogonal complement of $E_1 \oplus E_2^d$ (resp. $E_1 \oplus E_2^s$), and denote by $E_3^*$ either one of the subspaces $E_3^d, E_3^s$ whenever there is no need to distinguish between them. Clearly, $E_3^d$ (resp. $E_3^s$) is itself invariant, being the symplectic orthogonal complement of an invariant subspace.
By counting dimensions, we get that
$$
\dim E_3^d= 8n-16, \qquad \dim E_3^s= \left \{ \begin{array}{l} 8n-20 \quad \text{if} \ q \ \text{non-collinear},\\ 8n-16 \quad \text{if}\ q \ \text{collinear}.\end{array}\right.
$$
Summarizing the contents of the previous subsections, we have obtained a decomposition of the phase space into symplectic subspaces which are invariant for the linear Hamiltonian system.
\begin{prop}\label{thm:invariant-decomposition}
Let $q$ be a central configuration generating a non-planar (RE) $\gamma$. Then
\[
\mathds{R}^{8n}= E_1 \oplus E_2^d \oplus E_3^d
\]
is a decomposition of the phase space in symplectic subspaces which are invariant under the linear Hamiltonian flow~\eqref{eq:Ham-lin}.
A similar statement holds for planar (RE) replacing $E_2^d,E_3^d$ with $E_2^s,E_3^s$ respectively. \qed
\end{prop}
The next result provides an explicit symplectic change of coordinates which allows us to split the original Hamiltonian system into three Hamiltonian subsystems, each of which is defined on the corresponding symplectic subspace appearing in the above decomposition. In what follows we have $k=4$ or $6$, where $k=4$ holds in case of a non-planar (RE) or of a planar (RE) generated by a collinear central configuration, and $k=6$
otherwise.
\begin{lem}\label{thm:new-coordinates}
Let $X=(g,z,w) \in \mathds{R}^4 \times \mathds{R}^k \times \mathds{R}^{8n-4-k}$ and $Y=(G,Z,W) \in \mathds{R}^4 \times \mathds{R}^k \times \mathds{R}^{8n-4-k}$. There exists a linear symplectic transformation in $\mathds{R}^{8n}$ of the form
\[
\begin{cases}
Q= AX\\
P= \traspinv{A} Y
\end{cases}
\]
where $A$ is an $M$-orthogonal matrix commuting with $K_*$ (here, $*=d$ in case of a non-planar (RE), and $*=s$ otherwise).
Furthermore $(g,G)$, $(z,Z)$ and $(w,W)$ are symplectic coordinates of $E_1\oplus E_2^*\oplus E_3^*$.
\end{lem}
\begin{proof}
We prove the claim in case of a non-planar (RE) (in particular, $k=4$) leaving the other cases to the reader. We first construct the first eight columns of the $4n\times 4n$-matrix $A$, and then construct the remaining columns by means of the Gram--Schmidt orthonormalization method with respect to the $M$-scalar product.
Thus consider the following 8 vectors in $\mathds{R}^{4n}$:
$$r_l := \simbolovettore{e}_l, \quad l=1,...,4,\quad r_5 := q, \quad r_6 := \simbolovettore{i} q, \quad r_7 := -\simbolovettore{j} q, \quad r_8 := \simbolovettore{k} q,$$
where as usual $\simbolovettore{i}\cdot,\simbolovettore{j}\cdot,\simbolovettore{k}\cdot$ denote with slight abuse of notation the diagonal left-multiplication in $\mathds{R}^{4n}$ with the quaternions $\simbolovettore{i},\simbolovettore{j},\simbolovettore{k}$ respectively.
We shall stress the fact that there is a minus sign in front of $\simbolovettore{j}$. In fact, this is needed in order to have that $A$ commutes with $K_d$, which we recall is nothing else but $k\, \mathrm{diag}(\simbolovettore{i},...,\simbolovettore{i})$.
Since we have normalized the masses in such a way that their sum is equal to 1, the $M$-norm of $r_l$ is equal to 1 for every $l=1,...,4$. Also, the $M$-norm of $r_5,...,r_8$ is equal to 1 since $q$ is by assumption a normalized central configuration.
It is straightforward to check that $r_5, r_6, r_7 , r_8$ are $M$-orthogonal to each other (this is a direct consequence of the multiplicative table of the imaginary units and the fact that in each $4 \times 4$-block the matrix $M$ is a scalar multiple of the identity). Finally, we observe that $r_5, r_6, r_7, r_8$ are $M$-orthogonal to the first four since the center of mass lies at the origin by assumption.
Arguing recursively we construct the remaining $4n-8$ vectors by quadruples. We set
$$V_1:= \mathrm{Span}\{r_1, \ldots, r_8\}$$
and choose $s_9\notin V_1$. Denoting with $\pi_{V_1}^M:\mathds{R}^{4n}\to \mathds{R}^{4n}$ the $M$-orthogonal projection onto $V_1$, we have that
$$r_9 := \frac{\tilde s_9}{|\tilde s_9|_M}, \qquad \tilde s_9 := s_9 - \pi_{V_1}^M s_9,$$
is orthogonal to $V_1$ and has $M$-norm 1. We finally set
$$r_{10}:= \simbolovettore{i} r_9, \quad r_{11} := -\simbolovettore{j} r_9,\quad r_{12}:= \simbolovettore{k} r_9.$$
Arguing as before, we see that the $M$-normalized vectors $r_9,...,r_{12}$ are pairwise $M$-orthogonal to each other as well as $M$-orthogonal to $r_1,...,r_8$.
The remaining columns of the matrix $A$ are constructed analogously. One easily checks that $A$ commutes with $K_d$. Indeed, writing $A=(A_{lm})_{l,m=1,...,n}$, where each $A_{lm}$ is a $4\times 4$-block,
it is straightforward to check that the condition $AK_d=K_d A$ is equivalent to
$$\simbolovettore{i} \cdot A_{ij} = A_{ij} \cdot \simbolovettore{i}, \quad \forall i,j,$$
where here we identify $\simbolovettore{i}$ with the corresponding real $4\times 4$-matrix.
Now, one readily computes for the $4\times 4$-block (being the computations for the remaining blocks identical)
$$A_{21}= (q_1,\simbolovettore{i} q_1 , -\simbolovettore{j} q_1,\simbolovettore{k} q_1) = \left ( \begin{matrix} x_1 & - y_1 & -z_1 & - w_1 \\ y_1 & x_1 & -w_1 & z_1 \\ z_1 & w_1 & x_1 & -y_1 \\ w_1 & -z_1 & y_1 & x_1 \end{matrix}\right ),$$
where $q_1=\trasp{(x_1,y_1,z_1,w_1)}$ is the first vector of the central configuration $q$,
$$\simbolovettore{i} \cdot A_{21} = \left ( \begin{matrix} -y_1 & -x_1 & w_1 & - z_1 \\ x_1 & -y_1 & -z_1 & -w_1 \\ w_1 & -z_1 & y_1 & x_1 \\-z_1 & -w_1 & -x_1 & y_1 \end{matrix}\right ) = A_{21}\cdot \simbolovettore{i}.$$
The matrix $A=(r_1,...,r_{4n})$ is by construction $M$-orthogonal, that is we have $\trasp A MA =I$, which can be equivalently rewritten as $\traspinv{A}=MA$.
In particular, the first eight columns of $\traspinv{A}$ are given by
$$t_l = M \simbolovettore{e}_l, \quad l=1,...,4, \quad t_5 =Mq,\quad t_6 = M\simbolovettore{i} q, \quad t_7 = - M\simbolovettore{j} q,\quad t_8=M\simbolovettore{k} q.$$
This concludes the proof of the first claim. The second claim follows as well noticing that
$E_1$ is generated by the vectors $\trasp{(r_l,\mathbf{0})}, \trasp{(\mathbf{0},t_l)}$, $l=1,...,4$, and
$E_2$ is generated by the vectors $\trasp{(r_l,\mathbf{0})}, \trasp{(\mathbf{0},t_l)}$, $l=5,...,8$.
\end{proof}
The next result is a straightforward consequence of Lemma~\ref{thm:new-coordinates} and provides the structure of the matrix $L$ in the new coordinate system.
\begin{thm}\label{thm:structure-L}
Let $\gamma$ be a non-planar (RE). Then, after the symplectic change of coordinates provided in Lemma~\ref{thm:new-coordinates}, the Hamiltonian matrix $L_d$ given by~\eqref{eq:Ham-lin} takes the form
\[
L_d=\begin{pmatrix}
L_1& 0 & 0\\
0 & L_2^d & 0 \\
0& 0 & L_3^d
\end{pmatrix}
\]
where $L_1$ is given by~\eqref{eq:L1}, $L_2^d$ is given by~\eqref{eq:E2n}, and
\begin{equation}\label{eq:L3*}
\displaystyle L_3^d\coloneqq L|_{E_3^d}= \begin{pmatrix}
K_d & I \\
\mathcal D & K_d
\end{pmatrix}, \qquad \mathcal D= \big[\trasp{A} D^2 U(Q) A\big]\Big|_{\mathds{R}^{4n-8}},
\end{equation}
where with slight abuse of notation we denote the $(4n-8)\times (4n-8)$-matrix $k \,\mathrm{diag} (\simbolovettore{i},...,\simbolovettore{i})$ again with $K_d$. \qed
\end{thm}
\begin{rem}
An analogous statement holds also in case of a planar (RE) replacing $L_d$, $K_d$ with $L_s$, $K_s$ respectively, and in case the central configuration generating it is planar replacing $4n-8$ with $4n-10$. Notice
however that in this latter case the representation of $L_2^s$ in the new coordinates is not the one given by the matrix in Lemma~\ref{thm:lemma3}, since the considered basis was not symplectic. \qed
\end{rem}
\subsection{More on $L_3^*$ and the corresponding symmetric matrix $B_3^*=JL_3^*$}
\label{subsec:Morse-Bott}
In the last section we provided a symplectic change of coordinates, inspired by Meyer and Schmidt's one \cite{MS05},
that allows us to rule out the uninteresting part of the linearized dynamics coming from the symmetries of the $n$-body problem.
However, as we shall see below, unlike in the planar case one is in general not able to rule out the nullity of
the Hessian of $\widehat U$ at the central configuration $q$ completely, not even after such a symplectic decomposition and not even under the assumption that
the considered central configuration be Morse--Bott non-degenerate.
More precisely, consider the Hamiltonian matrix
$$L_3^* = \left (\begin{matrix} K_* & I \\ \mathcal D & K_* \end{matrix} \right ),$$
where $*$ stands either for $d$ or $s$, according to the (RE) being non-planar or planar, and the associated symmetric matrix (notice that $\trasp{K_*}=-K_*$)
$$B_3^*:=JL_3^* = \left (\begin{matrix} - \mathcal D & -K_*\\ K_* & I\end{matrix}\right ).$$
The identity
\begin{equation*}
\begin{pmatrix}
I & K_* \\
0 & I
\end{pmatrix}\begin{pmatrix}
-\mathcal D & -K_*\\
K_* & I
\end{pmatrix}\begin{pmatrix}
I & 0\\
-K_* & I
\end{pmatrix}=
\begin{pmatrix}
-\big[\mathcal D + U(q)\big] & 0 \\
0 & I
\end{pmatrix}
\end{equation*}
shows that $B_3^*$ is similar to a block diagonal matrix having $-[\mathcal D + U(q)]$ and $I$ as diagonal blocks. Since
\[
\mathcal D + U(q)= \trasp{A}\big( D^2 U(q) + U(q)\big) A\Big|_{\mathds{R}^{4n-4-k}}=
\trasp{A}H(q) A\Big|_{\mathds{R}^{4n-4-k}}
\]
where $A$ is the matrix given in Lemma~\ref{thm:new-coordinates}, we infer that the eigenvalues of $B_3^*$ are given by minus the $4n-4-k$ eigenvalues of $H(q)$\footnote{Here with slight abuse of notation we denote the restriction of $H(q)$ to the $(4n-4-k)$-dimensional subspace of $T_q \mathbb S$ obtained after the symplectic decomposition again with $H(q)$.} (the Hessian of $\widehat U$ at the considered
central configuration), and $4n-4-k$ eigenvalues equal to 1. As in the previous section, $k=4$ in case of a non-planar (RE) or of a planar (RE) defined by a collinear central configuration and $k=6$ otherwise.
Recall that the Hessian $H(q)$ is defined on the $(4n-5)$-dimensional tangent space $T_q \mathbb S$ and that
$$\nullity{H(q)} \ge \dim T_q( \mathrm{SO}(4)\cdot q) = \left \{ \begin{array}{l} 6 \quad \text{if} \ q \ \text{non-planar}, \\ 5 \quad \text{if} \ q \ \text{planar non-collinear}, \\ 3 \quad \text{if} \ q \ \text{collinear}, \end{array}\right.$$
and that equality holds if $q$ is Morse--Bott non-degenerate. By comparing dimensions, we obtain that
$$\nullity{B_3^*} \ge \left \{ \begin{array}{l} 3 \quad \text{if} \ \gamma \ \text{non-planar, and } q \ \text{non-planar},\\
2 \quad \text{if} \ \gamma \ \text{non-planar, and } q \ \text{planar},\\
0 \quad \text{if} \ \gamma \ \text{planar, and } q \ \text{planar},\end{array}\right .$$
and that equality holds if $q$ is Morse--Bott non-degenerate. In other words, if $\gamma$ is a non-planar (RE), then the symmetric matrix $B_3^d$ is always degenerate.
Roughly speaking this means that, because of the complexity of the special orthogonal group $\mathrm{SO}(4)$, the symplectic decomposition does not allow us to remove all the
degeneracy coming from the rotational invariance of the problem.
This also poses major difficulties
when trying to relate the spectrum of $B_3^d$ and that of $JB_3^d$ via the spectral flow of a suitable path of symmetric matrices starting at $B_3^d$,
since all known formulas for computing the spectral flow assume that the endpoints of the path are invertible. For this reason, in the next section we will prove a formula of independent interest
that allows us to compute the spectral flow of a path of symmetric matrices having a degenerate starting point.
\section{Spectral flow for paths of selfadjoint operators in finite dimension}\label{sec:ss-ls-ham-sys}
The spectral flow is an integer-valued homotopy invariant of paths of selfadjoint Fredholm operators introduced by Atiyah, Patodi and Singer in the seventies in connection with the \textit{eta-invariant} and \textit{spectral asymmetry}. This section is devoted to recalling the basic definitions and properties of the spectral flow for paths of self-adjoint operators in finite-dimensional complex vector spaces and to proving a new formula for computing the spectral flow in case of affine paths having a degenerate starting point. Such a formula is of independent interest and will be needed in an
essential way in Section~\ref{sec:stability-mod-2-sf}. Indeed, the discussion in Section~\ref{subsec:Morse-Bott} implies that the paths of self-adjoint operators arising in the study of the linear stability properties of a non-planar (RE) always have a degenerate starting point, independently of the defining central configuration being (Morse--Bott) non-degenerate or not,
even after the symplectic decomposition discussed in Section~\ref{subsec:decomposition}.
\subsection{Definition and basic properties of the spectral flow}\label{subsec:spectral-flow}
Let $\mathcal{H}$ be a finite-dimensional complex Hilbert space (we shall specify its dimension when needed). We denote by $\mathscr{B}(\mathcal{H})$ the set of all (bounded) linear operators $T : \mathcal{H} \to \mathcal{H}$ and by $\mathscr{B}^{sa}(\mathcal{H})$ the subset of all (bounded) linear self-adjoint operators on $\mathcal{H}$. For any $T \in \mathscr{B}^{sa}(\mathcal{H})$, we define its {\sc index} $\iMor{T}$, its {\sc nullity} $\nullity{T}$ and its {\sc coindex} $\coiMor{T}$ as the numbers of negative, null and positive eigenvalues respectively. The {\sc signature} $\sgn(T)$ of $T$ is the difference between its coindex and its index: $\sgn(T) \coloneqq \coiMor{T} - \iMor{T}$. The {\sc extended index\/} and {\sc extended coindex\/} of $T \in \mathscr{B}^{sa}(\mathcal H)$ are defined respectively by
\[
\extiMor{T}= \iMor{T}+ \nullity{T}\qquad \textrm{ and } \qquad \extcoiMor{T}= \coiMor{T}+ \nullity{T}.
\]
The {\sc spectral flow\/} $\spfl(T_t, t \in [a,b])$ of a continuous path $T: [a,b] \to \mathscr{B}^{sa} (\mathcal{H})$ is roughly speaking given by the number of negative eigenvalues of $T_a$ that become positive minus the number of positive eigenvalues of $T_a$ that become negative as the parameter $t$ runs from $a$ to $b$. In other words, the spectral flow measures the net change of eigenvalues crossing $0$ and can be interpreted as a sort of generalized signature. More precisely, we have the following definition.
\begin{dfn}\label{def:spectralflow}
Let $a, b \in \mathds{R}$, with $a < b$, and let $T :[a,b] \to \mathscr{B}^{sa} (\mathcal{H})$ be a continuous path. The {\sc spectral flow} of $T$ on the interval $[a, b]$ is defined by
\[
\spfl \big( T, [a, b] \big) \coloneqq \iMor{T_a }- \iMor{ T_b} = \extcoiMor{T_b }- \extcoiMor{ T_a}.
\]
A path of operators having invertible ends will be usually referred to as {\sc admissible.\/}
\end{dfn}
Here we list some properties of the spectral flow that will be used in this paper. In what follows, every path of self-adjoint operators is assumed to be continuous.
\begin{enumerate}
\item {\sc Normalization.\/} Let $T:[a,b] \to \mathrm{GL}^{sa}(\mathcal{H})$ be a path of invertible operators. Then
\[
\spfl(T_t, t \in [a,b])=0.
\]
\item {\sc Invariance under Cogredience.\/} Let $T:[a,b] \to \mathscr{B}^{sa}(\mathcal{H})$. Then for any $M: [a,b] \to \mathrm{GL}^{sa}(\mathcal{H})$ we have
\[
\spfl(T_t, t \in [a,b])=\spfl(M^*_tT_tM_t, t \in [a,b]).
\]
\item {\sc Concatenation.\/} Let $T:[a,b] \to \mathscr{B}^{sa}(\mathcal{H})$ and $c\in[a,b]$. Then
\[
\spfl(T_t, t \in [a,b])= \spfl(T_t, t \in [a,c]) + \spfl(T_t, t \in[c,b]).
\]
\item{\sc Homotopy invariance property.\/} If $F:[0,1]\times [a,b] \to \mathscr{B}^{sa}(\mathcal{H})$ is a continuous family such that $\dim\ker F(\cdot, a)$ and $\dim\ker F(\cdot, b)$ are both constant (thus, independent of $s \in [0,1]$), then
\[
\spfl(F(1,t), t \in [a,b]) = \spfl (F(0,t), t\in [a,b]).
\]
\item {\sc Direct sum property.\/} For $i = 1,2$, let
$\mathcal{H}_i$ be two (finite-dimensional) Hilbert spaces and $T_i: [a,b] \to \mathscr{B}^{sa}(\mathcal{H}_i)$ be two paths of self-adjoint operators. Then
\[
\spfl(T_1\oplus T_2, [a, b]) = \spfl(T_1, [a, b]) + \spfl(T_2, [a, b]).
\]
\end{enumerate}
\subsection{Spectral flow for affine paths of selfadjoint operators}
Throughout the last decades much effort has been put in trying to provide efficient ways to compute
the spectral flow of a path of self-adjoint operators. Under suitable non-degeneracy assumptions at the ``crossing'' instants, one such way is provided through
the so-called {\sc crossing forms\/} \cite{RS95}. However, in the applications such additional non-degeneracy assumptions are often not satisfied resp. hard to check. For this reason, the authors in
\cite{GPP03, GPP04} provided an explicit formula to compute the spectral flow for arbitrary real analytic paths based on the theory of {\sc partial signatures\/}.
In case of affine paths, which is the only case we will be interested in throughout the paper, such formulas simplify significantly: let $C, A \in \mathscr{B}^{sa}(\mathcal{H})$ with $C$ invertible, and consider the affine path
$$\widetilde D : (0, +\infty) \to \mathscr{B}^{sa}(\mathcal{H}), \quad \widetilde {D}(s) \coloneqq sA + C.$$
If $s_* \in (0, +\infty)$ is a (possibly non-regular) crossing instant\footnote{As the path is real analytic, crossing instants are automatically isolated.} for $\widetilde{D}$, so that $1/s_*$ is an eigenvalue of $-C^{-1}A$, then for $\varepsilon > 0$ small enough
\begin{equation} \label{eq:sfaffine}
\spfl \bigl( \widetilde{D}(s), [s_* - \varepsilon, s_* + \varepsilon] \bigr) = -\sgn \Big (\langle C\,\cdot, \cdot \rangle \bigr|_{\mathcal{H}_{s_*}}\Big ),
\end{equation}
where $\mathcal{H}_{s_*}$ is the generalized eigenspace of the eigenvalue $1/s_*$ of $-C^{-1}A$.
By taking into account all local contributions to the spectral flow one gets the following explicit formula for computing the spectral flow of an admissible
affine path of self-adjoint operators:
\begin{align*}
\spfl(\widetilde D(s), s \in [a,b])= \ -\sum_{\mathclap{\substack{s_* \in (a,b) \\ s_* \textup{ crossing}}}}\ \sgn \Big (\langle C\,\cdot, \cdot \rangle \bigr|_{\mathcal{H}_{s_*}}\Big ).
\end{align*}
Such a formula can be applied only if the path is admissible, hence in particular only if $s_*=a$ is not a crossing instant. As in the cases we will be interested in this will often not be the case, we provide below a way to compute the spectral flow of an affine path having a crossing instant at the starting point.
Thus, let $S\in \mathscr{B}^{sa}(\mathcal H)$ and $L \in \mathscr{B}(\mathcal H)$ be such that $SL$ is self-adjoint, and consider the affine path of self-adjoint operators
$\widetilde C : \mathds{R} \to \mathscr{B}^{sa}(\mathcal H)$ defined by
\[
\widetilde C(t) \coloneqq SL+ t S.
\]
We stress the fact that we are not assuming here that $S$ be invertible resp. that $L$ be self-adjoint. Then for every $x, y \in \mathcal H$ and every positive integer $\ell$ we have
\begin{equation}\label{eq:proprietaSL}
\langle SL^\ell x,y\rangle= \langle Sx,L^\ell y\rangle ,
\end{equation}
as it readily follows from the fact that $S$ and $SL$ are self-adjoint.
The Fitting decomposition theorem implies that there exists $m \in \mathds{N}$ such that
\[
\mathcal H= \ker L^m \oplus \mathrm{im\,} L^m .
\]
Such a decomposition is actually $S$-orthogonal: for $x \in \ker L^m$ and $y \in \mathrm{im\,} L^m$, we write $y=L^m u$ for suitable $u \in \mathcal H$ and compute using~\eqref{eq:proprietaSL}
\begin{equation}\label{eq:S-orto}
\langle Sx, y\rangle=\langle Sx,L^mu \rangle=\langle SL^m x, u \rangle=0 .
\end{equation}
We set $V\coloneqq \ker L^m$, $W\coloneqq\mathrm{im\,} L^m$, and let $\pi_1, \pi_2:\mathcal H\to \mathcal H$ be the canonical projections onto $V$ and $W$ respectively. Clearly, $V$ and $W$ are $L$-invariant subspaces
of $\mathcal H$. We now define
\[
S_1\coloneqq \pi^*_1 S \pi_1,\qquad S_2\coloneqq \pi^*_2 S \pi_2, \qquad L_1\coloneqq\pi_1 L \pi_1, \qquad L_2\coloneqq\pi_2 L \pi_2,
\]
and observe that $L_1^m=0$ and $\ker L_2=V$. For $i=1,2$, let us consider the affine paths $\widetilde C_i:[a,b] \to \mathscr{B}^{sa}(\mathcal{H})$ pointwise defined by
\[
\widetilde C_1(t)= S_1L_1+ t S_1 \quad \textrm{ and } \quad \widetilde C_2(t)= S_2L_2+ t S_2.
\]
Given $x_1,x_2 \in V$ and $y_1,y_2 \in W$, we compute
\begin{align}
\langle \widetilde C(t)(x_1+y_1), x_2+ y_2 \rangle & = \langle (SL+tS)(x_1+y_1), x_2+ y_2 \rangle\nonumber \\
& = \langle (SL+tS)x_1, x_2\rangle+ \langle (SL+tS)y_1, y_2 \rangle \label{eq:conti} \\
& + \langle (SL+tS)x_1, y_2 \rangle+ \langle (SL+tS) y_1, x_2 \rangle. \nonumber
\end{align}
Using $x_i= \pi_1 x_i$ and $y_i=\pi_2 y_i$ we get for the first term in the (RHS) of~\eqref{eq:conti}
\begin{align*}
\langle (SL+tS)x_1, x_2\rangle & = \langle (SL+tS)\pi_1 x_1, \pi_1 x_2\rangle \\
& = \langle \pi_1^*(SL+tS)\pi_1 x_1, x_2\rangle\\
& = \langle \pi_1^*SL\pi_1 x_1, x_2\rangle+t \langle \pi_1^*S\pi_1 x_1, x_2\rangle\\
& = \langle \pi_1^*S\pi_1\pi_1L\pi_1 x_1, x_2\rangle+ t\langle \pi_1^*S\pi_1 x_1, x_2\rangle\\
& = \langle S_1L_1 x_1, x_2 \rangle + t \langle S_1 x_1 , x_2\rangle \\
& = \langle \widetilde C_1(t) x_1, x_2\rangle.
\end{align*}
Similarly, for the second term in the (RHS) of~\eqref{eq:conti} we compute
$$ \langle (SL+tS)y_1, y_2\rangle= \langle \widetilde C_2(t) y_1, y_2\rangle .$$
The last two terms in the (RHS) of~\eqref{eq:conti} vanish: indeed, for the third term (being the argument for the fourth one completely analogous) we compute using~\eqref{eq:proprietaSL},~\eqref{eq:S-orto}, and $y_2=L^m u_2$ for some $u_2\in \mathcal H$
\begin{align*}
\langle (SL+tS)x_1, y_2 \rangle & = \langle SL x_1, L^m u_2 \rangle + t \langle S x_1, L^m u_2 \rangle = \langle SL^{m+1} x_1, u_2 \rangle + t \langle SL^m x_1, u_2 \rangle= 0.
\end{align*}
In conclusion we have shown that
\begin{equation}
\langle \widetilde C(t)(x_1+y_1), (x_2+y_2)\rangle
= \langle \widetilde C_1(t) x_1,x_2\rangle+\langle \widetilde C_2(t) y_1,y_2\rangle
\end{equation}
and hence, by the additivity of the spectral flow under direct sum,
\[
\spfl(\widetilde C(t), t \in [a,b])= \spfl(\widetilde C_1(t), t \in [a,b])+ \spfl(\widetilde C_2(t), t \in [a,b]), \quad \forall\, [a,b]\subset \mathds{R}.
\]
The next result provides an explicit formula to compute the contribution to the spectral flow provided by the crossing instant $t=0$. It is worth noticing that Item 3 in the proposition below
is precisely~\eqref{eq:sfaffine}. Here we therefore give an alternative proof of~\eqref{eq:sfaffine} which does not rely on the theory of partial signatures.
\begin{prop}\label{lem:homotopia-zero}
There exists $\varepsilon>0$ sufficiently small such that the following hold:
\begin{enumerate}
\item $\spfl(\widetilde C(t), t \in [\alpha, \beta ])= \spfl(\widetilde C_1(t), t \in [\alpha, \beta ])$ for every $[\alpha, \beta ] \subset [-\varepsilon, \varepsilon]$.
\item $\spfl(\widetilde C_1(t), t \in [0 ,\varepsilon ])= \iMor{S_1L_1}-\iMor{S_1}$, and $\ \spfl(\widetilde C_1(t), t \in [-\varepsilon,0 ])= \coiMor{S_1}- \iMor{S_1L_1}$.
\item $\spfl(\widetilde C_1(t), t \in [-\varepsilon,\varepsilon ])= \sgn(S_1)$.
\end{enumerate}
\end{prop}
Before proving this result, we point out that, since $S_1L_1= \pi_1^*SL\pi_1$ and $S_1=\pi_1^*S\pi_1$, we have
\[
\iMor{S_1L_1}=\iMor{(SL)|_V} \quad \textrm{ and } \quad
\iMor{S_1}=\iMor{S|_V}.
\]
\begin{proof}
\begin{enumerate}
\item Clearly, it is enough to prove that there exists $\varepsilon>0$ such that
\[
\spfl(\widetilde C_2(t), t \in [-\varepsilon ,\varepsilon ])=0.
\]
Since $L_2$ is invertible and being invertible is an open condition, we can find $\varepsilon>0$ sufficiently small such that $L_2+ t I$ is invertible for $|t| \le \varepsilon$. Now the identity
$$\widetilde C_2(t)= S_2L_2+ t S_2= S_2\big(L_2 + t I\big)$$
implies that $\dim \ker \widetilde C_2(t)=\dim\ker S_2$ for every $t \in [-\varepsilon, \varepsilon]$. In particular, we get
\[
\dim \ker \widetilde C_2(0)= \dim \ker S_2L_2= \dim \big( L_2^{-1}(\ker S_2)\big)= \dim \ker S_2
\]
since $\ker L_2=V\subset \ker S_2$. By the stratum homotopy invariance of the spectral flow we infer that $\spfl(\widetilde C_2(t), t \in [\alpha, \beta])=0$ for every $ [\alpha, \beta]\subset [-\varepsilon ,\varepsilon ]$, as claimed.
\item Let us consider the two-parameter continuous family of self-adjoint operators
\[
\widetilde h:[0,1]\times [-\varepsilon ,\varepsilon ] \to \mathscr{B}^{sa}(\mathcal H), \qquad \widetilde h(r,t)\coloneqq r S_1L_1+ t S_1.
\]
We observe that $rL_1 + \varepsilon I$ is invertible since $L_1$ is nilpotent. Hence
\[
\ker \big(r S_1 L_1 + \varepsilon S_1\big)= \ker \big(S_1(r L_1+\varepsilon I)\big)
\]
implies that $\dim \ker \widetilde h(r,\varepsilon)=\dim \ker S_1$ for every $r \in [0,1]$. The stratum homotopy invariance of the spectral flow now yields that
\[
\spfl\big(\widetilde h(r,\varepsilon), r \in [0,1]\big)=0.
\]
Since the rectangle $[0,1] \times [0,\varepsilon]$ is contractible, the homotopy invariance of the spectral flow yields
$$\spfl \big (\widetilde h(0,t), t\in [0,\varepsilon]\big ) -\spfl \big (\widetilde h(1,t), t \in [0,\varepsilon]\big ) - \spfl \big (\widetilde h (r,0), r \in [0,1]\big ) =0,$$
which can be equivalently rewritten as
\[
\spfl\big(\widetilde C_1(t), t \in [0 ,\varepsilon ]\big)=\spfl\big( tS_1, t \in [0 ,\varepsilon ]\big) -\spfl\big( rS_1L_1, r \in [0 ,1 ]\big).
\]
Since by definition we have $\spfl\big( tS_1, t \in [0 ,\varepsilon ]\big)=-\iMor{S_1}$ and $\spfl\big( rS_1L_1, r \in [0 ,1 ]\big)=-\iMor{S_1L_1}$, the first formula follows.
The proof of the second formula is completely analogous and will be omitted.
\item Follows from Item 2 using the concatenation property of the spectral flow. \qedhere
\end{enumerate}
\end{proof}
\section{Detecting stability through the mod 2 spectral flow}
\label{sec:stability-mod-2-sf}
The aim of this section is to investigate the relation intertwining the spectrum of a symmetric $2p\mathfrak{t}imes 2p$-matrix $A$ and that of the corresponding Hamiltonian matrix $JA$, where $J$ is the standard complex structure in $\mathds{R}^{2p}$. Even if in general it is extremely hard to relate the two spectra, we can provide via the spectral flow modulo 2 some conditions on the spectrum of $A$ which ensure that the spectrum of $JA$ be not purely imaginary. This information will be crucial in Section~\mathrm{(RE)}f{sec:instability} for the study of the stability properties of (RE).
In what follows we denote by $G:=i J$ the {\sc Krein matrix\/} and by $\langle G\cdot,\cdot\rangle$ the {\sc Krein form\/}. Our main reference for the general theory of the Krein form and its signature is \cite[Chapter 1]{Abb01}.
\begin{thm} \label{prop:mainB}
Let $A\in M(2p\times 2p,\mathds{R})$ be a symmetric matrix such that the spectrum of $JA$ is purely imaginary, i.e. such that $\sigma(JA) \subset i\mathds{R}$. Then
\[
\iMor{A\big|_{\ker (JA)^{2p}}}
\equiv\iMor{A}
\quad \mod 2.
\]
\end{thm}
\begin{rem}
\label{rmk:nocounterexample}
The kernel of $JA$ is clearly equal to the kernel of $A$, $J$ being invertible. However, this is in general not the case for the corresponding generalized eigenspaces. Indeed,
$\ker A = \ker A^{2p}$ since $A$ is diagonalizable, but in general $\ker JA \subsetneq \ker (JA)^{2p}$. Take for instance $p=3$ and
\[
A=\diag (-2,-1,1,-1,0,0).
\]
It is readily seen that the characteristic polynomial of $JA$ is
\[
p_{JA}(t) = t^4 (t^2+2),
\]
which means that the generalized eigenspace $\ker (JA)^{6}$ has dimension 4. Moreover, the spectrum of $JA$ is purely imaginary and $n^-(A)=3$.
Such an example appears in \cite{DZ21} and provides a counterexample to the following statement claimed in \cite[Theorem 3.11]{BJP14}:
\begin{center}
\textit{If the spectrum of $JA$ is purely imaginary, then $n^{-}(A)$ is even.}
\end{center}
In Theorem~\ref{prop:mainB} above we fix the gap in the proof of the aforementioned theorem in \cite{BJP14} by taking into account the contribution of the generalized eigenspace corresponding to the eigenvalue zero.
We shall finally notice that the example above does not contradict Theorem~\ref{prop:mainB}. Indeed, writing $A=\diag (A_1,A_2)$ with
\[
A_1 = \diag (-2,-1,1), \quad A_2 = \diag (-1,0,0),
\]
we see that
\[
(JA)^2 =\diag (-A_1A_2, -A_1A_2),
\]
and hence
\[
\ker (JA)^{6}=\ker (JA)^2
= \operatorname{span} \{e_2,e_3,e_5,e_6\},
\]
where $\{e_j\}$ denotes the standard basis of $\mathds{R}^{6}$. From this we conclude that
\[
n^{-} \Big(A\Big|_{\ker (JA)^{6}}\Big) = 1,
\]
and hence the condition $n^-\Big(A\Big|_{\ker (JA)^{6}}\Big)\equiv n^-(A)$ modulo two is satisfied.
\qed
\end{rem}
\begin{proof}
Let $D:[0,+\infty) \to \Bsa(\mathds{C}^{2p})$ be the affine path of self-adjoint matrices pointwise defined by
\[
D(t) = A +tG = -J(JA - itI).
\]
Since $-J$ is invertible, we have
\[
\ker D(t) = \ker (JA - itI) \qquad \forall\, t \in [0, +\infty).
\]
In particular, $t_* \in [0, +\infty)$ is a crossing instant for $D$ if and only if
$it_* \in \sigma(JA)$. In other words,
there is a bijection between the set of non-negative crossing instants of $D$ and the set of purely imaginary eigenvalues of $JA$ with non-negative imaginary part. As already observed, the crossing instants are isolated since $D$ is affine (hence real-analytic).
Let us first examine the strictly positive crossings. For a crossing instant $t_*\in (0,+\infty)$ we can find $\varepsilon >0$ such that $[t_*-\varepsilon,t_*+\varepsilon]$ does not contain
any other crossing instant and
\begin{equation}\label{eq:Krein}
\spfl \bigl( D(t), t\in [t_* - \varepsilon, t_* + \varepsilon] \bigr) = \sgn \Big(\langle G\, \cdot, \cdot \rangle \big |_{ \mathcal{H}_{t_*}}\Big),
\end{equation}
where $\mathcal{H}_{t_*}:=\ker (GA+ t_* I)^{2p}$. Indeed, the identity
\[
D(t) = A+tG= G(GA+t_* I)+(t-t_*)G,
\]
implies that
\[
\spfl \bigl( D(t), t \in [t_* - \varepsilon, t_* + \varepsilon] \bigr) = \spfl \bigl( \widetilde{D}(t), t \in [-\varepsilon,\varepsilon]\bigr),
\]
where $\widetilde{D}(t):=D(t+t_*)$, and the claim follows from Proposition~\ref{lem:homotopia-zero}, Item 3, setting $S=G$ and $L =GA+t_*I$.
Now, \eqref{eq:Krein} implies that
\begin{equation} \label{eq:sfisolcross}
\spfl \bigl( D(t), t \in [t_* - \varepsilon, t_* + \varepsilon] \bigr) \equiv \dim \mathcal{H}_{t_*} \quad \mod 2,
\end{equation}
as it is well-known that the restriction $\langle G\, \cdot, \cdot \rangle|_{\mathcal{H}_{t_*}}$ is non-degenerate (for further details we refer to \cite[Chapter 1]{Abb01}).
Clearly, we can find $\bar T>0$ such that all crossing instants are contained in $[0,\bar T)$. In particular, the additivity property of the spectral flow, together with
the fact that zero is an isolated crossing instant and the fact that $n^-(D(T)) = n^{-}(G)=p$ for all $T\ge \bar T$, implies that
\begin{equation}
\spfl \big( D(t), t\in [\varepsilon, T] \big) = n^-(D(\varepsilon))- p\equiv \sum \dim \mathcal H_{t_*} \qquad \text{mod} \ 2, \quad \forall \, T\ge \bar T,
\label{eq:finalespfl2}
\end{equation}
where the sum is taken over all positive crossing instants and $\varepsilon>0$ is chosen in such a way that $[0,\varepsilon]$ does not contain any crossing instant other than $t_*=0$.
When turning our attention to the crossing instant $t_*=0$, we notice that applying Item 2 of Proposition~\ref{lem:homotopia-zero} to the path $t\mapsto D(t)$ on $[0, \varepsilon]$ we obtain
\begin{equation}\label{eq:finalespfl4}
\begin{split}
\spfl \bigl( D(t), t\in [0, \varepsilon] \bigr)&= n^-\bigl(A\big|_{\ker (JA)^{2p}}\bigr)-n^-\bigl(G\big|_{\ker (JA)^{2p}}\bigr) = n^-\bigl(A\big|_{\ker (JA)^{2p}}\bigr)-\dfrac{\dim \mathcal H_0}{2},
\end{split}
\end{equation}
where the second equality follows by the general theory on Krein forms (notice that $\mathcal H_0$ has even dimension). By the very definition of the spectral flow this implies that
\begin{equation}
\begin{split}
\iMor{A}- \iMor{D(\varepsilon)}=\iMor{A\big|_{\ker (JA)^{2p}}}-\dfrac{\dim \mathcal H_0}{2},
\end{split}
\end{equation}
which is equivalent to saying that
\begin{equation}\label{eq:92}
\iMor{D(\varepsilon)}= \iMor{A}- n^-\bigl(A\big|_{\ker (JA)^{2p}}\bigr)+\dfrac{\dim \mathcal H_0}{2}.
\end{equation}
Comparing~\eqref{eq:finalespfl2} and~\eqref{eq:92} we obtain
\begin{equation}\label{eq:112}
\spfl \bigl( D(t), [\varepsilon,T] \bigr) \equiv \iMor{D(\varepsilon)}-p\equiv \iMor{A} - n^-\bigl(A\big|_{\ker (JA)^{2p}}\bigr) +
\dfrac{\dim \mathcal{H}_0}{2}-p\quad \mod 2.
\end{equation}
On the other hand, we have
\[
2p = 2 \sum \dim \mathcal{H}_{t_*} + \dim \mathcal{H}_0,
\]
which can be equivalently rewritten as
\begin{equation}\label{eq:9piu2}
p - \dfrac{\dim \mathcal{H}_0}{2} = \sum \dim \mathcal{H}_{t_*}.
\end{equation}
As above, the sums are here taken over all positive crossing instants.
Comparing \eqref{eq:finalespfl2} and \eqref{eq:9piu2} yields
\begin{equation}\label{eq:102}
\spfl \bigl( D(t), [\varepsilon,T] \bigr) \equiv p - \dfrac{\dim \mathcal{H}_0}{2} \equiv \dfrac{\dim \mathcal{H}_0}{2} -p \quad \mod 2.
\end{equation}
Finally, putting \eqref{eq:112} and \eqref{eq:102} together we conclude that
\[
\iMor{A}- n^-\bigl(A\big|_{\ker (JA)^{2p}}\bigr)
\equiv 0
\quad \mod 2
\]
as claimed.
\end{proof}
As an immediate consequence of Theorem~\ref{prop:mainB} we obtain the following
\begin{cor}
\label{cor:mainB}
Let $A\in M(2p\times 2p,\mathds{R})$ be a symmetric matrix such that $\sigma (JA)\subset i \mathds{R}$. If there are no non-trivial Jordan blocks for $JA$ corresponding to the eigenvalue zero (in particular, if $JA$ is diagonalizable), then
\[
n^0(A) \equiv n^-(A) \equiv 0 \quad \mod \ 2.
\]
\end{cor}
\begin{proof}
By assumption $\ker(JA)^{2p}=\ker(JA)= \ker A$. In particular, $n^0(A)=n^0(JA)$ is even by the spectral properties of Hamiltonian matrices,
\[
n^- \Big( A\Big|_{\ker (JA)^{2p}} \Big) = \iMor{A\big|_{\ker A}}=0,
\]
and hence
\[
n^-(A) \equiv \iMor{A\big|_{\ker A}}= 0 \quad \text{mod} \ 2
\]
as claimed.
\end{proof}
In the particular case in which $JA$ is invertible and diagonalizable, Corollary~\ref{cor:mainB} is proved in \cite{HS09}.
Under such assumptions the proof is much less involved as all crossing instants are regular,
and hence the local contributions to the spectral flow can be easily computed using crossing forms.
\begin{rem}
Corollary~\ref{cor:mainB} implies that if $A$ is a symmetric matrix with odd-dimensional kernel such that $JA$ is spectrally stable (i.e. $\sigma (JA)\subset i\mathds{R}$), then the generalized eigenspace $\ker (JA)^{2p}$ must be strictly bigger than $\ker JA$. In other words, the Jordan block structure of $JA$ corresponding to the eigenvalue zero is non-trivial and hence in particular $JA$ is linearly unstable. As a consequence, a Hamiltonian matrix $JA$ defined by a symmetric matrix $A$ having odd-dimensional kernel can never be linearly stable.
On the other hand, we can easily construct examples of spectrally stable $JA$ such that $A$ has odd-dimensional kernel. Consider for instance the matrix
\[
A= \diag (-2,1,1,-1,1,0).
\]
Since
\[
(JA)^2 = \diag (-2,-1,0,-2,-1,0)
\]
we have that $\sigma (JA) = \{0,\pm i,\pm i\sqrt{2}\}$; in particular, $JA$ is spectrally stable. Notice that the condition
\[
n^-\Big(A|_{\ker (JA)^6}\Big) \equiv n^-(A) \quad \text{mod}\ 2
\]
is satisfied since $n^-(A)=2$ and $\displaystyle n^-\Big(A|_{\ker (JA)^6}\Big)=0$.
\qed
\end{rem}
\section{Linear and spectral instability}
\label{sec:instability}
Given a central configuration $q$ we consider the associated (RE) $\gamma$ and the corresponding linear autonomous Hamiltonian system given by
\begin{equation}\label{eq:lin-ham-sys}
\dot \zeta(t) = -JB_*\zeta(t)
\end{equation}
where $B_*$ stands for the symmetric matrix $B_d$ defined in Equation~\eqref{eq:Ham-lin-2} respectively for the symmetric matrix $B_s$ defined in Equation~\eqref{eq:Ham-lin-pl-2}, according to whether the (RE) $\gamma$ is non-planar or planar. The fundamental solution of~\eqref{eq:lin-ham-sys} can be written explicitly as
\[
\gamma(t)\coloneqq \exp( -t JB_*).
\]
As already observed, the invariance of Newton's equation~\eqref{eq:Newton-cpt} under translations
and rotations yields several integrals of motion. As shown in Section~\ref{subsec:decomposition}, using such integrals of motion it is possible to provide a symplectic decomposition
of the phase space into symplectic subspaces which are invariant for the linearized Hamiltonian dynamics in~\eqref{eq:lin-ham-sys}, thus ruling out the somehow uninteresting part of the dynamics
due to the symmetries. Indeed, after the symplectic change of coordinates given in Lemma~\ref{thm:new-coordinates}, the matrix $B_*$ can be decomposed as the direct sum
\[
B_*\,= B_1^*\, \oplus B_2^*\, \oplus B_3^* ,
\]
where $B_j^*\coloneqq B\,|_{E_j^*}$, $j=1,2,3$.
Under the notation above, the symmetric matrix $B_3^*$ detects the stability properties of the (RE) $\gamma$ and takes the form
\[
B_3^*\, = \begin{pmatrix}
-\mathcal D & \trasp{K_*}\\
K_* & I
\end{pmatrix}, \qquad \mathcal D= \big[\trasp{A} D^2 U(Q) A\big]\Big|_{\mathds{R}^{4n-4-k}},
\]
where $k=4 $ or $k=6$ depending on whether the (RE) is non-planar or planar but generated by a collinear central configuration, or planar and generated by a planar non-collinear central configuration, and where $Q$ is the central configuration (in the rotating frame) generating the (RE).
For this reason, we are allowed to call the (RE) $\gamma$ {\sc spectrally stable} if the Hamiltonian matrix $JB_3^*$ is spectrally stable, that is if its spectrum is purely imaginary, and {\sc linearly stable} if
$JB_3^*$ is linearly stable, namely spectrally stable and diagonalizable.
As shown in Section~\ref{subsec:Morse-Bott}, the matrix $B_3^*$ is similar to the block-diagonal matrix
\begin{equation}\label{eq:NsimB}
N_3^*:=
\begin{pmatrix}
-\big[\mathcal D + U(Q)\big] & 0 \\
0 & I
\end{pmatrix},
\end{equation}
with the matrix $\mathcal D + U(Q)$ having the same inertia indices as $H(Q)\big|_{\mathds{R}^{4n-4-k}}$.
We are now ready to state and prove the first main result of this section.
\begin{thm}\label{thm:main-1}
Let $\gamma$ be the (RE) generated by a central configuration $q$. If:
\begin{itemize}
\item $\gamma$ is either non-planar or planar and $q$ is collinear, and we have
\[
\iMor{B_3^d\big|_{\ker (JB_3^d)^{8n-16}}}
-\iMor{B_3^d}\equiv 1
\quad \mod 2,
\]
\item or $\gamma$ is planar and $q$ is planar non-collinear, and we have
\[
\iMor{B_3^s\big|_{\ker (JB_3^s)^{8n-20}}}
-\iMor{B_3^s}\equiv 1
\quad \mod 2,
\]
\end{itemize}
then $\gamma$ is spectrally unstable.
\end{thm}
\begin{proof}
We prove the claim only when the first condition holds, the other proof being completely analogous. Let $\mathcal{H} \coloneqq \mathds{C}^{8n-16}$ be the complexification of $\mathds{R}^{8n-16}$, and define
the path $D_3 : [0, +\infty) \to \Bsa(\mathcal{H})$ as
\[
D_3(t) \coloneqq B_3^d + tG
\]
where $G \coloneqq iJ$ denotes the Krein matrix. Arguing by contradiction, we assume that $\gamma$ is spectrally stable. Applying Theorem~\ref{prop:mainB} to the path $t \mapsto D_3(t)$ (with $p=4n-8$), we get that
\[
n^-\bigl(B_3^d\big|_{\ker (JB_3^d)^{8n-16}}\bigr)
-\iMor{B_3^d}\equiv 0
\quad \mod 2,
\]
which contradicts the assumption. This completes the proof.
\end{proof}
In the following theorem we collect some easy consequences of Theorem~\ref{thm:main-1} which relate the linear and spectral stability of a (RE)
with the inertia indices of the corresponding central configuration.
\begin{thm} \label{thm:main-2}
Let $q$ be a central configuration, and let $\gamma$ be the corresponding (RE). Then the following hold:
\begin{enumerate}
\item If $q$ is non-planar and Morse-Bott non-degenerate, or arbitrary and Morse-Bott degenerate with $n^0(q)$ even, then $\gamma$ is linearly unstable.
\item Suppose that
\[
n^-\Big(B_3^* \Big|_{\ker (JB_3^*)^{8n-8-2k}}\Big)\equiv 0 \quad \text{mod}\ 2,
\]
where $*$ and $k$ are as in Section~\ref{subsec:decomposition}. If
\[
n^-(q) + n^0(q) \equiv 0 \quad \text{mod}\ 2,
\]
then $\gamma$ is spectrally unstable. In particular, if $q$ is Morse-Bott non-degenerate then:
\begin{enumerate}
\item If $q$ is non-planar and $n^-(q)$ is even, then $\gamma$ is spectrally unstable.
\item If $q$ is planar and $n^-(q)$ is odd, then $\gamma$ is spectrally unstable.
\end{enumerate}
\end{enumerate}
\end{thm}
\begin{rem}
The assumption in Item 2 of Theorem~\ref{thm:main-2} is satisfied for instance if:
\begin{itemize}
\item $q$ is a Morse-Bott non-degenerate planar central configuration generating a planar (RE) $\gamma$.
\item $q$ is a degenerate planar central configuration and the Hamiltonian matrix $JB_3^*$ (where $*=d$ if $\gamma$ is non-planar and $*=s$ otherwise)
does not have non-trivial Jordan blocks corresponding to the eigenvalue zero (in this case $n^0(q)$ must necessarily be odd).
\end{itemize}
Under the former condition we retrieve in particular the main result of \cite{HS09}. In case of planar (RE), the latter one must be added to the assumptions of the main result
in \cite{BJP14} to conclude spectral instability.
The result above shows that the stability properties of (RE) are sensitive to the ``dimension''
of the corresponding central configurations. Indeed, all Morse-Bott non-degenerate non-planar central configurations are linearly unstable.
Moreover, under the assumption of Item 2, we see that different parities of the Morse index $n^-(q)$ force spectral instability according to the fact
that $q$ is planar or not. In particular, for $n\ge 4$ all (necessarily non-planar) local minima of $\widehat U$ satisfying the assumption in Item 2 are spectrally unstable.
\qed
\end{rem}
\begin{proof}
$\ $
\begin{enumerate}
\item Under the given assumptions we readily see that
\[
n^0(B_3^d) = n^0(q)-3 \equiv 1 \quad \text{mod}\ 2, \qquad n^0(B_3^s) = \left \{\begin{array}{l} n^0(q) - 5 \\ n^0(q)-3 \end{array}\right. \equiv 1 \quad \text{mod}\, 2.
\]
Therefore,
\[
\ker (JB_3^d) \subsetneq \ker (JB_3^d)^{8n-16},
\]
as the generalized eigenspace must have even dimension. In particular, the matrix $JB_3^d$ has a non-trivial Jordan block structure (corresponding to the eigenvalue zero) and as such is linearly unstable.
\item Assume that $\gamma$ is spectrally stable. Then, in virtue of Theorem~\ref{thm:main-1}, the assumption implies that $n^-(B_3^*)$ is even. In particular, we have that
\begin{align*}
4n-5 - n^-(q) - n^0(q) = n^+(q) = n^-(B_3^*) \equiv 0 \quad \text{mod}\ 2,
\end{align*}
which implies that
\[
n^-(q) + n^0(q) \equiv 1 \quad \text{mod}\ 2,
\]
showing the first claim. To conclude (a) and (b) it suffices to notice that in the first case $n^0(q)=6$ is even, whereas in the second one $n^0(q)=3,5$ depending on $q$ being collinear or planar non-collinear.
\qedhere
\end{enumerate}
\end{proof}
\appendix
\section{Symmetries and integrals of motion}\label{subsec:integrals}
The $n$-body problem in $\mathds{R}^4$ is invariant under the action of the $4$-dimensional Euclidean group $\Euc{4}$. By lifting such a group action to the phase space we can
use Noether's theorem to foliate the space with sets which are invariant under the Hamiltonian dynamics of the $n$-body problem. The goal of this appendix is to provide
an explicit description of the differentiable structure of such invariant sets.
Let $g \in \Euc{4}$ be an element of the Euclidean group; so, $g:\mathds{R}^4 \to \mathds{R}^4$ takes the form
\[
g\cdot q = Aq+ b \qquad A \in \mathrm{O}(4) \textrm{ and } b \in \mathds{R}^4.
\]
By lifting such an action to $T^*\mathbb{X} $ via
\[
g\cdot (q,p)=(g \cdot q, p \trasp{A})
\]
it is straightforward to check that both the Hamiltonian function $H$ as well as the Liouville one-form are invariant under the $g$-action. For fixed $b \in \mathds{R}^4$ we have a one-parameter family
\[
g_b^s= I + s b
\]
whose generating vector field is
\[
X_b\,\equiv b.
\]
Noether's theorem implies that the quantity
\[
F(q,p)\coloneqq p [X_b]= \left(\sum_{i=1}^n p_i\right)[b]
\]
is a first integral of motion for every $b \in \mathds{R}^4$, that is, that the {\sc total linear momentum\/}
\begin{equation}
\overline p\coloneqq p_1 + \ldots + p_n=\sum_{i=1}^n p_i \in (\mathds{R}^4)^*
\end{equation}
is constant and determines the motion of the {\sc center of mass\/}
\[
\overline q\coloneqq \dfrac{1}{\overline m}\sum_{i=1}^n m_i q_i, \qquad \overline m \coloneqq \sum_{i=1}^n m_i,
\]
which is namely given by
\[
\overline q(t)= \overline q(t_0) + \overline p (t-t_0).
\]
If $t\mapsto \big(q(t), p(t)\big)$ is any solution of the $n$-body problem having center of mass at $\overline q$ and momentum $\overline p$, then
\[
\widehat q(t)\coloneqq q(t)-\overline q \qquad \widehat p(t)\coloneqq p(t)-\overline p
\]
is another solution having total momentum zero.
Thus, one can without loss of generality study solutions with $\overline {p}=0$. This implies that $\overline q$ is a vector-valued constant of motion.
Hence it is not restrictive to assume $\overline q=0$. Summing up, the translational invariance of the $n$-body problem yields 8 integrals of motion.
Let us now choose $1 \le k <l \le 4$ (so, only 6 choices are possible) and let us consider the one-parameter group of rotations in the $(q^k,q^l)$-plane. After rearranging the coordinates as $(q^k,q^l, *,*)$, we get
\[
g_s=
\begin{pmatrix}
\begin{matrix}
\cos s & -\sin s \\
\sin s & \cos s
\end{matrix}
& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep} & \zero \\
\hline \zero & \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep} &
I
\end{pmatrix}
\]
where $\zero$ denotes the $2 \times 2$ null matrix and $I$ the $2\times 2$ identity matrix. The generating vector field of $g_s$ is pointwise defined by
\[
X(q)= \left(\dfrac{d}{ds}\Big|_{s=0} g_s\cdot q\right)=
\begin{pmatrix}
\begin{matrix}
0 & -1 \\
1 & 0
\end{matrix}
& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep} &\zero \\
\hline \zero & \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep} &
\zero
\end{pmatrix}\begin{pmatrix}
q^k\\ q^l\\0\\0
\end{pmatrix}=\begin{pmatrix}
-q^l\\ q^k\\0\\0
\end{pmatrix}
\]
Using Noether's theorem again we obtain (after rearranging the coordinates of $p$ accordingly) that the quantity
\[
F(q,p)\coloneqq \sum_{i=1}^n p_i [X(q_i)]=\sum_{i=1}^n \big( -p_i^k[q_i^l]+ p_i^l[q^k_i]\big)
\]
is a first integral of motion. Thus we get 6 additional first integrals
\[
\Omega_{kl}, \qquad 1 \le k <l \le 4,
\]
which will be referred to as the {\sc angular momentum first integrals.\/}
Summarizing, we have shown that for fixed constants
$\omega_{kl}$, $1 \le k <l \le 4 $, the set
\[
S_{\omega_{kl}}\coloneqq\Big \{ (q,p)\in T^*\mathbb{X}\ \Big |\ \overline q= \overline p=0, \ \Omega_{kl}(q,p)=\omega_{kl}\Big \}
\]
is invariant under the Hamiltonian dynamics of the $n$-body problem. We will now investigate under which condition the set $S_{\omega_{kl}}$ is a smooth $(8n-14)$-dimensional manifold.
\begin{prop}
The 14 integrals of motion coming from the conservation of total linear momentum, center of mass and angular momentum are linearly independent except for the case in which all vectors $q_i,p_i$ for $i =1, \ldots, n$ are coplanar. In such a case, the motion is planar.
\end{prop}
\begin{proof}
To prove the claim we just have to show that the $(8n\times 14)$-matrix defined by
\[
\begin{pmatrix}
\nabla \overline q \ |\ \nabla \overline p\ | \ \nabla \Omega
\end{pmatrix}
\]
has rank strictly less than $14$ at $(q,p)$ if and only if all vectors $q_i,p_i$ for $i =1, \ldots, n$ are coplanar (as usual we identify $p$ with the corresponding column vector).
By a straightforward computation, we get that
\[
\nabla \overline q= \dfrac{1}{\overline m}\left(\begin{array}{@{}c@{}}
m_1 I_4 \\ \vdots \\ m_nI_4\\ \hline 0_4 \\ \vdots \\ 0_4
\end{array}\right), \qquad
\nabla \overline p=\left(\begin{array}{@{}c@{}}
0_4\\ \vdots \\0_4\\ \hline
I_4 \\ \vdots \\ I_4
\end{array}\right),
\]
and
\[
\nabla \Omega=\left(\begin{array}{@{}cccccc@{}}
p_2^1 & p_3^1 & p_4^1 & 0 & 0 & 0 \\
- p_1^1 & 0 & 0 & p_3^1& p_4^1 & 0 \\
0 & -p_1^1& 0 & -p_2^1 & 0 & p_4^1\\
0 & 0& -p_1^1 & 0 & -p_2^1 & -p_3^1\\
\hline
\vdots &\vdots & \vdots & \vdots &\vdots &\vdots \\ \hline \\
p_2^n & p_3^n & p_4^n & 0 & 0 & 0 \\
- p_1^n & 0 & 0 & p_3^n& p_4^n & 0 \\
0 & -p_1^n& 0 & -p_2^n & 0 & p_4^n\\
0 & 0& -p_1^n & 0 & -p_2^n & -p_3^n\\
\hline\\
-q_2^1 & -q_3^1 & -q_4^1 & 0 & 0 & 0 \\
q_1^1 & 0 & 0 & -q_3^1& -q_4^1 & 0 \\
0 & q_1^1& 0 & q_2^1 & 0 & -q_4^1\\
0 & 0& q_1^1 & 0 &q_2^1 & q_3^1\\
\hline
\vdots &\vdots & \vdots & \vdots &\vdots &\vdots \\ \hline \\
-q_2^n & -q_3^n & -q_4^n & 0 & 0 & 0 \\
q_1^n & 0 & 0 & -q_3^n& -q_4^n & 0 \\
0 & q_1^n& 0 & q_2^n & 0 & -q_4^n\\
0 & 0& q_1^n & 0 &q_2^n & q_3^n\\
\end{array}\right)=
\left(\begin{array}{@{}c@{}}
\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}\\ \nabla^1 \Omega\\\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep} \\ \hline \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep} \\ \nabla^2 \Omega\\ \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}
\end{array}
\right)
\]
The vectors $\nabla \overline q$, $\nabla \overline p$, $\nabla \Omega$ are thus linearly dependent if and only if the following two systems of linear equations
\begin{equation}\label{eq:eq1}
\begin{cases}
m_i \lambda_1 + \lambda_9 p_2^i+ \lambda_{10}p_3^i+ \lambda_{11} p_4^i=0\\
m_i \lambda_2 - \lambda_9 p_1^i+ \lambda_{12}p_3^i+ \lambda_{13} p_4^i=0\\
m_i \lambda_3 - \lambda_{10} p_1^i- \lambda_{12}p_2^i+ \lambda_{14} p_4^i=0\\
m_i \lambda_4 - \lambda_{11} p_1^i- \lambda_{13}p_2^i- \lambda_{14} p_3^i=0
\end{cases}\qquad \forall\, i =1, \ldots, n
\end{equation}
and
\begin{equation}\label{eq:eq2}
\begin{cases}
\lambda_5 - \lambda_9 q_2^i- \lambda_{10}q_3^i- \lambda_{11} q_4^i=0\\
\lambda_6 + \lambda_9 q_1^i- \lambda_{12}q_3^i- \lambda_{13} q_4^i=0\\
\lambda_7 + \lambda_{10} q_1^i+\lambda_{12}q_2^i- \lambda_{14} q_4^i=0\\
\lambda_8 + \lambda_{11} q_1^i+ \lambda_{13}q_2^i+ \lambda_{14} q_3^i=0
\end{cases}\qquad \forall\, i =1, \ldots, n
\end{equation}
have a non-trivial solution $\lambda_1,...,\lambda_{14}$.
By summing over $i$ in each line of~\eqref{eq:eq1} and~\eqref{eq:eq2} we obtain
\[
\lambda_1= \lambda_2 = \ldots= \lambda_8=0
\]
(here we use the fact that $\overline q=\overline p=0$). Moreover, for each $i=1, \ldots, n$ the vectors $q_i, p_i$ are contained in the linear solution subspace $V\subset \mathds{R}^4$ of the following (linear) system
\begin{equation}\label{eq:eq3}
\begin{cases}
\lambda_9 x_2+ \lambda_{10}x_3+ \lambda_{11} x_4=0\\
- \lambda_9 x_1+ \lambda_{12}x_3+ \lambda_{13} x_4=0\\
- \lambda_{10} x_1- \lambda_{12}x_2+ \lambda_{14} x_4=0\\
- \lambda_{11} x_1- \lambda_{13}x_2- \lambda_{14} x_3=0
\end{cases}
\end{equation}
whose coefficient matrix is
\[
\Lambda=
\begin{pmatrix}
0 & \lambda_9& \lambda_{10} & \lambda_{11}\\
-\lambda_9 &0&\lambda_{12} & \lambda_{13}\\
-\lambda_{10} & -\lambda_{12} & 0 & \lambda_{14}\\
-\lambda_{11} & -\lambda_{13} & -\lambda_{14} & 0
\end{pmatrix}
\]
It is readily seen that $V$ is at most 2-dimensional as soon as at least one $\lambda_j$ is non-zero. This implies that the considered first integrals can be linearly dependent if and only if all $q_i$ and $p_i$ are
coplanar.
\end{proof}
\begin{rem}
If all $q_i$ and $p_i$ are coplanar, then up to a rotation we may assume that they are all contained in $\mathds{R}^2 \times \{0\}\subset \mathds{R}^4$. By definition, this implies that
\begin{align*}
\Omega_{kl}(q,p)=0 \quad \forall\, (k,l)\neq(1,2), \qquad
\Omega_{12}(q,p)=\begin{cases}
0 & \textrm{ all } q_i\ \text{and} \ p_i \textrm{ are collinear}\\
\neq 0 & \textrm{ otherwise }
\end{cases}
\end{align*}
The case in which all vectors $q_i, p_i$ are coplanar can therefore be ruled out by assuming that
$
\omega_{kl} \neq 0$ for at least two different pairs $(k,l)$. \qed
\end{rem}
As the Hamiltonian $H$ of the $n$-body problem is autonomous, $H$ provides the 15-th integral of motion. For $h\in \mathds{R}$, the invariant set
$$S_{\mathfrak{o}mega_{kl},h} := S_{\mathfrak{o}mega_{kl}} \cap \{H=h\}$$
is a smooth $8n-15$-dimensional manifold provided the 15 integral of motions are linearly independent. In particular, as seen above, we must have that $\mathfrak{o}mega_{kl}\neq 0$ for at least two different pairs $(k,l)$.
In order to understand when the 15 integrals of motions are independent one should understand in which cases
\[
\nablabla H(q,p)= (\nablabla \mathfrak{o}verline q\cdot u) \ (\nablabla \mathfrak{o}verline p\cdot v)+ \nablabla \mathrm{O}mega(q,p)\cdot w
\]
possesses non-trivial solutions $u,v \in \mathds{R}^4$ and $w \in \mathds{R}^6$. Such a condition translates into the following system
\begin{equation}\label{eq:dip15integrals-1}
\begin{cases}
-\nablabla U(q)= \mathrm{d}frac{1}{\mathfrak{o}verline m} u \begin{pmatrix}m_1 I_4\\ \simbolovettore{d}ots\\ m_n I_4\simbolovettore{a}repsilonnd{pmatrix} + w \cdot \nablabla^1 \mathrm{O}mega(q,p)\\
M^{-1} p = v \begin{pmatrix}I_4\\ \simbolovettore{d}ots\\ I_4
\simbolovettore{a}repsilonnd{pmatrix}
+ w \cdot \nablabla^2\mathrm{O}mega(q,p)
\simbolovettore{a}repsilonnd{cases}
\simbolovettore{a}repsilonnd{equation}
By summing up the $n$ four dimensional components of the first equation in \eqref{eq:dip15integrals-1}, we get
\[
0=u+ w \cdot \underbrace{\begin{pmatrix}
\overline{p_2}&\overline{p_3}& \overline{p_4}&0&0&0\\
-\overline{p_1}&0&0&\overline{p_3}& \overline{p_4}&0\\
0 & -\overline{p_1}&0& -\overline{p_2}& 0&\overline{p_4}\\
0&0&-\overline{p_1}&0& - \overline{p_2}&-\overline{p_3}
\end{pmatrix}}_{=0\ (\textrm{since } \overline p=0)}=u
\]
Similarly, summing up the $n$ four dimensional components of the second equation in \eqref{eq:dip15integrals-1} with weights $m_i$ and since we are assuming that the total mass of the system is 1, yields
\[
0=\overline p= v + w \cdot \underbrace{\begin{pmatrix}
- \overline m\,\overline{q_2}&- \overline m\,\overline{q_3}& - \overline m\,\overline{q_4}&0&0&0\\
\overline m\,\overline{q_1}&0&0&- \overline m\,\overline{q_3}& - \overline m\,\overline{q_4}&0\\
0 & \overline m\,\overline{q_1}&0& \overline m\, \overline{q_2}& 0&- \overline m\,\overline{q_4}\\
0&0& \overline m\,\overline{q_1}&0& \overline m\, \overline{q_2}& \overline m\,\overline{q_3}
\end{pmatrix}}_{=0\ (\textrm{since } \overline q=0)}= v \ \Rightarrow v=0.
\]
It follows that the 15 integrals of motion are linearly independent if and only if
\begin{equation}
\nabla H(q,p)= w \cdot \nabla \Omega(q,p)
\label{eq:finalintegral}
\end{equation}
does not possess non-trivial solutions $w\in \mathds{R}^6$. Understanding whether or not~\eqref{eq:finalintegral} admits non-trivial solutions seems to be quite involved. As this is not relevant
for the present work, we rather leave it for further work.
\begin{thebibliography}{99}
\bibitem[Abb01]{Abb01}
{\sc Abbondandolo, Alberto}
\newblock Morse theory for Hamiltonian systems.
\newblock Chapman \& Hall/CRC Research Notes in Mathematics, 425. Chapman \& Hall/CRC, Boca Raton, FL, 2001.
\bibitem[AC98]{AC98}
{\sc Albouy, Alain; Chenciner, Alain}
\newblock Le problème des n corps et les distances mutuelles. (French) [The n-body problem and mutual distances]
\newblock Invent. Math. 131 (1998), no. 1, 151--184.
\bibitem[AD20]{AD20}
{\sc Albouy, Alain; Dullin, Holger R.}
\newblock Relative equilibria of the 3-body problem in $\mathds{R}^4$.
\newblock J. Geom. Mech. 12 (2020), no. 3, 323--341.
\bibitem[AFP20]{AFP20}
{\sc Asselle, Luca; Fenucci, Marco, Portaluri, Alessandro}
\newblock Bifurcations of balanced configurations for the Newtonian n-body problem in $\mathds{R}^4$, 2020.
\newblock (preprint) \url{https://arxiv.org/abs/2011.09291}
\bibitem[AP20]{AP20}
{\sc Asselle, Luca; Portaluri, Alessandro}
\newblock Morse theory for $S$-balanced configurations in the Newtonian $n$-body problem, 2020.
\newblock (preprint) \url{https://arxiv.org/abs/2009.10118}
\bibitem[BJP14]{BJP14}
{\sc Barutello, Vivina L.; Jadanza, Riccardo D.; Portaluri, Alessandro}
\newblock Linear instability of relative equilibria for n-body problems in the plane.
\newblock J. Differential Equations 257 (2014), no. 6, 1773--1813.
\bibitem[BJP16]{BJP16}
{\sc Barutello, Vivina; Jadanza, Riccardo D.; Portaluri, Alessandro} \newblock Morse index and linear stability of the Lagrangian circular orbit in a three-body-type problem via index theory. \newblock Arch. Ration. Mech. Anal. 219 (2016), no. 1, 387--444.
\bibitem[DZ21]{DZ21}
{\sc Deng, Yanxia; Zhu, Shuqiang}
\newblock Odd index of the amended potential implies linear instability
\newblock Preprint available at \url{http://staff.ustc.edu.cn/~zhus/}
\bibitem[GPP03]{GPP03}
{\sc Giambò, Roberto; Piccione, Paolo; Portaluri, Alessandro}
\newblock On the Maslov index of symplectic paths that are not transversal to the Maslov cycle. Semi-Riemannian index theorems in the degenerate case, 2003.
\newblock (preprint) \url{https://arxiv.org/abs/math/0306187}
\bibitem[GPP04]{GPP04}
{\sc Giambò, Roberto; Piccione, Paolo; Portaluri, Alessandro}
\newblock Computation of the Maslov index and the spectral flow via partial signatures.
\newblock C. R. Math. Acad. Sci. Paris 338 (2004), no. 5, 397--402.
\bibitem[HS09]{HS09}
{\sc Hu, Xijun; Sun, Shanzhong}
\newblock Stability of relative equilibria and Morse index of central configurations
\newblock C. R. Math. Acad. Sci. Paris 347 (2009), no. 21-22, 1309--1312.
\bibitem[HPX20]{HPX20}
{\sc Hu, Xijun; Portaluri, Alessandro; Xing, Qin}
\newblock Morse index and stability of the planar N-vortex problem.
\newblock Qual. Theory Dyn. Syst. 19 (2020), no. 2, Paper No. 76
\bibitem[MS05]{MS05}
{\sc Meyer, Kenneth R.; Schmidt, Dieter S.}
\newblock Elliptic relative equilibria in the N-body problem.
\newblock J. Differential Equations 214 (2005), no. 2, 256--298.
\bibitem[Moe14]{Moe14}
{\sc Moeckel, Richard}
\newblock Central configurations.
\newblock Lecture notes given at the Centre de Recerca Matemàtica (CRM), Barcelona, January 27--31, 2014.
\bibitem[RS95]{RS95}
{\sc Robbin, Joel; Salamon, Dietmar}
\newblock The spectral flow and the Maslov index.
\newblock Bull. London Math. Soc. 27 (1995), no. 1, 1–33.
\end{thebibliography}
\vspace{1cm}
\noindent
\textsc{Dr. Luca Asselle},
Justus Liebig Universit\"at Gie\ss en,
Arndtstrasse 2,
35392, Gie\ss en,
Germany\\
E-mail: $\mathrm{luca.asselle@math.uni}$-$\mathrm{giessen.de}$
\vspace{5mm}
\noindent
\textsc{Prof. Alessandro Portaluri},
Università degli Studi di Torino,
Largo Paolo Braccini 2,
10095 Grugliasco, Torino,
Italy,
E-mail: $\mathrm{alessandro.portaluri@unito.it}$
\vspace{5mm}
\noindent
\textsc{Prof. Li Wu},
Department of Mathematics,
Shandong University,
Jinan, Shandong, 250100,
China\\
E-mail: $\mathrm{vvvli@sdu.edu.cn}$
\end{document}
\begin{document}
\title{
Digest of Quantum Stream Cipher based on Holevo-Yuen Theory
}
\author{
\IEEEauthorblockN{Masaki SOHMA$^{1}$, Osamu HIROTA$^{1,2}$ \\}
\IEEEauthorblockA{
1. Quantum ICT Research Institute, Tamagawa University\\
6-1-1, Tamagawa-gakuen, Machida, Tokyo 194-8610, Japan\\
2. Research and Development Initiative, Chuo University, \\
1-13-27, Kasuga, Bunkyou-ku, Tokyo 112-8551, Japan\\
{\footnotesize\tt E-mail:sohma@eng.tamagawa.ac.jp, hirota@lab.tamagawa.ac.jp} \vspace*{-2.64ex}}
}
\maketitle
\begin{abstract}
So far, quantum key distribution (QKD) has been the main subject in the field of quantum cryptography, but that is not quantum
cryptographic communication, it is only the ability to send keys for cryptographic purposes.
To complete cryptographic communication, a technique for encrypting data is necessary, and the conventional cryptographic
technique of mathematical symmetric key cipher or One Time Pad (OTP) is adopted in the discussion so far.
However, OTP is not the ultimate cipher for data encryption, because it does not satisfy security conditions in the modern cryptology.
Around 2000, a new quantum stream cipher was proposed as a technique to challenge the possibility of overcoming drawbacks
of OTP in practical use. Recently, we have published some review papers on it in Entropy (Open access journal) [1], and others [2,3].
This paper introduces an overview and a back ground of our paper that is entitled Quantum stream cipher based on Holevo-Yuen theory.
\end{abstract}
\IEEEpeerreviewmaketitle
\section{General View of Cryptography or Cipher in Social Network Systems }
Around 2000, the government and communication service providers have imposed the conditions shown in Fig. 1 on the development for future telecommunications security technology.
Then Y-00 quantum stream cipher was proposed as a cryptographic technique that satisfies these conditions and is currently undergoing commercialization.
In the recent Book [4] and a technical paper [5], S. Tsujii who is one of the leaders of the cyber security community
and industry explains the current situation of the cyber security community
and industry on the trend of the security technology as follows. \\
``Quantum computer capable of breaking public key cryptographies, such as RSA or elliptic curve cryptography,
that relies on mathematical decipherability due to prime number factorization or discrete logarithm problems, will not be developed within 20 years.
Nevertheless, the jeopardy due to the cooperative effect with the development of mathematics remains.
Thus, NIST is in the process of selecting candidates for quantum computer-resistant cryptography (see Appendix [A]). The applications of cryptography
for confidentiality are categorized into the confidential transmission of data itself and the key delivery or storage for that purpose.
Then from the viewpoint of academic methods, they are categorized into mathematical cryptography and quantum cryptography.
In the former case, there are two types such as public key cryptography and symmetric key cipher.
Public key cryptography has the advantage of securely delivering and storing the initial key for data encryption and transmission.
But its processing speed is slow, so symmetric key cipher is responsible for data encryption.
On the other hand, quantum cryptography is a cryptographic technique that uses quantum phenomena to improve security performance.
The technique that uses quantum communication to perform the key delivery function of public key cryptography is
quantum key distribution (QKD: BB-84 et al), while the technique that uses quantum communication to perform the cryptographic transmission of data itself
is called Y-00 quantum stream cipher (see Fig. 2). QKD cannot be used to supply keys to One Time Pad cipher, because its data rate is too slow.
Y-00 for data encryption is extremely novel in its ability to prevent eavesdroppers from obtaining the ciphertext of the symmetric key cipher.
In addition, it is amazing that the strong quantum-ness is created by modulation scheme with multi-ary coherent state signals without any quantum device."
\begin{figure}
\caption{Basic requirements of performance to new technologies}
\end{figure}
\begin{figure}
\caption{Classification of cryptographic techniques}
\end{figure}
Let's now turn our focus to quantum cryptography.
Both of these quantum technologies are based on designing communication systems to make it difficult for eavesdroppers to steal signals on
the communication channels. Such a function to protect the signal itself cannot be realized by mathematical cryptography.
As mentioned above, there are two possible system operation methods for these quantum cryptography techniques.
One is to use BB-84 quantum key distribution for key delivery and conventional mathematical cryptography for authentication and data encryption.
The other is to use Y-00 quantum stream cipher for data encryption and conventional public key cryptography (or quantum computer resistant type)
for authentication and key delivery.
These quantum cryptography technologies are positioned as technologies to ensure the ultimate security of communication between data center stations,
that is of special importance in next-generation 5G and 6G systems.
In the following, we will explain the technical contents, applicability to the real world, and development trends.
\section{ Current Status of Quantum Communication Security Technology}
\subsection{Quantum cryptography}
As introduced in the above section, there are two quantum cryptography techniques. Let us give their brief introduction below.\\
(1) Quantum Key Distribution\\
BB-84 quantum key distribution (QKD) was proposed by C. H. Bennett and G. Brassard in 1984. It is a protocol to share a secret key sequence by using photon communication,
that is guaranteed to be quantum nature. Since the photons used in this protocol are weak light, the transmission speed and distance are limited.
In addition, many of the sequence of photons that carry information are lost due to attenuation effects in the transmission line,
and the sequence of photons that reaches the receiver is also subject to errors due to noise effects.
So the operation involves discarding the majority of the received bit sequence. Therefore, data itself cannot be sent, only random numbers can be sent.
Thus only the delivery of the secret key for symmetric key cipher is possible. This is why it is called QKD.
Recently, many newspapers have reported that several R $\&$ D groups can provide the commercial systems of QKD.
The transmission speed is the order of 100 Kbit/sec, and transmission length is below 100 Km.
The satellite system is one of the solutions to cope with the distance. But the transmission speed is so small.
In any case, if one tries to increase the transmission speed, there is a trade-off and one has to shorten the relay interval.
Since the maximum transmission speed is about a megabit, it is difficult to supply keys to the One Time Pad cipher for data after key delivery,
and it is likely to be limited to supplying initial keys (secret keys) for AES and others (See Appendix [B]).\\
(2) Quantum Stream Cipher\\
Y-00 quantum stream cipher is a protocol for physical symmetric key cipher proposed by H.P. Yuen of Northwestern University in the DARPA project (2000) [6].
The details are explained in the next section, but a simple concept is presented here.
\begin{figure}
\caption{Principle of operation of Y-00 quantum stream cipher. The expander of key in the both cases means PRNG
that is employed in the conventional cipher system. Classical signal means that they have distinguishability, and quantum signal means impossible
to distinguish them precisely. Y-00 Encryption is the function of converting a classical signal into a quantum signal. It is also called quantum modulation.
}
\end{figure}
This technique is characterized by the fact that it does not allow the physical signals consisting of
the mathematical random generator and information data to be obtained without error.
In this scheme, the ciphertext in Y-00 circuit system of the mathematical cipher consisting of the generator and data, which is the target of the eavesdropper,
is described by $ y=\alpha_i(X,f_g(K_s),R_p) $.
Then, we design the system such that the ciphertext $ y=\alpha_i(X,f_g(K_s),R_p) $ is mapped into ensemble of coherent state $\mid \Psi (X,K_s, R_p)>$
with the quantumness based on Holevo--Yuen theory [7,8,9].
This is called Y-00 signal which corresponds to ciphertext on the Hilbert space.
Thus, the ciphertext as the classical signal is protected by the quantumness. Let us describe it shortly.
Although ordinary laser light of high power is used as the transmission signal, signals on the communication channel can be made to
have very strong quantum properties in the sense of quantum detection theory. This is Y-00 principle [6].
That is, a large number of physical binary light communication base is prepared to transmit electric binary data, and the binary data is transmitted by using
one communication base which is randomly selected from many communication bases by a mathematical cipher. Let $M$ be the number of the base.
The optical signals on the communication channel become ultra-multiple-valued signals ($2M=4096$ or more values are common) against
the eavesdropper without the knowledge of communication base.
At this time, strong quantum nature in the signal ensemble appears even if the one signal is in high power light, when it is constructed by such ultra-multiple-valued.
In other words, this method means that the quantum nature in the sense of quantum detection theory is created artificially by modulation schemes,
so that it does not require light with strong physical quantum nature such as photon. The Y-00 signals of the length $m$ (number of slot) are described as follows:
\begin{eqnarray}
&&\mid \Psi (X,K_s, R_p)> =\mid \alpha_i(X,f_g(K_s),R_p) >_1 \nonumber \\
&&\otimes \mid \alpha_j(X,f_g(K_s),R_p) >_2 \dots \dots \nonumber \\
&&\otimes \mid \alpha_k(X,f_g(K_s),R_p) >_m
\end{eqnarray}
where $\mid \alpha_i(X,f_g(K_s),R_p) >$ is coherent state with amplitude $\alpha(\cdot)$, $i,j,k =1,2,3, \dots 2M$, $X$ is plaintext, $f_g(K_s)$ is
a mathematical pseudo random function of secret key $K_s$, and $R_p$ is additional randomization. The set of these coherent states
is designed to be strong non-orthogonal property,
even if each amplitude of the signals is $ |\alpha_k(X,f_g(K_s),R_p) | \gg 1$.
A legitimate receiver with the knowledge for communication base to which the data is sent can ignore the quantum nature of the data, because it is a binary
transmission by high power signal. That is, he can receive the data of error-free.
On the other hand, an eavesdropper, who does not know
the information of communication base, must receive a sequence of an ultra-multi-valued optical signal that consists of non-orthogonal quantum states of Eq.~(1).
The quantum noise generated by quantum measurement based on Holevo-Yuen theory on quantum detection masks the received signal,
resulting in errors.
Thus, even if the eavesdropper tries to record the ciphertext, the masking effect of the quantum noise makes it impossible to accurately recover the ciphertext.
This fact is a novel function in the cryptology. Fig 3 shows the scheme of Y-00 protocol. And Fig 4 shows the experimental demonstration of the advantage creation principle of security based on Holevo-Yuen theory.
\begin{figure}
\caption{Experimental demonstration of advantage creation based on Holevo-Yuen theory. Quantum ciphertext for eavesdropper consists of 2$M$ densely packed non-orthogonal quantum coherent state signals. As a result, Holevo-Yuen theory guarantees that an eavesdropper cannot receive the correct ciphertext, or cannot copy the ciphertext.}
\end{figure}
\subsection{Comparison of services based on each quantum cryptosystem}
QKD and Y-00 are about 40 and 20 years old, respectively, since they were invented.
At the time of their invention, the principle models of both quantum cryptography technologies were not very attractive in terms of security
and communication performance.
But nowadays, the systems and security assurance technologies of both technologies have evolved dramatically.
Based on the results, business models for security services using these quantum cryptography technologies have been proposed.
Fig.5 shows the cryptography communication scheme based on two types of quantum cryptographies,
and Fig.6 shows the current status of the system performance.
\begin{figure}
\caption{Two types of quantum cryptographic communication schemes }
\end{figure}
\begin{figure}
\caption{Comparison of product capabilities for two types of quantum cryptography services }
\end{figure}
\section{Feature of quantum stream cipher }
In the near future, optical networks will move toward even higher speeds, but Y-00 quantum stream cipher can solve technical requirement from the real world.
Since there are few introductions to this technology, we describe the details of this technology at the following.
\subsection{Basic Scheme }
As explained in the previous section, the quantum stream cipher is expected to accelerate advanced application in the future communication system.
The reason for this is that this scheme can utilize ordinary optical communication devices and is compatible with existing communication systems.
In its design, optical communication, quantum theory, and cryptography are effectively integrated.
Therefore, it is also called "Y-00 optical communication quantum cryptography" in implementation studies.
Pioneering researches on practical experiment for this system have been reported by Northwestern University [10,11], Tamagawa University-Panasonic [12], and Hitachi Ltd [13].
Theories of system design for the basic system have been given by Nair and others [14,15,16,17].
Let us explain the principle of Y-00 quantum stream cipher. First, Y-00 protocol starts by specifying the signal system that use as the transmission medium.
The actual signal to be transmitted is selected in terms of amplitude or intensity, phase, quadrature amplitude, etc., having coherent state $|\alpha \rangle $ in quantum optics.
Then the design is made accordingly.
Depending on the type of signal to be used, it is called as ISK:Y-00, PSK:Y-00, QAM:Y-00, etc.
Here, one communication base consisting of various binary signals is randomly selected by PRNG (or AES) in each data slot. Then a binary data is transmitted by using the communication
base selected.
Thus ultra-multi-valued signals appear to be transmitted on the channel. The eavesdropper has to receive the ultra-multi-valued signal, because she does not know
which communication base was selected.
\subsection{Progress in Security Theory}
The BB-84 protocol is a key delivery technique for securely sharing secret key sequences (random numbers).
The Y-00 protocol is a symmetric key stream cipher technique for cryptographically transmitting data.
As mentioned above, both quantum cryptography techniques enhance security by preventing eavesdroppers from taking the exact signal on the communication channel.
The models that explain the principle of such physical technology are called the ``basic model". It is this basic model that can be found in textbooks for beginners.
Let us start with QKD such as BB-84.
If the basic model of the BB-84 protocol is implemented in a real optical fiber communication system, it can be eavesdropped.
Therefore, in order to guarantee security even in systems with noise and energy loss, a technique that combines error correction and privacy amplification
(universal hashing) was proposed, and then a theoretical discussion of security assurance became possible.
That is, in 2000, P. Shor, et al proposed a mathematical security theory for BB-84 on an abstract mathematical model called the Shor model,
which was later improved by R. Renner.
In brief, the security of the BB-84 protocol is evaluated by quantifying quantum trace distance of the two density operators to the ideal random sequence
and the random sequence shared by the real system.
This is the current standard theory for the security of QKD. It is very difficult to realize a real system that
the quantum trace distance is sufficiently small.
On the other hand, from the beginning, Y-00 protocol can consider the effects of non-ideal communication systems.
As mentioned at the above section, the selection of communication base of Y-00 protocol is encrypted by conventional mathematical cipher.
Y-00 quantum ciphertext, which is an optical signal, is emitted as the transmission signal.
So, the ciphertext of the mathematical symmetric key cipher that an eavesdropper needs to decipher corresponds to Y-00 quantum ciphertext.
However, since the set of ultra-multi-valued signals, which is Y-00 quantum ciphertext, are non-orthogonal quantum state ensemble,
her received signals are inaccurate due to errors caused by quantum noise.
Therefore, the discussion based on the computational security of the mathematical cryptographic part of Y-00 mechanism to be attacked is
replaced by the problem of combination of information theoretic analysis and computational analysis.
However, we should emphasize that the discussion with infinite number or asymptotic theory are not our concern, because our concern is a physical system
under practical situation. For example, if attacker needs circuits of number of the size of the universe to perform the brute-force attack, the system is unbreakable.
Or, if attacker needs 100 years to collect the ciphertext for trying the crypto-analysis, it is also unbreakable.
\subsection{Randomization technology for quantitative security performance (\textbf {Errata of the original paper [1]})}
In the early days when Y-00 was invented, the model was used so called the basic model, and it just explained the principle.
In order to achieve sufficient quantitative security, the randomization technique described here is necessary.
In the above criteria, Y-00 scheme has a potential to improve quantitative security by additional randomization technology,
because all physical parameters are finite.
In this point of view, we have developed a new concept such as ``quantum noise diffusion technology"[18,19].
In addition, several randomizations based on Yuen's idea [6] have been discussed [20].
{\it ``Although we have, at present, no general theory on randomization, using these techniques, it is expected to have security performance that cannot be achieved by conventional cipher. One of them is a special relation between secret key and data (plaintext). That is, under the condition of $H(X_n \mid C_n^B, K_s)=0$,
one can expect the following security performance"}:
\begin{equation}
H(X_n \mid C_n^E, K_s) \ne 0
\end{equation}
for certain finite $n > |K_s|$.
$n$ is the length of the plaintext,
$C_n^B$ means the ciphertext for Bob (signal received by Bob), and $C_n^E$ means the ciphertext for Eve (signal received by Eve).
This is an amazing capability, and one that cannot be achieved even with any conventional cipher including OTP (see Appendix [C]).
In this way, we can say that Y-00 quantum stream cipher has ability to provide security that exceeds the performance of
conventional cryptography while maintaining the capabilities of ordinary optical communication.
\section{ Concrete Applications of Quantum Stream Cipher}
As mentioned above, Y-00 quantum stream ciphers have not yet reached their ideal performance,
but in practical use, they have achieved a high level of security that cannot be achieved with conventional techniques,
and it can be said that they are now at a level where they can be introduced to the market.
Since quantum stream cipher is a physical cipher, it requires a dedicated transmitter and receiver. So far, principle models for commercial purposes have been developed at Northwestern University, Tamagawa University, Panasonic, and Hitachi, Ltd. Fig.7 shows the transceivers of each research institute. The communication speed is 1 Gbit/sec to 10 Gbit/sec and the communication distance is 100 km to 1,000 km. Using these transceivers, operational tests were conducted in a real optical communication network.
Here, we introduce examples of the use case of Y-00 quantum stream cipher.\\
\subsection{Optical Fiber Communication}
Large amounts of important data are instantaneously exchanged on the communication lines between data centers
that various data is accumulated.
It is important from the viewpoint of system protection to eliminate the risk that the data is copied in its entirety
from communication channel.
We believe that Y-00 quantum stream cipher is the best technology for this purpose.
On the other hand, this technology can be used for optical amplifier relay system. Hence, it can apply to
the current optical communication systems.
Transceivers capable of cryptographic transmission at speeds from one Gbit/sec to 10 Gbit/sec have already been realized,
and by wavelength division multiplexing, 100 Gbit/sec system has been tested. Also, communication distances of 1,000 km to
10,000 km have been demonstrated. In off-line experiments, 10 Tbit/sec has been demonstrated.
In general, a dedicated line such as dark fiber is required.
If we want to apply this technology to network function,
we need the optical switching technology developed by the National Institute of Advanced Industrial Science and Technology (AIST).
Thus, in collaboration with AIST and other organizations, we have successfully demonstrated the feasibility of using
Y-00 transceiver in testbed optical switching systems.
Furthermore, the references [21-28] show the recent activities of the experimental research group at Tamagawa University towards
practical application to the real world.
\begin{figure}
\caption{ Commercial Y-00 Transceiver for 1 Git/sec optical Ethernet. This can be mass produced.}
\end{figure}
\subsection{ Optical Satellite Communication}
Y-00 quantum stream cipher, which was developed for fiber-optic communications, can also be applied to satellite communications.
In satellite communication applications, rate of operation is an important factor because communication performance depends on weather conditions.
With QKD, it is difficult to keep communications up and running except on clear-air nights.
In the case of Y-00, communication by any satellite system can be almost ensured when the weather is clear.
In case of bad weather, the effects of atmospheric turbulence and scattering phenomena need to be considered.
We are currently analyzing the performance of the system in such cases at 10 Gbps operation [29].
\subsection{Optical Communication from Base at Moon to Earth}
The Japanese government has initiated a study to increase the user transmission rate of optical space communications from 1.8 Gbps to
more than 10 Gbps. Furthermore, in the future, the government aims to achieve higher transmission rates in ultra-long distance
communications required for lunar and planetary exploration. This plan is called LUCAS.
We have started to design for an implementation of 1 Gbps communication system at a transmission distance of 380,000 km
between the Moon and the Earth using the high-speed performance of the Y-00 quantum stream cipher.
\section{Conclusion}
The current optical network was not laid out in a planned manner, but was configured by extending the existing communication
lines for adapting the demand.
In the future, the configuration and specifications of the optical network will be determined following to new urban planning.
An actual example is the Smart City that Toyota Motor Corporation et al have disclosed as a future plan.
Many ideas are also being discussed in other organizations. Recently, NTT has announced a future network concept so called IOWN.
In these systems, the security of the all optical network with ultra-high speed is also important issue.
The group of QKD and the group of Y-00 are promoting their respective technologies.
Y-00 quantum stream cipher is a technology that can realize the specification of high speed and long communication distance.
In addition, the signals of Y-00 cipher with ultra-multiple valued scheme for coherent state signal, so called quantum modulation, can have stronger
quantum properties than QKD in the sense of quantum detection theory.
So, the security is protected by many quantum no-go theorems.
Although it is difficult to make an accurate prediction, there is a good chance that such a new technology will be used in the future.
In view of the situation described in this paper, Y-00 quantum stream cipher will contribute to real world application of
quantum technology for Society 5.0, and new business development can be expected.
Finally, we would like to introduce that Chinese research institutes have recently been actively working on Y-00 quantum stream cipher.
Fig.8 shows a list of academic papers on their activities [30-37].
It is expected that many research institutes will participate in this technological development.
\begin{figure}
\caption{Research activities on Y-00 quantum stream cipher in China }
\end{figure}
\section*{\textbf {Explanation of Symbols}}
Here we give the explanation on the several symbols.\\
\\
(a) Conventional cipher: \\
$X$ is plaintext; $\{0,1\}$, $K_s$ is secret key, $f(K_s) $is running key ;$\{0,1\}$,
$C$ is conventional ciphertext; $\{0,1\}$.\\
(b) Y-00 quantum stream cipher: \\
$X$ is plaintext; $\{0,1\}$, $K_s$ is secret key, $f(K_s)$ is running key by PRNG ;$\{0,1\}$,
Y-00 running key is $f(K_s) \mapsto $ $ f_g(K_s)$; $\{1,2,3,\dots M\}$,
Y-00 ciphertext in the circuit is $y= \alpha_i(X,f_g(K_s),R_p) $; $\{1,2,3,\dots 2M\}$,
Y-00 signal is $ \mid \alpha_i(X,f_g(K_s),R_p) >$ =Quantum ciphertext, $C_n^B$ is ciphertext received by Bob:$\{0,1\}$,
$C_n^E$ is ciphertext :$\{0,1\}$for Eve transformed from $M$-ary received signal,
$R_p$ is additional randomization.
\appendix
\section* {[A] Quantum computer and quantum computer-resistant cryptography}
It is difficult to predict the realization of a quantum computer capable of cryptanalysis.
It has been discovered in our recent paper [38] that a new type of error so called nonlinear error or bust error occurs in general quantum computer.
Therein, an error probability for single qubit increases depending on number of qubits in the system.
These nonlinear errors and bust errors are caused by the recurrence effect due to quantum correlation or the collective decoherence, and by cosmic ray.
They give a serious damage to scalable quantum computer, and give serious degradations of the capability of quantum computer.
In addition, a number of previously unknown and extremely difficult problems in the development for an error correctable quantum
computer have been reported.
Thus, the capability of a real quantum computer is strictly limited and that the current cryptography is not subject to the danger posed
by current quantum computers.
However, we believe that the ideal quantum computer will be realized in the future.
So, one should develop quantum computer-resistant cryptosystems based on mathematical analysis, or by physical cipher on
the assumption that an ideal quantum computer or new mathematical discovery can be realized in the future.
Recently, J. P. Mattsson, B. Smeets, and E. Thormarker [39] have provided an excellent survey for the NIST quantum computer-resistant
cryptography standardization effort, the migration to quantum-resistant public-key cryptography, and the relevance of QKD as a complement to conventional cryptography.
In particular, these algorithms of quantum-resistant public-key cryptography can execute completely in software on classical computers,
in contrast to e.g., QKD which requires very expensive custom hardware.
For functions of authentication, signature, and key distribution, such capability provided by software is the most important
in the real world application.
\section* {[B] Position of security system based on QKD in practical applications}
To complete full quantum secure communication systems, at present, we are challenged to cope with the following problems, discussing a new type of QKD.
Recently, NSA [40], NCSC [41] and ANSSI [42] announced the international stance on QKD.
They have a negative view of QKD, because the communication performance of QKD based on weak signal is not enough for
applications to the real situation. Let us denote their comments in the following, respectively.
\subsection{NSA (USA) }
(a) NSA does not recommend the usage of QKD for securing the transmission of data in National Security Systems :(NSS) \\
(b) QKD utilizes the unique properties of quantum mechanical systems to generate and distribute cryptographic keying material
using special purpose technology. Quantum cryptography uses the same physics principles and similar technology to communicate over
a dedicated communications link. Published theories suggest that physics allows QKD to detect the presence of an eavesdropper,
a feature not provided in standard cryptography.\\
QKD and similar quantum cryptography vendors and the media occasionally state bold claims based on theory e.g., that this technology offers guaranteed security based on the laws of physics. Communications needs and security requirements physically conflict in the use of QKD, and the engineering required to balance these fundamental issues has extremely low tolerance for error. Thus, security of QKD is highly implementation dependent rather than assured by laws of physics. \\
${\bf Technical}$ ${\bf limitations}$\\
(1) Quantum key distribution is only a partial solution. \\
(2) Quantum key distribution requires special purpose equipment.\\
(3) Quantum key distribution increases infrastructure costs and insider threat risks. \\
(4) Securing and validating quantum key distribution is a significant challenge.\\
(5) Quantum key distribution increases the risk of denial of service. \\
For all of these reasons, NSA does not support the usage of QKD to protect communications in National Security Systems, and does not anticipate certifying or approving any QKD security products for usage by NSS customers unless these limitations are overcome.
\subsection{NCSC (UK) }
Given the specialized hardware requirements of QKD over classical cryptographic key agreement mechanisms and the requirement for authentication in all use cases, the NCSC does not endorse the use of QKD for any government or military applications, and cautions against sole reliance on QKD for business critical networks, especially in Critical National Infrastructure sectors. In addition, we advise that any other organizations considering the use of QKD as a key agreement mechanism ensure that robust quantum-safe cryptographic mechanisms for authentication are implemented alongside them. NCSC advice is that the best mitigation against the threat of quantum computers is quantum safe cryptography. Our white paper on quantum-safe cryptography is available on the NCSC website. The NCSC design principles for high assurance systems, which set out the basis under which products and systems should be designed to resist elevated threats, is also available
\subsection{ANSSI (The French National Agency for the Security of Information Systems) }
Quantum Key Distribution (QKD) presents itself as a technology functionally equivalent to common asymmetric key agreement schemes that are used in nearly all secure communication protocols over the Internet or in private networks. The defining characteristic of QKD is its alleged superior secrecy guarantee that would justify its use for high security applications. However, deployment constraints specific to QKD hinder large-scale deployments with high practical security. Furthermore, new threats on existing cryptography, and in particular the emergence of universal quantum computers, can be countered without resorting to QKD, in a way that ensures the future of secure communications. Although QKD can be used in a variety of niche applications, it is therefore not to be considered as the next step for secure communications
\\
\section* {[C] Drawback of One Time Pad cipher}
OTP is extremely inefficient for the encryption of data, because it requires a key sequence of the same length as the data sequence.
However, it has the following benefit:\\
(1) Ciphertext only attack on data\\
Since the secret key : $K_s$ is a perfect random number, the ciphertext : $C$ is also a perfect random number.
Therefore, obtaining the ciphertext gives no information about the plaintext : $X$. So one has $H(X|C)=H(X)$. At this point, it is called perfect information-theoretic security or unconditional security. \\
(2) Known plaintext attack on key\\
In OTP, if the length of the known plaintext is $|X|=N$, then the key of the same length $N$ can be determined with certainty by obtaining a ciphertext of the same length. However, since the key sequence is completely random, subsequent key sequences cannot be predicted. \\
\\
On the other hand, it has the following drawback:\\
(1) Falsification attack\\
If Eve obtains the correct ciphertext, she can invert 0 and 1 and resend it. Then the 1 and 0 of the data are inverted.
As an example, if the data is yes or no, the falsification will be successful. So OTP is not secure against falsification attacks [43].\\
(2) Partial known plaintext attack on data\\
If a plaintext sequence has a correlation (e.g., a word), then the possibility of identifying a word arises through a brute force search with
a partial known plaintext attack [44]. These are some examples of the fact that OTP does not satisfy the security requirements of modern cryptology.\\
\end{document}
\begin{document}
\bibliographystyle{plain}
\title{
{\bf Domination and Closure}
}
\author{
John L. Pfaltz\\
Dept. of Computer Science, University of Virginia \\
{\tt jlp@virginia.edu } \\
}
\maketitle
\begin{abstract}
An expansive, monotone operator is dominating;
if it is also idempotent it is a closure operator.
Although they have distinct properties, these two kinds of
discrete operators are also intertwined.
Every closure is dominating; every dominating operator
embodies a closure.
Both can be the basis of continuous set transformations.
Dominating operators that exhibit categorical pull-back
constitute a Galois connection and must be antimatroid
closure operators.
Applications involving social networks and learning spaces
are suggested.
\end{abstract}
\noindent
{\bf keywords:} antimatroid, operator, pull-back, category, closure, domination.
\section {Introduction} \label{I}
The concept of ``domination'' is an important one in graph theory, where a
set of nodes ``dominates'' its neighbors.
An extensive treatment can be found in
\cite{HayHedSla98b,HayHedSla98,Sum90},
and an interesting historical application in
\cite{ReVRos00}.
Concepts of ``closure'' arise in many contexts, including topology,
algebra,
and its closely related concept of ``convexity''
\cite{Chv09}.
This paper develops both concepts in terms of general, discrete set systems,
where $\mbox{$\Delta$}$ and $\mbox{$\varphi$}$ are dominating and closure operators
respectively.
In Section \ref{CO}, we develop the connection between domination
and closure that seems to be largely unexplored.
Every domination operator gives rise to a closure operator, and every
closure operator is dominating.
Section \ref{T} explores the properties of domination and closure under
functional transformation, especially continuous transformation, and
briefly reviews the definition of closure in terms of Galois transformations,
or connections.
It is natural to regard collections of functions, whether operators or
transformations, as a category.
In Section \ref{CC}, we develop this theme and introduce the
category $\mbox{${\bf Dom}$}$ to denote all domination
morphisms over discrete sets of $S$.
We show that if a subcategory $\mbox{${\cal C}$} \subset \mbox{${\bf Dom}$}$ exhibits the
pull-back property, then it consists of only antimatroid closure operators.
We believe this approach to the study of set systems, including
directed and undirected networks, solely in terms of set-valued operators
and the representation of set system dynamics by
set-valued transformations from one discrete set system, $\mbox{${\cal S}$}$,
to another, $\mbox{${\cal S}$}'$,
may be original.
\section {Set Systems} \label{SS}
Let $S$ be any finite set.
By a set system, $\mbox{${\cal S}$}$, we mean a collection of subsets of $2^S$, the power
set of $S$, together with various operators, $\alpha, \beta, \ldots$
defined on this collection.
Elements of the ground set, $S$, we denote
by lower case letters $x, y, z$.
In the general theory, the nature of these members is unimportant;
although in some
applications
they can be significant.
The sets of $\mbox{${\cal S}$}$ are denoted by upper case letters, $X, Y, Z$.
We assume that $S = \bigcup_{X \in \mbox{${\cal S}$}} X$.
Sets can also be denoted by their constituent members, such as $\{x, y, z\}$,
or more simply by $xyz$.
One can regard $xyz$ to be the label of a set.
Whenever we reference an element, such as $x$, we will actually be denoting
the singleton set $\{x\}$.
And if we use a familiar expression, such as $x \in Y$, it can be interpreted
as $\{x\} \ \cap \ Y \neq \mbox{$\O$}$.
This is a paper about sets, their properties and their transformations.
The cardinality of a set $Y$ is denoted $| Y |$.
\subsection{Operators on $\mbox{${\cal S}$}$} \label{OS}
An unary {\bf operator} $\mbox{$\alpha$}$ on $\mbox{${\cal S}$}$ is a function defined on the sets of $\mbox{${\cal S}$}$,
that is, for all $Y \in \mbox{${\cal S}$}$, $Y.\mbox{$\alpha$} \in \mbox{${\cal S}$}$.
Operators are expressed in suffix notation because they are ``set-valued''.\footnote
{
Here we follow a convention that is more often used by algebraists
\cite{ZarSam58}.
Set valued functions/transformations, $f$, are presented in suffix notation,
$e.g.$ $S' = S.f$; single valued functions, $f$, on set elements are denoted
by prefix notation, $e.g.$ $e' = f(e)$.
}
An operator $\mbox{$\alpha$}$ is said to be:
\\
\hspace*{0.4in}
{\bf contractive}, if $Y.\mbox{$\alpha$} \subseteq Y$;
\\
\hspace*{0.4in}
{\bf expansive}, if $Y \subseteq Y.\mbox{$\alpha$}$;
\\
\hspace*{0.4in}
{\bf monotone}, if $X \subseteq Y$ implies $X.\mbox{$\alpha$} \subseteq Y.\mbox{$\alpha$}$;
\\
\hspace*{0.4in}
{\bf idempotent}, if $Y.\mbox{$\alpha$}.\mbox{$\alpha$} = Y.\mbox{$\alpha$}$;
\\
\hspace*{0.4in}
{\bf path independent} if $(X.\mbox{$\alpha$} \cup Y.\mbox{$\alpha$}).\mbox{$\alpha$} = (X \cup Y).\mbox{$\alpha$}$.
\noindent
Contractive operators are often called {\bf choice operators}
\cite{Kos99,Sen77}.
Path independent choice operators are important in economic theory
\cite{Johdea01,MonRad01}.
Operators, $\mbox{$\Delta$}$, that are expansive and monotone we call
{\bf domination} (or dominating) operators.
They are central to this paper.
If $X.\mbox{$\alpha$} = Y.\mbox{$\alpha$} = Z$ then $X$ and $Y$ are said to be {\bf generators} of $Z$ (with
respect to $\mbox{$\alpha$}$).
A set $Y$ is said to be a {\bf minimal} generator of $Z$ if for all $X \subset Y$,
$X.\mbox{$\alpha$} \neq Z$.
The operator $\mbox{$\alpha$}$ is said to be {\bf uniquely generated} if for all $Z$,
$Y.\mbox{$\alpha$} = Z$ implies there exists a unique minimal generator $X \subseteq Y$, such that $X.\mbox{$\alpha$} = Z$.
When $X.\mbox{$\alpha$} = Y.\mbox{$\alpha$}$, we say $X$ and $Y$ are {\bf $\mbox{$\alpha$}$-equivalent},
denoted $X =_{\mbox{$\alpha$}} Y$.
\subsection {Extended Operators} \label{EO}
An operator $\mbox{$\alpha$}$ is said to be {\bf extended} if for all $Y \in \mbox{${\cal S}$}$,
$Y.\mbox{$\alpha$} = \bigcup_{y \in Y} \{y\}.\mbox{$\alpha$}$.
That is, $\mbox{$\alpha$}$ has been extended from its definition on singleton subsets.
This is, perhaps, the most common way of defining set-valued operators.
It is not difficult to show that:
\begin{samepage}
\begin{proposition}\label{p.EX1}
If $\mbox{$\alpha$}$ is an extended operator, then for all $z \in Y.\mbox{$\alpha$}$, there exists
$y \in Y$ such that $z \in \{y\}.\mbox{$\alpha$}$.
\end{proposition}
\end{samepage}
\begin{samepage}
\begin{corollary}\label{c.EX2}
If $\mbox{$\alpha$}$ is an extended operator, then $\mbox{$\alpha$}$ is a monotone operator.
\end{corollary}
\end{samepage}
\begin{samepage}
\begin{corollary}\label{c.EX3}
If $\mbox{$\alpha$}$ is an extended operator, then $\mbox{$\O$}.\mbox{$\alpha$} = \mbox{$\O$}$.
\end{corollary}
\end{samepage}
If an operator $\mbox{$\alpha$}$ is extended, then it can be visualized as a simple graph with
$(x, y) \in E$ if and only if $y \in \{x\}.\mbox{$\alpha$}$.
We call this a {\bf graphic representation}.
\subsection{Dominating Operators} \label{DO}
Throughout this paper we concentrate on $expansive$, $monotone$ operators which
we called domination
operators, $\mbox{$\Delta$}$, in Section \ref{OS}.
We call $Y.\mbox{$\Delta$}$ the {\bf region} dominated by $Y$.
If $S$ denotes the nodes of a network $\mbox{${\cal N}$}$, one can define
$X.\mbox{$\Delta$} = X \cup \{ y | \exists x \in X, (x, y) \in E \}$.
$X$ is said to ``dominate'' $X.\mbox{$\Delta$}$ and there is a large literature, called
``domination theory'', devoted to the combinatorial properties of the minimal
generators, $X$, when
$X.\mbox{$\Delta$} = S$
\cite{HayHedSla98}.
Whence the term ``domination'' operator.
However, domination operators need not be graphically representable.
Consider the following $\mbox{$\Delta$}$ defined on $S = \{a, b, c, d\}$.
For the singleton sets, let
$\{a\}.\mbox{$\Delta$} = \{ac\}$,
$\{b\}.\mbox{$\Delta$} = \{bc\}$,
$\{c\}.\mbox{$\Delta$} = \{cd\}$, and
$\{d\}.\mbox{$\Delta$} = \{d\}$.
Except for $\{ab\}$, let $Y.\mbox{$\Delta$} = \bigcup_{y \in Y}\{y\}.\mbox{$\Delta$}$, but
let $\{ab\}.\mbox{$\Delta$} = \{abcd\}$.
This is not a simple extension of $\{a\}.\mbox{$\Delta$}$ and $\{b\}.\mbox{$\Delta$}$;
yet $\mbox{$\Delta$}$ is well defined.
It should not be surprising that a larger set might have a
larger radius of domination.
If the expansive, monotone operator, $\mbox{$\Delta$}$, is also idempotent,
it is called a
{\bf closure operator}, $\mbox{$\varphi$}$.
It is sometimes convenient to distinguish that part of a dominated region $Y.\mbox{$\Delta$}$
from its generator $Y$.
We call $Y.\mbox{$\eta$} = Y.\mbox{$\Delta$} \mbox{$-$} Y$ the dominated {\bf neighborhood} of $Y$.
Observe that, as an operator, $\mbox{$\eta$}$ is not expansive and generally is not
monotone.
\section {Closure Operators} \label{CO}
The concept of closure appears to be an important theme in many discrete systems
\cite{Pfa08,Pfa13}.
A closure system can
be defined by simply enumerating a collection, $\mbox{${\cal C}$}$, of sets which are said to
be {\bf closed}.
The union of all the subsets of $\mbox{${\cal S}$}$ is assumed to be in $\mbox{${\cal C}$}$.
The only other constraint is that if the sets $X$ and $Y$ are in $\mbox{${\cal C}$}$, $i.e.$ are
closed, then $X \ \cap \ Y \in \mbox{${\cal C}$}$, must be closed.
Given such a collection, $\mbox{${\cal C}$}$, of closed sets
we can then define a {\bf closure operator}, $\mbox{$\varphi$}$, on $\mbox{${\cal S}$}$ by
letting $Y.\mbox{$\varphi$}$ denote the smallest set $C_i \in \mbox{${\cal C}$}$ such that $Y \subseteq C_i$.
Since $\mbox{${\cal C}$}$ is closed
under intersection, $\mbox{$\varphi$}$ is single valued and well
defined.
It is well known
\cite{Ore62,Pfa96}
that this definition of closure is equivalent to
the one given in section \ref{OS}, that is
``an operator $\mbox{$\varphi$}$ is a closure operator if and only if
\\
\hspace*{0.4in}
$Y \subseteq Y.\mbox{$\varphi$}$, expansive;
\\
\hspace*{0.4in}
$X \subseteq Y$ implies $X.\mbox{$\varphi$} \subseteq Y.\mbox{$\varphi$}$, monotone; and
\\
\hspace*{0.4in}
$Y.\mbox{$\varphi$}.\mbox{$\varphi$} = Y.\mbox{$\varphi$}$, idempotent''.
\\
Consequently, every closure operator, $\mbox{$\varphi$}$, is a dominating operator because it is
monotone and expansive.
A dominating operator, $\mbox{$\Delta$}$, is a closure operator, $\mbox{$\varphi$}$
only if it is idempotent.
For any monotone operator, $\mbox{$\alpha$}, \mbox{$\Delta$}$ or $\mbox{$\varphi$}$, we have
\\
\hspace*{0.4in}
$(X \cap Y).\mbox{$\alpha$} \subseteq X.\mbox{$\alpha$} \cap Y.\mbox{$\alpha$}$,
\\
\hspace*{0.4in}
$X.\mbox{$\alpha$} \cup Y.\mbox{$\alpha$} \subseteq (X \cup Y).\mbox{$\alpha$}$.
\begin{samepage}
\begin{proposition}\label{p.TC}
Let $\mbox{$\varphi$}$ be an idempotent dominating operator $\mbox{$\Delta$}$.
If $y \in X.\mbox{$\varphi$}$ then $\{y\}.\mbox{$\eta$} \subseteq X.\mbox{$\Delta$}$.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
Suppose $\exists X$ and $y, z$, with $y \in X.\mbox{$\varphi$}, z \in \{y\}.\mbox{$\eta$}$, but
$z \not\in X.\mbox{$\Delta$}$.
Then $z \in X.\mbox{$\Delta$}.\mbox{$\Delta$}$ contradicting the idempotency of $\mbox{$\Delta$}$.
\
\end{proof}
\noindent
Proposition \ref{p.TC} effectively asserts that for a dominating
operator, $\mbox{$\Delta$}$, to be a closure operator, $\mbox{$\Delta$}$ must be ``transitively
closed''.
Still a third characterization of closure systems can be found in
\cite{MonRad01};
\begin{proposition}\label{prop.CC3}
An expansive operator $\mbox{$\varphi$}$ is a closure operator if and only if $\mbox{$\varphi$}$
is path independent.
\end{proposition}
Let $Y$ be closed, a closure operator, $\mbox{$\varphi$}$, is said to be:
\\
\hspace*{0.4in}
{\bf matroid} \hspace*{0.8in} if $x, z \not\in Y$ then $z \in (Y \cup \{ x \}).\mbox{$\varphi$}$ implies $x \in (Y \cup \{ z \}).\mbox{$\varphi$}$;
\\
\hspace*{0.4in}
{\bf antimatroid} \hspace*{0.5in} if $x, z \not\in Y$ then $z \in (Y \cup \{ x \}).\mbox{$\varphi$}$ implies $x \not\in (Y \cup \{ z \}).\mbox{$\varphi$}$;
\\
\hspace*{0.4in}
{\bf topological} \hspace*{0.58in} if $\mbox{$\O$}.\mbox{$\varphi$} = \mbox{$\O$}$ and $(X \cup Y).\mbox{$\varphi$} = X.\mbox{$\varphi$} \cup Y.\mbox{$\varphi$}$.
\\
The first two expressions on the right are also known as the ``exchange'' and ``anti-exchange'' axioms.
Matroids are generalizations of linear independent structures
\cite{KorLovSch91}.
Matroid closure is usually denoted by the ``spanning operator'', $\sigma$,
\cite{Tut71,Wel76}.
Antimatroids are typically viewed as convex geometries,
\cite{Cop98,Jam82},
where closure is the convex {\bf hull} operator, sometimes denoted
by $h$,
\cite{EdeJam85,Jam82}.
A closure operator, $\mbox{$\varphi$}$, is said to be {\bf finitely generated} if every closed
set has finite generators.
Since we assume $S$ is finite, all operators will be finitely generated.
In \cite{PfaSla13},
it is shown that:
\begin{samepage}
\begin{proposition}\label{p.FGEN1}
Let $\mbox{${\cal S}$}$ be finitely generated and let $\mbox{$\varphi$}$ be antimatroid.
If $X$ and $Y$ are generators of a closed set $Z$,
then $X \cap Y$ is a generator of $Z$.
\end{proposition}
\end{samepage}
\begin{samepage}
\begin{proposition}\label{p.FGEN2}
If $\mbox{${\cal S}$}$ is finitely generated, then $\mbox{$\varphi$}$ is antimatroid if and only if $\mbox{$\varphi$}$ is
uniquely generated.
\end{proposition}
\end{samepage}
\noindent
A counter example is presented in
\cite{PfaSla13}
to show that the condition of finite generation is necessary.
\subsection{Dominated Closure} \label{RC1M}
It is evident that
the domination operator $\mbox{$\Delta$}$ and closure operator $\mbox{$\varphi$}$ are closely related.
For example, if $Y.\mbox{$\Delta$}.\mbox{$\Delta$} = Y.\mbox{$\Delta$}$ for all $Y$, then $\mbox{$\Delta$}$ is a
closure operator.
But, in general, $Y.\mbox{$\Delta$} \subset Y.\mbox{$\Delta$}.\mbox{$\Delta$}$.
In this section, we explore the close relationship between these two operators
even further.
We define {\bf dominated closure} (sometimes denoted by $\mbox{$\varphi$}_{\Delta}$) to be:
\begin{equation} \label{RCDEF}
Y.\mbox{$\varphi$}_{\Delta} = \bigcup_{Z \subseteq Y.\Delta} \{ Z | Z.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$} \}
\end{equation}
Here it is apparent that, $Y \subseteq Y.\mbox{$\varphi$} \subseteq Y.\mbox{$\Delta$}$, as was shown
in Proposition \ref{p.TC}.
Although in general $Y.\mbox{$\Delta$}.\mbox{$\Delta$} \neq Y.\mbox{$\Delta$}$, we have
\begin{samepage}
\begin{proposition}\label{p.CLO.REG}
For all $Y$, $Y.\mbox{$\varphi$}.\mbox{$\Delta$} = Y.\mbox{$\Delta$}$.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
Let $z \in Y.\mbox{$\varphi$}.\mbox{$\Delta$}$.
If $z \in Y.\mbox{$\varphi$}$, then since $Y.\mbox{$\varphi$} \subseteq Y.\mbox{$\Delta$}$, we are done.
So assume $\exists y \in Y.\mbox{$\varphi$}$, $z \in \{y\}.\mbox{$\Delta$}$.
But, $y \in Y.\mbox{$\varphi$}$ implies $\{y\}.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$}$ so $z \in Y.\mbox{$\Delta$}$.
By monotonicity, $Y.\mbox{$\Delta$} \subseteq Y.\mbox{$\varphi$}.\mbox{$\Delta$}$, so equality follows.
\
\end{proof}
\noindent
The operators, $\mbox{$\Delta$}$ and $\mbox{$\varphi$}$ are not in general commutative, since $Y.\mbox{$\varphi$}.\mbox{$\Delta$} = Y.\mbox{$\Delta$} \subset Y.\mbox{$\Delta$}.\mbox{$\varphi$}$
as shown by the following example.
Let $S = \{ abc \}$ and let $Y = \{a\}$, where $\{a\}.\mbox{$\Delta$} = \{ab\}$, $\{b\}.\mbox{$\Delta$} = \{bc\}$, so
$\{a\}.\mbox{$\varphi$}.\mbox{$\Delta$} = \{a\}.\mbox{$\Delta$} = \{ab\}$ as postulated by Proposition \ref{p.CLO.REG}, but
if $\{ab\}.\mbox{$\varphi$} = \{abc\}$ then
$\{a\}.\mbox{$\Delta$}.\mbox{$\varphi$} = \{ab\}.\mbox{$\varphi$} = \{abc\}$.
\begin{samepage}
\begin{proposition}\label{p.RC}
An operator $\mbox{$\varphi$}$ is a closure operator if and only if there exists a dominating
operator, $\mbox{$\Delta$}$, related to $\mbox{$\varphi$}$ by (\ref{RCDEF}).
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
If $\mbox{$\varphi$}$ is a closure operator, then let $\mbox{$\Delta$} = \mbox{$\varphi$}$.
Readily, $\mbox{$\Delta$}$ is monotone, expansive because $\mbox{$\varphi$}$ is.
Let $Z \subseteq Y.\mbox{$\Delta$}$.
Since $Y.\mbox{$\varphi$}.\mbox{$\varphi$} = Y.\mbox{$\varphi$}$, $Z.\mbox{$\Delta$} = Z.\mbox{$\varphi$} \subseteq Y.\mbox{$\varphi$} = Y.\mbox{$\Delta$}$ satisfying
equation (\ref{RCDEF}).
\\
\\
Conversely, let $\mbox{$\Delta$}$ be any monotone, expansive operator, and let $\mbox{$\varphi$}$ be
defined by (\ref{RCDEF}).
Monotonicity and expansivity follow from $Y \subseteq Y.\mbox{$\varphi$} \subseteq Y.\mbox{$\Delta$}$.
We need only show idempotency.
Readily, $Y.\mbox{$\varphi$} \subseteq Y.\mbox{$\varphi$}.\mbox{$\varphi$}$.
By Prop. \ref{p.CLO.REG}, $Y.\mbox{$\varphi$}.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$}$.
So, $Y.\mbox{$\varphi$}.\mbox{$\varphi$} = \bigcup_{Z \subseteq Y.\mbox{$\varphi$}.\Delta} \{ Z | Z.\mbox{$\Delta$} \subseteq Y.\mbox{$\varphi$}.\mbox{$\Delta$} \}
\subseteq \bigcup_{Z \subseteq Y.\Delta} \{ Z | Z.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$} \} = Y.\mbox{$\varphi$}$.
\
\end{proof}
\begin{samepage}
\begin{proposition}\label{p.REG.GEN}
$X$ is a $\mbox{$\Delta$}$-generator of $Y.\mbox{$\Delta$}$ if and only if $X$ is a $\mbox{$\varphi$}_{\Delta}$-generator of $Y.\mbox{$\varphi$}_{\Delta}$.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
Suppose $X$ is a $\mbox{$\Delta$}$-generator of $Y.\mbox{$\Delta$}$, so $X.\mbox{$\Delta$} = Y.\mbox{$\Delta$}$.
By (\ref{RCDEF}),
$X.\mbox{$\varphi$}_{\Delta} = \bigcup_{Z \subseteq X.\mbox{$\Delta$}} \{ Z.\mbox{$\Delta$} \subseteq X.\mbox{$\Delta$} \}$
= $\bigcup_{Z \subseteq Y.\Delta} \{Z.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$} \}$ = $Y.\mbox{$\varphi$}_{\Delta}$.
\\
Conversely, let $X$ be a $\mbox{$\varphi$}_{\Delta}$-generator of $Y.\mbox{$\varphi$}_{\Delta}$ and assume
$X$ is not a $\mbox{$\Delta$}$-generator of $Y.\mbox{$\Delta$}$, so $X.\mbox{$\Delta$} \neq Y.\mbox{$\Delta$}$.
Let $Z_0 = X.\mbox{$\Delta$} \mbox{$-$} Y.\mbox{$\Delta$}$ (or else $Y.\mbox{$\Delta$} \mbox{$-$} X.\mbox{$\Delta$}$).
$Z_0 \not\subseteq Y.\mbox{$\Delta$}$ implies
$Z_0 \not\subseteq \bigcup_{Z \subseteq Y.\Delta}\{Z.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$} \} = Y.\mbox{$\varphi$}_{\Delta}$
contradicting assumption that $X$ is a $\mbox{$\varphi$}_{\Delta}$-generator of $Y.\mbox{$\varphi$}_{\Delta}$.
\
\end{proof}
\begin{samepage}
\begin{proposition}\label{p.C.REG}
A dominating operator, $\mbox{$\Delta$}$, is itself a closure operator, $\mbox{$\varphi$}$, if
and only if $X \subseteq Y.\mbox{$\Delta$}$ implies $X.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$}$.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
Assume the condition holds.
Since $\mbox{$\Delta$}$ is monotone, expansive we need only show $\mbox{$\Delta$}$ is idempotent.
But readily, $Y.\mbox{$\Delta$}.\mbox{$\Delta$} = \bigcup_{X \subseteq Y.\Delta} \{ X | X.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$} \}
= \bigcup \{ X \subseteq Y.\mbox{$\Delta$} \} = Y.\mbox{$\Delta$}$.
\\
Conversely, if $\mbox{$\Delta$}$ is idempotent, it is a closure operator by definition.
\
\end{proof}
Several set systems have the property that $X \subseteq Y.\mbox{$\Delta$}$
implies $X.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$}$.
Let $P$ be a partially ordered set and let $Y.\mbox{$\Delta$} = \{ x | x \leq y \in Y \}$.
Readily $X \subseteq Y.\mbox{$\Delta$}$ implies $X.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$}$, so
$\mbox{$\Delta$}$ is a closure operator, $\mbox{$\varphi$}$.
It has been called ``downset'' closure
\cite{PfaSla13,Pfa08}.
The maximal elements in $Y$ constitute a unique generator; it is antimatroid.
One can use $\mbox{$\Delta$}$ to construct a large variety of closure systems $\mbox{${\cal S}$}$.
As an example, let $P$ be any set, which we will augment with a special element, $*$.
Let $Y \subseteq P$, and define $Y.\mbox{$\Delta$} = Y \cup \{*\}$ and let $\{*\}.\mbox{$\Delta$} = \{*\}$.
Then $Y.\mbox{$\varphi$} = Y \cup \{*\}$ and $\{*\}.\mbox{$\varphi$} = \{*\}$.
One can optionally let $\mbox{$\O$}.\mbox{$\varphi$} = \{*\}$ or $\mbox{$\O$}.\mbox{$\varphi$} = \mbox{$\O$}$.
It is apparent that $X \subseteq Y.\mbox{$\Delta$}$ implies $X.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$}$, so
$\mbox{$\Delta$}$ is a closure operator.
No subset of $P$ is closed, and $Y \subseteq P$ is the unique minimal generator of the
closed set of the form $Y \cup \{*\}$.
Either $\mbox{$\O$}$ or $\{*\}$ could be the minimal generator of $\{*\}$.
In either case, $\mbox{${\cal S}$}$ is an antimatroid closure space.
We call $\mbox{${\cal S}$}$, so defined, a ``star space''.
For a simple example where $X \subseteq Y.\mbox{$\Delta$}$ need not imply $X.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$}$,
consider $S$ = {\bf Z}, the integers.
Define $\{y\}.\mbox{$\Delta$} = \{ x \,|\, x \leq y \}$ if $y$ is even, and $\{y\}.\mbox{$\Delta$} = \{ z \,|\, z \geq y \}$ if $y$ is odd.
Let $Y = \{1, 2\}$.
Readily, $2 \in \{1\}.\mbox{$\Delta$} = \{i | i \geq 1\}$, but $\{2\}.\mbox{$\Delta$} = \{j | j \leq 2\} \not\subseteq \{1\}.\mbox{$\Delta$}$.
\section {Transformations} \label{T}
A {\bf transformation} $f$ is a function that maps the sets of one
set system $\mbox{${\cal S}$}$ into another set system $\mbox{${\cal S}$}'$.
If $\mbox{${\cal S}$}' = \mbox{${\cal S}$}$, then a transformation is just an operator on $\mbox{${\cal S}$}$.
More often, however, $\mbox{${\cal S}$}'$ has a different internal structure than $\mbox{${\cal S}$}$.
Frequently, $f$ represents ``change'' in a dynamic set system.
Because the domain, and codomain, of a transformation $f$ is a collection of sets,
including the empty set $\mbox{$\O$}$, an expression such as $Y.f = \mbox{$\O$}'$
is well defined.
Similarly one can have $\mbox{$\O$}.f = Y'$.\footnote
{
Normally, we do not distinguish between $\mbox{$\O$}$ and
$\mbox{$\O$}'$.
The empty set is the empty set.
We do so here only for emphasis.
}
Thus we have a functional notation for sets entering, or leaving, a set system altogether.
\subsection {Monotone Transformations} \label{MT}
A transformation $\mbox{${\cal S}$} \FTRANS \mbox{${\cal S}$}'$ is said to be {\bf monotone} if
$X \subseteq Y$ in $\mbox{${\cal S}$}$ implies $X.f \subseteq Y.f$ in $\mbox{${\cal S}$}'$.
Monotonicity seems to be absolutely basic to transformations and is
assumed throughout this paper.
No other property is.
Monotonicity ensures that if $Y.f = \mbox{$\O$}$ then
for all $X \subseteq Y$, $X.f = \mbox{$\O$}'$,
and if $\mbox{$\O$}.f = Y'$ then
for all $Z' \subseteq Y'$, $\mbox{$\O$}.f = Z'$, so $Z'.f^{-1} = \mbox{$\O$}$.
Readily,
\begin{samepage}
\begin{proposition}\label{p.MONOTONE}
The composition $f \mbox{${\HHS \bf \cdot} \HHS$} g$ of monotone transformations is monotone.
\end{proposition}
\end{samepage}
\subsection {Continuous Transformations} \label{CT}
A transformation $\mbox{${\cal S}$} \FTRANS \mbox{${\cal S}$}'$ is said to be {\bf continuous}
with respect to an operator, $\mbox{$\alpha$}$, or more simply $\mbox{$\alpha$}$-continuous,
if for all sets $Y \in \mbox{${\cal S}$}$, $Y.\mbox{$\alpha$}.f \subseteq Y.f.\mbox{$\alpha$}'$,
\cite{Cas03,Ore46,PfaSla13,Sla04,Sla10}.
In the referenced literature, continuity is only considered with respect
to a closure operator, $\mbox{$\varphi$}$.
This is reasonable; but as the following propositions show, it can be generalized.
\begin{samepage}
\begin{proposition}\label{p.COP}
Let $\mbox{$\alpha$}$ be any monotone operator.
If $\mbox{${\cal S}$} \FTRANS \mbox{${\cal S}$}'$ and $\mbox{${\cal S}$}' \GTRANS \mbox{${\cal S}$}''$ are monotone
$\mbox{$\alpha$}$-continuous transformations then
$\mbox{${\cal S}$} \FGTRANS \mbox{${\cal S}$}''$ is $\mbox{$\alpha$}$-continuous.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
Since $f$ is continuous w.r.t. $\mbox{$\alpha$}$, $Y.\mbox{$\alpha$}.f \subseteq Y.f.\mbox{$\alpha$}'$.
Since $g$ is monotone, $Y.\mbox{$\alpha$}.f.g \subseteq Y.f.\mbox{$\alpha$}'.g$.
And finally $g$ $\mbox{$\alpha$}$-continuous yields
$Y.\mbox{$\alpha$}.f.g \subseteq Y.f.\mbox{$\alpha$}'.g \subseteq Y.f.g.\mbox{$\alpha$}''$.
\
\end{proof}
\noindent
That the composition of $\mbox{$\alpha$}$-continuous transformations
is continuous when $\mbox{$\alpha$}$ is a closure operator has already been shown in
\cite{PfaSla13}, where
a counter example is provided to demonstrate the necessity of having $g$ be monotone.
They also show that
the collection of all monotone, $\mbox{$\varphi$}$-continuous transformations
forms a concrete category, $\mbox{${\bf MCont}$}$,
\cite{PfaSla13}.
By Proposition \ref{p.CLO.REG}
$Y.\mbox{$\varphi$}.\mbox{$\Delta$} = Y.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$}.\mbox{$\varphi$}$, so $\mbox{$\Delta$}$ is monotone, $\mbox{$\varphi$}$-continuous,
and thus a member of $\mbox{${\bf MCont}$}$.
And since $\mbox{$\varphi$}$ is idempotent, $\mbox{$\varphi$}$ is trivially $\mbox{$\varphi$}$-continuous, as well.
\begin{proposition}\label{p.IC4}
Let $\mbox{$\Delta$}$ be a dominating operator and let
$\mbox{${\cal S}$} \FTRANS \mbox{${\cal S}$}'$ be monotone.
Then $f$ is $\mbox{$\Delta$}$-continuous if and only if
$X.\mbox{$\Delta$} = Y.\mbox{$\Delta$}$ implies $X.f.\mbox{$\Delta$}' = Y.f.\mbox{$\Delta$}'$.
\end{proposition}
\begin{proof}
Let $f$ be $\mbox{$\Delta$}$-continuous, and let
$X.\mbox{$\Delta$} = Y.\mbox{$\Delta$}$, so
$X =_{\mbox{$\Delta$}} Y$.
By monotonicity and continuity, $X.f \subseteq X.\mbox{$\Delta$}.f = Y.\mbox{$\Delta$}.f \subseteq Y.f.\mbox{$\Delta$}'$.
Similarly, $Y.f \subseteq X.f.\mbox{$\Delta$}'$.
Since $Y.f.\mbox{$\Delta$}'$ is the smallest $\mbox{$\Delta$}$-set containing $X.f$ and
$X.f.\mbox{$\Delta$}'$ is the smallest $\mbox{$\Delta$}$-set containing $Y.f$,
$X.f.\mbox{$\Delta$}' = Y.f.\mbox{$\Delta$}'$.
\\
Conversely, assume $f$ is not $\mbox{$\Delta$}$-continuous.
So there exists $Y$ with $Y.\mbox{$\Delta$}.f \not\subseteq Y.f.\mbox{$\Delta$}'$.
Let $X \in Y.\mbox{$\Delta$}\mbox{$^{-1}$}$.
$X.f \subseteq X.\mbox{$\Delta$}.f = Y.\mbox{$\Delta$}.f \not\subseteq Y.f.\mbox{$\Delta$}'$,
so $X.f.\mbox{$\Delta$}' \neq Y.f.\mbox{$\Delta$}'$,
contradicting the condition.
\
\end{proof}
Thus, the image of a generator under a continuous transformation is again a generator.
However, if
$X \in Y.\mbox{$\Delta$}\mbox{$^{-1}$}$ is a minimal generator,
Proposition \ref{p.IC4} only shows that $X.f$ is still a generator of
$Y.f.\mbox{$\Delta$}'$; it need not be minimal.
A transformation $S \FTRANS S'$ is {\bf $\mbox{$\Delta$}$-surjective} if for all $\mbox{$\Delta$}$-sets $Y'$,
there exists a set $Y\subseteq S$ such that $Y.f = Y'$.
\begin{samepage}
\begin{proposition}\label{p.IC2}
Let $f$ be monotone, $\mbox{$\Delta$}$-continuous and $\mbox{$\Delta$}$-surjective, then for all $\mbox{$\Delta$}$-sets
$Y'$ in $\mbox{${\cal S}$}'$, there exists a $\mbox{$\Delta$}$-set $Y$ in $\mbox{${\cal S}$}$ such that
$Y.f = Y'$.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
Since $f$ is $\mbox{$\Delta$}$-surjective, $\exists Y, Y.f = Y'$.
But, by monotonicity and $\mbox{$\Delta$}$-continuity, $Y.f \subseteq Y.\mbox{$\Delta$}.f \subseteq Y.f.\mbox{$\Delta$}' = Y'$.
So, $Y.\mbox{$\Delta$}.f = Y'$.
\
\end{proof}
\begin{proposition}\label{p.COMP.SUR}
Let $\mbox{${\cal S}$} \FTRANS \mbox{${\cal S}$}'$, $\mbox{${\cal S}$}' \GTRANS \mbox{${\cal S}$}''$
be monotone, $\mbox{$\Delta$}$-continuous transformations.
If both
$f$ and $g$ are $\mbox{$\Delta$}$-surjective, then so is $\mbox{${\cal S}$} \FGTRANS \mbox{${\cal S}$}''$.
\end{proposition}
\begin{proof}
Because the composition of $\mbox{$\Delta$}$-continuous transformations is
$\mbox{$\Delta$}$-continuous, we need only consider surjectivity.
Let $Y''$ be a $\mbox{$\Delta$}$-set in $\mbox{${\cal S}$}''$.
Since $g$ is surjective, $\exists Y' \in \mbox{${\cal S}$}'$, $Y'.g = Y''$.
Because $g$ is continuous we may assume, by Prop. \ref{p.IC2}, that
$Y'$ is a $\mbox{$\Delta$}$-set.
Thus, by surjectivity of $f$, $\exists Y \in \mbox{${\cal S}$}, Y.f = Y'$.
Consequently,
$f\mbox{${\HHS \bf \cdot} \HHS$} g$ is $\mbox{$\Delta$}$-surjective.
\
\end{proof}
A transformation $\mbox{${\cal S}$} \FTRANS \mbox{${\cal S}$}'$ is said to be {\bf $\mbox{$\alpha$}$-preserving}
if, for all $Y$, $Y.\mbox{$\alpha$}.f = Y.f.\mbox{$\alpha$}'$.
An $\mbox{$\alpha$}$-preserving map takes $\mbox{$\alpha$}$-sets onto $\mbox{$\alpha$}$-sets.
\begin{samepage}
\begin{proposition}\label{p.AP}
Let $\mbox{$\Delta$}$ be a dominating operator and let $f$ be monotone.
$f$ is $\mbox{$\Delta$}$-preserving if and only if
for all $Y$, $Y.f.\mbox{$\Delta$}' \subseteq Y.\mbox{$\Delta$}.f$.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
Assume, $Y.f.\mbox{$\Delta$}' \subseteq Y.\mbox{$\Delta$}.f$.
Let $Y = Y.\mbox{$\Delta$}$, so $Y.f.\mbox{$\Delta$}' \subseteq Y.\mbox{$\Delta$}.f = Y.f$.
Readily $Y.f \subseteq Y.f.\mbox{$\Delta$}'$ so $Y.f = Y.f.\mbox{$\Delta$}'$ and $f$ is $\mbox{$\Delta$}$-preserving.
\\
\\
Now assume $\mbox{$\Delta$}$ is idempotent and that $f$ is $\mbox{$\Delta$}$-preserving.
By monotonicity of $f$, $Y \subseteq Y.\mbox{$\Delta$}$ implies $Y.f \subseteq Y.\mbox{$\Delta$}.f$,
and since $\mbox{$\Delta$}$ is monotone, $Y.f.\mbox{$\Delta$}' \subseteq Y.\mbox{$\Delta$}.f.\mbox{$\Delta$}'$.
Idempotency implies $Y.\mbox{$\Delta$}.\mbox{$\Delta$} = Y.\mbox{$\Delta$}$.
Since $f$ is $\mbox{$\Delta$}$-preserving, $Y.\mbox{$\Delta$}.f.\mbox{$\Delta$}' = Y.\mbox{$\Delta$}.f$ so
$Y.f.\mbox{$\Delta$}' \subseteq Y.\mbox{$\Delta$}.f$.
\
\end{proof}
\noindent
Proposition \ref{p.AP} is also proven in
\cite{PfaSla13}
where $\mbox{$\Delta$}$ is a closure operator $\mbox{$\varphi$}$.
\begin{samepage}
\begin{corollary}\label{c.AP}
A monotone transformation $f$ is both $\mbox{$\Delta$}$-continuous and $\mbox{$\Delta$}$-preserving,
if and only if $Y.\mbox{$\Delta$}.f = Y.f.\mbox{$\Delta$}'$.
\end{corollary}
\end{samepage}
\subsection {Galois Connections} \label{GC}
Monotone transformations $S \FTRANS S'$ can provide another mechanism for defining
closure operators on $S$.
Let $S \FTRANS S'$ and $S' \GTRANS S$ be monotone transformations.
The composite $(f \mbox{${\HHS \bf \cdot} \HHS$} g)$ is called a {\bf Galois connection} if for all
$X \subseteq S, Y' \subseteq S'$,
\\
\hspace*{0.5in}
(1) $X \subseteq X.f.g$ \hspace*{0.55in} $(f \mbox{${\HHS \bf \cdot} \HHS$} g)$ is expansive
\\
\hspace*{0.5in}
(2) $Y'.g.f \subseteq Y'$ \hspace*{0.5in} $(g \mbox{${\HHS \bf \cdot} \HHS$} f)$ is contractive.
\begin{samepage}
\begin{proposition}\label{p.GC1}
Let $S \FTRANS S' \GTRANS S$ be monotone.
The following are equivalent statements.
\\
\hspace*{0.5in}
(a)
$(f \mbox{${\HHS \bf \cdot} \HHS$} g)$ is a Galois connection.
\\
\hspace*{0.5in}
(b)
For all $X \subseteq S$ and all $Y' \subseteq S'$, $X.f \subseteq Y'$ if and only if $X \subseteq Y'.g$.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
(a) implies (b):\ \ Let $X.f \subseteq Y'$, so $g$ monotone implies $X.f.g \subseteq Y'.g$,
thus $X \subseteq X.f.g \subseteq Y'.g$.
Similarly, $X \subseteq Y'.g$ implies $X.f \subseteq Y'.g.f \subseteq Y'$.
\\
(b) implies (a):\ \
Let $X.f = Y'$, so trivially $X.f \subseteq Y'$.
By (b) $X \subseteq Y'.g$ implying $X \subseteq Y'.g \subseteq X.f.g$.
$f \mbox{${\HHS \bf \cdot} \HHS$} g$ is expansive.
Similarly, $X \subseteq Y'.g$ implies $Y'.g.f \subseteq Y'$.
\
\end{proof}
\begin{samepage}
\begin{proposition}\label{p.GC2}
If $S \FTRANS S' \GTRANS S$ is a Galois connection, then
$f$ and $g$ uniquely determine each other.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
Let $f \mbox{${\HHS \bf \cdot} \HHS$} g$ be a Galois connection and suppose there exists $h$ such
that $f \mbox{${\HHS \bf \cdot} \HHS$} h$ is also a Galois connection.
We apply $h$ to $Y'.g.f \subseteq Y'$.
$Y'.g \subseteq Y'.g.f.h$ because $f \mbox{${\HHS \bf \cdot} \HHS$} h$ is expansive, and
$Y'.g.f.h \subseteq Y'.h$ since $h$ is monotone.
So, $Y'.g \subseteq Y'.h$.
Applying $g$ to $Y'.h.f \subseteq Y'$ yields $Y'.h \subseteq Y'.g$,
$\forall Y'$, so $h = g$.
\\
A similar argument shows that $f$ must be unique given $g$.
\
\end{proof}
\begin{samepage}
\begin{proposition}\label{p.GC3}
If $S \FTRANS S' \GTRANS S$ is a Galois connection,
then $f \mbox{${\HHS \bf \cdot} \HHS$} g \mbox{${\HHS \bf \cdot} \HHS$} f = f$ and $g \mbox{${\HHS \bf \cdot} \HHS$} f \mbox{${\HHS \bf \cdot} \HHS$} g = g$.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
Let $Y' = X.f$.
Since $g \mbox{${\HHS \bf \cdot} \HHS$} f$ is contractive, $X.f.g.f = Y'.g.f \subseteq Y' = X.f$.
However, $X \subseteq X.f.g$ implies $X.f \subseteq X.f.g.f$ by monotonicity,
so $X .f = X.f.g.f, \forall X$.
\\
Similarly, $Y'.g = Y'.g.f.g, \forall Y' \subseteq S'$.
\
\end{proof}
\begin{samepage}
\begin{corollary}\label{c.GC4}
If $f \mbox{${\HHS \bf \cdot} \HHS$} g$ is a Galois connection, then $S \FGTRANS S$ is a closure operator
on $S$.
\end{corollary}
\end{samepage}
\noindent
\begin{proof}
Since $f \mbox{${\HHS \bf \cdot} \HHS$} g$ are Galois connected, $f \mbox{${\HHS \bf \cdot} \HHS$} g$ is expansive.
$f$ and $g$ are monotone.
Prop. \ref{p.GC3} establishes that $f \mbox{${\HHS \bf \cdot} \HHS$} g$ is idempotent.
\
\end{proof}
The preceding three propositions largely follow the development of Galois
connections provided by Castellini in
\cite{Cas03},
except for changed notation.
A somewhat different development can be found in Ganter \& Wille
\cite{GanWil99}.
They choose to let $f$ and $g$ be ``anti-monotone'', that is,
$X_1 \subseteq X_2$ implies $X_1.f \supseteq X_2.f$ and
$Y'_1 \subseteq Y'_2$ implies $Y'_1.g \supseteq Y'_2.g$
and
$X \subseteq X.f.g$, $Y' \subseteq Y'.g.f$.
With this definition of Galois connection, Proposition \ref{p.GC1}
must be rewritten as:
``$f \mbox{${\HHS \bf \cdot} \HHS$} g$ is a Galois connection if and only if
$Y' \subseteq X.f$ implies $X \subseteq Y'.g$ and conversely''.
However, both approaches will yield Proposition \ref{p.GC3} and
its corollary.
With the latter definition, both $f \mbox{${\HHS \bf \cdot} \HHS$} g$ and $g \mbox{${\HHS \bf \cdot} \HHS$} f$
are expansive operators on $S$, so both are closure operators.
This is important for the subsequent development of ``Formal Concept Analysis''
in \cite{GanWil99}.
We prefer the development presented here, and in
\cite{Cas03},
because we can compose monotone transformations, so it is easy to show that
\begin{samepage}
\begin{proposition}\label{p.GC5}
Let $S \FTRANS S' \GTRANS S$ and $S' \HTRANS S'' \KTRANS S'$ be Galois
connections,
then $S \FHTRANS S'' \KGTRANS S$ is a Galois connection.
\end{proposition}
\end{samepage}
Let $Y.\mbox{$\gamma$}$ denote the collection of minimal generators of $Y.\mbox{$\Delta$}$.
If $\mbox{$\Delta$}$ is uniquely generated, then $Y.\mbox{$\gamma$}$ is a well defined function.
\begin{samepage}
\begin{proposition}\label{p.UGGC}
If $\mbox{$\Delta$}$ is uniquely generated, then $\mbox{$\Delta$}$ and $\mbox{$\gamma$}$ constitute a Galois connection
on $S$.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
$Y.\mbox{$\gamma$}.\mbox{$\Delta$} = Y.\mbox{$\Delta$}$, so $\mbox{$\gamma$}.\mbox{$\Delta$}$ is expansive.
Because $\mbox{$\Delta$}$ is uniquely generated, $Y.\mbox{$\gamma$} \subseteq Y$,
so $\mbox{$\Delta$}.\mbox{$\gamma$}$ is contractive.
\
\end{proof}
\noindent
Hence, by Corollary \ref{c.GC4} any uniquely generated dominating operator
$\mbox{$\Delta$}$ is a closure operator, and by Proposition \ref{p.FGEN2}, it is
antimatroid.
If we suppose $\mbox{$\Delta$}$ is not uniquely generated, say $X$ and $Z$ are minimal
sets such that $X.\mbox{$\Delta$} = Z.\mbox{$\Delta$}$, then
$X.\mbox{$\Delta$}.\mbox{$\gamma$} = X \cup Z \not\subseteq X$.
$\mbox{$\Delta$}.\mbox{$\gamma$}$ is not contractive.
\section {Categorical Closure} \label{CC}
Operators, such as $\mbox{$\alpha$}, \mbox{$\Delta$}, \mbox{$\varphi$}$, can be naturally regarded
as morphisms in a categorical sense.
In this section we develop this way of approaching domination
and closure.
It is somewhat different from the approach found in
\cite{Cas03},
but is compatible with it.
The following material has been largely derived from
\cite{HerStr79,Pie91};
the only substantive differences have been to change notation.
All functions, or morphisms, will still be denoted using postfix notation.
Proposition \ref{p.ANTIMATROID} is, we believe, original.
\subsection{Review and Examples}
Two categories which are usually presented in category text books are
$\mbox{${\bf Set}$}$ and $\mbox{${\bf Porder}$}$.
For us,
the objects of $\mbox{${\bf Set}$}$ will be all possible finite sets.\footnote
{
To avoid logical paradoxes, MacLane,
\cite{Mac98},
lets the objects in $\mbox{${\bf Set}$}$ be all the sets in a fixed universe $U$.
These he calls $small$ sets.
Readily, our finite sets are small.
}
The morphisms of $\mbox{${\bf Set}$}$ consist of all functions $f:X \mbox{$\rightarrow$} Y$
(or morphisms $X \FTRANS Y$)
with the usual composition.
In the partial order category $\mbox{${\bf Porder}$}$, the objects consist of all finite sets, $S$,
with a partial order $\leq_S$ on $S$.
Its morphisms are all order preserving (monotone)
functions, $f:S \mbox{$\rightarrow$} T$, such that $x \leq_S y$ implies
$x.f \leq_T y.f$.
Checking that
$f \mbox{${\HHS \bf \cdot} \HHS$} g$ is order preserving is usually left as an
exercise.
These two well known categories are paradigms for the dominance and closure
categories.
Analogous to $\mbox{${\bf Set}$}$, is the category $\mbox{${\bf Pow}$}$ whose objects consist of
all finite power sets.
Its morphisms are all total
functions $2^S \FTRANS 2^T$ with the usual composition;
$(\forall X \in 2^S) [X.(f\mbox{${\HHS \bf \cdot} \HHS$} g) = X.f.g]$ and the
usual identity on $2^S$,
$(\forall X \subseteq S) [X.id_S = X]$.
It is not difficult to show this composition is well-defined and associative.
The transformations of Section \ref{T} are morphisms in $\mbox{${\bf Pow}$}$.
The category $\mbox{${\bf Pow}$}$ would appear to be completely isomorphic to $\mbox{${\bf Set}$}$,
with each $S \in \mbox{${\bf Set}$}$ replaced by $2^S \in \mbox{${\bf Pow}$}$.
But there are essential differences.
An object $Y$ is said to be {\bf terminal} in $\mbox{${\cal C}$}$
if for every object $X$ in $ \mbox{${\cal C}$}$ there exists exactly one morphism
$X \longrightarrow Y$.
As is well known,
\cite{Mac98},
every element $x$ is a terminal object of $\mbox{${\bf Set}$}$, since for
all $S$, the function $f:S \mbox{$\rightarrow$} x$ must be unique.
However, the singleton sets in $\mbox{${\bf Pow}$}$ need not be terminal.
To see this, let $Y = \{x, y\} \in 2^S$ and let $f, g$ be
extended transformations, where $\{x\}.f = \{x\}$,
$\{y\}.f = \mbox{$\O$}$ and
$\{x\}.g = \mbox{$\O$}$, $\{y\}.g = \{x\}$.
Then $S.f = S.g = \{x\}$, but $f \neq g$.
So the singleton elements $\{x\}$ cannot be terminal in $\mbox{${\bf Pow}$}$.
Only $\mbox{$\O$}$ is terminal in $\mbox{${\bf Pow}$}$, and initial as well.
If we restrict the morphisms in $\mbox{${\bf Pow}$}$ to be monotone, or order preserving,
as in Section \ref{MT},
that is, $X \subseteq Y$ implies $X.f \subseteq Y.f$ then we have
an exact analogue to $\mbox{${\bf Porder}$}$, but over $\mbox{${\bf Pow}$}$, not $\mbox{${\bf Set}$}$.
This category, which we call $\mbox{${\bf Trans}$}$, has the objects of $\mbox{${\bf Pow}$}$ as
its objects, and the collection of all monotone
transformations $2^S \FTRANS 2^T$.
It is simply a restriction on the morphisms of $\mbox{${\bf Pow}$}$.
Proposition \ref{p.MONOTONE} asserts that composition in $\mbox{${\bf Trans}$}$ is
still well defined.
Much of Section \ref{T} is simply concerned with examining the
properties of transformations $f$ in $\mbox{${\bf Trans}$}$.
When the morphisms in $\mbox{${\bf Trans}$}$ are of the form $2^S \ATRANS 2^S$, which
we have called ``operators'', they constitute a category $\mbox{${\bf Opr}$} \subset \mbox{${\bf Trans}$}$.
Our final example is that of $\mbox{${\bf Dom}$}$, the category of all
expansive, monotone operators, $\mbox{$\Delta$}$ on power sets
$2^S$; that is, the category of dominating operators.\footnote
{
Note that ``$dom$'' is the standard categorical way of denoting
the $domain$ of a morphism, that is, if $\mbox{$\alpha$}:2^S \mbox{$\rightarrow$} 2^T$
then $dom\ \mbox{$\alpha$} = 2^S$ and $cod\ \mbox{$\alpha$} = 2^T$, where $cod$
denotes $codomain$.
We do not use the terminology $dom$ or $cod$ in this paper.
}
The objects of $\mbox{${\bf Dom}$}$ are precisely those of $\mbox{${\bf Pow}$}$, that is
$2^S$ partially ordered by $\subseteq$.
Its morphisms are all expansive, monotone (order preserving) functions (operators)
$\mbox{$\alpha$} : (S, \subseteq) \mbox{$\rightarrow$} (S, \subseteq)$,
such that $X \subseteq Y$ implies $X.\mbox{$\alpha$} \subseteq Y.\mbox{$\alpha$}$ and $X \subseteq X.\mbox{$\alpha$}$.
Readily, the identity operator $id_S$ preserves the order $\subseteq$.
The usual composition, $\mbox{$\alpha$} \mbox{${\HHS \bf \cdot} \HHS$} \beta$ is order preserving.
Readily, $\mbox{${\bf Dom}$} \subset \mbox{${\bf Opr}$} \subset \mbox{${\bf Trans}$} \subset \mbox{${\bf Pow}$}$.
\subsection {Pullbacks} \label {PULL}
Recall that
the {\bf pullback} of a pair of morphisms, $Y \FTRANS Z$ and $X \GTRANS Z$,
is an object $W$ and two morphisms $W \FPTRANS X$, $W \GPTRANS Y$ such
that $g' \mbox{${\HHS \bf \cdot} \HHS$} f = f' \mbox{${\HHS \bf \cdot} \HHS$} g$.
Moreover, if
there exist morphisms $V \HXTRANS X$, $V \HYTRANS Y$,
such that $h_Y \mbox{${\HHS \bf \cdot} \HHS$} f = h_X \mbox{${\HHS \bf \cdot} \HHS$} g$ then
there exists a $unique$ morphism
$V \UTRANS W$
such that $h_Y = u \mbox{${\HHS \bf \cdot} \HHS$} g'$ and $h_X = u \mbox{${\HHS \bf \cdot} \HHS$} f'$.
\begin{samepage}
\begin{proposition}\label{p.ANTIMATROID}
Let $\mbox{${\cal C}$} \subset \mbox{${\bf Dom}$}$ be a subcategory.
Its morphisms $\mbox{$\Delta$}$ are
antimatroid closure operators if and only if $\mbox{${\cal C}$}$
exhibits the pullback property of Figure \ref{PULLBACK2}.
\end{proposition}
\end{samepage}
\noindent
\begin{proof}
Suppose that the dominating morphisms $\mbox{$\Delta$}$ of $\mbox{${\cal C}$} \subseteq \mbox{${\bf Dom}$}$
are antimatroid closure operators.
We must show that the pullback diagram of Figure \ref{PULLBACK2} is
satisfied.
We assume that $\mbox{$\Delta$}_X:X \mbox{$\rightarrow$} Z$ and $\mbox{$\Delta$}_Y:Y \mbox{$\rightarrow$} Z$, so $X$ and $Y$ are
generators of $Z$.
Let $V$ be any set $V \subseteq_X X$, $V \subseteq_Y Y$ such that
$V.(\subseteq_X \mbox{${\HHS \bf \cdot} \HHS$} \mbox{$\Delta$}_X) = Z$ and $V.(\subseteq_Y \mbox{${\HHS \bf \cdot} \HHS$} \mbox{$\Delta$}_Y) = Z$, so
$V$ is a generator of $Z$.
By Prop. \ref{p.FGEN1}, $W = X \cap Y$ is the unique pullback of these
two generators.
\\
Conversely, let the morphisms $\mbox{$\Delta$} \in \mbox{${\cal C}$}$ exhibit the pullback property
of Figure \ref{PULLBACK2}.
$X$ and $Y$ are generators of $Z$.
Since $V \UTRANS W$ is unique, by Prop. \ref{p.FGEN2}, $\mbox{$\Delta$}$ is uniquely generated.
By Prop. \ref{p.UGGC}, $\mbox{$\Delta$}$ must be a closure operator.
\
\end{proof}
\begin{figure}
\caption{The pullback of antimatroid generators.}
\label{PULLBACK2}
\end{figure}
\noindent
Observe that in Figure \ref{PULLBACK2}, the sets $V, W, X, Y$ are all generators of $Z$.
It is known that pullback diagrams preserve monomorphisms and retractions
\cite{Mac98}.
It is conjectured that an analog of Proposition \ref{p.ANTIMATROID} is true as well,
that is that a subcategory of $\mbox{${\bf Dom}$}$ satisfying the ``push-out'' property must consist
of matroid closure operators.
\section {Summary} \label{S}
There are many more dominating than closure operators,\footnote
{
If $|S| = n \geq 10$, there exist more than $n^n$ distinct
antimatroid closure operators
\cite{Pfa95c}.
}
in spite of Proposition \ref{p.RC} which established that
for every dominating operator there exists
a corresponding (not necessarily unique) closure operator.
While dominating operators are more ubiquitous, for example most network
operators associated with internet analysis are expansive;
closure operators are more structured.
By Proposition \ref{p.UGGC}, only antimatroid closure operators can be
uniquely generated.
However, dominated closure has been used to reduce social networks in a
way that preserves path connectivity
\cite{Pfa13}.
The dominated closure,
$\mbox{$\varphi$}_{\mbox{$\Delta$}}$ defined by (\ref{RCDEF}) is often called a {\bf neighborhood closure},
because the definition (\ref{RCDEF}) can be rewritten as
\begin{equation} \label{RCDEF2}
Y.\mbox{$\varphi$} = Y \cup \bigcup_{\{z\} \subseteq Y.\mbox{$\eta$}} \{ \{z\} | \{z\}.\mbox{$\Delta$} \subseteq Y.\mbox{$\Delta$} \}
\end{equation}
which, in many cases, is computationally much more efficient.
Dominating operators can be easily computed.
A common way, in practice, of defining a dominating operator and its
dominated neighborhood is by an adjacency matrix, $\mbox{${\cal A}$}$.
For each row $i$ of $\mbox{${\cal A}$}$, if $(i,k) \neq 0$, then $k$ is in the region dominated by $i$, or
$\{k\} \subseteq \{i\}.\mbox{$\Delta$}$.
Thus $\mbox{${\cal A}$}$ can be the base of a graphic representation, $G$, described in Section \ref{EO}.
If $\mbox{$\Delta$}$, and $\mbox{${\cal A}$}$, are symmetric, the graph $G$ is undirected.
In most applications, $Y.\mbox{$\Delta$}$ is assumed to be $\bigcup_{\{y\} \subseteq Y} \{\{y\}.\mbox{$\Delta$} \}$,
or all elements directly connected to $Y$ in $G$, $i.e.$ $\mbox{$\Delta$}$ is an extended operator.
However, one might want much larger sets $Z$ to have a much wider scope of dominance.
Similarly, in $\mbox{${\cal A}$} \times \mbox{${\cal A}$}$, $(i, k) \neq 0, $ if $k$ is two, or fewer, links from $i$.
It is still a dominance region.
But, it is not graphically representable, nor is it extensible.
The term ``domination'' is well established in the graph theory literature, but we
prefer to think of these expansive, outward accessing operators as ``exploratory''
operators, especially when viewing their role in social network analysis.
Some educators have suggested that ``knowledge spaces'' might be modelled
by closed sets.
If so, ``exploratory'' closure, $\mbox{$\varphi$}_{\Delta}$, becomes a plausible mechanism for ``learning''.
One can regard $S$ as a universe of related ``experiences'' and ``knowledge'' to be a
collection $Y$ of such experiences, or skills.
An experience $f$ expands one's knowledge if it is congruent with one's existing knowledge
set $Y$;
that is if $\{f\}.\mbox{$\eta$} \subseteq Y.\mbox{$\eta$}$.
That is, the connections of $f$ are congruent with the connections of $Y$; it makes sense.
Dominated closure may find other applications as well.
In
\cite{Pfa13c},
it was shown that one can define ``fuzzy'' dominated closure, yet still retain
many of the crisp properties of closure operators.
It is evident that many of the preceding results, which have been expressed
in operator terminology, can be recast using graph terminology.
In return, graph theory can
provide a rich source of discrete set systems.
In particular, domination
\cite{HayHedSla98b,HayHedSla98,Sum90}
and closure
\cite{Cas03,FarJam86,JamPfa05,Ore46}
have been well studied and can provide many operator examples.
{\small
}
\end{document} |
\begin{document}
\title{Learning networks from high dimensional binary data: An
application to genomic instability data}
\author{Pei Wang, Dennis L. Chao, Li Hsu$^*$ \\
Division of Public Health Sciences, Fred Hutchinson Cancer Research
Center, Seattle, WA\\
$^*$Corresponding Author: 1100 Fairview Ave. N., M2-B500, Fred
Hutchinson Cancer Research \\
Center, Seattle, WA 98109. Email: lih@fhcrc.org. }
\begin{abstract}
Genomic instability, the propensity of aberrations in chromosomes,
plays a critical role in the development of many diseases. High
throughput genotyping experiments have been performed to study genomic instability in diseases. The output of such experiments
can be summarized as high dimensional binary vectors, where each
binary variable records aberration status at one marker locus. It is
of keen interest to understand how these aberrations interact with
each other. In this paper, we propose a novel method,
\texttt{LogitNet}, to infer the interactions among aberration
events. The method is based on penalized logistic regression with an
extension to account for spatial correlation in the genomic
instability data. We conduct extensive simulation studies and show
that the proposed method performs well in the situations considered.
Finally, we illustrate the method using genomic instability data from
breast cancer samples.
\textbf{Key Words:} Conditional Dependence; Graphical Model; Lasso;
Loss-of-Heterozygosity; Regularized Logistic Regression
\end{abstract}
\maketitle
\section{Introduction}
Genomic instability refers to the propensity of aberrations in
chromosomes such as mutations, deletions and amplifications. It has
been thought to play a critical role in the development of many
diseases, for example, many types of cancers (Klein and Klein 1985).
Identifying which aberrations
contribute to disease risk, and how they may interact with each other
during disease development is of keen interest. High throughput genotyping
experiments have been performed to interrogate these aberrations
in diseases, providing a vast amount of information on genomic
instabilities on tens of thousands of marker loci
simultaneously. These data can essentially be organized as a $n \times
p$ matrix where $n$ is the number of samples, $p$ is the
number of marker loci, and the $(i,j)^{th}$ element of the matrix is the
\texttt{binary} aberration status for the $i$th sample at the $j$th
locus. We refer to the interactions among aberrations as oncogenic
pathways. Our goal is to infer oncogenic pathways based on these binary
genomic instability profiles.
Oncogenic pathways can be compactly represented by graphs, in which
vertices represent $p$ aberrations and edges represent interactions
between aberrations. Tools developed for graphical models
(Lauritzen 1996) can therefore be employed to infer interactions
among $p$ aberrations. Specifically, each vertex represents a
binary random variable that codes aberration status at a locus, and
an edge will be drawn between two vertices if the corresponding two
random variables are conditionally dependent given all other random
variables. Here, we want to point out that graphical models based on
conditional dependencies provide information on ``higher order"
interactions compared to other methods (e.g., hierarchical
clustering) which examine the marginal pairwise correlations. The
latter does not tell, for example, whether a non-zero correlation is
due to a direct interaction between two aberration events or due to
an indirect interaction through a third intermediate aberration
event.
There is a rich literature on fitting graphical models for a limited
number of variables (see for example Dawid and Lauritzen 1993;
Whittaker 1990; Edward 2000; Drton and Perlman 2004, and references
therein). However, in genomic instability profiles, the number of
genes $p$ is typically much larger than the number of samples $n$.
Under such high-dimension-low-sample-size scenarios, sparse
regularization becomes indispensable for purposes of both model
tractability and model interpretation. Some work has already been
done to tackle this challenge for high dimensional continuous
variables. For example, Meinshausen and Buhlmann (2006) proposed performing neighborhood selection with \texttt{lasso} regression
(Tibshirani 1996) for each node. Peng et al. (2009a) extended the
approach by imposing the sparsity on the whole network instead of
each neighborhood, and implemented a fast computing algorithm. In
addition, a penalized maximum likelihood approach has been carefully
studied by Yuan and Lin (2007), Friedman et al.(2007b) and Rothman
et al.(2008), where the $p$ variables were assumed to follow a
multivariate normal distribution. Besides these cited works, various
other regularization methods have also been developed for high
dimensional continuous variables (see for example, Li and Gui 2006
and Schafer and Strimmer 2007). Bayesian approaches have been
proposed for graphical models as well (see for example, Madigan et
al. 1995).
In this paper, we consider binary variables and propose a
novel method, \texttt{LogitNet}, for inferring edges, i.e.,
the conditional dependence between pairs of aberration events given all others.
Assuming a tree topology for oncogenic pathways, we derive
the joint probability distribution of the $p$ binary variables,
which naturally leads to a set of $p$ logistic regression models
with the combined $p\times p$ coefficient matrix being symmetric.
We propose sparse logistic regression with a \texttt{lasso} penalty
term and extend it to account for the spatial correlation
along the genome. This extension together with the enforcement of
symmetry of the coefficient matrix produces a group selection
effect, which enables \texttt{LogitNet} to make good use of spatial
correlation when inferring the edges.
\texttt{LogitNet} is related to the work by Ravikumar et al.
(2009), which also utilized sparse logistic regression to construct a
network based on high dimensional binary variables. The basic idea
of Ravikumar et al. is the same as that of Meinshausen and
Buhlmann's (2006) neighborhood selection approach, in which sparse
logistic regression was performed for each binary variable given all
others. Sparsity constraint was then imposed on each neighborhood and
the sparse regression was performed for each binary
variable separately. Thus, the symmetry of conditional dependence
obtained from regressing variable $X_r$ on variable $X_s$ and from
regressing $X_s$ on $X_r$ is not guaranteed. As such, it can yield
contradictory neighborhoods,
which makes interpretation of the results difficult. It also loses
power in detecting dependencies, especially when the sample size is
small. The proposed \texttt{LogitNet}, on the other hand, makes use
of the symmetry, which produces compatible logistic regression models
for all variables and has thus achieved a more coherent result with
better efficiency than the Ravikumar et al. approach. We show by
intensive simulation studies that \texttt{LogitNet} performs better in
terms of false positive rate and false negative rate of edge
detection.
The rest of the paper is organized as follows. In section 2, we will
present the model, its implementation and the selection of the
penalty parameter. Simulation studies of the proposed method and the
comparison with the Ravikumar et al. approach are described in
Section 3. Real genomic instability data from breast cancer samples is used
to illustrate the method and the results are described in Section 4.
Finally, we conclude the paper with remarks on future work.
\section{Methods}
\subsection{\texttt{LogitNet} Model and Likelihood Function}
Consider a $p \times 1$ vector of binary variables $X^T = (X_1,
\ldots, X_p)$ for which we are interested in inferring the
conditional dependencies. Here the superscript $T$ is a transpose.
The pattern of conditional dependencies between these binary
variables can be described by an undirected graph ${\cal G} = (V,
E)$, where $V$ is a finite set of vertices, $(1, \ldots, p)$, that
are associated with binary variables $(X_1, \ldots, X_p)$; and $E$
is a set of pairs of vertices such that each pair in $E$ are
conditionally dependent given the rest of binary variables. We assume
that the edge set $E$ doesn't contain cycles,
i.e., no path begins and ends with the same vertex. For
example, in a set of four vertices, if the edge set includes (1,2),
(2,3), and (3,4), it can't include the edge (1,4) or (1,3) or (2,4),
as it will form a cycle. Under this assumption, the joint
probability distribution $\Pr(X)$ can be represented as a product of
functions of pairs of binary variables. We formalize this result in
the following proposition:
\textit{ \noindent \textbf{Proposition 1.} Let $V = \{1, \ldots,
p\}$ and $X_{-(r,s)}$ denote the vector of binary variables $X$
excluding $X_r$ and $X_s$ for $r,s \in V$. Define the edge set
$$E = \{(r, s) | \Pr(X_r, X_s | X_{-(r,s)}) \ne \Pr(X_{r}|X_{-(r,s)}) \Pr(X_{s} | X_{-(r,s)}); r, s \in V, r < s \},$$
and
$|E| = K$. If ${\cal G}$ doesn't contain cycles, then there exist
functions $\{h_k, k= 1, \ldots, K\}$ such that
\[ \Pr(X) = \prod_{k=1}^K h_k(X_{r_k}, X_{s_k}), \]
where $(r_k, s_k) \in E \textit{ for } k = 1, \ldots, K$. }
The proof of Proposition 1 is largely based on the Hammersley and
Clifford theorem (Lauritzen, 1996) and given in Supplementary Appendix A.
Assuming $\Pr(X)$ is strictly positive for all values of $X$, then
the above probability distribution leads to the well known
quadratic exponential model
\begin{eqnarray}
\Pr(X=x) = \Delta^{-1} \exp(x^T \theta + z^T \kappa), \label{eqn1}
\end{eqnarray}
where $z^T = (x_1x_2, x_1x_3, \ldots, x_{p-1}x_p)$, $\theta^T =
(\theta_1, \ldots, \theta_p)$, $\kappa^T = (\kappa_{12},
\kappa_{13}, \ldots, \kappa_{(p-1)p})$, and $\Delta$ is a
normalization constant such that $\Delta = \sum_{x_1 = 0}^1 \cdots
\sum_{x_p=0}^1 \exp(x^T \theta + z^T \kappa)$.
Under this model, the zero values in $\kappa$ are equivalent to the
conditional independence for the corresponding binary variables. The
following proposition describes this result and the proof is given
in Supplementary Appendix B.
\textit{ \noindent \textbf{Proposition 2.} If the distribution on
$X$ is (\ref{eqn1}), then $X_r \perp\!\!\!\perp X_s \,|\, X_{-(r,s)}$ if and
only if $\kappa_{rs} = 0$, for $1\leq r<s \leq p$.}
As the goal of graphical model selection is to infer the edge set $E$
which represents the conditional dependence among all
the variables, the result of Proposition 2 implies that we can infer
the edge between a pair of events, say $X_r$ and $X_s$,
based on whether or not $\kappa_{rs}$ is equal to 0. Interestingly,
under model (\ref{eqn1}), $\kappa$ can also be interpreted as
a conditional odds ratio. This can be seen from
\begin{eqnarray*}
\lefteqn{\frac{\Pr(x_s = 1 | x_1, \ldots, x_{s-1}, x_{s+1}, \ldots,
x_p)}{\Pr(x_s = 0 | x_1, \ldots, x_{s-1}, x_{s+1}, \ldots, x_p)}} \\
& = &
\exp(\kappa_{1s} x_1 + \ldots + \kappa_{(s-1)s} x_{s-1} + \theta_s
+ \kappa_{s(s+1)} x_{s+1} + \ldots + \kappa_{sp} x_p).
\end{eqnarray*}
Taking the log transformation of the left hand side of this equation
results in the familiar form of a logistic regression model, where the
outcome is the $s$th binary variable and the predictors are all
the other binary variables. Doing this for each of $x_1, x_2, \ldots,
x_p$, we obtain $p$ logistic regression models:
\begin{eqnarray}\label{eqn:Plogistic}
\left \{ \begin{array}{rcl}
\mbox{logit}\{\Pr(x_1 = 1 | x_2, \ldots, x_p)\} & = & \theta_1 +
\kappa_{12} x_2 + \ldots + \kappa_{1p} x_p, \\
& \vdots & \\
\mbox{logit} \{\Pr(x_p = 1 | x_1, \ldots, x_{p-1})\} & = &
\kappa_{1p} x_1 + \ldots + \kappa_{(p-1)p} x_{p-1} + \theta_p.
\end{array} \right .
\end{eqnarray}
The matrix of all of the regression coefficients from $p$ logistic
regression models can then be row combined as
\[
\cal{B} =\left (\begin{array}{cccc}
\theta_1 & \kappa_{12} & \ldots & \kappa_{1p} \\
\kappa_{12} & \theta_2 & \ldots & \kappa_{2p} \\
\vdots & &\ddots & \\
\kappa_{1p} & \ldots & \ldots & \theta_p
\end{array} \right )
\]
with matrix elements defined by $\beta_{rs}$ for the $r$th row and
the $s$th column of ${\cal B}$. It is easy to see that the
$\cal{B}$ matrix is symmetric, i.e.,
$\beta_{rs}=\beta_{sr}=\kappa_{rs}$, $r \ne s$ under model (\ref{eqn:Plogistic}). Vice
versa, the symmetry of $\cal{B}$ ensures the compatibility of
the $p$ logistic conditional distributions (\ref{eqn:Plogistic}), and the
resulting joint distribution is the quadratic exponential model
(\ref{eqn1})(Joe and Liu, 1986). Thus, to infer the edge set $E$ of
the graphical model, i.e., non-zero off-diagonal entries in
$\cal{B}$, we can resort to regression analysis by simultaneously
fitting the $p$ logistic regression models in~(\ref{eqn:Plogistic})
with symmetric $\cal{B}$.
Specifically, let $X_{n \times p}$ denote the data which consists of
$n$ samples each measured with $p$-variate binary events. We also
define two other variables mainly for the ease of the presentation
of the likelihood function: (1) $Y$ is the same as $X$ but with 0s replaced
with -1s; (2) $X^r, r = 1, \ldots, p$ same as X but with $r$th
column set to 1. We propose to maximize the joint log likelihood of
the $p$ logistic regressions in~(\ref{eqn:Plogistic}) as follows:
\begin{eqnarray}\label{eqn:JointLogit}
l ({\cal{B}}) = -\sum_{r=1}^p \sum_{i=1}^n \log \left\{ 1 +
\exp(-X^r[i,]{\cal{B}}[r,]^T \cdot y_{ir})\right\}.
\end{eqnarray}
where $X^r[i,] = (x^r_{i1}, \ldots, x^r_{ip})$; and ${\cal{B}}
[r,]=(\beta_{r1}, ..., \beta_{rp})$. Note, here we have the
constraints $\beta_{rs}=\beta_{sr}$ for $1 \leq r < s \leq p$; and
$\beta_{rr}$ now represents the intercept $\theta_r$ of the $r$th
regression.
Recall that our interest is to infer oncogenic pathways based
on genome instability profiles of tumor samples. Most often, we are dealing with
hundreds or thousands of genes and only tens or hundreds of samples.
Thus, regularization on parameter estimation becomes indispensable
as the number of variables is larger than the sample size, $p>n$. In
the past decade, $\ell_1$ norm based sparsity constraints such as
\texttt{lasso} (Tibshirani 1996) have shown considerable success in
handling high-dimension-low-sample-size
problems when the true model is sparse relative to the
dimensionality of the data. Since it is widely believed that genetic
regulatory relationships are intrinsically sparse (Jeong et al.
2001; Gardner et al. 2003), we propose to use $\ell_1$ norm penalty
for inferring oncogenic pathways. The penalized loss
function can be written as:
\begin{equation}\label{eqn:PenalizedJL}
l_{\lambda}^{lasso}({\cal{B}}) = -l({\cal{B}}) + \lambda \sum_{r=1}^p
\sum_{s=r+1}^p |\beta_{rs}|.
\end{equation}
Note that $\ell_1$-norm penalty is imposed on all off-diagonal entries of
$\cal{B}$ matrix simultaneously to control the overall sparsity
of the joint logistical regression model, i.e., only a limited number
of $\beta_{rs}$, $r \ne s$ will be non-zero. We then estimate
$\cal{B}$ by $\widehat{\cal{B}}(\lambda):= \textrm{arg min}_{\cal{B}}
l_{\lambda}^{lasso}({\cal{B}})$. In the rest of the paper, we refer to the
model defined in~(\ref{eqn:PenalizedJL}) as \texttt{LogitNet} model,
$\widehat{\cal{B}}(\lambda)$ as the \texttt{LogitNet}
estimator and $\widehat\beta_{rs}(\lambda)$ as the $rs$th element of
$\widehat{\cal{B}}(\lambda)$.
As described in the Introduction, the \texttt{LogitNet} model is
closely related to the work by Ravikumar et al. (2009) which fits
$p$ \texttt{lasso} logistic regressions separately (hereafter
referred to as \texttt{SepLogit}). Our model, however, differs in
two aspects: (1) \texttt{LogitNet} imposes the \texttt{lasso}
constraint for the entire network while \texttt{SepLogit} does it
for each neighborhood; (2) \texttt{LogitNet} enforces symmetry when
estimating the regression coefficients while \texttt{SepLogit}
doesn't, so for \texttt{LogitNet} there are only about half of the
parameters needed to be estimated as for \texttt{SepLogit}. As a
result, the \texttt{LogitNet} estimates are more efficient and the
results are more interpretable than \texttt{SepLogit}.
\subsection{Model fitting}
In this section, we describe an algorithm for obtaining the
\texttt{LogitNet} estimator $\widehat{\cal{B}}(\lambda)$. The
algorithm extends the gradient descent algorithm (Genkin et al. 2007)
to enforce the symmetry of $\cal{B}$.
Parameters are updated one at a time using a one-step Newton-Raphson
algorithm, in the same spirit as the shooting algorithm (Fu, 1998) and the
coordinate descent algorithm (Friedman et al., 2007a) for
solving the general linear \texttt{lasso} regressions.
More specifically, let $\dot l(\beta_{rs})$ and $\ddot l(\beta_{rs})$
be the first- and second- partial derivatives of log-likelihood
$l(\cal{B})$ with respect to $\beta_{rs}$,
\begin{eqnarray*}
\dot l(\beta_{rs}) & = & \sum_{i=1}^n\frac{X^r[i,s]
Y[i,r]}{1+\exp(R_{r})} +
\sum_{i=1}^n\frac{X^s[i,r]Y[i,s]}{1+\exp(R_{s})}, \\
\ddot l(\beta_{rs}) & = & \sum_{i=1}^n(X^r[i,s])^2
\frac{\exp(R_{r})}{\{1+\exp(R_{r})\}^2} +
\sum_{i=1}^n (X^s[i,r])^2 \frac{\exp(R_{s})}{\{1+\exp(R_{s})\}^2},
\end{eqnarray*}
where $R_{r} = X^r[i,] {\cal{B}}[r,]^T Y[i,r]$. Under the
Newton-Raphson algorithm, the update for the estimate $\widehat
\beta_{rs}$ is $\Delta
\beta_{rs}=-\dot l (\beta_{rs})/\ddot l(\beta_{rs})$. For the
penalized likelihood (\ref{eqn:PenalizedJL}), the update for
$\widehat \beta_{rs}(\lambda)$ is
\begin{eqnarray}
\Delta \beta_{rs}^{lasso} & = & -\frac{\dot l_\lambda^{lasso}(\beta_{rs})}{\ddot
l_\lambda^{lasso}(\beta_{rs})} \nonumber \\
& = & \Delta \beta_{rs} - \frac{\lambda}{\ddot l(\beta_{rs})} \mbox{sgn}(\beta_{rs}),
\label{update}
\end{eqnarray}
where $\mbox{sgn}(\beta_{rs})$ is a sign function, which is 1 if
$\beta_{rs}$ is positive and -1 if $\beta_{rs}$ is negative. The
estimates are also thresholded such that if an update overshoots and
crosses the zero, the update will be set to 0. If the current
estimate is 0, the algorithm will try both directions by setting
$\mbox{sgn}$ to be 1 and -1, respectively. By the convexity of
(\ref{eqn:PenalizedJL}), the update for both directions can not be
simultaneously successful. If it fails on both directions, the estimate will
be set to 0. The algorithm also takes other steps to make sure
the estimates and the numerical procedure are stable, including
limiting the update sizes and setting the upper bounds for $\ddot l$
(Zhang and Oles 2001). See Supplemental Appendix C for more
details of the algorithm.
To further improve the convergence speed of the algorithm, we
utilize the \texttt{Active-Shooting} idea proposed by Peng et al.
(2009a) and Friedman et al. (2009). Specifically, at each
iteration, we define the set of currently non-zero
coefficients
as the current active set and conduct the following two steps: (1)
update the coefficients in the active set until convergence is
achieved; (2) conduct a full loop update on all the coefficients
one by one. We then repeat (1) and (2) until convergence is
achieved on all of the coefficients. Since the target model in our
problem is usually very sparse, this algorithm achieves a very
fast convergence rate by focusing on the small subspace whose
members are more likely to be in the model.
We note that in equation~(\ref{update}) the regularization shrinks the estimate
towards zero by the amount determined by the penalty parameter
$\lambda$ and that each parameter is not penalized by the same
amount: $\lambda$ is weighted by the variance ${\ddot
l(\beta_{rs})}^{-1}$ of $\widehat \beta_{rs}$. In other words,
parameter estimates that have larger variances will be penalized
more than the ones that have smaller variances. It turns out that
this type of penalization is very useful, as it would also offer us
ways to account for the other features of the data. In the next
section we show a proposal for adding another weight function to
account for spatial correlations in genomic instability data.
\subsection{Spatial correlation}
Spatial correlation of aberrations is common in genomic
instability data. When we perform the regression of $X_r$ on all
other binary variables, loci that are spatially closest to
the $X_r$ are likely the strongest predictors in the model and will
explain away most of the variation in $X_r$. The loci at the other
locations of the same or other chromosomes, even if they are
correlated with $X_r$, may be left out in the model. Obviously
this result is not desirable because our objective is to identify the
network among all of these loci (binary variables), in particular
those that are not close spatially as we know them already.
One approach to accounting for this undesirable spatial
effect is to downweight the effect of the neighboring loci of $X_r$
when
regressing $X_r$ on the rest of the loci. Recall that in Section
2.2, we observed that the penalty term in (\ref{update}) is
inversely weighted by the variance of the parameter estimates.
Following the same idea, we can achieve the downweighting of neighboring
loci by letting the penalty term be proportional to the strength of
their correlations with $X_r$. This way we can
shrink the effects of the neighboring loci with strong
spatial correlation more than those that have less or no spatial
correlation. Specifically, the update for the parameter estimate
$\beta_{rs}$ in (\ref{update}) can be written as
\begin{eqnarray*}
\Delta \beta_{rs}^{lasso} & = &
\Delta \beta_{rs} - \lambda \frac{w_{rs}}{\ddot l(\beta_{rs})}
\mbox{sgn}(\beta_{rs}),
\end{eqnarray*}
where $w_{rs}$ is the weight for the spatial correlation. Naturally
the weight $w_{rs}$ for $X_r$ and $X_s$ on different
chromosomes is 1 and for $X_r$ and $X_s$ on
the same chromosome should depend on the strength of the spatial
correlation. As the spatial correlation varies across the genome, we
propose the following adaptive estimator for
$w_{rs}$:
\begin{enumerate}
\item Calculate the odds ratio $\alpha$ between every locus in the
chromosome with the target locus by univariate logistic regression.
\item Plot the $\alpha$'s by their genomic locations and smooth the
profile by loess with a window size of 10 loci.
\item Set the smoothed curve $\widetilde{\alpha}$ to 0 as soon as the
curve starting from the target locus hits 0. Here ``hits 0'' is
defined as $\widetilde{\alpha}<\varepsilon$, where
$\varepsilon=\textrm{median}_r|\widetilde{\alpha}_r-\widetilde{\alpha}_{r+1}|$.
\item Set the weight $w = \exp(\widetilde{\alpha})$.
\end{enumerate}
It is worth noting that the above weighting scheme together
with the enforcement of the symmetry of $\cal{B}$ in
\texttt{LogitNet} encourages a group selection effect, \textit{i.e.},
highly correlated predictors tend to be in or out of the model
simultaneously. We illustrate this point with a simple example system
of three variables $X_1, X_2$, and $X_3$. Suppose that $X_2$ and
$X_3$ are very close on the genome and highly correlated; and
$X_1$ is associated with $X_2$ and $X_3$ but sits on a different
chromosome. Under our proposal, the weight matrix $w$ is 1 for all
entries except $w_{23}=w_{32}=a$, which is a large value because of
the strong spatial correlation between $X_2$ and $X_3$.
Then, for \texttt{LogitNet}, the joint logistic regression model
\begin{eqnarray}
\label{eqn:WeiExa:1}\texttt{logit}(X_1)\sim \beta_{11} +\beta_{12}
X_2 + \beta_{13} X_3,\\
\label{eqn:WeiExa:2} \texttt{logit}(X_2)\sim \beta_{12} X_1
+\beta_{22} + \beta_{23}
X_3,\\
\label{eqn:WeiExa:3}\texttt{logit}(X_3)\sim \beta_{13} X_1
+\beta_{23}X_2 + \beta_{33},
\end{eqnarray}
is subject to the constraint $|\beta_{12}|+|\beta_{13}|+
a|\beta_{23}|<s$. Because of the large value of $a$, $\beta_{23}$
will likely be shrunk to zero, which ensures $\beta_{12}$ and
$\beta_{13}$ to be nonzero in (\ref{eqn:WeiExa:2}) and
(\ref{eqn:WeiExa:3}), respectively. With the symmetry constraint
imposed on $\cal{B}$ matrix, we also enforce both $\beta_{12}$ and
$\beta_{13}$ to be selected in (\ref{eqn:WeiExa:1}). This grouping
effect would not happen if we fit only the model
(\ref{eqn:WeiExa:1}) for which only one of $\beta_{12}$ and
$\beta_{13}$ would likely be selected (Zou and Hastie 2005), nor would it
happen if we didn't have a large value of $a$ because $\beta_{23}$ would
have been the dominant coefficient in models (\ref{eqn:WeiExa:2}) and
(\ref{eqn:WeiExa:3}). Indeed, the group selection effect of
\texttt{LogitNet} is clearly observed in the simulation studies conducted
in Section 3.
\subsection{Penalty Parameter Selection}
We consider two procedures for selecting the penalty parameter
$\lambda$: cross validation (CV) and Bayesian Information Criterion
(BIC).
\subsubsection{Cross Validation}
After we derive the weight matrix $w$ based on the whole data set,
we divide the data into $V$ non-overlapping equal subsets. Treat the
$v^{th}$ subset $X^{(v)}$ as the $v^{th}$ test set, and its
complement $X^{-(v)}$ as the $v^{th}$ training set. For a given
$\lambda$, we first obtain the \texttt{LogitNet}
estimate $\widehat{\cal{B}}^{v}(\lambda)$ with the weight matrix $w$
on the $v^{th}$ training set $X^{-(v)}$. Since in our problem the
true model is usually very sparse, the degree of regularization
needed is often high. As a result, the value of
$\widehat{\cal{B}}^{v}(\lambda)$ could be shrunk far from the true
parameter values. Using such heavily shrunk estimates for choosing
$\lambda$ from cross validation often results in severe over-fitting
(Efron et al. 2004). Thus, we re-estimate $\cal{B}$ using the
selected model in the $v$th training set without any shrinkage and
use it in calculating the log-likelihood for the $v^{th}$ test set.
The \textit{un-shrunk estimates}
$\widehat{\cal{B}}_{uns}^{(v)}(\lambda)$ can be easily obtained
from our current algorithm for the regularized estimates
with modifications described below:
\begin{enumerate}
\item Define a new weight matrix $\widetilde{w}^{v}$ such that
$\widetilde{w}_{rs}^{v}=1$, if $\widehat{\beta}_{rs}^{(v)}\neq 0$;
and $\widetilde{w}_{rs}^{v}=M$, if $\widehat{\beta}_{rs}^{(v)}= 0$,
where $M=\textrm{max}\{w_{rs}\}$.
\item Fit the \texttt{LogitNet} model using the new weight matrix
$\widetilde{w}^{v}$, thus $\{\beta_{rs}| \widetilde{w}_{rs}^{v}=1\}$ are not
penalized in the model and all other $\beta_{rs}$ are shrunk to 0.
The result is $\widehat{\cal{B}}_{uns}^{(v)}(\lambda)$.
\end{enumerate}
We then calculate the joint log likelihood of logistic regressions
using the un-shrunk estimates on the $v^{th}$ test set
$l(\widehat{\cal{B}}_{uns}^{(v)}(\lambda)|X^{(v)})$ according to
formula (\ref{eqn:Plogistic}). The optimal
$\lambda_{\textrm{cv}}=\textrm{arg}\max_{\lambda} \sum_{v=1}^V
l(\widehat{\cal{B}}_{uns}^{(v)}(\lambda)|X^{(v)})$.
In order to further control the false positive findings due to
stochastic variation, we employ the \texttt{cv.vote} procedure
proposed by Peng et al. (2009b). The idea is to derive the
``consensus" result of the models estimated from each training set,
as variables that are consistently selected by different training
sets should be more likely to appear in the true model than the
ones that are selected by one or few training sets. Specifically,
for a pair of $r$th and $s$th variables, we define
\begin{equation}
s_{rs}(\lambda_{\textrm{cv}})=\left\{ \begin{array}{ll} 1, & \textrm{if }
\sum_{v=1}^{V} I(\widehat{\beta}_{uns,rs}^{(v)}(\lambda_{\textrm{cv}})\neq 0) > V/2;\\
0, & \textrm{otherwise.}
\end{array}
\right.
\end{equation}
We return $\{s_{rs}(\lambda_{\textrm{cv}})\}$ as our final result.
\subsubsection{BIC}
We can also use BIC to select $\lambda$:
\begin{equation}\label{eqn:BIC}
\lambda_{\textrm{BIC}}=\arg
\min_{\lambda}\left\{-2l(\widehat{\cal{B}}_{uns}(\lambda)|X)+
\log(n) \sum_{r<s} I
(\widehat{\beta}_{uns,rs}(\lambda)\neq 0)\right\}
\end{equation}
where $\sum_{r<s} I (\widehat{\beta}_{uns,rs}(\lambda)\neq 0)$
gives the dimension of the parameter space of the selected model.
Here again, the \textit{un-shrunk estimates}
$\widehat{\cal{B}}_{uns}(\lambda)$ are used to calculate the log
likelihood.
\section{Simulation Studies}
In this section, we investigate the performance of the
\texttt{LogitNet} method and compare it with \texttt{SepLogit}
which fits $p$ separate lasso logistic regressions all using the same
penalty parameter value (Ravikumar et al., 2009). We use R package
\textit{glmnet} to compute the \texttt{SepLogit} solution and the
same weight matrix as described in Section 2.3 to account for the
spatial correlation. In addition, since the \texttt{SepLogit} method
does not ensure the symmetry of the estimated $\mathcal{B}$ matrix,
there will be cases that $\beta_{rs} =0 $ but $\beta_{sr} \neq 0$ or
vice versa. In these cases we interpret the result using the ``OR''
rule: $X_r$ and $X_s$ are deemed to be conditionally dependent if
either $\beta_{rs}$ or $\beta_{sr}$ is non-zero. We have also used the
``AND'' rule, \textit{i.e.} $X_r$ and $X_s$ are deemed to be
conditionally dependent if both $\beta_{rs}$ and
$\beta_{sr}$ are non-zero. The ``AND'' rule always yields very high false
negative rate. Due to space limitations, we omit the results for the
``AND'' rule.
\subsection{Simulation setting}
We generated background aberration events with spatial correlation
using a homogenous Bernoulli Markov model. It is part of the
instability-selection model (Newton et al. 1998), which hypothesizes
that the genetic structure of a progenitor cell is subject to
chromosomal instability that causes random aberrations. The Markov
model has two parameters: $\delta$ and $\nu$, where $\delta$ is the
marginal (stationary) probability at a marker locus and $\nu$
measures the strength of the dependence between the aberrations. So
$\delta$ plays the role of a background or sporadic aberration, while
$\nu$ affects the overall rate of change in the stochastic process.
Under this model, the infinitesimal rate of change from no
aberration to aberration is $\nu \delta$, and from aberration to no
aberration is $\nu(1-\delta)$. We then super-imposed the aberrations
at disease loci, which were generated according to a pre-determined
oncogenic pathway, on the background aberration events. The
algorithm for generating an aberration indicator vector $X^T=(X_1,
\cdots, X_p)$ is given below:
\begin{enumerate}
\item[1.] Specify the topology of an oncogenic pathway for the
disease loci and the transitional probabilities among the aberrations on the
pathway. The $K$ disease loci are indexed by $\{{s_1}, \cdots,
{s_K}\}$, where $s_i \in \{1, \ldots, p\}$ for $i= 1, \ldots, K$.
\item[2.] Generate background aberrations denoted by a $p\times 1$
vector $Z$ according to the homogenous Bernoulli Markov process with
preselected values of $\delta=0.05$ and $\nu=15$.
\item[3.] Generate aberration events at disease loci following the
oncogenic pathway specified in Step 1. This is denoted by a $p
\times 1$ vector $U$, where indices $\{{s_1}, \cdots, {s_K}\}$
are disease loci. If disease locus $s_i$ has an aberration
($U_{s_i}=1$), we also
assign aberrations to its neighboring loci $U_t=1$, for $t \in
[s_i-a_i, s_i+b_i]$, where $a_i$ and $b_i$ are independently sampled
from Uniform$[0, 30]$. The rest of the elements in $U$ are 0.
\item[4.] Combine the aberration events at disease loci and the background
aberrations by assigning $X_i=1 \textrm{ if }U_i+Z_i>0$ and $0 \textrm{ if
}U_i=Z_i=0$, for $i=1, \ldots, p$.
\end{enumerate}
We set $n=200$ and $p=600$ to mimic the dimension of the real data
set used in Section~\ref{sec:RealApplication}, so $V = \{1, \ldots,
600\}$. We assume the $600$ marker loci fall into 6 different
chromosomes with 100 loci on each chromosome. We consider two
different oncogenic pathway models: a chain shape and a tree shape
(see Figure~\ref{Fig1}). Each model contains 6 aberration events:
$\texttt{M}=\{\texttt{A}, \texttt{B}, \texttt{C}, \texttt{D},
\texttt{E}, \texttt{F}\}$. Without loss of generality, we assume
these 6 aberrations are located in the middle of each chromosome, so
the indices of \texttt{A}--\texttt{F} are $s_{\mathtt{A}} = 50$,
$s_{\mathtt{B}}=150$, $\cdots$, $s_\mathtt{F} = 550$, respectively.
For any $\texttt{u}\in \texttt{M}$, $X_{s_{\mathtt{u}}}=1$ means
aberration \texttt{u} occurs in the sample.
We evaluate the performance of the methods by two metrics:
the false positive rate (\texttt{FPR}) and the false negative rate
(\texttt{FNR}) of edge detection. Denote the true edge
set $E =\{(u,v) | X_{u} \mbox{ and } X_v \mbox{ are
conditionally dependent }, u \in V, v \in V\}$.
We define a non-zero $\hat\beta_{rs}$ as a false detection if its genome
location indices $(r,s)$ are far from the indices of any true edge:
\[
|r-s_\texttt{u}|+|s-s_\texttt{v}|> 30, \qquad
\forall(\texttt{u},\texttt{v})\in E.
\]
For example, in
Figure~\ref{Fig:ChainBeta} red dots that do not fall into any grey diamond are
considered false detection. A cutoff
value of 30 is used here because in the simulation setup (see Step 3)
we set the maximum aberration size around the
disease locus to be 30. Similarly, we define a conditionally dependent pair
$(\texttt{u},\texttt{v})\in E$ as missed if there is no non-zero
$\beta$ falling in the grey diamond. We then calculate \texttt{FPR}
as the number of false detections divided by the total number of non-zero
$\hat\beta_{rs}$, $r<s$; and calculate \texttt{FNR} as the number
of missed $(\texttt{u},\texttt{v})\in E$ divided by the size
of $E$.
\subsection{Simulation I --- Chain Model}
For the chain model, aberrations \texttt{A}-\texttt{F} occur
sequentially on one oncogenic pathway. The aberration frequencies
and transitional probabilities along the oncogenic pathway are
illustrated in Figure~\ref{Fig:ChainModel}. The true conditionally
dependent pairs in this model are
$$E = \{(s_\mathtt{A}, s_\mathtt{B}), (s_\mathtt{B}, s_\mathtt{C}),
(s_\mathtt{C}, s_\mathtt{D}), (s_\mathtt{D}, s_\mathtt{E}), (s_\mathtt{E},
s_\mathtt{F})\}.$$
Based on this chain model, we generated 50 independent data sets.
The heatmap of one example data matrix $200\times
600$ is shown in Supplemental Figure S-1. We then
apply \texttt{LogitNet} and \texttt{SepLogit} to each simulated data
set for a series of different values of
$\lambda$. Figure~\ref{Fig:Chain.error} shows the \texttt{FPR} and
\texttt{FNR} of the two methods as a function of $\lambda$.
For both methods, \texttt{FPR} decreases with $\lambda$ while
\texttt{FNR} increases with $\lambda$. Comparing the two methods,
\texttt{LogitNet} clearly outperforms \texttt{SepLogit} in terms of
\texttt{FPR} and \texttt{FNR}. For \texttt{LogitNet}, the average
optimal total error rate (\texttt{FPR}$+$\texttt{FNR}) across the 50
independent data sets is 0.014 (s.d.=0.029); while the average optimal total error
rate for \texttt{SepLogit} is 0.211 (s.d.=0.203). Specifically, taking
the data set shown in the Supplemental Figure S-1 as an example, the
optimal total error rate achieved by \texttt{LogitNet} on this data
set is 0, while the optimal total error achieved by \texttt{SepLogit}
is 0.563 (\texttt{FPR}$=0.563$, \texttt{FNR}$=0$). The corresponding
two coefficient matrices $\hat{\cal{B}}$ are illustrated in
Figure~\ref{Fig:ChainBeta}. As one can see, there is
a large degree of asymmetry in the result of \texttt{SepLogit}: 435
out of the 476 non-zero $\hat\beta_{rs}$ have inconsistent transpose
elements, $\hat\beta_{sr}=0$.
On the contrary, by enforcing symmetry our proposed approach
\texttt{LogitNet} has correctly identified all five true conditionally
dependent pairs in the chain model. Moreover, the non-zero
$\hat\beta_{rs}$'s plotted by red dots tend to be clustered within
the grey diamonds. This shows that \texttt{LogitNet} indeed encourages
group selection for highly correlated predictors, and thus is
able to make good use of the spatial correlation in the data when
inferring the edges.
We also evaluated the two penalty parameter selection approaches: CV
and BIC, for \texttt{LogitNet}.
Table~\ref{table:tuning} summarizes the \texttt{FPR}
and \texttt{FNR} for CV and BIC. Both approaches performed
reasonably well. The CV criterion tends to select larger models than
the BIC, and thus has more false positives and fewer false
negatives. The average total error rate (\texttt{FPR}$+$\texttt{FNR})
for CV is 0.079, which is slightly smaller than the total error rate
for BIC, 0.084.
\begin{table}\caption{Summary of \texttt{FPR}
and \texttt{FNR} for \texttt{LogitNet} for using CV and BIC to choose optimal
$\lambda$. Each entry is the mean (S.D.) over 50 independent data sets}
\begin{tabular}{c|cc|cc}\hline
& \multicolumn{2}{|c|}{Chain Model} & \multicolumn{2}{|c}{Tree Model}
\\\hline
& \texttt{FPR} & \texttt{FNR} & \texttt{FPR} & \texttt{FNR}\\
\texttt{CV} & 0.079 (0.049) & 0 (0) & 0.058 (0.059) & 0.280 (0.17)\\
\texttt{BIC} & 0.025 (0.035) & 0.06 (0.101) & 0.024 (0.038) & 0.436 (0.197)\\
\hline
\end{tabular}
\label{table:tuning}
\end{table}
\subsection{Simulation II --- Tree Model}
For the tree model, we used the empirical mutagenic tree derived
in Beerenwinkel et al.\ (2004) for a HIV data set. The details of the
model are illustrated in Figure~\ref{Fig:TreeModel}. The true
conditionally dependent pairs in this model are
$$ E =\{(s_\mathtt{A}, s_\mathtt{B}), (s_\mathtt{B}, s_\mathtt{E}),
(s_\mathtt{A}, s_\mathtt{C}), (s_\mathtt{C}, s_\mathtt{F}),
(s_\mathtt{A}, s_\mathtt{D})\}.$$ The results of \texttt{LogitNet}
and \texttt{SepLogit} for these data sets are summarized in
Figure~\ref{Fig:tree.error}. Again, \texttt{LogitNet} outperforms
\texttt{SepLogit} in terms of \texttt{FPR} and \texttt{FNR}. The
average optimal total error rate (\texttt{FPR}+\texttt{FNR})
achieved by \texttt{LogitNet} across the 50 independent data sets is
0.163 (s.d.=0.106); while the average optimal total error rate for
\texttt{SepLogit} is 0.331 (s.d.=0.160), twice as large as
\texttt{LogitNet}. We also evaluated CV and BIC for
\texttt{LogitNet}. The
results are summarized in Table~\ref{table:tuning}. Both CV and BIC
give much higher \texttt{FNR}s under the tree model than under
the chain model. This is not surprising as some transition
probabilities between aberration events along the pathway are smaller
in the tree model than in the chain model. As in
the chain model, we also observe that BIC gives
smaller \texttt{FPR} and higher \texttt{FNR} than CV, suggesting CV
tends to select larger models and thus yields less
false negatives but with more false positives in detecting edges.
\section{Application to a Breast Cancer Data Set}\label{sec:RealApplication}
In this section, we illustrate our method using a genomic
instability data set from breast cancer samples. In this data set
the genomic instability is measured by loss of heterozygosity (LOH),
one of the most common alterations in breast cancer. An LOH event at
a marker locus for a tumor is defined as a locus that is homozygous
in the tumor and heterozygous in the constitutive normal DNA. To
gain a better understanding of LOH in breast cancer, Loo et al.
(2008) conducted a study which used the GeneChip Mapping 10K Assay
(Affymetrix, Santa Clara, CA) to measure LOH events in 166 breast
tumors derived from a population-based sample. The array contains
9706 SNPs, with 9670 having annotated genome locations.
Approximately 25\% of the SNPs are heterozygous in normal DNA, which
means LOH can not be detected in the remaining 75\% of SNPs, i.e.,
the SNPs are non-informative. To minimize the missing rate for
individual SNPs, we binned the SNPs by cytogenetic bands (cytoband).
A total of 765 cytobands are covered by these SNPs. For each sample,
we define the LOH status of a cytoband to be 1 if at least 2
informative SNPs in this cytoband show LOH and 0 otherwise. We then
remove 164 cytobands which either have missing rates above $20\%$,
or show LOH in less than 5 samples to exclude rare events. The
average LOH rate in the remaining 601 cytobands is $12.3\%$.
Despite our effort to minimize missingness in the data, $7.5\%$ of
values are still missing in the remaining data. We use the
multiple imputation approach to impute the missing values based on
the conditional probability of LOH at the target SNP given the available LOH
status at adjacent loci. If both adjacent loci are missing LOH
status, we will impute the genotype using only the marginal
probability of the target SNP. See Supplemental Appendix D for
details of the multiple imputation algorithm.
We then generate 10 imputed data sets. We apply \texttt{LogitNet} on
each of them and use 10-fold CV for penalty parameter selection. The
total number of edges inferred on each imputed data set is
summarized in Table~\ref{table:edgeNumber}. We can see that two
imputation data sets have far more edges detected than the rest of
imputation data sets. This suggests that there is a substantial
variation among imputed data sets and we cannot rely on a single
imputed data set. Thus, we examine the consensus edges across
different imputation runs. There are 3 edges inferred in at least 4
imputed datasets (Table~\ref{table:interaction}). Particularly,
interaction between 11q24.2 and 13q21.33 has been consistently
detected in all of the 10 imputation data sets. Detailed numbers of
LOH frequencies at these two cytobands are shown in Supplementary
Table S-1. Cytoband 11q24.2 harbors the CHEK1 gene, which is an
important gene in the maintenance of genome integrity and a
potential tumor suppressor. DACH1 is located on cytoband 13q21.33 and
has a role in the inhibition of tumor progression and metastasis in
several types of cancer (e.g., Wu et al., 2009). Both CHEK1
and DACH1 inhibit cell cycle progression through mechanisms involving
the cell cycle inhibitor, CDKN1A. See Supplemental Figure S-2 for the
pathway showing the interaction between CHEK1 on 11q24.2 and DACH1
on 13q21.33.
\begin{table}
\caption{Number of edges detected in each imputed data set. }
\begin{tabular}{c|cccccccccc}\hline
Imputation Index & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10\\
$\#$ of edges detected & 2 & 2 & 5 & 219 & 3 & 10 & 1 & 114 & 2 & 1\\
\hline
\end{tabular}
\label{table:edgeNumber}
\end{table}
\begin{table}\caption{Annotation for the edges inferred in at least 4
out of 10 imputed datasets. }
\begin{tabular}{cc} \hline
Cytoband pair & Frequency of selection \\\hline
11q22.3, 13q33.1 & 6\\
11q24.2, 13q21.33 & 10\\
11q25, 13q14.11 & 4\\
\hline
\end{tabular}
\label{table:interaction}
\end{table}
\section{Final Remarks}
In this paper, we propose the \texttt{LogitNet} method for learning
networks using high dimensional binary data. The work is motivated
by the interest in inferring disease oncogenic pathways from genomic
instability profiles (binary data). We show that under the
assumption of no cycles for the oncogenic pathways, the dependence
parameters in the joint probability distribution of binary variables
can be estimated by fitting a set of logistic regression models
with a symmetric coefficient matrix. For
high-dimension-low-sample-size data, this result is especially
appealing as we can use sparse regression techniques to regularize
the parameter estimation. We implemented a fast algorithm for
obtaining the \texttt{LogitNet} estimator. This algorithm enforces
the symmetry of the coefficient matrix and also accounts for the
spatial correlation in the genomic instability profiles by a
weighting scheme. With extensive simulation studies, we demonstrate
that this method achieves good power in edge detection, and also
performs favorably compared to an existing method.
In \texttt{LogitNet}, the weighting scheme together with the
enforcement of symmetry encourage a group selection effect,
\textit{i.e.}, highly spatially correlated variables tend to be in and
out of the model simultaneously. It is conceivable that this
group selection effect may be further enhanced by replacing the
\texttt{lasso} penalty with the \texttt{elastic net} penalty proposed
by Zou and Hastie (2005) as $ \lambda_1\sum_{1\leq r<s\leq p}|\beta_{rs}|
+\lambda_2\sum_{1\leq r<s\leq p}\beta_{rs}^2$. The square $\ell_2$ norm
penalty may facilitate group selection within each regularized
logistic regression. More investigation along this line
is warranted.
R package \texttt{LogitNet} is available from the authors upon
request. It will also be made available through CRAN shortly.
\begin{figure}
\caption{\textbf{(a)} The chain model; \textbf{(b)} the tree model.}
\label{Fig:ChainModel}
\label{Fig:TreeModel}
\label{Fig1}
\end{figure}
\begin{figure}
\caption{\texttt{FPR} and \texttt{FNR} of \texttt{LogitNet} and \texttt{SepLogit} as a function of $\lambda$ under the chain model.}
\label{Fig:Chain.error}
\end{figure}
\begin{figure}
\caption{Estimated coefficient matrices $\hat{\cal{B}}$ of \texttt{LogitNet} and \texttt{SepLogit} for one example data set under the chain model.}
\label{Fig:ChainBeta}
\end{figure}
\begin{figure}
\caption{\texttt{FPR} and \texttt{FNR} of \texttt{LogitNet} and \texttt{SepLogit} as a function of $\lambda$ under the tree model.}
\label{Fig:tree.error}
\end{figure}
\begin{center} {\sc Acknowledgments}
\end{center}
The authors are grateful to Drs. Peggy Porter and Lenora Loo for
providing the genomic instability data set to us, which has motivated
this methods development work. The authors are in part supported by
grants from the National Institute of Health, R01GM082802 (Wang),
R01AG14358 (Chao and Hsu), and P01CA53996 (Hsu).
\end{document} |
\begin{document}
\title{Flat approximations of hypersurfaces along curves}
\author{Irina Markina}
\address{Department of Mathematics\\
University of Bergen\\
5020 Bergen\\
Norway}
\email{irina.markina@uib.no}
\author{Matteo Raffaelli}
\address{DTU Compute\\
Technical University of Denmark\\
2800 Kongens Lyngby\\
Denmark}
\email{matraf@dtu.dk}
\date{\today}
\begin{abstract}
Given a smooth curve $\gamma$ in some $m$-dimensional surface $M$ in $\mathbb{R}^{m+1}$, we study existence and uniqueness of a flat surface $H$ having the same field of normal vectors as $M$ along $\gamma$, which we call a flat approximation of $M$ along $\gamma$. In particular, the well-known characterisation of flat surfaces as torses (ruled surfaces with tangent plane stable along the rulings) allows us to give an explicit parametric construction of such approximation.
\end{abstract}
\maketitle
\section{Introduction and Main Result}
\noindent Developable, or flat, hypersurfaces in $\mathbb{R}^{m+1}$, where $m\geq 2$, are classical objects in Riemannian geometry. They are characterised by being foliated by open subsets of $(m-1)$-dimensional planes, called rulings, along which the tangent space remains stable \cite[Theorem~1]{ushakov1999}. Here we are concerned with the problem of existence and uniqueness---as well as with the explicit construction---of flat approximations of hypersurfaces along curves. Let $M^{m}$ be a (possibly curved) Euclidean hypersurface and $\gamma$ a curve in $M^{m}$. A hypersurface $H$ is called an \textit{approximation of} $M^{m}$ \textit{along} $\gamma$ if the two manifolds have common tangent space at every point of $\gamma$.
In dimension $2$, the question of existence has been settled for a long time. A constructive proof, under suitable assumptions, is already present in Do Carmo's textbook \cite[p.~195--197]{docarmo1976}. It turns out that the existence of a flat approximation of $M^{2}$ along $\gamma$ implies the existence of a rolling, in Nomizu's sense, of $M^{2}$ on the tangent space $T_{\gamma(0)}M^{2}$ along the given curve -- see \cite{nomizu1978} and \cite{raffaelli2016}. More recently, Izumiya and Otani have shown uniqueness \cite[Corollary~6.2]{izumiya2015}.
In this paper, we extend the result in \cite{docarmo1976} to any curve in $M^{m}$. More precisely, we shall present a constructive proof of the following
\begin{theorem} \label{mainresult}
Let $\gamma \colon I \to M^{m}$ be a smooth curve in a hypersurface $M^{m}$ in $\mathbb{R}^{m+1}$. If the curve is never parallel to an asymptotic direction of $M^{m}$, then there exists a flat approximation $H$ of $M^{m}$ along $\gamma$. Such a hypersurface is unique in the following sense: if $H_{1}$ and $H_{2}$ are two flat approximations of $M^{m}$ along $\gamma$, then they agree on an open set containing $\gamma(I)$.
\end{theorem}
The strategy to prove this result involves looking for $(m-1)$-tuples of linearly independent vector fields $(X_{1},\dotsc, X_{m-1})$ along $\gamma$ satisfying $\dot{\gamma}(t) \notin \Span(X_{j}(t))_{j\,=\,1}^{m-1}$ for all $t$ and having zero \emph{normal} derivative (normal projection of Euclidean covariant derivative). Indeed, such conditions guarantee the image of the map $\gamma + \Span(X_{j})_{j\,=\,1}^{m-1}$ be a flat hypersurface of $\mathbb{R}^{m+1}$ in a neighbourhood of $\gamma$. The main difficulty resides in getting around the many-to-one correspondence between tuples of vector fields and rank-$(m-1)$ distributions along $\gamma$.
It is worth pointing out that the solution depends on the original hypersurface $M^{m}$ only through its distribution of tangent planes along $\gamma$. Thus, when $m=2$, our problem is nothing but the classical Bj\"{o}rling's problem---to find all minimal surfaces passing through a given curve with prescribed tangent planes---addressed to a different class of surfaces. In this respect, the present work joins several other recent studies aimed at solving Bj\"{o}rling-type questions, see \cite{brander2018,brander2013} and references therein.
The paper is organised as follows. The next two sections present some preliminaries, mostly for the sake of introducing relevant notation and terminology. In Section $4$ we derive a simple condition for discerning when a parametrised ruled hypersurface has a flat metric. Such condition is then used in Section $5$ to prove the main theorem. Finally, in Section $6$ we give some general remarks about the construction of the approximation.
\section{Vector Cross Products}
\noindent Let $V$ be an $n$-dimensional, real vector space equipped with a positive definite inner product $\langle\cdot{,}\cdot\rangle$. In the following, $V^{k}$ will indicate the $k$-th Cartesian power of $V$, and $L^{k}(V)$ the set of all multilinear maps from $V^{k}$ to $V$. Note that, under pointwise addition and scalar multiplication, $L^{k}(V)$ is a \emph{finite dimensional} vector space, in that it is naturally isomorphic to the space $T^{(1,k)}(V)$ of tensors on $V$ of type $(1,k)$ -- see for example \cite[Lemma~2.1]{lee1997}. Thus, $\dim L^{k}(V) = n^{k+1}$.
A $k$-\textit{fold vector cross product on} $V$, $1 \leq k \leq n$, is an element of $L^{k}(V)$---i.e., a multilinear map $ X \colon V^{k} \to V$---satisfying the following two axioms:
\begin{align*}
&\langle X(v_{1}, \dotsc, v_{k}), v_{i} \rangle = 0 \, , \quad 1 \leq i \leq k \, . \\
&\langle X(v_{1}, \dotsc, v_{k}), X(v_{1}, \dotsc, v_{k}) \rangle = \det (\langle v_{i}, v_{j} \rangle) \, .
\end{align*}
We emphasize that the second axiom implies any such $X$ being alternating.
In particular, in the case $V$ carries an orientation $\mathcal{O}$, we say that an $(n-1)$-fold vector cross product $X$ is \textit{positively oriented} if the following condition holds for all $(n-1)$-tuples of linearly independent vectors $v_{1}, \dotsc, v_{n-1}$:
\begin{equation*}
\left(v_{1}, \dotsc, v_{n-1}, X(v_{1}, \dotsc, v_{n-1})\right) \in \mathcal{O} \, .
\end{equation*}
Analogously, a \textit{negatively oriented} vector cross product satisfies the same relation with $-\mathcal{O}$ in place of $\mathcal{O}$.
In \cite{brown1967}, Brown and Gray proved the following theorem:
\begin{theorem} \label{VCP-TH1}
Let $V$ be an oriented finite dimensional inner product space, of dimension $n$. There exists a unique positively oriented $(n-1)$-fold vector cross product $X = \cdot \times \dotsb \times \cdot$ on $V$. It is given by:
\begin{equation*}
v_{1} \times \dotsb \times v_{n-1} = \star (v_{1} \wedge \dotsb \wedge v_{n-1}) \,
\end{equation*}
where $\star$ is the Hodge star operator on $V$.
\end{theorem}
We now turn our attention to manifolds. If $M$ is a smooth Riemannian manifold of dimension $m$, let $L^{k}\mathit{TM}$ be the disjoint union of all the vector spaces $L^{k}(T_{p}M)$:
$$ L^{k}\mathit{TM} = \bigsqcup_{p \in M} L^{k}(T_{p}M) \, .$$
Clearly, for $L^{k}(T_{p}M) \cong T^{(1,k)}(T_{p}M)$, the set $L^{k}\mathit{TM}$ has a canonical choice of topology and smooth structure turning it into a smooth vector bundle of rank $m^{k+1}$ over $M$. We define a $k$-\textit{fold vector cross product on} $M$, where $1 \leq k \leq m$, to be a smooth section $X$ of $L^{k}\mathit{TM}$ such that, for every point $p \in M$, the map $X_{p}$ is a $k$-fold vector cross product on $T_{p}M$.
We thus have the following corollary of Theorem \ref{VCP-TH1}:
\begin{corollary}
Let $M$ be a smooth oriented $m$-dimensional Riemannian manifold. There exists a unique $(m-1)$-fold positively oriented vector cross product on $M$. It acts on $(m-1)$-tuples of vector fields $X_{1}, \dotsc, X_{m-1}$ on $M$ by
\begin{equation*}
X_{1} \times \dotsb \times X_{m-1} = \star (X_{1} \wedge \dotsb \wedge X_{m-1}) \,.
\end{equation*}
\end{corollary}
\section{Frames Along Curves}
\noindent In this section we review some basic facts about Euclidean submanifolds and orthonormal frames along curves.
Let us start with some notation. If $m\geq2$, let $M$ be an $m$-dimensional embedded submanifold of $\mathbb{R}^{d}$, and $\gamma \colon I=[0,\alpha] \to M$ a smooth regular curve in $M$. Throughout this paper, $\mathbb{R}^{d}$ will always be equipped with the standard Euclidean metric $\overline{g}$, typically indicated by a dot ``$\,\cdot\,$'', and standard orientation. Thus, there is a natural choice of Riemannian metric on $M$: the induced metric $\iota^{\ast}\overline{g}$, i.e., the pullback of $\overline{g}$ by the inclusion $\iota \colon M \hookrightarrow \mathbb{R}^{d}$.
Working with submanifolds, it is customary to identify each tangent space $T_{p}M$ with its image under the differential of $\iota$. In so doing, the ambient tangent space $T_{p}\mathbb{R}^{d}$ splits as the orthogonal direct sum $T_{p}M \oplus N_{p}M$, where $N_{p}M$ is the normal space of $M$ at $p$. Thus, the set $\mathfrak{X}(M)$ of tangent vector fields \emph{on} $M$ becomes a proper subset of the set of vector fields \emph{along} $M$, which we denote by $\overline{\mathfrak{X}}(M)$. If $X \in \mathfrak{X}(M)$ and $\varUpsilon \in \overline{\mathfrak{X}}(M)$,
\begin{equation*}
\overline{\nabla}_{X}\varUpsilon = (\overline{\nabla}_{X}\varUpsilon)^{\top} + (\overline{\nabla}_{X}\varUpsilon)^{\perp}\,,
\end{equation*}
where $\overline{\nabla}$ is the Euclidean connection, $\top$ and $\perp$ are the orthogonal projections onto the tangent and normal bundle of $M$, and where the vector fields $X$ and $\varUpsilon$ are extended arbitrarily to $\mathbb{R}^{d}$. It turns out that the map $\mathfrak{X}(M) \times \mathfrak{X}(M) \to \mathfrak{X}(M)$ defined by
\begin{equation*}
(X,Y) \mapsto (\overline{\nabla}_{X}Y)^{\top}
\end{equation*}
is a linear connection on $M$, called the tangential connection. In fact, it is no other than the (intrinsic) Levi-Civita connection $\nabla$ of $(M,\iota^{\ast}\overline{g})$.
Similarly, indicating by $\mathfrak{X}(M)^{\perp}$ the set of normal vector fields along $M$, we define the normal connection on $M$ as the map $\mathfrak{X}(M) \times \mathfrak{X}(M)^{\perp} \to \mathfrak{X}(M)^{\perp}$ given by
\begin{equation*}
(X,N) \mapsto (\overline{\nabla}_{X}N)^{\perp}\,.
\end{equation*}
Let us recall that an orthonormal frame along $\gamma$ is an $m$-tuple of smooth vector fields $(E_{i})_{i\,=\,1}^{m}$ along $\gamma$ such that $(E_{i}(t))_{i\,=\,1}^{m}$ is an orthonormal basis of $T_{\gamma(t)}M$ for all $t$. In particular, an orthonormal frame $(W_{1}, \dotsc, W_{d})$ along a curve $\iota \circ \gamma$ in $\mathbb{R}^{d}$ is said to be $M$-\textit{adapted} if $(W_{i})_{i\,=\,1}^{m}$ spans the ambient tangent bundle over $\gamma$.
In the remainder of this section, we assume that $M$ has codimension one in $\mathbb{R}^{d}$, i.e., that $d=m+1$. Under such hypothesis, given any orthonormal frame $(E_{i})_{i\,=\,1}^{m}$ along $\gamma$, we can construct an associated $M$-adapted orthonormal frame along $\iota \circ \gamma$ as follows. For $k=1, \dotsc, m$, let $W_{k} = E_{k}$; then, for $k=m+1$,
\begin{equation*}
W_{m+1} = E_{1} \times \dotsb \times E_{m}\,,
\end{equation*}
so that $(W_{1}, \dotsc, W_{m+1})$ is the unique extension of $(E_{i}(t))_{i\,=\,1}^{m}$ to a positively oriented, orthonormal frame along $\iota\circ\gamma$.
Denoting by $D_{t}$ and $\overline{D}_{t}$ the covariant derivative operators determined by $\nabla$ and $\overline{\nabla}$, respectively, we may write
\begin{equation} \label{FAC-EQ1}
\overline{D}_{t} E_{i} = D_{t} E_{i} +\tau_{i}W_{m+1} \,,
\end{equation}
for some smooth function $\tau_{i} \colon I \to \mathbb{R}$. Clearly, should $M$ be orientable, $\tau_{i} = \pm h(E_{1},E_{i})$, where $h$ is the (scalar) second fundamental form of $M$ determined by a choice of unit normal vector field. Moreover, it easily follows from orthonormality that
\begin{equation*}
\overline{D}_{t} W_{m+1} = -\tau_{1}E_{1} - \dotsb -\tau_{m}E_{m}\,.
\end{equation*}
\section{Developable Surfaces}
\noindent The main purpose of this section is to generalize to higher dimensions the following well-known fact about ruled surfaces in $\mathbb{R}^{3}$ -- see for example \cite[p.~194]{docarmo1976}:
\begin{lemma} \label{DH-LM1}
Let $I$, $J$ be open intervals. Further, let $\gamma$ and $X$ be curves $I \to \mathbb{R}^{3}$ such that the map $\sigma \colon I \times J \to \mathbb{R}^{3}$ given by
\begin{equation*}
\sigma(t,u) = \gamma(t)+ u X(t)
\end{equation*}
is a smooth injective immersion. Then the Gauss curvature of $\sigma(I \times J)$ is zero precisely when $\gamma$ and $X$ satisfy $\dot{\gamma} \cdot \dot{X} \times X = 0$.
\end{lemma}
We shall begin with some definitions extending the classical notions of ruled and torse surface to arbitrary dimension, yet keeping the codimension fixed to $1$. If $m \geq 2$, let $H$ be a hypersurface in $\mathbb{R}^{m+1}$, as always smooth and embedded.
\begin{definition} \label{DH-DEF2}
We say that $H$ is a \textit{ruled} surface if
\begin{enumerate}
\item \label{cond1} $H$ is free of planar points, that is, there exists no point of $H$ where the second fundamental form vanishes;
\item there exists a \textit{ruled structure on} $H$, that is, a foliation of $H$ by open subsets of $(m-1)$-dimensional affine subspaces of $\mathbb{R}^{m+1}$, called \textit{rulings}.
\end{enumerate}
In particular, a ruled surface $H$ is said to be a \textit{torse surface} if, for every pair of points $(p,q)$ on the same ruling, we have $T_{p}H = T_{q}H$, i.e., if all tangent spaces of $H$ along a fixed ruling can be canonically identified with the same linear subspace of $\mathbb{R}^{m+1}$.
\end{definition}
\begin{remark} \label{DS-RMK3}
Although condition \ref{cond1} in Definition \ref{DH-DEF2} may seem overly restrictive, it gives any ruled surface $H$ a desirable property. Namely, it ensures the existence of a \emph{smooth} ruled parametrisation of $H$ \cite{ushakov1996}. On the other hand, we will also need to work with the broader class of \textit{generalised ruled hypersurfaces} obtained by relaxing such condition. It is well known that every generalised torse with planar points is made up of both standard torses and pieces of $m$-planes, always glued along a well-defined ruling.
\end{remark}
Remember that any $d$-dimensional Riemannian manifold locally isometric to $\mathbb{R}^{d}$ is said to be \textit{flat}. In particular, the classical term for hypersurfaces is \emph{developable}, see \cite[Section~1]{ushakov1999} for a detailed discussion on terminology. Remarkably, it turns out that
\begin{theorem}[{\cite[Theorem~1]{ushakov1999}}]
$H$ is a torse surface if and only if it is free of planar points and, when equipped with the induced metric $\iota^{\ast}\overline{g}$, $H$ becomes a flat Riemannian manifold.
\end{theorem}
\begin{corollary} \label{DS-COR}
$H$ is a generalised torse surface if and only if the induced metric on $H$ is flat.
\end{corollary}
Given a curve $\gamma$ in $\mathbb{R}^{m+1}$, the following result is key for constructing ruled surfaces containing $\gamma$. Note that in its statement we use the canonical isomorphism between $\mathbb{R}^{m+1}$ and any of its tangent spaces to identify the vector fields $X_{1}, \dotsc, X_{m-1}$ along $\gamma$ with curves in $\mathbb{R}^{m+1}$.
\begin{lemma}
Let $I$ be a closed interval. Let $\gamma \colon I \to \mathbb{R}^{m+1}$ be a smooth injective immersion. Let $(X_{1}, \dotsc, X_{m-1})$ be a smooth, linearly independent $(m-1)$-tuple of vector fields along $\gamma$ such that $\dot{\gamma}(t) \times X_{1}(t) \times \dotsb \times X_{m-1}(t) \neq 0$ for all $t \in I$. Then there exists an open box $V$ in $\mathbb{R}^{m-1}$ containing the origin such that the restriction to $I \times V$ of the map $\sigma \, \colon I \times \mathbb{R}^{m-1} \to \mathbb{R}^{m+1}$ defined by
\begin{equation*}
\sigma(t, u) = \gamma(t) + \sum\nolimits_{j} u^{j} X_{j}(t)
\end{equation*}
is a smooth embedding.
\end{lemma}
\begin{proof}
To show that $\sigma$ restricts to an embedding, we first prove the existence of an open box $V_{1}$ such that $\sigma \rvert_{I\times V_{1}}$ is a smooth immersion. Essentially, the statement will then follow by compactness of $I$.
Obviously, $\sigma$ is immersive at $(t,u)$ if and only if the length $\ell \,\colon I \times \mathbb{R}^{m-1} \to \mathbb{R}$ of the cross product of the partial derivatives of $\sigma$ is non-zero at $(t,u)$. Thus, define $W_{t}$ to be the subset of $\{t\} \times \mathbb{R}^{m-1}$ where $\sigma$ is immersive. It is an open subset in $\mathbb{R}^{m-1}$ because it is the inverse image of an open set under a continuous map, $W_{t} = \ell(t,\cdot)^{-1}(\mathbb{R} \setminus \{0\})$; it contains $0$ by assumption. Thence, there exists an $\epsilon_{t} >0$ such that the open ball $B(\epsilon_{t},0) \subset \mathbb{R}^{m-1}$ is completely contained in $W_{t}$. Letting $\epsilon_{1} = \inf_{t \, \in \, I}(\epsilon_{t})$, we can conclude that the restriction of $\sigma$ to the box $I\times (-\epsilon_{1}/2,\epsilon_{1}/2)^{m-1}$ is a smooth immersion.
Now, being $\sigma$ a smooth immersion on $I \times V_{1}$, it follows that every point of $I \times V_{1}$ has a neighbourhood on which $\sigma$ is a smooth embedding. Let then $W_{t}'$ be the subset of $W_{t}$ where $\sigma$ is an embedding. It is open in $\mathbb{R}^{m-1}$, and it contains the origin because $\gamma$ is a smooth injective immersion of a compact manifold. From here we may proceed as before.
\end{proof}
Thus, for suitably chosen $(X_{1}, \dotsc, X_{m-1})$ and $V \subset \mathbb{R}^{m-1}$, we have verified that $H_{\sigma} = \Ima\sigma\rvert_{I\times V}$ is a hypersurface in $\mathbb{R}^{m+1}$, and $\mathscr{F}_{\sigma} = \{ \sigma(t,V) \}_{t \, \in \, I}$ a ruled structure on it. Under such hypothesis, let us assume $H_{\sigma}$ is orientable (this we can do, possibly limiting the analysis to an open subset). Then, we may pick out a smooth unit normal vector field $N$ along $H_{\sigma}$ by means of the $m$-fold cross product on $\mathbb{R}^{m+1}$, as follows. Letting
\begin{equation} \label{DH-EQ1}
Z = \frac{\partial \sigma}{\partial t} \times \frac{\partial \sigma}{\partial u^{1}} \times \dotsb \times \frac{\partial \sigma}{\partial u^{m-1}} \, ,
\end{equation}
define $\widehat{N} = Z \lvert Z \rvert^{-1}$, and so $N = \widehat{N} \circ \sigma^{-1}$. In this situation, assuming there are no planar points, $H_{\sigma}$ being a torse surface is equivalent to $N$ being constant along each of the rulings. Thus, indicating with $\overline{\nabla}$ the Euclidean connection on $\mathbb{R}^{m+1}$, $(H_{\sigma},\iota^{\ast}\overline{g})$ is flat if and only if, for all vector fields $X$ tangent to $\mathscr{F}_{\sigma}$ on $H_{\sigma}$:
\begin{equation} \label{DH-EQ2}
\overline{\nabla}_{X}N = 0\, .
\end{equation}
In fact, by linearity -- and writing $\partial_{j}$ as a shorthand for $\frac{\partial}{\partial u^{j}}$ -- it suffices that \eqref{DH-EQ2} holds for the vector fields $\sigma_{\ast}(\partial_{1}), \dotsc, \sigma_{\ast}(\partial_{m-1})$ spanning the distribution corresponding to $\mathscr{F}_{\sigma}$. We may thereby express the developability condition for $(H_{\sigma},\iota^{\ast}\overline{g})$ simply as
\begin{equation} \label{DH-EQ3}
\partial_{1}\widehat{N} = \dotsb = \partial_{m-1}\widehat{N} = 0\,,
\end{equation}
where we understand $\partial_{j}$ as acting on the coordinate functions $\widehat{N}^{1},\dotsc, \widehat{N}^{m+1}$ of $\widehat{N}$ in the standard coordinate frame of $T \mathbb{R}^{m+1}$.
The next lemma finally translates \eqref{DH-EQ3} into $m-1$ conditions involving the vector fields $X_{1}, \dotsc, X_{m-1}$ along $\gamma$, and represents the sought generalization of Lemma \ref{DH-LM1}. It says that $\iota^{\ast}\overline{g}$ is a flat Riemannian metric precisely when $\overline{D}_{t}X_{j} = D_{t}X_{j}$ for every $j$, or equivalently when each of the normal projections $(\overline{D}_{t}X_{1})^{\perp}, \dotsc, (\overline{D}_{t}X_{m-1})^{\perp}$ vanishes identically.
\begin{lemma}
Assume $\sigma\rvert_{I\times V}$ is a smooth embedding. The hypersurface $H_{\sigma}$ is a generalised torse surface if and only if the following equations hold:
\begin{align} \label{DH-EQ4}
\begin{split}
\dot{\gamma} \cdot \partial_{1} Z \equiv \dot{\gamma} \cdot \overline{D}_{t}X_{1} \times X_{1} \times \dotsb \times X_{m-1} &= 0 \\
&\mathrel{\makebox[\widthof{=}]{\vdots}} \\
\dot{\gamma} \cdot \partial_{m-1} Z \equiv \dot{\gamma} \cdot \overline{D}_{t}X_{m-1} \times X_{1} \times \dotsb \times X_{m-1} &= 0
\end{split}
\end{align}
\end{lemma}
\begin{proof}
Computing the partial derivatives of $\sigma$ and substituting them into the expression \eqref{DH-EQ1} for $Z$, we get:
\begin{equation*}
Z(t,u)= \{ \dot{\gamma}(t) + u^{i} \overline{D}_{t}X_{i}(t) \} \times X_{1}(t) \times \dotsb \times X_{m-1}(t)\,,
\end{equation*}
from which the identity $\partial_{j}Z \equiv \overline{D}_{t}X_{j} \times X_{1} \times \dotsb \times X_{m-1}$ clearly follows. Thus, we need to prove that $\partial_{1}\widehat{N} = \dotsb = \partial_{m-1}\widehat{N} = 0$ if and only if $\partial_{1}Z \cdot \dot{\gamma} = \dotsb = \partial_{m-1}Z \cdot \dot{\gamma} = 0$. In fact, for $\partial_{j}Z$ is orthogonal to $X_{1}, \dotsc, X_{m-1}$, it is enough to check that $\partial_{1}\widehat{N} = \dotsb = \partial_{m-1}\widehat{N} = 0$ if and only if $(\partial_{1}Z )^{\top}= \dotsb = (\partial_{m-1}Z)^{\top}= 0$. First, assume $\partial_{j}\widehat{N} = 0$. Since $\widehat{N} = Z \lvert Z \rvert^{-1}$, it follows by linearity of the tangential projection that
\begin{equation*}
\lvert Z \rvert (\partial_{j}Z)^{\top} - Z^{\top} \partial_{j} \lvert Z \rvert = 0\,,
\end{equation*}
which is true exactly when $(\partial_{j}Z)^{\top} = 0$, as desired. To verify the converse, note that $(\partial_{j}N)^{\perp} = 0$ because $N$ has unit length. Thus, again by linearity of $\top$,
\begin{equation*}
\partial_{j}\widehat{N}= \frac {(\partial_{j}Z)^{\top}\lvert Z\rvert - Z^{\top}\partial_{j}\lvert Z \rvert}{\lvert Z\rvert^{2}}\,.
\end{equation*}
Since $Z^{\top}=0$, the claim follows.
\end{proof}
\section{Proof of the Main Result}
\noindent Here we prove our main result, stated in Theorem \ref{mainresult} in the Introduction. The proof is constructive and is based on the fact that a Euclidean hypersurface without planar points has a flat induced metric precisely when it is a torse surface (Theorem 1.3). Let $M$ be a hypersurface in $\mathbb{R}^{m+1}$ and $\gamma$ a smooth curve in $M$, as defined at the beginning of Section $3$. Denoting by $\mathfrak{X}(\gamma)$ the set of smooth, non-vanishing vector fields along $\gamma$, define an equivalence relation on the $n$-th Cartesian power $\mathfrak{X}(\gamma)^{n}$ of $\mathfrak{X}(\gamma)$ by the following rule:
\begin{equation*}
\{(X_{1},\dotsc,X_{n}) \sim (Y_{1},\dotsc,Y_{n})\} \Leftrightarrow \{\Span (X_{1},\dotsc,X_{n}) =\Span (Y_{1},\dotsc,Y_{n})\}.
\end{equation*}
Let us indicate an element of the quotient $\mathfrak{X}(\gamma)^{n}/{\sim}$, that is, an element of $\mathfrak{X}(\gamma)^{n}$ up to equivalence, by $[X_{1},\dotsc,X_{n}]$. We wish to find $[X_{1}, \dotsc, X_{m-1}]$ such that, for every $t \in I$ and integer $j$ with $1 \leq j \leq m-1$, both the conditions
\begin{align}
&\dot{\gamma} \cdot \overline{D}_{t}X_{j} \times X_{1} \times \dotsb \times X_{m-1} = 0 \label{PMR-EQ1} \\
&\dot{\gamma}(t) \times X_{1}(t) \times \dotsb \times X_{m-1}(t) \neq 0 \label{PMR-EQ2}
\end{align}
are satisfied. Beware that, throughout this section, we will extensively use Einstein summation convention: every time the same index appears twice in any monomial expression, once as an upper index and once as a lower index, summation over all possible values of that index is understood.
Once and for all, let us choose a $\gamma$\textit{-adapted} orthonormal frame $(E_{1}, \dotsc, E_{m})$ along $\gamma$: this is just an orthonormal frame along $\gamma$ whose first element coincides with the tangent vector $\dot{\gamma}$. The first step is to rewrite \eqref{PMR-EQ1} as an equation involving the $m(m-1)$ coordinate functions $X_{j}^{i}$ of $X_{1},\dotsc,X_{m-1}$ with respect to $(E_{1}, \dotsc, E_{m})$. Differentiating covariantly $X_{j} = X_{j}^{i}E_{i}$ and substituting, we obtain
\begin{equation} \label{PMR-EQ3}
E_{1} \cdot (\overline{D}_{t}X_{j}^{i}E_{i} + X_{j}^{i} \overline{D}_{t}E_{i}) \times X_{1}^{i}E_{i} \times \dotsb \times X_{m-1}^{i}E_{i} = 0 \,,
\end{equation}
whereas, from \eqref{FAC-EQ1},
\begin{align*}
\sum_{i\, =\, 1}^{m} \overline{D}_{t}E_{i}&=\sum_{i \, =\, 1}^{m} D_{t}E_{i}+ E_{m+1}\sum_{i \,=\, 1}^{m}\tau_{i} \\
&= \sum_{i \, =\, 1}^{m} \left\{ (D_{t}E_{i} \cdot E_{1}) E_{1} + \dotsb + (D_{t}E_{i} \cdot E_{m}) E_{m}\right\} + E_{m+1}\sum_{i \,=\, 1}^{m}\tau_{i} \,.
\end{align*}
Now, given any ordered $m$-tuple $(i_{1},\dotsc, i_{m})$ of integers with $1 \leq i_{1} \leq m+1$ and $1 \leq i_{k} \leq m$ for $k = 2,\dotsc,m$, a necessary condition for the $m$-fold cross product $E_{i_{1}}\times \dotsb \times E_{i_{m}}$ to give either $E_{1}$ or $-E_{1}$ is that $i_{1} = m+1$ and $i_{k} \neq 1$. It follows that \eqref{PMR-EQ3} is equivalent to
\begin{equation} \label{PMR-EQ4}
E_{1} \cdot X_{j}^{i}\tau_{i}E_{m+1} \times (X_{1}^{2}E_{2} + \dotsb + X_{1}^{m}E_{m}) \times \dotsb \times (X_{m-1}^{2}E_{2} + \dotsb + X_{m-1}^{m}E_{m}) = 0\,.
\end{equation}
In fact, $E_{i_{1}}\times \dotsb \times E_{i_{m}} = \pm E_{1}$ if and only if $i_{1} = m+1$ and the $(m-1)$-tuple $(i_{2},\dotsc, i_{m})$ is a permutation of $(2,\dotsc,m)$. In particular, if it is an \emph{even} permutation, then the basis $(E_{m+1},E_{i_{2}},\dotsc,E_{i_{m}},E_{1})$ is \emph{negatively} oriented, for transposing $E_{m+1}$ and $E_{1}$ must give a positive basis, and so $E_{i_{1}}\times \dotsb \times E_{i_{m}} = -E_{1}$. Thence, denoting by $S_{m}^{2}$ the group of permutations $\sigma$ of $(2,\dotsc,m)$, we may write \eqref{PMR-EQ4} simply as
\begin{equation*}
-X_{j}^{i}\tau_{i} \sum_{\sigma \,\in\, S_{m}^{2}} \Sign(\sigma) X_{1}^{\sigma(2)} \dotsm X_{m-1}^{\sigma(m)} = 0\,.
\end{equation*}
On the other hand, a similar computation would reveal that condition \eqref{PMR-EQ2} is satisfied for every $t$ if and only if the summation term above (the term independent of $j$) never vanishes. We may thereby conclude that, under the assumption of \eqref{PMR-EQ2} being true, condition \eqref{PMR-EQ1} is equivalent to $X_{j}^{i}\tau_{i} = 0$.
Next, consider the set $\mathscr{Z} \subset \mathfrak{X}(\gamma)$ of smooth vector fields $Z$ along $\gamma$ such that $Z^{1}(t) = Z \cdot E_{1}(t) \neq0$ for every $t$. We establish a bijection between its quotient $\mathscr{Z}/{\sim}$ by $\sim$ and the subset of $\mathfrak{X}(\gamma)^{m-1}/{\sim}$ where \eqref{PMR-EQ2} holds. For every $j$, let
\begin{equation} \label{PMR-EQ5}
X_{j}(Z) = Z \times E_{2} \times \dotsb \times \widetilde{E}_{m-j+1} \times \dotsb \times E_{m}\,,
\end{equation}
where the tilde indicates that $E_{m-j+1}$ is omitted, so that the cross product is $(m-1)$-fold. For example, when $j=1$, we omit the last vector field $E_{m}$; when $j=2$ the second to last, and so on, until dropping $E_{2}$ for $j=m-1$. Linear independence of $E_{1},X_{1}(Z), \dotsc, X_{m-1}(Z)$ is easily seen, as by definition $Z$ is never in the span of $E_{2},\dotsc,E_{m}$. Since the normal projection $Z \mapsto Z^{\perp}$ induces a bijection between $\mathscr{Z}/{\sim}$ and the set of smooth $(m-1)$-distributions along $\gamma$ nowhere parallel to $E_{1}$, it follows that the map $[Z] \mapsto [X_{1}(Z),\dotsc,X_{m-1}(Z)]$ between classes of equivalence is indeed a valid parametrisation of the solution set of \eqref{PMR-EQ2}.
We then compute the coordinates of the cross product in \eqref{PMR-EQ5} with respect to the frame $(E_{1},\dotsc,E_{m})$. Substituting $Z = Z^{i}E_{i}$, all but the terms $Z^{1}E_{1}$ and $Z^{m-j+1}E_{m-j+1}$ will not give any contribution. In particular, $E_{1} \times \dotsb \times \widetilde{E}_{m-j+1} \times \dotsb \times E_{m} = \pm E_{m-j+1}$ depending on whether $(E_{1}, \dotsc, \widetilde{E}_{m-j+1}, \dotsc, E_{m}, E_{m-j+1})$ is positively or negatively oriented. Since the corresponding permutation of $(1,\dotsc, m)$ has sign $(-1)^{j-1}$, we conclude that $X_{j}^{m-j+1}(Z) = (-1)^{j-1}Z^{1}$. An analogous argument would show that $X_{j}^{1}(Z) = (-1)^{j}Z^{m-j+1}$.
Summing up, solving the original problem on $\mathfrak{X}(\gamma)^{m-1}/{\sim}$ essentially amounts to finding $[Z] \in \mathscr{Z}/{\sim}$ such that $X_{j}^{i}(Z)\tau_{i} = 0$ for every $j$. Moreover, by the previous computation,
\begin{equation*}
X_{j}^{i}(Z)\tau_{i} = (-1)^{j}Z^{m-j+1}\tau_{1}+(-1)^{j-1}Z^{1}\tau_{m-j+1}\,.
\end{equation*}
Thus, denoting again by $\sim$ the equivalence relation on $C^{\infty}(I)^{m} = C^{\infty}(I;\mathbb{R}^{m})$ naturally induced from the one on $\mathfrak{X}(\gamma)$, we need to look for $(Z^{1}, \dotsc,Z^{m})$, up to equivalence, satisfying the following system of $m-1$ linear equations on $C^{\infty}(I;\mathbb{R}_{\neq0}) \times C^{\infty}(I)^{m-1}$:
\begin{align}
\begin{split} \label{PMR-EQ6}
Z^{m}\tau_{1}-Z^{1}\tau_{m} &=0\\
Z^{m-1}\tau_{1}-Z^{1}\tau_{m-1} &=0\\
&\mathrel{\makebox[\widthof{=}]{\vdots}} \\
Z^{3}\tau_{1}-Z^{1}\tau_{3} &=0\\
Z^{2}\tau_{1}-Z^{1}\tau_{2} &=0\,.
\end{split}
\end{align}
Assume $\tau_{1}(t) \neq0$ for all $t$. Then, for any given $Z^{1}$ (remember $Z^{1}$ is non-vanishing by definition), the system has solution
\begin{equation*}
\frac{Z^{1}}{\tau_{1}}\left(\tau_{1},\dotsc,\tau_{m}\right).
\end{equation*}
However, it is easy to see that all solutions are in one and the same equivalence class. Indeed, if $f$ and $g$ are two distinct values of $Z^{1}$, then
\begin{equation*}
\frac{\tau_{i}}{\tau_{1}}f = \frac{f}{g}\frac{\tau_{i}}{\tau_{1}}g\,.
\end{equation*}
In particular, letting $Z^{1}=\tau_{1}$, we obtain $Z^{i} = \tau_{i}$ for every $i=1,\dotsc,m$, and the solution of the original problem on $\mathfrak{X}(\gamma)^{m-1}/{\sim}$ is given by
\begin{align*}
X_{1} &= -\tau_{m}E_{1} + \tau_{1}E_{m}\\
X_{2} &= \tau_{m-1}E_{1} - \tau_{1}E_{m-1}\\
&\mathrel{\makebox[\widthof{=}]{\vdots}} \\
X_{m-2} &= (-1)^{m-2} \tau_{3}E_{1} + (-1)^{m-3}\tau_{1}E_{3}\\
X_{m-1} &= (-1)^{m-1} \tau_{2}E_{1} + (-1)^{m-2}\tau_{1}E_{2}\,.
\end{align*}
As for uniqueness, in view of Remark \ref{DS-RMK3}, it is sufficient to show that the condition $\tau_{1}(t) \neq 0$ for all $t$ implies that any flat approximation $H$ of $M^{m}$ along $\gamma$ is free of planar points, i.e., is a torse surface. To see this, let $N_{H}$ and $N_{M}$ be smooth unit normal vector fields along $H$ and $M^{m}$, respectively, defined in a neighbourhood of $\gamma(t)$. Then, $\overline{D}_{t}N _{H} =\overline{D}_{t}N _{M}$. Since $H$ is a generalised torse surface by Corollary \ref{DS-COR}, the claim easily follows.
\section{Construction of an Adapted Frame}
\noindent As seen in the last section, the construction of the flat approximation of $M$ along $\gamma$ requires choosing some $\gamma$-adapted orthonormal frame $(E_{i})_{i \,= \,1}^{m}$ along $\gamma$. We emphasize that such a choice is completely arbitrary. If the curve in question satisfies some (rather strong) conditions on its derivatives, then a natural generalization of the classical Frenet--Serret frame is available. The reader may find details on this construction in \cite{spivak1999} or \cite{kuhnel2015}. Here we briefly review an alternative approach, one that does not require any initial assumption on the curve. Such approach is due to Bishop \cite{bishop1975}.
First of all, since the problem is local, we are free to assume that $\gamma$ is a smooth embedding. Thus, for any point $p \in S = \gamma(I)$, there exist slice coordinates $(x_{1},\dotsc,x_{m})$ in a neighbourhood $U$ of $p$. It follows that $(\partial_{1}\lvert_{p}, \dotsc, \partial_{m}\lvert_{p})$ is a $\gamma$-adapted basis of $T_{p}M$, i.e., it satisfies $T_{p}S = \Span \partial_{1}\lvert_{p}$ and $N_{p}S = \Span(\partial_{2}\lvert_{p}, \dotsc, \partial_{m}\lvert_{p})$. By applying the Gram--Schmidt process to these vectors, one obtains an orthonormal basis $(n_{j})$ of $N_{p}S$. Although this basis is by no means canonical, the normal connection $\nabla^{\perp}$ of $S$ provides an obvious means for extending it to a frame for the normal bundle of $S$: for each $j$, let $\varUpsilon_{j}$ be the unique normal parallel vector field along $\gamma$ such that $\varUpsilon_{j}\lvert_{p} = n_{j}$ -- see \cite[p.~119]{oneill1983}. Because normal parallel translation is an isometry, the frame $(\dot{\gamma}, \varUpsilon_{1}, \dotsc, \varUpsilon_{m-1})$ is an orthonormal adapted frame along $\gamma$, as desired.
\end{document} |
\begin{document}
\maketitle
\begin{abstract}
Let $(\Omega, g)$ be a real analytic Riemannian manifold with real analytic boundary $\partial \Omega$.
Let $\psi_{\lambda}$ be an eigenfunction of the Dirichlet-to-Neumann operator $\Lambda$ of $(\Omega, g, \partial \Omega)$ of eigenvalue $\lambda$.
Let $\mathcal{N}_{\lambda}$ be its nodal set. Then, there exists a constant $C > 0$ depending only on
$(\Omega, g, \partial \Omega)$ so that
$$\mathcal{H}^{n-2} (\mathcal{N}_{\lambda}) \leq C \lambda.$$
This proves a conjecture of F. H. Lin and K. Bellova.
\end{abstract}
This article is concerned with the Hausdorff $\mathcal{H}^{n-2}$ (surface) measure of the nodal sets
$$\mathcal{N}_{\lambda} = \{x \in \partial \Omega: \psi_{\lambda} (x) = 0\} \subset \partial \Omega$$
of Steklov eigenfunctions of eigenvalue $\lambda$ of a domain $\Omega \subset {\mathbb R}^n$ in the real analytic case.
The Steklov eigenvalue problem is to find the eigenfunctions
of the Steklov problem on a domain $\Omega$,
\begin{equation} \label{SP} \left\{ \begin{array}{l} \Delta u(x) = 0, \;\; x \in \Omega, \\ \\
\frac{\partial u}{\partial \nu}(x) = - \lambda u(x), \;\; x \in \partial \Omega. \end{array} \right. \end{equation}
It is often assumed that $\Omega \subset {\mathbb R}^n$ is a bounded $C^2$ domain with Euclidean
metric, but the problem may be posted on a bounded domain in any Riemannian manifold.
The eigenvalue problem may be reduced to the boundary, and $\psi_{\lambdambda}$ is an eigenfunction
\betagin{equation} \Lambdambda \psi_{\lambdambda} = \lambdambda \psi_{\lambdambda} \end{equation} of the Dirichlet-to-Neumann operator $$\Lambdambda f = \frac{\partial u}{\partial \nu}(x) |_{\partial \Omega}. \;\;$$
Here, $u$ is the harmonic extension of $f$, $$\left\{ \betagin{array}{l} \Deltalta u(x) = 0, \;\; x \in \Omega, \\ \\
u(x)=f(x), \;\; x \in \partial \Omega. \end{array} \right..$$
$\Lambdambda$ is self-adjoint on $L^2(\partial \Omega, d S)$ and there exists an orthonormal
basis $\{\psi_j\}$ of eigenfunctions $$\Lambdambda \psi_j = \lambdambda_j \psi_j, \;\;\; \psi_j \in C^{\infty}(\partial \Omega), \;\;
\int_{\partial \Omega} \psi_j \psi_k d S = \partialeltalta_{jk},$$
where $d S$ is the surface measure. We order the eigenvalues in ascending order $0=\lambda_0<\lambda_1\le \lambda_2\le\cdots$,
counted with multiplicty.
In a recent article, Bellova--Lin \cite{BL} proved that
$$\mathcal{H}^{n-2} (\mathcal{N}_{\lambda}) \leq C \lambda^{6}$$
when $\Omega\subset {\mathbb R}^n$ is a bounded \emph{real analytic} Euclidean domain. They suggest that the optimal result is
$\mathcal{H}^{n-2} (\mathcal{N}_{\lambda}) \leq C \lambda.$ The purpose of this article is to prove this upper bound for bounded real analytic domains in general real analytic Riemannian manifolds.
\begin{theo} \label{NODALBOUND} Let $(\Omega, g)$ be a real analytic Riemannian manifold with real analytic boundary $\partial \Omega$.
Let $\psi_{\lambda}$ be an eigenfunction of the Dirichlet-to-Neumann operator $\Lambda$ of $(\Omega, g, \partial \Omega)$ of eigenvalue $\lambda$, and
$\mathcal{N}_{\lambda}$ be its nodal set as above. Then, there exists a constant $C > 0$ depending only on
$(\Omega, g, \partial \Omega)$ so that
$$\mathcal{H}^{n-2} (\mathcal{N}_{\lambda}) \leq C \lambda.$$
\end{theo}
It is not hard to find examples of $(\Omega, g, \partial \Omega)$ and $\psi_{\lambda}$ where the upper
bound is achieved, for instance on a
hemisphere of a round sphere. But it is not clear that it is attained by a sequence of Steklov eigenfunctions on
every $(\Omega, g, \partial \Omega)$, or more stringently that it is attained by every sequence of eigenfunctions. In the setting of real analytic Riemannian manifolds $(M,g)$, it is proved in \cite{DF}
that there exists $C > 0$ depending only on the metric $g$ so that $\mathcal{H}^{n-1}(\mathcal{N}_{\lambda}) \geq C \lambda$.
Since $\dim \partial \Omega = n-1$, the analogous lower bound for the real analytic Steklov problem would
be $\mathcal{H}^{n-2} (\mathcal{N}_{\lambda}) \geq C \lambda$, where $C$ depends only on $(\Omega, g, \partial \Omega)$.
However the key existence result for $\Delta$-eigenfunctions of eigenvalue $\lambda^2$, that every ball of radius $\frac{C}{\lambda}$ contains
a zero of $\varphi_{\lambda}$, does not seem to be known for the Steklov problem \eqref{SP}. We believe it is possible
to prove good lower bounds by the methods of this article, and plan to investigate lower bounds in a subsequent article.
\subsection{Outline of the proof of Theorem \ref{NODALBOUND}}
The key to proving the sharp upper bound in the generality of Theorem \ref{NODALBOUND} is to use the wave group
\begin{equation} \label{WG} U(t) = e^{it \Lambda} : L^2(\partial \Omega) \to L^2(\partial \Omega) \end{equation}
generated by $\Lambda$. $\Lambda$ is a positive elliptic self-adjoint pseudo-differential operator of order
one, and its wave group has been constructed as a Fourier integral operator in \cite{Hor,DG}. As in \cite{Z}
we study nodal sets by analytically continuing the Schwartz kernel of the wave group to imaginary time $t + i \tau$ with
$\tau > 0$, and to the complexification $(\partial \Omega)_{{\mathbb C}}$ of $\partial \Omega$. The analytic continuation in time and in the
first space variable defines the Poisson wave kernel
\begin{equation} \label{PWG} U_{{\mathbb C}}(t + i \tau, \zeta, y) = e^{i (t + i \tau) \Lambda} (\zeta, y) : L^2(\partial \Omega) \to L^2((\partial \Omega)_{{\mathbb C}}). \end{equation}
As discussed below,
$\Lambda$ is an analytic pseudo-differential operator on $\partial \Omega$ when $(\Omega, \partial \Omega, g)$ is real analytic, and \eqref{PWG} is a Fourier integral operator with complex phase. (See \cite{Bou2,Sj} for background
on analytic pseudo-differential operators.)
In the real analytic case, the Steklov eigenfunctions are real analytic on $\partial \Omega$ and have complex analytic extensions to
$(\partial \Omega)_{{\mathbb C}}$. We then study their complex nodal sets
\begin{equation} \label{CXN} \mathcal{N}_{\lambda}^{{\mathbb C}} = \{ \zeta \in (\partial \Omega)_{{\mathbb C}}: \psi^{{\mathbb C}}_{\lambda}(\zeta) = 0\}.
\end{equation}
To prove Theorem \ref{NODALBOUND}, we use Crofton's formula and a multi-dimensional Jensen's formula to give an upper bound for $\mathcal{H}^{n-2}(\mathcal{N}_{\lambda})$ in terms of the integral
geometry of $\mathcal{N}_{\lambda}^{{\mathbb C}}$. The integral geometric approach to the upper bound is inspired by the classic paper of
Donnelly--Fefferman \cite{DF} (see also \cite{Lin}). But, instead of doubling estimates or frequency function
estimates, we use the Poisson wave kernel
to obtain growth estimates on eigenfunctions, and then use results on pluri-subharmonic functions rather than functions
of one complex variable to relate growth of zeros to growth of eigenfunctions. This approach was used in \cite{Z}
to prove equidistribution theorems for complex nodal sets when the geodesic flow is ergodic. The Poisson wave kernel
approach works for Steklov eigenfunctions as well as Laplace eigenfunctions,
and in fact for eigenfunctions of any positive elliptic analytic pseudo-differential operator.
We first use the Poisson wave group \eqref{PWG} to analytically continue eigenfunctions in the
form
\begin{equation} \label{UAC} U_{{\mathbb C}}(i \tau) \psi_j (\zeta) = e^{- \tau \lambda_j} \psi_j^{{\mathbb C}} (\zeta). \end{equation}
We then use \eqref{UAC} to determine the growth properties of $\psi_j^{{\mathbb C}}(\zeta)$ in Grauert tubes of
the complexification of $\partial \Omega$.
The relevant notion of Grauert tube is the standard Grauert tube for $\partial \Omega$ with the metric $g_{\partial \Omega}$
induced by the ambient metric $g$ on $M$. This is because the principal
symbol of $\Lambda$ is the same as the principal symbol of $\sqrt{\Delta_{\partial \Omega}}$.
\begin{rem} A remark on notation: In \cite{Z} we use $M$ to denote a Riemannian manifold, $M_{\varepsilon}$ its Grauert tube
of radius $\varepsilon$ and $\partial M_{\varepsilon}$ to denote the boundary of the Grauert tube of radius $\varepsilon$. Since
$\partial \Omega$ is the Riemannian manifold of interest here, we denote it by $M$, \begin{equation} \label{MOM} (M, g) := (\partial \Omega, g_{\partial \Omega}). \end{equation} Thus the Grauert tube of radius
$\tau$ of $(\partial \Omega)_{{\mathbb C}}$ is denoted $M_{\tau}$ and its boundary
by $\partial M_{\tau}$, not to be confused with $\partial \Omega$. We also denote $m = \dim M = n - 1$.
\end{rem}
Because $U_{{\mathbb C}}(i \tau)$ is a Fourier integral operator with complex phase, it can only magnify the $L^2$ norm
of $\psi_j$ by a power of $\lambda_j$. Hence the exponential $e^{\tau \lambda_j}$ dominates the $L^2$ norm
on the boundary of the Grauert tube of radius $\tau$. We prove:
\begin{prop} \label{PW} Suppose $(\Omega, g, \partial \Omega)$ is real analytic. Let $\psi_{\lambda}$ be an
eigenfunction of $\Lambda$ on $M = \partial \Omega$ of eigenvalue $\lambda$. Then
$$ \sup_{\zeta \in M_{\tau}} |\psi^{{\mathbb C}}_{\lambda}(\zeta)| \leq C
\lambda^{\frac{m+1}{2}} e^{\tau \lambda}.
$$
\end{prop}
The proof follows from a standard cosine Tauberian result and the fact that the complexified
Poisson kernel is a complex Fourier integral operator of finite
order. This simple growth estimate replaces the doubling estimates
of \cite{DF} and \cite{BL}.
It is closely related to growth estimates of $\Delta$-eigenfunctions in \cite{Z,Z2,Z3}.
For the precise statement that $U_{{\mathbb C}}(t + i \tau)$ is indeed a Fourier
integral operator with complex phase, we refer to Theorem \ref{BOUFIO}.
It is in some sense a known result for elliptic analytic pseudo-differential operators, and we therefore postpone the detailed proof of Theorem \ref{BOUFIO} for $\Lambda$ to a later article.
We thank Boris Hanin, Peng Zhou, Iosif Polterovich, Chris Sogge and particularly Y. Canzani for comments/corrections on earlier versions. We also thank G. Lebeau for confirming that
Theorem \ref{BOUFIO} should be true, with not too different a proof than in the Riemannian wave case.
\section{Geometry and analysis of Grauert tubes}
We briefly review the geometry and analysis on Grauert tubes of real analytic Riemannian manifolds. We refer to \cite{Z,Z2,GS1,GS2} for more detailed discussions.
\subsection{\label{AC} Analytic continuation to a Grauert tube}
A real analytic manifold $M$ always possesses a complexification
$M_{{\mathbb C}}$, i.e. a complex manifold of which $M$ is a totally real
submanifold. A real analytic Riemannian metric $g$ on $M$ determines a
canonical plurisubharmonic function $\rho_g$ on $M_{{\mathbb C}}$; since the metric is fixed throughout, we denote
it simply by $\rho$. Its
square-root $\sqrt{\rho}$ is known as the Grauert tube function; it
equals
$\sqrt{- r^2_{{\mathbb C}}(z, \bar{z})}/2$ where $r_{{\mathbb C}}$ is the holomorphic extension
of the distance function. The $(1,1)$ form
$\omega = \omega_{\rho} := i \partial \bar\partial \rho$ defines a K\"ahler metric on $M_{{\mathbb C}}$. The
Grauert tubes $M_{\varepsilon} := \{ z \in M_{{\mathbb C}}: \sqrt{\rho}(z) < \varepsilon\}$
are strictly pseudo-convex domains in $M_{{\mathbb C}}$, whose boundaries are strictly
pseudo-convex CR manifolds. We also denote the contact form of $\partial M_{\tau}$ by
\begin{equation} \label{alpha} \alpha = \frac{1}{i} \partial \rho|_{\partial M_{\tau}} = d^c \sqrt{\rho}.\end{equation}
The complexified exponential map
\begin{equation} \label{E} (x, \xi) \in B_{\varepsilon}^*M \to E(x, \xi) := \exp_x^{{\mathbb C}} \sqrt{-1} \xi \in M_{\varepsilon} \end{equation}
defines a
symplectic diffeomorphism, where $B^*_{\varepsilon} M
\subset T^*M$ is the co-ball bundle of radius $\varepsilon$, equipped with the standard symplectic structure,
and where $M_{\varepsilon}$ is equipped with $\omega_{\rho}$. The Grauert tube function $\sqrt{\rho}$ pulls back under $E$ to the metric norm function $|\xi|_g$.
We emphasize the setting $M_{{\mathbb C}}$, but it is equivalent to using $E$ to endow $B^*_{\varepsilon} M$ with an adapted
complex structure. We refer to \cite{GS1, GS2, LS, GLS} for further discussion.
\subsection{Geodesic and Hamiltonian flows}
The microlocal analysis of the kernels \eqref{PWG}
involves the complexification of the geodesic flow.
We denote by $g^t$
the (real) homogeneous geodesic flow of $(M, g)$. It is the real analytic
Hamiltonian flow on $T^*M \backslash 0_M$ generated by the Hamiltonian $|\xi|_g$
with respect to the standard symplectic form $\omega$. We also consider
the Hamiltonian flow of
$|\xi|_g^2$, which is real analytic on all of $T^*M$ and denote
its Hamiltonian flow by $G^t$. In general, we denote by $\Xi_H$
the Hamiltonian vector field of a Hamiltonian $H$ and its flow by
$\exp t \Xi_H$. Thus, we consider the Hamiltonian flows
\begin{equation} \label{gtdef}
g^t = \exp t \Xi_{|\xi|_g}, \;\;\; \mbox{resp.}\;\;\;
G^t = \exp t \Xi_{|\xi|_g^2}. \end{equation}
The
exponential map is the map $\exp_x: T^*_x M \to M$ defined by $\exp_x
\xi = \pi G^t(x, \xi)$ where $\pi$ is the standard projection.
Since $E^* \sqrt{\rho} = |\xi|$, $E^*$ conjugates the geodesic flow
on $B^*M$ to the Hamiltonian flow $\exp t \Xi_{\sqrt{\rho}}$ of
$\sqrt{\rho}$ with respect to $\omega$, i.e.
\begin{equation} \label{gt} E(g^t(x, \xi)) = \exp t \Xi_{\sqrt{\rho}} (\exp_x i \xi). \end{equation}
\subsection{Szeg\"o kernel and analytic continuation of the Poisson kernel}
We denote by $\mathcal{O}^{s +
\frac{m-1}{4}}(\partial M_{\tau})$ the Sobolev spaces of CR
holomorphic functions on the boundaries of the strictly
pseudo-convex domains $M_{\tau}$, i.e.
\begin{equation} \label{SOBSP} {\mathcal O}^{s +
\frac{m-1}{4}}(\partial M_{\tau}) = W^{s + \frac{m-1}{4}}(\partial
M_{\tau}) \cap \mathcal{O} (\partial M_{\tau}), \end{equation} where
$W^s$ is the $s$th Sobolev space and where $\mathcal{O} (\partial
M_{\tau})$ is the space of boundary values of holomorphic
functions. The inner product on $\mathcal{O}^0 (\partial M_{\tau})$ is
with respect to the Liouville measure or contact volume form
\begin{equation} \label{CONTACTVOL} d\mu_{\tau} := \alpha \wedge \omega^{m-1}, \end{equation}
on $\partial M_{\tau}$.
The study of norms of complexified eigenfunctions is
related to the study of the Szeg\"o\ kernels $\Pi_{\tau}$ of
$M_{\tau}$, namely the orthogonal projections
\begin{equation} \Pi_{\tau}: L^2(\partial M_{\tau}, d\mu_{\tau}) \to \mathcal{O}^0(\partial M_{\tau},
d\mu_{\tau}) \end{equation} onto the Hardy space of boundary
values of holomorphic functions in $M_{\tau}$ which belong to
$L^2(\partial M_{\tau}, d\mu_{\tau})$. The Szeg\"o\ projector
$\Pi_{\tau}$ is a complex Fourier integral operator with a
positive complex canonical relation. The
real points of its canonical relation form the graph
$\Delta_{\Sigma}$ of the identity map on the symplectic cone
$\Sigma_{\tau}
\subset T^*
\partial M_{\tau}$ defined by the spray \begin{equation} \label{SIGMATAU} \Sigma_{\tau} =
\{(\zeta, r d^c \sqrt{\rho}(\zeta)): r \in {\mathbb R}_+\} \subset T^*
(\partial M_{\tau})
\end{equation} of the contact form $d^c \sqrt{\rho}$. There exists a symplectic equivalence (cf.
\cite{GS2})
\begin{equation} \iota_{\tau} : T^*M - 0 \to
\Sigma_{\tau},\;\; \iota_{\tau} (x, \xi) = (E(x, \tau
\frac{\xi}{|\xi|}), |\xi| d^c \sqrt{\rho}_{E(x, \tau
\frac{\xi}{|\xi|})} ).
\end{equation}
\subsection{Analytic continuation of the Poisson wave kernel}
The wave group generated by $\Lambda$ on $M = \partial \Omega$ is the unitary group $U(t) = e^{i
t \Lambda}$. Its kernel $U(t, x, y)$ solves the `half-wave equation',
\begin{equation} \label{HALFWE} \left(\frac{1}{i} \frac{\partial }{\partial t} - \Lambda_x \right) U(t, x, y) = 0, \;\; U(0, x, y) =
\delta_y(x). \end{equation} Here, $\Lambda_x$ means that $\Lambda$ is applied in the $x$ variable. In the real domain it is well known \cite{Hor,DG} that
$U(t, x, y)$ is the Schwartz kernel of a Fourier integral
operator,
$$U(t, x, y) \in I^{-1/4}({\mathbb R} \times M \times M, \Gamma)$$
with underlying canonical relation $$\Gamma = \{(t, \tau, x, \xi,
y, \eta): \tau + |\xi| = 0, \; g^t(x, \xi) = (y, \eta) \} \subset T^*
{\mathbb R} \times T^*M \times T^*M. $$
The Poisson-wave kernel is the analytic continuation $U(t + i \tau, x, y)$ of the wave kernel with respect to time, $t \to t + i \tau \in {\mathbb R} \times {\mathbb R}_+$. For $t = 0$ and for $\tau > 0$, we
obtain
the Poisson semi-group
$U(i \tau) = e^{- \tau \Lambda}$. For general $t + i \tau$, the Poisson-wave kernel has the eigenfunction expansion,
\begin{equation}\label{POISEIGEXP} U (t + i
\tau, x, y) = \sum_j e^{i (t + i \tau) \lambda_j} \psi_{\lambda_j}(x)
\psi_{\lambda_j}(y).
\end{equation}
As stated in Theorem \ref{BOUFIO} in the introduction, the Poisson-wave kernel $U(t + i \tau, x, y)$ admits an analytic
continuation $U_{{\mathbb C}}(t + i \tau, \zeta, y)$ in the first variable
to $M_{\tau} \times M$.
\begin{theo}\label{BOUFIO} Let $U(t)$ be the wave group of the Dirichlet to Neumann operator $\Lambda$ on
$M = \partial \Omega$ as above. Then $\Pi_{\varepsilon} \circ U (i \varepsilon): L^2(M)
\to \mathcal{O}(\partial M_{\varepsilon})$ is a complex Fourier integral
operator of order $- \frac{m-1}{4}$ associated to the canonical
relation
$$\Gamma = \{(y, \eta, \iota_{\varepsilon} (y, \eta)) \} \subset T^* \partial M_{\varepsilon} \times \Sigma_{\varepsilon}.$$
Moreover, for any $s$,
$$\Pi_{\varepsilon} \circ U (i \varepsilon): W^s(M) \to {\mathcal O}^{s +
\frac{m-1}{4}}(\partial M_{\varepsilon})$$ is a continuous
isomorphism.
\end{theo}
This statement is asserted by Boutet de Monvel in \cite{Bou, Bou2} for any real analytic positive elliptic pseudo-differential operator,
and has been accepted since then as an established fact (see for instance \cite{GS1,GS2}). The proof was only sketched in \cite{Bou,Bou2},
and the first complete proofs appeared only recently in
\cite{Z2,L,St} for the special case of the wave group of a Riemannian manifold without boundary.
Roughly the same proof applies to the Steklov problem as well because $\sqrt{\Delta_{\partial M}}$
and $\Lambda$ are the same to leading order and in fact differ by an analytic pseudo-differential operator
of order zero. This is because the principal symbol of $\Lambda$,
\begin{equation} \label{ps} \sigma_{\Lambda} : T^* \partial \Omega \to {\mathbb R}, \;\;\; \sigma_{\Lambda} (x, \xi) = |\xi|_{g_{\partial}},
\end{equation}
is the same as for the Laplacian $\Delta_{\partial}$ of the boundary $(\partial \Omega, g_{\partial})$. In fact, the
complete symbol of $\Lambda$ is calculated in \cite{LU} (see also \cite{PS}). It would be desirable to have
a detailed exposition of the proof, but we postpone that to a future article.
\section{Growth of complexified eigenfunctions: proof of Proposition \ref{PW}}
We further need to generalize sup norm estimates
of complexified eigenfunctions in \cite{Z2} to the $\Lambda$-eigenfunctions.
As in \cite{Z2,Z3} we prove Proposition \ref{PW} by introducing the
`tempered' spectral
projections
\begin{equation}\label{TCXSPM} P_{I_{\lambda}}^{\tau}(\zeta, \bar{\zeta}) =
\sum_{j: \lambda_j \in I_{\lambda}} e^{-2 \tau \lambda_j}
|\psi_{\lambda_j}^{{\mathbb C}}(\zeta)|^2, \;\; (\sqrt{\rho}(\zeta) \leq
\tau),
\end{equation}
where $I_{\lambda}$ could be a short interval $[\lambda, \lambda
+ 1]$ of frequencies or a long window $[0, \lambda]$.
Exactly as in \cite{Z2} but with the wave group of $\Lambda$ replacing the wave group of $\sqrt{\Delta}$, we prove
\begin{equation} \label{PTAU} P^{\tau}_{[0, \lambda]}(\zeta, \bar{\zeta}) = (2\pi)^{-m} \left(\frac{\lambda}{\sqrt{\rho}} \right)^{\frac{m-1}{2}}
\left( \frac{\lambda}{(m-1)/2 + 1} + O (1) \right), \;\; \zeta \in \partial M_{\tau}. \end{equation}
We then obtain
\begin{cor} \label{PWa}
Let $\psi_{\lambda}$ be an eigenfunction of $\Lambda$ as above. Then
there exists $C > 0$ so that for all $\sqrt{\rho}(\zeta) = \tau$,
$$ C
\lambda^{-\frac{m-1}{2}} e^{\tau \lambda} \leq \sup_{\zeta \in
M_{\tau}} |\psi^{{\mathbb C}}_{\lambda}(\zeta)| \leq C
\lambda^{\frac{m-1}{4} + \half} e^{\tau \lambda}.
$$
\end{cor}
The lower bound is not used in the nodal analysis.
\subsection{Proof of the local Weyl law}
We only sketch the proof for the sake of completeness, since it is essentially the same as in \cite{Z,Z2,Z3} and
closely follows \cite{DG}. The novelty is that we apply the argument of \cite{DG} to the analytically continued
parametrix.
By \cite{Hor, DG} the positive elliptic first order pseudo-differential operator $\Lambda$ generates
a wave group which has a parametrix of the form,
\begin{equation} \label{PARAONE} U(t, x, y) = \int_{T^*_y M} e^{i
t |\xi|_{g_y}} e^{i \langle \xi, \exp_y^{-1} (x) \rangle} A(t, x,
y, \xi) d\xi
\end{equation}
similar to that of the wave kernel of $M = \partial \Omega$,
since $\Lambda = \sqrt{\Delta_M} + Q$ where $Q$ is an analytic pseudo-differential operator of order zero.
Here,
$|\xi|_{g_y}$ is the metric norm function at
$y$, and $A(t, x, y, \xi)$ is a polyhomogeneous amplitude of
order $0$ which is supported near the diagonal. The amplitude is different from that of the wave kernel since
the transport equations involve $Q$.
By Theorem \ref{BOUFIO}, the wave group and parametrix may be analytically continued.
To obtain uniform asymptotics, we use the analytic continuation of the
H\"ormander
parametrix \eqref{PARAONE}. We choose local coordinates near $x$
and write $\exp_x^{-1}(y) = \Phi(x, y)$ in these local coordinates
for $y$ near $x$, and write the integral over $T^*_yM$ as an integral
over ${\mathbb R}^m$ in these coordinates. The holomorphic extension of the
parametrix to the Grauert tube $|\zeta| < \tau$ at time $t + 2 i
\tau$ has the form
\begin{equation} \label{CXPARAONE} U_{{\mathbb C}}(t + 2 i \tau,
\zeta, \bar{\zeta}) = \int_{{\mathbb R}^m} e^{(i t - 2\tau) |\xi|_{g_y}}
e^{i \langle \xi, \Phi (\zeta, \bar{\zeta}) \rangle} A(t, \zeta,
\bar{\zeta}, \xi) d\xi,
\end{equation}
where $A$ is the analytic extension of the real analytic $A$ and $\Phi(\zeta, \bar{\zeta})$ is the
analytic extension of $\exp_y^{-1} (x)$.
We
introduce a cutoff function $\chi \in \mathcal{S}({\mathbb R})$ with $\hat{\chi}
\in C_0^{\infty}$ supported in a sufficiently small neighborhood of
$0$ so that no other singularities of $U_{{\mathbb C}}(t + 2 i \tau, \zeta,
\bar{\zeta})$ lie in its support. We also assume $\hat{\chi}
\equiv 1$ in a smaller neighborhood of $0$. We then change
variables $\xi \to \lambda \xi$ and apply complex
stationary phase to the integral,
\begin{equation} \label{CXPARAONEc}\begin{array}{l} \int_{{\mathbb R}} \hat{\chi}(t) e^{-i \lambda t} U_{{\mathbb C}} (t + 2 i \tau,
\zeta, \bar{\zeta}) dt \\ \\
= \lambda^m \int_{0}^{\infty} \int_{{\mathbb R}} \hat{\chi}(t) e^{-i
\lambda t} \int_{S^{m-1}} e^{(i t - 2\tau) \lambda r} e^{i r
\lambda \langle \omega, \Phi (\zeta, \bar{\zeta}) \rangle} A(t,
\zeta, \bar{\zeta}, \lambda r \omega) r^{m-1} dr dt
d\omega.\end{array}
\end{equation}
The resulting integral \eqref{CXPARAONEc} is a semi-classical Fourier integral distribution with a complex phase,
the same phase as in the pure Riemannian case treated in \cite{Z2}. Hence the stationary phase calculation
is essentially the same as in section 9.1
of \cite{Z2}. We first integrate over $d\omega$ and find that there are two stationary
phase points, one giving an exponentially decaying amplitude of order $e^{- 2 \lambda \tau r}$ and one for which the critical value is $2 \lambda \tau r$. It cancels the term $- 2 \tau \lambda r$ coming from the factor $e^{(i t - 2\tau) \lambda r}$.
We then apply stationary phase to the resulting
integral over $(t, r)$
with phase $t (r - 1)$. The critical set consists of $r = 1, t = 0$. The
phase is clearly non-degenerate with Hessian determinant one and inverse
Hessian operator $D^2_{\theta, t}$. Taking into account the factor of $\lambda^{-1}$ from the change of variables,
the stationary phase expansion gives
\begin{equation}\label{EXPANSIONCaa} \sum_j \chi(\lambda - \lambda_j) e^{- 2 \tau
\lambda_j} |\psi_j^{{\mathbb C}}(\zeta)|^2 \sim \sum_{k = 0}^{\infty}\lambda^{\frac{m-1}{2} - k} \omega_k(\tau; \zeta),
\end{equation}
where the coefficients $\omega_k(\tau, \zeta)$ are smooth for $\zeta \in \partial M_{\tau}$.
The Weyl asymptotics then follows from the standard cosine Tauberian theorem, as in \cite{DG} or \cite{Z2} (loc. cit.).
\section{Proof of Theorem \ref{NODALBOUND}}
We start with the integral geometric approach of \cite{DF} (Lemma 6.3) (see also \cite{Lin} (3.21)). There exists a ``Crofton formula'' in the real domain which
bounds the local nodal
hypersurface volume above,
\begin{equation} \label{INTGEOM} \mathcal{H}^{m-1}(\mathcal{N}_{\varphi_{\lambda}} \cap U) \leq C_L \int_{\mathcal{L}} \#\{ \mathcal{N}_{\varphi_{\lambda}}\cap \ell\}
d\mu(\ell). \end{equation}
Thus, $\mathcal{H}^{m-1}(\mathcal{N}_{\varphi_{\lambda}} \cap U)$ is bounded above
by a constant $C_L$ times the average over all line segments of length $L$ in a local coordinate patch $U$ of the number of intersection points
of the line with the nodal hypersurface. The measure $d\mu_L$ is known as the `kinematic measure' in the Euclidean
setting \cite{F} (Chapter 3); see also Theorem 5.5 of \cite{AP}. We will be using geodesic
segments of fixed length $L$ rather than line segments, and parametrize them by $S^*M \times [0, L]$, i.e. by
their initial data and time. Then
$d\mu_{\ell}$ is essentially Liouville measure $d\mu_L$ on $S^* M$ times $dt$.
The complexification of a
real line $\ell = x + {\mathbb R} v$ with $x, v \in {\mathbb R}^m$ is $\ell_{{\mathbb C}} = x +
{\mathbb C} v$. Since the number of intersection points (or zeros) only increases if we count complex intersections, we have
\begin{equation} \label{INEQ1} \int_{\mathcal{L}} \# (\mathcal{N}_{\varphi_{\lambda}} \cap \ell)
d\mu(\ell) \leq \int_{\mathcal{L}} \# (\mathcal{N}_{\varphi_{\lambda}}^{{\mathbb C}} \cap \ell_{{\mathbb C}})
d\mu(\ell).
\end{equation}
Note that this complexification is quite different from using intersections with all complex lines to measure
complex nodal volumes. If we did that, we would obtain a similar upper bound on the complex hypersurface volume
of the complex nodal set. But it would not give an upper bound on the real nodal volume; indeed,
the complex volume tends to zero as one shrinks the Grauert tube radius to zero, while \eqref{INEQ1} stays
bounded below.
Hence to prove Theorem \ref{NODALBOUND} it suffices to show
\begin{lem} \label{DF2} We have,
$$\mathcal{H}^{m-1}(\mathcal{N}_{\varphi_{\lambda}}) \leq C_L \int_{\mathcal{L}} \# (\mathcal{N}_{\varphi_{\lambda}}^{{\mathbb C}} \cap \ell_{{\mathbb C}})
d\mu(\ell) \leq C \lambda. $$
\end{lem}
We now sketch the proofs of these results using a somewhat novel approach to the integral geometry and
complex analysis.
\subsection{\label{GEOS} Background on hypersurfaces and geodesics}
The proof of the Crofton formula given below in Proposition \ref{CROFTONEST} involves the geometry
of geodesics and hypersurfaces. To prepare for it we provide the relevant background.
As above, we denote by $d\mu_L$ the Liouville measure on $S^*
M$. We also denote by $\omega$ the standard symplectic form on
$T^* M$ and by $\alpha$ the canonical one form. Then
$d\mu_L = \omega^{m-1} \wedge \alpha$ on $S^* M$. Indeed, $d\mu_L$
is characterized by the formula $d\mu_L \wedge d H = \omega^{m}$,
where $H(x, \xi) = |\xi|_g$. So it suffices to verify that
$\alpha \wedge dH = \omega$ on $S^*M$. We take the interior
product $\iota_{\Xi_H}$ with the Hamilton vector field $\Xi_H$ on
both sides, and the identity follows from the fact that
$\alpha(\Xi_H) = \sum_j \xi_j \frac{\partial H}{\partial \xi_j} =
H = 1$ on $S^*M$, since $H$ is homogeneous of degree one. Henceforth we denote
by $\Xi = \Xi_H$ the generator of the geodesic flow.
Let $N \subset M$ be a smooth hypersurface in a Riemannian manifold $(M,
g)$. We denote by $T^*_N M$ the bundle
of covectors with footpoint on $N$ and by $S^*_N M$ the unit covectors along $N$.
We introduce Fermi normal coordinates $(s, y_m)$
along
$N$, where $s$ are coordinates on $N$ and $y_m$ is the
normal coordinate, so that $y_m = 0$ is a local defining function for $N$. We also let $\sigma, \xi_m$ be
the dual symplectic Darboux coordinates. Thus the canonical
symplectic form is $\omega_{T^* M} = ds \wedge d \sigma + dy_m
\wedge d \xi_m$.
Let $\pi: T^* M \to M$ be the natural projection. For notational simplicity we denote
$\pi^* y_m$ by $y_m$ as a function on $T^* M$. Then $y_m$ is a
defining function of $T^*_N M$.
The hypersurface $S^*_N M \subset S^* M$ is a kind of Poincar\'e section or
symplectic transversal to the orbits of $G^t$, i.e. is a symplectic transversal away from
the (at most codimension one) set of $(y, \eta) \in S_N^* M$ for which
$\Xi_{y, \eta} \in T_{y, \eta} S^*_N M$, where as above $\Xi$ is the generator
of the geodesic flow. More precisely,
\begin{lem} \label{NSYMP} The restriction $\omega |_{S_N^* M}$ is symplectic on $S^*_N M \backslash S^* N$.
\end{lem}
Indeed, $\omega |_{S_N^* M}$ is symplectic on $T_{y, \eta} S^*_N M$ as long as $T_{y, \eta} S^*_N M$
is transverse to $\Xi_{y, \eta}$, since $\ker (\omega|_{S^*M}) = {\mathbb R} \Xi$. But $S^* N$ is the set of points of $S^*_N M$ where $\Xi \in T S^*_N M$, i.e. where $S^*_N M$
fails to be transverse to $G^t$.
Indeed, transversality fails when $\Xi(y_m) = dy_m (\Xi) = 0$, and $\ker d y_m \cap \ker d H = T S^*_N M$. One may also see
it in Riemannian terms as follows: the generator $\Xi_{y, \eta}$ is the
horizontal lift $\eta^h$ of $\eta$ to $(y, \eta)$ with respect to
the Riemannian connection on $S^* M$, where we freely identify
covectors and vectors by the metric. Lack of transversality occurs
when $\eta^h$ is tangent to $T_{(y, \eta)} (S^*_N M)$. The latter
is the kernel of $d y_m$. But $d y_m (\eta^h) = d y_m (\eta) = 0$
if and only if $\eta \in T N$.
It follows from Lemma \ref{NSYMP} that the symplectic volume form of $S^*_N M \backslash S^* N$
is $\omega^{m-1} |_{S_N^* M}$. The following Lemma gives a useful alternative formula:
\begin{lem} \label{dmuLN}
Define $$d\mu_{L, N} = \iota_{\Xi} d\mu_L \;|_{S^*_N M}, $$
where as above, $d\mu_L$ is Liouville measure on $S^* M$. Then $$d \mu_{L, N} = \omega^{m-1} |_{S_N^* M}. $$
\end{lem}
Indeed, $d \mu_L = \omega^{m-1} \wedge \alpha$, and $\iota_{\Xi} d\mu_L = \omega^{m-1}$.
\begin{cor} \label{COR} $\mathcal{H}^{m-1} (N) = \frac{1}{\beta_m} \int_{S^*_N M} |\omega^{m-1}|$. \end{cor}
\subsection{Hausdorff measure and Crofton formula for real geodesic arcs}
First we sketch a proof of the integral geometry estimate using geodesic arcs rather than local coordinate
line segments. For background on integral geometry and Crofton type formulae we refer to \cite{AB,AP}. As explained there, a Crofton
formula arises from a double fibration
$$\begin{array}{lllll} && \mathcal{I} && \\ &&&&\\
& \pi_1 \;\swarrow & & \searrow \;\pi_2 & \\ &&&& \\ \Gamma &&&&
B,
\end{array}$$
where $\Gamma$ parametrizes a family of submanifolds $B_{\gamma}$ of $B$. The points $b \in B$
then parametrize a family of submanifolds $\Gamma_b = \{\gamma \in \Gamma: b \in B_{\gamma}\}$
and the top space is the incidence relation in $B \times \Gamma$ that $b \in B_{\gamma}.$
We would like to define $\Gamma$ as the space of geodesics of $(M, g)$, i.e.
the space of orbits of the geodesic flow on $S^* M$. Heuristically,
the space of geodesics is the quotient space $S^* M/{\mathbb R}$ where ${\mathbb R}$ acts by the geodesic flow $G^t$ (i.e. the Hamiltonian flow of
$H$). Of course,
for a general (i.e. non-Zoll) $(M, g)$ the `space of geodesics' is not a Hausdorff space and so we do not have a simple analogue
of the space of lines in ${\mathbb R}^n$. Instead we consider the space $\mathcal{G}_T$ of geodesic arcs of length $T$.
If we only use partial orbits of length $T$, no two partial orbits are equivalent and
the space of geodesic arcs $\gamma_{x, \xi}^T$ of length $T$ is simply
parametrized by $S^* M$. Hence we let $B = S^* M$ and also $\mathcal{G}_T \simeq S^* M$. The fact that different arcs of length $T$ of the same geodesic are distinguished leads to some
redundancy.
In the following, let $L_1$ denote the length of the shortest
closed geodesic of $(M, g)$.
\begin{prop}\label{CROFTONEST} Let $N \subset M$ be any smooth hypersurface\footnote{The same formula is true
if $N$ has a singular set $\Sigma$ with $\mathcal{H}^{m-2}(\Sigma) < \infty$.}, and let $S^*_N M$ denote
the unit covectors to $M$ with footpoint on $N$. Then for
$0 < T < L_1$,
$$\mathcal{H}^{m-1}(N) = \frac{1}{\beta_m T}
\int_{S^* M}
\# \{t \in [- T, T]: G^t(x, \omega) \in S^*_N M\} d\mu_L(x,
\omega),$$
where $\beta_m$ is $2 (m-1)!$ times the volume of the unit ball in ${\mathbb R}^{m-2}$.
\end{prop}
\begin{proof}
By Corollary \ref{COR}, the Hausdorff measure of $N$ is given by
\begin{equation} \label{HNN}\begin{array}{lll} \mathcal{H}^{m-1}(N) & = & \frac{1}{\beta_m}
\int_{S^*_N M} |\omega^{m-1}|. \end{array} \end{equation}
We use the Lagrange (or more accurately, Legendre) immersion,
$$\iota: S^* M \times {\mathbb R} \to S^*M \times S^* M, \;\; \iota(x,
\omega, t) = (x, \omega, G^t(x, \omega)), $$
where as above, $G^t$ is the geodesic flow \eqref{gtdef}.
We also let $\pi: T^* M \to M$ be the standard projection.
We restrict $\iota$ to $S^* M \times [-T, T]$ and define the
incidence relation $$ \mathcal{I}_T = \{((y, \eta), (x, \omega), t)
\subset S^*M \times S^*M \times [- T, T]: (y, \eta) = G^t(x,
\omega)\}, $$ which is isomorphic to $[- T, T ] \times S^*M$
under $\iota$. We form the diagram
$$\begin{array}{lllll} && \mathcal{I}_T \simeq S^* M \times [-T, T] && \\ &&&&\\
& \pi_1 \;\swarrow & & \searrow \;\pi_2 & \\ &&&& \\ S^* M \simeq \mathcal{G}_T &&&&
S^* M,
\end{array}$$
using the two natural projections, which in the local parametrization take the form
$$\pi_1(t, x, \xi) = G^t(x, \xi), \;\;\; \pi_2(t, x, \xi) = (x, \xi). $$ As noted above, the bottom
left $S^*M$ should be thought of as the space of geodesic arcs.
The fiber $$\pi_1^{-1}(y, \eta) = \{(t, x, \xi) \in [-T, T ] \times S^* M: G^t(x, \xi)
= (y, \eta)\} \simeq \gamma_{(y, \eta)}^T$$ may be identified with the geodesic segment through
$(y, \eta)$ and the fiber $\pi_2^{-1} (x, \omega) \simeq [- T,
T]$.
We `restrict' the diagram above to $S^*_N M$:
\begin{equation} \label{DIAGRAM} \begin{array}{lllll} && \mathcal{I}_T \simeq S_N^* M \times [-T, T] && \\ &&&&\\
& \pi_1 \;\swarrow & & \searrow \;\pi_2 & \\ &&&& \\ (S^*_N M)_T &&&&
S_N^* M,
\end{array} \end{equation}
where
$$(S^*_N M)_{T} = \pi_1 \pi_2^{-1} (S_N^* M) = \bigcup_{|t| < T} G^t(S^*_N M).$$
We define the Crofton density $\varphi_T$ on $S_N^* M$ corresponding to the diagram \eqref{DIAGRAM} \cite[Section 4]{AP} by
\begin{equation} \label{CROFDEN} \varphi_T = (\pi_2)_* \pi_1^*
d\mu_L. \end{equation} Since the fibers of $\pi_2$ are 1-dimensional, $\varphi_T$
is a differential form of dimension $2 \dim M - 2$ on $S^*M$. To make it smoother, we
can introduce a smooth cutoff $\chi$ to $(-1,1)$, equal to $1$ on $(- \half, \half)$, and use
$\chi_T(t) = \chi(\frac{t}{T}). $ Then $\pi_1^* (d\mu_L \otimes
\chi_T dt)$ is a smooth density on $\mathcal{I}_T$.
\begin{lem} \label{phiT} The Crofton density \eqref{CROFDEN} is given by $\varphi_T = T\, d\mu_{L, N}$. \end{lem}
\begin{proof}
In \eqref{DIAGRAM} we defined the map
$\pi_1: (y, \eta, t) \in S^*_N M \times [-T,T] \to G^t(y, \eta) \in (S^* M)_{\varepsilon}$. We first claim that
$\pi_1^* d\mu_L = d\mu_{L, N} \otimes dt. $ This is essentially the same as Lemma \ref{dmuLN}. Indeed,
$d \pi_1 (\frac{\partial}{\partial t} )= \Xi$, hence $\iota_{\frac{\partial}{\partial t}} \pi_1^* d\mu_L |_{(t, y, \eta)}
= (G^t)^* \omega^{m-1} = \omega^{m-1} |_{T_{y, \eta} S^*_N M}$.
Combining Lemma \ref{phiT} with \eqref{HNN} gives
\begin{equation} \label{HDPHIT} \int_{S^*_N M} \varphi_T = \int_{\pi_2^{-1} (S^*_N M)} d\mu_L = T\beta_m\mathcal{H}^{m-1}(N). \end{equation}
\end{proof}
We then relate the integral on the left side to numbers of intersections of geodesic arcs with $N$.
The relation is given by the co-area formula: if $f: X \to Y$ is a
smooth map of manifolds of the same dimension and if $\Phi$ is a
smooth density on $Y$, and if $\# \{f^{-1}(y)\} < \infty$ for
every regular value $y$, then
$$ \int_X f^* \Phi = \int_Y \# \{f^{-1}(y)\}\; \Phi. $$
If we set $X = \pi_2^{-1}(S^*_N M), \; Y = S^* M, $ and $f =
\pi_1|_{\pi_2^{-1}(S^*_N M)}$ then the co-area formula gives,
\begin{equation} \label{COAREA} \int_{\pi_2^{-1}(S^*_N M)} \pi_1^* d\mu_L = \int_{S^* M}
\# \{t \in [- T, T]: G^t(x, \omega) \in S^*_N M\} d\mu_L(x,
\omega). \end{equation}
Combining \eqref{HDPHIT} and \eqref{COAREA} gives the result stated in Proposition \ref{CROFTONEST},
\begin{equation} \label{CONCLUSION} T \beta_m \mathcal{H}^{m-1}(N) =
\int_{S^* M}
\# \{t \in [- T, T]: G^t(x, \omega) \in S^*_N M\} d\mu_L(x,
\omega). \end{equation}
\end{proof}
\subsection{Proof of Lemma \ref{DF2}}
The next step is to complexify.
\begin{proof}
We complexify the Lagrange immersion $\iota$ from a line (segment)
to a strip in ${\mathbb C}$: Define
$$F: S_{\varepsilon} \times S^*M \to M_{{\mathbb C}}, \;\;\; F(t + i
\tau, x, v) = \exp_x (t + i \tau) v, \;\;\; (|\tau| \leq \varepsilon)
$$ By definition of the Grauert tube, $F$ is surjective onto
$M_{\varepsilon}$. For each $(x, v) \in S^* M$,
$$F_{x, v}(t + i \tau) = \exp_x (t + i \tau) v $$
is a holomorphic strip. Here, $S_{\varepsilon} = \{t + i \tau \in {\mathbb C}:
|\tau| \leq \varepsilon\}. $ We also denote by
$S_{\varepsilon, L} = \{t + i \tau \in {\mathbb C}:
|\tau| \leq \varepsilon, |t| \leq L \}. $
Since $F_{x, v}$ is a holomorphic strip,
$$F_{x, v}^*(\frac{1}{\lambda} dd^c \log |\psi_j^{{\mathbb C}}|^2) =
\frac{1}{\lambda} dd^c_{t + i \tau} \log |\psi_j^{{\mathbb C}}|^2 (\exp_x (t + i \tau)
v) = \frac{1}{\lambda} \sum_{t + i \tau: \psi_j^{{\mathbb C}}(\exp_x (t + i
\tau) v) = 0} \delta_{t + i \tau}.
$$ Put:
\begin{equation} \label{acal} \mathcal{A}_{L, \varepsilon} (\frac{1}{\lambda} dd^c \log |\psi_j^{{\mathbb C}}|^2) = \frac{1}{\lambda} \int_{S^* M} \int_{S_{\varepsilon, L}}
dd^c_{t + i \tau} \log |\psi_j^{{\mathbb C}}|^2 (\exp_x (t + i \tau) v)
d\mu_L(x, v). \end{equation} A key observation of \cite{DF,Lin} is that
\begin{equation} \label{MORE}
\#\{\mathcal{N}_{\lambda}^{{\mathbb C}} \cap F_{x,v}(S_{\varepsilon, L}) \} \geq \#\{\mathcal{N}_{\lambda}^{{\mathbb R}} \cap F_{x,v}(S_{0, L}) \},
\end{equation}
since every real zero is a complex zero.
It follows then from Proposition \ref{CROFTONEST} (with $N = \mathcal{N}_{\lambda}$) that
$$\begin{array}{lll} \mathcal{A}_{L, \varepsilon} (\frac{1}{\lambda} dd^c \log
|\psi_j^{{\mathbb C}}|^2) &= & \frac{1}{\lambda} \int_{S^* M}
\#\{\mathcal{N}_{\lambda}^{{\mathbb C}} \cap F_{x,v}(S_{\varepsilon, L}) \} d
\mu(x,v) \\ && \\
&\geq & \frac{1}{\lambda} \mathcal{H}^{m-1}(\mathcal{N}_{\psi_{\lambda}}).\end{array} $$
Hence to obtain an upper bound on $\frac{1}{\lambda} \mathcal{H}^{m-1}(\mathcal{N}_{\psi_{\lambda}})$ it suffices
to prove that there exists $M < \infty$ so that
\begin{equation} \label{acalest} \mathcal{A}_{L, \varepsilon} (\frac{1}{\lambda} dd^c \log
|\psi_j^{{\mathbb C}}|^2) \leq M. \end{equation}
To prove \eqref{acalest}, we observe that since $dd^c_{t + i \tau} \log |\psi_j^{{\mathbb C}}|^2 (\exp_x (t + i \tau)
v)$ is a positive $(1,1)$ form on the strip, the integral over
$S_{\varepsilon}$ is only increased if we integrate against a
positive smooth test function $\chi_{\varepsilon} \in
C_c^{\infty}({\mathbb C})$ which equals one on $S_{\varepsilon, L}$ and vanishes
off $S_{2 \varepsilon, L} $. Integrating by
parts the $dd^c$ onto $\chi_{\varepsilon}$, we have
$$\begin{array}{lll} \mathcal{A}_{L, \varepsilon} (\frac{1}{\lambda} dd^c \log |\psi_j^{{\mathbb C}}|^2) &\leq & \frac{1}{\lambda} \int_{S^* M} \int_{{\mathbb C}}
dd^c_{t + i \tau} \log |\psi_j^{{\mathbb C}}|^2 (\exp_x (t + i \tau) v)
\chi_{\varepsilon} (t + i \tau) d\mu_L(x, v) \\ && \\ &= & \frac{1}{\lambda} \int_{S^* M} \int_{{\mathbb C}}
\log |\psi_j^{{\mathbb C}}|^2 (\exp_x (t + i \tau) v)
dd^c_{t + i \tau} \chi_{\varepsilon} (t + i \tau) d\mu_L(x, v) .
\end{array}$$
Now write $\log |x| = \log_+ |x| - \log_- |x|$. Here $\log_+ |x| = \max\{0, \log |x|\}$ and
$\log_- |x| = \max\{0, - \log |x| \}. $ Then we need upper bounds for
$$ \frac{1}{\lambda} \int_{S^* M} \int_{{\mathbb C}}
\log_{\pm} |\psi_j^{{\mathbb C}}|^2 (\exp_x (t + i \tau) v)
dd^c_{t + i \tau} \chi_{\varepsilon} (t + i \tau) d\mu_L(x, v) .$$
For $\log_+$ the upper bound is an immediate consequence of Proposition \ref{PW}. For $\log_-$ the
bound is subtler: we need to show that $|\varphi_{\lambda}(z)| $ cannot be too small on too large a set.
As we know from Gaussian beams, it is possible that $|\varphi_{\lambda}(x) | \leq C e^{- \delta \lambda} $
on sets of almost full measure in the real domain;
we need to show that nothing worse can happen.
The map \eqref{E} is a diffeomorphism and since $B_{\varepsilon}^* M = \bigcup_{0 \leq \tau \leq \varepsilon} S^*_{\tau} M$
we also have that
$$E: S_{\varepsilon, L} \times S^* M \to M_{\tau}, \;\;\; E(t + i \tau, x, v) = \exp_x (t + i \tau) v $$
is a diffeomorphism for each fixed $t$. Hence by letting $t$ vary, $E$
is a smooth fibration with fibers given by geodesic arcs. Over a point $\zeta \in M_{\tau}$ the fiber of the map is a geodesic arc
$$\{ (t + i \tau, x, v): \exp_x (t + i \tau) v = \zeta, \;\; \tau = \sqrt{\rho}(\zeta)\}. $$ Pushing forward the measure $
dd^c_{t + i \tau} \chi_{\varepsilon} (t + i \tau) d\mu_L(x, v) $ under $E$ gives a positive measure $d\mu$ on $M_{\tau}$.
We claim that
\begin{equation}\label{PUSH} \mu: = E_* \; dd^c_{t + i \tau} \chi_{\varepsilon} (t + i \tau) d\mu_L(x, v) =\left (\int_{\gamma_{x, v}}
\Delta_{t + i \tau} \chi_{\varepsilon} ds \right) dV_{\omega}, \end{equation}
where $dV_{\omega}$ is the K\"ahler volume form $\frac{\omega^m}{m!} $ (see \S \ref{AC}.)
In fact, $d\mu_{L}$ is equivalent under $E$ to the contact volume form $\alpha \wedge \omega_{\rho}^{m-1}$
where $\alpha = d^c \sqrt{\rho}$.
Hence the claim amounts to saying that the K\"ahler volume form is $d \tau$ times the contact volume form.
In particular it is a smooth (and of course signed) multiple $J$ of the K\"ahler volume form $dV_{\omega}$, and we
do not need to know the coefficient function $J$ beyond that it is bounded above and below by constants independent of
$\lambda$.
We then have
\begin{equation} \label{JEN} \int_{S^* M} \int_{{\mathbb C}}
\log |\psi_j^{{\mathbb C}}|^2 (\exp_x (t + i \tau) v)
dd^c_{t + i \tau} \chi_{\varepsilon} (t + i \tau) d\mu_L(x, v) = \int_{M_{\tau}}
\log |\psi_j^{{\mathbb C}}|^2 J d V. \end{equation}
To complete the proof of \eqref{acalest} it suffices to prove that the right side is $\geq - C \lambda$ for some $ C> 0$.
We use the well-known
\begin{lem} \label{HARTOGS} (Hartogs' Lemma; see
\cite[Theorem~4.1.9]{HoI-IV}): Let $\{v_j\}$ be a sequence of subharmonic functions in an
open set $X \subset {\mathbb R}^m$ which have a uniform upper bound on any
compact set. Then either $v_j \to -\infty$ uniformly on every
compact set, or else there exists a subsequence $v_{j_k}$ which is
convergent to some $u \in L^1_{loc}(X)$. Further, $\limsup_n
u_n(x) \leq u(x)$ with equality almost everywhere. For every
compact subset $K \subset X$ and every continuous function $f$,
$$\limsup_{n \to \infty} \sup_K (u_n - f) \leq \sup_K (u - f). $$
In particular, if $f \geq u$ and $\varepsilon > 0$, then $u_n \leq f
+ \varepsilon$ on $K$ for $n$ large enough. \end{lem}
This Lemma implies the desired lower bound on \eqref{JEN}:
there exists $C > 0$ so that \begin{equation}
\label{LOGINT} \frac{1}{\lambda} \int_{M_{\tau} } \log |\psi_{\lambda}| J d V \geq - C. \end{equation}
For
if not, there exists a subsequence of eigenvalues $\lambda_{j_k}$
so that $\frac{1}{\lambda_{j_k}}\int_{M_{\tau}} \log |\psi_{\lambda_{j_k}}| J d V \to - \infty. $ By Proposition \ref{PW}, $\{\frac{1}{\lambda_{j_k}} \log |\psi_{\lambda_{j_k}}|\}$ has a uniform upper bound.
Moreover the sequence does not tend uniformly to $-\infty$ since $||\psi_{\lambda}||_{L^2(M)} = 1$.
It follows that a further subsequence tends in $L^1$ to a
limit $u$ and by the dominated convergence theorem the limit of \eqref{LOGINT} along the sequence
equals $\int_{M_{\tau}} u J dV \not= - \infty.$ This contradiction concludes the proof of \eqref{LOGINT}, hence
\eqref{acalest}, and thus
the theorem.
\end{proof}
\begin{thebibliography}{HHHH}
\bibitem[AB]{AB} J.C. Alvarez Paiva and G. Berck, What is wrong with the Hausdorff measure in Finsler spaces. Adv. Math. 204 (2006), no. 2, 647--663.
\bibitem[AP]{AP} J. C. Alvarez Paiva and E. Fernandes, Gelfand transforms and Crofton formulas. Selecta Math. (N.S.) 13 (2007), no. 3, 369--390.
\bibitem[BL]{BL} K. Bellova and F. H. Lin,
Nodal Sets of Steklov Eigenfunctions
arXiv:1402.4323
\bibitem[B]{B} R. L. Bishop, Infinitesimal convexity implies local convexity. Indiana Univ. Math. J. 24 (1974/75), 169--172
\bibitem[Bou]{Bou} L. Boutet de Monvel,
Convergence dans le domaine complexe des s\'eries de fonctions
propres. C. R. Acad.\ Sci.\ Paris S\'er. A-B 287 (1978), no.\ 13,
A855--A856.
\bibitem[Bou2]{Bou2}L. Boutet de Monvel, Convergence dans le domaine complexe des s\'eries de fonctions propres. Journées: Équations aux D\'eriv\'ees Partielles (Saint-Cast, 1979), Exp. No. 3, 2 pp., \'Ecole Polytech., Palaiseau, 1979.
\bibitem[BK]{BK} L. Boutet de Monvel and P. Kr\'ee , Pseudo-differential operators
and Gevrey classes, Ann. Inst. Fourier, 17 (1967), 295-323.
\bibitem[DF]{DF} H. Donnelly and C. Fefferman, Nodal sets of eigenfunctions on Riemannian manifolds. Invent. Math. 93 (1988), no. 1, 161-183.
\bibitem[DG]{DG} J.J.Duistermaat and V.Guillemin, The spectrum of positive
elliptic operators and periodic bicharacteristics, Inv.Math. 24
(1975), 39-80.
\bibitem[F]{F} H. Federer, {\it Geometric measure theory.} Die Grundlehren der mathematischen Wissenschaften, Band 153 Springer-Verlag New York Inc., New York 1969.
\bibitem[GPPS]{GPPS} A. Girouard, L. Parnovski, I.Polterovich, and D. A. Sher,
The Steklov spectrum of surfaces: asymptotics and invariants
(arXiv:1311.5533).
\bibitem[GLS]{GLS} F. Golse, E. Leichtnam, and M. Stenzel,
Intrinsic microlocal analysis and inversion formulae for
the heat equation on compact real-analytic Riemannian manifolds.
Ann. Sci. \'Ecole Norm. Sup. (4) 29 (1996), no. 6, 669--736.
\bibitem[GS1]{GS1} V. Guillemin and M. Stenzel, Grauert tubes and the homogeneous Monge-Amp\`ere equation. J. Differential Geom. 34 (1991), no. 2, 561--570.
\bibitem[GS2]{GS2} V. Guillemin and M. Stenzel, Grauert tubes and the homogeneous Monge-Amp\`ere equation. II. J. Differential Geom. 35 (1992), no. 3, 627--641.
\bibitem[Hor]{Hor} L H\"ormander, The spectral function of an elliptic operator. Acta Math. 121 (1968), 193-218.
\bibitem[HoI-IV]{HoI-IV} L. H\"ormander, {\it Theory of
Linear Partial Differential Operators I-IV}, Springer-Verlag, New
York (1985).
\bibitem[L]{L} G. Lebeau, The complex Poisson kernel on a compact analytic Riemannian manifold, preprint (2013).
\bibitem[LS]{LS} L. Lempert and R. Sz\"oke,
Global solutions of the homogeneous complex Monge-Amp\`ere equation and
complex structures on the tangent bundle of Riemannian manifolds. Math. Ann. 290 (1991), no. 4, 689--712.
\bibitem[LU]{LU} J. M. Lee and G. Uhlmann,
Determining anisotropic real-analytic conductivities by boundary measurements.
Comm. Pure Appl. Math. 42 (1989), no. 8, 1097-1112.
\bibitem[Lin]{Lin} F.H. Lin, Nodal sets of solutions of elliptic and parabolic equations. Comm. Pure Appl. Math. 44 (1991), no. 3, 287-308.
\bibitem[PS]{PS} I. Polterovich and D. A. Sher,
Heat invariants of the Steklov problem, to appear in Jour. Geom. Anal. (arXiv:1304.7233).
\bibitem[St]{St} M. Stenzel, On the analytic continuation of the Poisson kernel, to appear in Manuscripta Math.
\bibitem[Sj]{Sj} J. Sj\"ostrand, Singularit\'es analytiques microlocales. Ast\'erisque, 95, 1–166, Ast\'erisque, 95, Soc. Math. France, Paris, 1982.
\bibitem[Sog]{Soggebook}
C. D. Sogge: {\em Fourier integrals in classical analysis}, Cambridge Tracts in Mathematics, 105, Cambridge University Press, Cambridge, 1993.
\bibitem[T]{T} M. E. Taylor,
Partial differential equations. III. Nonlinear equations. Corrected reprint of the 1996 original. Applied Mathematical Sciences, 117. Springer-Verlag, New York, 1997
\bibitem[Z]{Z} S. Zelditch, Complex zeros of real ergodic eigenfunctions. Invent. Math. 167 (2007), no. 2, 419-443.
\bibitem[Z2]{Z2} S. Zelditch, Pluri-potential theory on Grauert tubes of real analytic Riemannian manifolds, I. Spectral geometry, 299-339, Proc. Sympos. Pure Math., 84, Amer. Math. Soc., Providence, RI, 2012.
\bibitem[Z3]{Z3} S. Zelditch, Phase space Weyl laws and eigenfunction growth on Grauert tubes (in preparation).
\end{thebibliography}
\end{document} |
\begin{document}
\begin{abstract}
We prove the existence of generalized solution for incompressible
and viscous non-Newtonian two-phase fluid flow for spatial dimension 2 and 3.
The phase boundary moves along with the fluid flow
plus its mean curvature while exerting surface tension force
to the fluid. An approximation scheme
combining the Galerkin method and the phase field method
is adopted.
\end{abstract}
\maketitle
\makeatletter
\@addtoreset{equation}{section}
\renewcommand{\theequation}{\thesection.\@arabic\c@equation}
\makeatother
\section{Introduction}
\quad
In this paper we prove existence results for a problem on incompressible
viscous two-phase fluid flow in the
torus $\Omega={\mathbb T}^d=({\mathbb R}/{\mathbb Z})^d$,
$d=2,\,3$. A freely moving $(d-1)$-dimensional phase boundary
$\Gamma(t)$ separates the domain $\Omega$ into two domains
$\Omega^+(t)$ and $\Omega^-(t)$, $t\geq 0$. The fluid flow is described
by means of the velocity field $u:\Omega\times [0,\infty)\rightarrow
{\mathbb R}^d$ and the pressure
$\Pi:\Omega\times [0,\infty)\rightarrow \mathbb R$. We assume the stress tensor
of the fluids is of the form $T^{\pm}(u,\Pi)=\tau^{\pm}(e(u))-\Pi\, I$ on $\Omega^{\pm}(t)$,
respectively. Here $e(u)$ is the symmetric part of the velocity gradient $\nabla u$, i.e. $e(u)=(\nabla u+\nabla u^T)/2$ and $I$ is the $d\times d$ identity matrix.
Let $\mathbb{S}(d)$ be the set of $d\times d$ symmetric matrices.
We assume that the functions $\tau^{\pm}:\mathbb{S}(d)\rightarrow\mathbb{S}(d)$ are locally Lipschitz and
satisfy for some $\nu_0>0$ and $p>\frac{d+2}{2}$ and for all $s,\,\hat{s}\in \mathbb{S}(d)$
\begin{equation}
\nu_0 |s|^p \leq \tau^{\pm}(s):s\leq \nu_0^{-1}(1+|s|^p),\label{taucond1}
\end{equation}
\begin{equation}
|\tau^{\pm}(s)|\leq \nu_0^{-1}(1+|s|^{p-1}),\label{taucond2}
\end{equation}
\begin{equation}
(\tau^{\pm}(s)-\tau^{\pm}(\hat{s})):(s-\hat{s})\geq 0.\label{taucond3}
\end{equation}
Here we define $A:B={\rm tr}(AB)$ for $d\times d$ matrices $A,\, B$.
A typical example is $\tau^{\pm}(s)=(a^{\pm}+b^{\pm}|s|^2)^{\frac{p-2}{2}}s$
with $a^{\pm}>0$ and $b^{\pm}>0$.
We assume that the velocity field $u(x,t)$ satisfies the
following non-Newtonian fluid flow equation:
\begin{eqnarray}
\frac{\partial u}{\partial t}+u\cdot\nabla u ={\rm div}\,(T^+(u,\Pi)),\hspace{.5cm}{\rm div}\, u=0 &\quad & {\rm on} \ \Omega^+(t), \ t> 0,\label{main1}\\
\frac{\partial u}{\partial t}+u\cdot\nabla u ={\rm div}\,(T^-(u,\Pi)),\hspace{.5cm}{\rm div}\, u=0 &\quad & {\rm on} \ \Omega^-(t), \ t> 0,\label{main2}\\
u^+= u^-,\hspace{.5cm}n\cdot (T^+(u,\Pi)-T^-(u,\Pi))=
\kappa_1 H &\quad & {\rm on} \ \Gamma(t), \ t> 0.\qquad \qquad \label{main3}
\end{eqnarray}
The superscript $\pm$ in \eqref{main3} indicates the limiting values
approaching $\Gamma(t)$ from $\Omega^{\pm}(t)$,
respectively, $n$ is the unit outer normal vector of $\partial\Omega^+(t)$,
$H$ is the mean curvature vector of $\Gamma(t)$ and
$\kappa_1>0$ is a constant. The condition \eqref{main3}
represents the force balance with an
isotropic surface tension effect of the phase boundary.
The boundary $\Gamma(t)$ is assumed to move with the velocity given by
\begin{equation}
V_{\Gamma}=(u\cdot n)n+\kappa_2 H \hspace{.5cm}{\rm on}
\quad\Gamma(t),\quad t> 0,
\label{velocity}
\end{equation}
where $\kappa_2>0$ is a constant. This differs from the
conventional kinematic condition ($\kappa_2=0$) and is
motivated by the phase boundary motion with hydrodynamic
interaction. The reader is referred to \cite{Liu}
and the references therein for the relevant physical
background. By setting $\varphi=1$ on $\Omega^+(t)$, $\varphi=-1$ on
$\Omega^-(t)$ and
\begin{equation*}
\tau(\varphi,e(u))=\frac{1+\varphi}{2}\tau^+(e(u))+\frac{1-\varphi}{2}
\tau^-(e(u))
\end{equation*}
on $\Omega^+(t)\cup\Omega^-(t)$,
the equations \eqref{main1}-\eqref{main3} are expressed
in the distributional sense as
\begin{equation}
\begin{split}
\frac{\partial u}{\partial t}+u\cdot\nabla u &={\rm div}\,\tau(\varphi,e(u))
-\nabla \Pi +\kappa_1 H\mathcal{H}^{d-1}\lfloor_{\Gamma(t)}
\hspace{.5cm} {\rm on} \ \Omega\times (0,\infty), \label{nsdist}\\
{\rm div}\, u&=0 \hspace{.5cm} {\rm on} \ \Omega\times (0,\infty).
\end{split}
\end{equation}
where $\mathcal{H}^{d-1}$ is the $(d-1)$-dimensional Hausdorff measure.
The expression \eqref{nsdist} makes it evident that the phase
boundary exerts surface tension force on the fluid wherever $H\neq 0$
on $\Gamma(t)$. Note that if $\Gamma(t)$ is a boundary of convex domain, the sign
of $H$ is taken so that the presence of surface tension tends to accelerate
the fluid flow inwards in general.
We remark that the sufficiently smooth solutions of \eqref{main1}-\eqref{velocity} satisfy the following energy equality,
\begin{equation}
\frac{d}{dt}\left\{\frac{1}{2}\int_{\Omega}|u|^2\,dx+\kappa_1{\mathcal H}^{d-1}(\Gamma(t))\right\}=-\int_{\Omega}\tau(\varphi,e(u)):e(u)\,dx
-\kappa_1\kappa_2\int_{\Gamma(t)}|H|^2\,d{\mathcal H}^{d-1}. \label{energyeq}
\end{equation}
This follows from the first variation formula for the
surface measure
\begin{equation}
\frac{d}{dt}{\mathcal H}^{d-1}(\Gamma(t))=-\int_{\Gamma(t)}
V_{\Gamma}\cdot H\, d{\mathcal H}^{d-1}
\label{firstvar}
\end{equation}
and by the equations \eqref{main1}-\eqref{velocity}.
The aim of the present paper is to prove the time-global existence of
the weak solution for \eqref{main1}-\eqref{velocity} (see
Theorem \ref{maintheorem} for the precise statement). We construct
the approximate solution via the Galerkin method and the phase field method.
Note that it is not even clear for our problem
if the phase boundary may stay as a
codimension 1 object since a priori irregular flow field may tear
apart or crumble the phase boundary immediately, with a possibility
of developing singularities and fine-scale complexities. Even if we set
the initial datum to be sufficiently regular, the eventual occurrence
of singularities of phase boundary or flow field may not be avoided
in general. To accommodate the presence of singularities of phase
boundary, we use the notion of varifolds from
geometric measure theory. In
establishing \eqref{velocity} we adopt the formulation due
to Brakke \cite{Brakke} where he proved the
existence of moving varifolds by mean curvature.
We have the extra transport effect $(u\cdot n)n$ which is
not very regular in the present problem. Typically we would
only have $u\in L^p_{loc}([0,\infty);W^{1,p}(\Omega)^d)$. This poses a
serious difficulty in modifying Brakke's original construction
in \cite{Brakke} which is already intricate and involved. Instead
we take advantage of the recent progress on the understanding
on the Allen-Cahn equation with transport term to approximate the motion
law \eqref{velocity},
\[\frac{\partial\varphi}{\partial t}+u\cdot\nabla\varphi=\kappa_2\left(\Delta\varphi-\frac{W'(\varphi)}{\varepsilon^2}\right).\hspace{1cm}{\rm (ACT)} \]
Here $W$ is the equal depth double-well potential and
we set $W(\varphi)=(1-\varphi^2)^2/2$. When $\varepsilon\rightarrow 0$, we have proved
in \cite{LST1} that the interface moves according to the velocity \eqref{velocity}
in the sense of Brakke with suitable regularity assumptions on $u$.
To be more precise, we use a regularized
version of (ACT) as we present later for the result of \cite{LST1}
to be applicable. The result of \cite{LST1} was built upon those of
many earlier works, most relevant being \cite{Ilmanen1,Ilmanen2} which analyzed
(ACT) with $u=0$, and also \cite{Hutchinson,Tonegawa,Sato,Roeger}.
Since the literature on two-phase flow is immense and continues to grow rapidly, we
mention results which are closely related or whose aims point to some time-global existence
with general initial data. In the case without surface tension $(\kappa_1=\kappa_2=0)$,
Solonnikov \cite{Solonnikov1} proved the time-local existence of classical solution.
The time-local existence of weak solution was proved by Solonnikov \cite{Solonnikov2}, Beale \cite{Beale1}, Abels \cite{Abels1}, and others. For time-global existence of weak solution,
Beale \cite{Beale2} proved in the case that the initial data is small. Nouri-Poupaud \cite{Nouri} considered the case of multi-phase fluid.
Giga-Takahashi \cite{GigaTakahashi} considered the problem within the framework of level set method. When $\kappa_1>0$, $\kappa_2=0$, Plotnikov \cite{Plotnikov} proved the
time-global existence of varifold solution for $d=2$, $p>2$, and Abels \cite{Abels2} proved the time-global existence of measure-valued solution for
$d=2, 3$, $p>\frac{2d}{d+2}$. When $\kappa_1>0$, $\kappa_2>0$, Maekawa \cite{Maekawa} proved the time-local existence of classical solution with $p=2$ (Navier-Stokes and Stokes)
and for all dimensions.
Abels-R\"{o}ger
\cite{Abels-Roeger} considered a coupled problem of Navier-Stokes
and Mullins-Sekerka (instead of motion by mean curvature in the
present paper) and proved the existence of weak solutions.
As for related phase field approximations of sharp interface model which we adopt in this paper, Liu and Walkington \cite{Liu} considered the case of fluids containing visco-hyperelastic particles. Perhaps the most closely related work to the
present paper is that of Mugnai and R\"{o}ger \cite{Mugnai} which studied
the identical problem with $p=2$ (linear viscosity case) and $d=2,3$. There
they introduced the notion of $L^2$ velocity and showed that \eqref{velocity}
is satisfied in a weak sense different from that of Brakke for the limiting
interface.
Kim-Consiglieri-Rodrigues \cite{Kim} dealt with a coupling of Cahn-Hilliard and Navier-Stokes equations to describe the flow of non-Newtonian two-phase fluid
with phase transitions. Soner \cite{Soner} dealt with a coupling of Allen-Cahn and heat equations to approximate the Mullins-Sekerka problem with kinetic undercooling.
Soner's work is closely related in that he showed the surface energy density bound
which is also essential in the present problem.
The organization of this paper is as follows. In Section 2, we summarize the basic notations and main results. In Section 3 we construct a
sequence of approximating solutions for the two-phase
flow problem.
Section 4 describes the result of \cite{LST1}
which establishes the
upper density ratio bound for surface energy and which proves \eqref{velocity}.
In the last Section 5 we combine the results from
Section 3 and 4 and obtain the desired weak solution
for the two-phase flow problem.
\section{Preliminaries and Main results}
\quad For $d\times d$ matrices $A,B$ we denote $A:B={\rm tr}\,(AB)$ and $|A|:=\sqrt{A:A}$. For $a \in \mathbb R^d$,
we denote by $a\otimes a$ the $d\times d$ matrix with the $i$-th row and $j$-th
column entry equal to $a_i a_j$.
\subsection{Function spaces}
\quad Set $\Omega={\mathbb T}^d$ throughout this paper. We set function spaces for $p>\frac{d+2}{2}$ as follows:
\begin{equation*}
\begin{split}
&{\mathcal V}=\left\{v \in C^{\infty}(\Omega)^d\,;\,{\rm div}\,v=0\right\},\\
&{\rm for} \ s\in {\mathbb Z}^+ \cup\{0\}, \ W^{s,p}(\Omega)=\{v \ : \ \nabla ^j
v\in L^p(\Omega) \ {\rm for } \ 0\leq j\leq s\},\\
&V^{s,p}= {\rm closure \ of} \ {\mathcal V} \ {\rm in \ the} \
W^{s,p}(\Omega)^d{\rm \mathchar`-norm.}
\end{split}
\end{equation*}
We denote the dual space of $V^{s,p}$ by $(V^{s,p})^*$. The $L^2$ inner
product is denoted by $(\cdot,\cdot)$.
Let $\chi_A$ be the characteristic function of $A$, and let $|\nabla\chi_A|$
be the total variation measure of the distributional derivative $\nabla \chi_A$.
\subsection{Varifold notations}
\quad We recall some notions from geometric measure theory and refer to \cite{Allard,Brakke,Simon} for more details. A {\it general $k$-varifold} in $\mathbb R^d$ is a
Radon measure on $\mathbb R^d\times G(d,k)$, where $G(d,k)$ is the space of $k$-dimensional subspaces in $\mathbb R^d$. We denote the set of all general $k$-varifolds by ${\bf V}_k(\mathbb R^d)$.
When $S$ is a $k$-dimensional subspace, we also use $S$ to denote the orthogonal projection matrix corresponding to
$\mathbb R^d\rightarrow S$. The first variation of $V$ can be written as
\begin{equation*}
\delta V(g)=\int_{\mathbb R^d\times G(d,k)}\nabla g(x):S\,dV(x,S)
=-\int_{\mathbb R^d}g(x)\cdot H(x)\,d\|V\|(x) \quad {\rm if }\, \|\delta V\|\ll \|V\|.
\end{equation*}
Here $V \in {\bf V}_k(\mathbb R^d)$, $\|V\|$ is the mass measure of $V$, $g \in C_c^1(\mathbb R^d)^d$, $H=H_V$ is the generalized mean curvature vector if it exists and $\|\delta V\|\ll \|V\|$
denotes that $\|\delta V\|$ is absolutely continuous with respect to $\|V\|$.
We call a Radon measure $\mu$ {\it $k$-integral} if $\mu$ is represented as $\mu=\theta{\mathcal H}^k\lfloor_X$, where $X$ is a countably $k$-rectifiable, ${\mathcal H}^k$-measurable set,
and $\theta \in L^1_{\rm loc}({\mathcal H}^k\lfloor_X)$ is positive and
integer-valued ${\mathcal H}^k$ a.e on $X$. ${\mathcal H}^k\lfloor_X$ denotes
the restriction of ${\mathcal H}^k$ to the set $X$.
We denote the set of $k$-integral Radon measures
by ${\mathcal{IM}}_k$. We say that a $k$-integral varifold is of {\it unit
density} if $\theta=1$ ${\mathcal H}^k$ a.e. on $X$. For each such
$k$-integral measure $\mu$ corresponds a unique $k$-varifold $V$
defined by
\[\int_{\mathbb R^d\times G(d,k)}\phi(x,S)\,dV(x,S)=\int_{\mathbb R^d}\phi(x,T_x\mu)\,d\mu(x)\quad {\rm for} \ \phi\in C_c(\mathbb R^d\times G(d,k)),\]
where $T_x\mu$ is the approximate tangent $k$-plane. Note that $\mu=\|V\|$. We make such
identification in the following. For this reason we define $H_{\mu}$ as $H_V$
(or simply $H$) if
the latter exists. When $X$ is a $C^2$ submanifold without
boundary and $\theta$ is constant
on $X$, $H$ corresponds to the usual
mean curvature vector for $X$. In the following we suitably
adopt the above notions on $\Omega={\mathbb T}^d$ such as
${\bf V}_k(\Omega)$, which present no essential difficulties.
\subsection{Weak formulation of free boundary motion}
For sufficiently smooth surface $\Gamma(t)$ moving by the
velocity \eqref{velocity}, the following holds for
any $\phi\in C^2(\Omega;\mathbb R^+)$ due to the first variation formula
\eqref{firstvar}:
\begin{equation}
\frac{d}{dt}\int_{\Gamma(t)}\phi\, d{\mathcal H}^{d-1}\leq
\int_{\Gamma(t)}(-\phi H+\nabla\phi)\cdot\{\kappa_2 H+(u\cdot n)n\}\,
d{\mathcal H}^{d-1}.
\label{weakvelo}
\end{equation}
One can check that having this inequality for any $\phi\in C^2(\Omega;\mathbb R^+)$ implies
\eqref{velocity}; thus \eqref{weakvelo} is equivalent to
\eqref{velocity}. Such use of non-negative test functions
to characterize the motion law is due to Brakke \cite{Brakke}
where he developed the theory of varifolds moving by the mean curvature.
Here we suitably modify Brakke's approach to incorporate the
transport term $u$.
To do this we recall
\begin{thm}{\bf (Meyers-Ziemer inequality)}
For any Radon measure $\mu$ on $\mathbb R^d$ with
\begin{equation*}D=\sup_{r>0,\, x \in {\mathbb R}^d}\frac{\mu(B_r(x))}{\omega_{d-1}r^{d-1}}<\infty,
\end{equation*}
we have
\begin{equation}
\int_{\mathbb R^d}|\phi|\,d\mu\leq c D\int_{\mathbb R^d}|\nabla \phi|\,dx
\label{MZ1}
\end{equation}
for $\phi \in C_c^1(\mathbb R^d)$. Here $c$ depends only on $d$. \label{MZ}
\end{thm}
See \cite{Meyers} and \cite[p.266]{Ziemer}.
By localizing \eqref{MZ1} to $\Omega={\mathbb T}^d$ we obtain (with $r$ in the
definition of $D$ above replaced by $0<r<1/2$)
\begin{equation}
\int_{\Omega}|\phi|^2\, d\mu\leq c D \left(\|\phi\|_{L^2(\Omega)}^2+\|\nabla
\phi\|_{L^2(\Omega)}^2\right)
\label{MZ2}
\end{equation}
where the constant $c$ may be different due to the localization but
depends only on $d$.
The inequality \eqref{MZ2} allows us to define $\int_{\Omega}|\phi|^2\, d\mu$
for $\phi\in W^{1,2}(\Omega)$ by the standard density argument when $D<\infty$.
We define for any Radon measure $\mu$, $u\in L^2(\Omega)^d$ and $\phi\in C^1(\Omega:\mathbb R^+)$
\begin{equation}
{\mathcal B}(\mu,\, u,\, \phi)=\int_{\Omega}
(-\phi H+\nabla\phi)\cdot\{\kappa_2 H+(u\cdot n)n\}\, d\mu
\label{rhs}
\end{equation}
if $\mu\in {\mathcal{IM}}_{d-1}(\Omega)$ with generalized
mean curvature $H\in L^2(\mu)^d$ and with
\begin{equation}\sup_{\frac12>r>0,\, x \in \Omega} \frac{\mu(B_r(x))}{\omega_{d-1}r^{d-1}}<\infty
\label{den}
\end{equation} and $u\in W^{1,2}(\Omega)^d$. Due to the definition
of ${\mathcal {IM}}_{d-1}(\Omega)$, the unit normal vector $n$ is
uniquely defined $\mu$ a.e. on $\Omega$ modulo $\pm$ sign.
Since we have $(u\cdot n)n$ in \eqref{rhs}, the choice of sign does not
affect the definition.
The right-hand side of \eqref{rhs} gives a well-defined
finite value due to the stated conditions and \eqref{MZ2}.
If any one of the conditions is not satisfied, we define ${\mathcal B}(\mu,\, u,\, \phi)=-\infty$.
Next we note
\begin{prop}
For any $0<T<\infty$ and $p>\frac{d+2}{2}$,
$$\left\{u\in L^{p}([0,T];V^{1,p})\,;\,\frac{\partial u}{\partial t}\in L^{\frac{p}{p-1}}([0,T]; (V^{1,p})^*)\right\}\hookrightarrow C([0,T];\, V^{0,2}).$$
\label{embed}
\end{prop}
The Sobolev embedding gives $V^{1,p} \hookrightarrow V^{0,2}$
for such $p$ and we may apply \cite[p. 35, Lemma 2.45]{Malek} to obtain
the above embedding. Indeed, we only need $p>\frac{2d}{d+2}$ for
Proposition \ref{embed} to hold, and we have
$\frac{d+2}{2}>\frac{2d}{d+2}$. Thus for this
class of $u$ we may define $u(\cdot, t)\in V^{0,2}$ for all $t\in
[0,T]$ instead of a.e. $t$ and we may tacitly assume that we
redefine $u$ in this way for all $t$.
For $\{\mu_t\}_{t\in [0,\infty)}$, $u\in L^p_{loc}([0,\infty);V^{1,p})$
with $\frac{\partial u}{\partial t}\in L^{\frac{p}{p-1}}_{loc}
([0,\infty); (V^{1,p})^*)$
for $p>\frac{d+2}{2}$ and $\phi\in C^1(\Omega;\mathbb R^+)$, we define
${\mathcal B}(\mu_t,\, u(\cdot,t),\, \phi)$ as in \eqref{rhs} for all $t\geq 0$.
\subsection{The main results}
Our main results are the following.
\betaegin{thm}
Let $d=2$ or $3$ and $p>\varphirac{d+2}{2}$. Let
$\Omega={\mathbb T}^d$. Assume that locally Lipschitz functions
$\tauau^{\partialm}:\mathbb{S}(d)\rightarrow\mathbb{S}(d)$
satisfy \varepsilonqref{taucond1}-\varepsilonqref{taucond3}.
For any initial data $u_0\in V^{0,2}$ and
$\Omega^+(0)\subset\Omega$ having $C^1$ boundary $\partialartial\Omega^+(0)$,
there exist
\betaegin{enumerate}
\item[(a)] $u \in L^{\infty}([0,\infty);V^{0,2})\cap L^p_{loc}([0,\infty);V^{1,p})$
with $\varphirac{\partial u}{\partial t}\in L^{\varphirac{p}{p-1}}_{loc}([0,\infty);(V^{1,p})^*)$,
\item[(b)] a
family of Radon measures $\{\mu_t\}_{t\in [0,\infty)}$ with
$\mu_t\in {\mathcal{IM}}_{d-1}$ for a.e. $t\in [0,\infty)$ and
\item[(c)] $\varphi \in BV_{loc}(\Omega\tauimes [0,\infty)) \cap L^{\infty}([0,\infty);BV(\Omega)) \cap C^{\varphirac{1}{2}}_{loc}([0,\infty);L^1(\Omega))$
\varepsilonnd{enumerate}
such that the following properties hold:
\betaegin{enumerate}
\item[(i)] The triplet $(u(\cdot,t),\, \varphi(\cdot,t),\,\mu_t)_{t\in [0,\infty)}$ is a weak solution of \varepsilonqref{nsdist}. More precisely, for any $T>0$ we have
\betaegin{equation}
\int_0^T
\int_{\Omega}-u\cdot \varphirac{\partial v}{\partial t}+(u\cdot\nabla u)\cdot v+\tauau(\varphi,e(u)):e(v)\,dxdt
=\int_{\Omega}u_0\cdot v(0)\,dx+\int_0^T\int_{\Omega}\kappaappa_1 H\cdot v \,
d\mu_t dt
\lambdaabel{maintheorem1}
\varepsilonnd{equation}
for any $v \in C^{\infty}([0,T];{\mathcal V})$ such that $v(T)=0$.
Here $H\in L^2([0,\infty);L^2(\mu_t)^d)$ is the generalized mean curvature vector
corresponding to $\mu_t$.
\item[(ii)] The triplet $(u(\cdot,t),\, \varphi(\cdot,t),\,\mu_t)_{t\in [0,\infty)}$
satisfies the energy inequality
\betaegin{equation}
\betaegin{split}
\varphirac12\int_{\Omega}|u(\cdot,T)|^2\,dx+\kappaappa_1\mu_T(\Omega)&+\int_0^T\int_{\Omega}
\tauau(\varphi,e(u)):e(u)\, dxdt+\kappaappa_1\kappaappa_2\int_0^T\int_{\Omega}|H|^2\, d\mu_t dt\\
&\lambdaeq \varphirac12\int_{\Omega}|u_0|^2\,dx+\kappaappa_1{\mathcal H}^{d-1}(\partialartial \Omega^+(0))
=: E_0\varepsilonnd{split}
\lambdaabel{eneineq}
\varepsilonnd{equation}
for all $T<\infty$.
\item[(iii)] For all $0\lambdaeq t_1<t_2< \infty$ and $\partialhi\in C^2(\Omega;\mathbb R^+)$ we have
\betaegin{equation}
\mu_{t_2}(\partialhi)-\mu_{t_1}(\partialhi)\lambdaeq \int_{t_1}^{t_2}{\mathcal B}(\mu_t,\, u(\cdot,t),\, \partialhi)\, dt.
\lambdaabel{maintheorem3}
\varepsilonnd{equation}
Moreover, ${\mathcal B}(\mu_t,\, u(\cdot,t),\, \partialhi)\in L^{1}_{loc}([0,\infty))$.
\item[(iv)] We set $D_0=\sup_{0<r<1/2,\, x\in \Omega}\varphirac{{\mathcal H}^{d-1}
(\partialartial\Omega^+(0)\cap B_r(x))}{\omega_{d-1}r^{d-1}}$.
For any $0<T<\infty$, there exists a constant
$D=D(E_0,D_0,T,p,\nu_0,\kappa_1,\kappa_2)$ such that
$$\sup_{0<r<1/2,\,x\in \Omega}\varphirac{\mu_t(B_r(x))}{\omegamega_{d-1}r^{d-1}}
\lambdaeq D$$ for all $t\in [0,T]$.
\item[(v)] The function $\varphi$ satisfies the following properties.\\
\ (1) $\varphi=\partialm 1$ {\rm a.e. on} $\Omega$ for all $t\in
[0,\infty)$.\\
\ (2) $\varphi(x,0)=\chi_{\Omega^+(0)}-\chi_{\Omega\setminus\Omega^+(0)}$ {\rm a.e. on} $\Omega$.\\
\ (3) ${\rm spt}|\nabla\chi_{\{\varphi(\cdot,t)=1\}}|
\subset{\rm spt}\mu_t$ for all $t\in [0,\infty)$.
\item[(vi)] There exists
\[T_1=T_1(E_0,D_0,p,\nu_0,\kappa_1,\kappa_2)>0\]
such that $\mu_t$ is of unit density for a.e. $t\in [0,T_1]$.
In addition $|\nabla\chi_{\{\varphi(\cdot,t)=1\}}|=\mu_t$ for a.e.
$t\in [0,T_1]$.
\varepsilonnd{enumerate}
\lambdaabel{maintheorem}
\varepsilonnd{thm}
\betaegin{rem}
Somewhat differently from the $u=0$ case, we do not expect that
\betaegin{equation}
\lambdaimsup_{\mathcal{D}elta t\rightarrow 0}\varphirac{\mu_{t+\mathcal{D}elta t}(\partialhi)
-\mu_t(\partialhi)}{\mathcal{D}elta t}\lambdaeq {\mathcal B}(\mu_t,\, u(\cdot,t),\partialhi)
\lambdaabel{ve1}
\varepsilonnd{equation}
holds for all $t\geq 0$ and $\partialhi \in C^2(\Omega; \mathbb R^+)$ in
general. While
we know that the right-hand side is $<\infty$ (by definition)
for all $t$, we
do not know in general if the left-hand side is $<\infty$.
One may even expect that at a time when
$\int_{\Omega}|\nabla u(\cdot,t)|^p\,dx=\infty$, it may be $\infty$.
Thus we may need to define \varepsilonqref{velocity} in the integral form
\varepsilonqref{maintheorem3}
for the definition of Brakke's flow. Note that in case $u=0$,
one can show that the left-hand side of \varepsilonqref{ve1} is $<\infty$ for all $t\geq 0$
(see \cite{Brakke}).
\varepsilonnd{rem}
\betaegin{rem}
The difficulty of multiplicities has often been encountered in the measure-theoretic
setting like ours. Varifold solutions constructed by Brakke \cite{Brakke}
have the same properties in this regard. On the other hand, (vi) says that there is
no `folding' for some initial time interval $[0,T_1]$ at least.
\varepsilonnd{rem}
\betaegin{rem}
In the following we set $\kappaappa_1=\kappaappa_2=1$ for notational simplicity,
while all the argument can be modified with any positive $\kappaappa_1$ and
$\kappaappa_2$ with no essential differences. On the other hand, their being
positive plays an essential role, and most of the estimates and claims deteriorate
as $\kappaappa_1,\, \kappaappa_2\rightarrow 0$ and fail in the limit. How severely
they fail in the limit may be of independent interest which we do not pursue
in the present paper. Note that $\kappaappa_2=0$ limit should correspond precisely
to the setting of Plotnikov \cite{Plotnikov} for $d=2$.
\varepsilonnd{rem}
We use the following theorem. See \cite[p.196]{Malek} and the references therein.
\betaegin{thm}{\betaf(Korn's inequality)}
Let $1<p<\infty$. Then there exists a constant $c_K=c(p,d)$ such that
\[\|v\|_{W^{1,p}(\Omega)}^p\lambdaeq c_K (\|e(v)\|_{L^p(\Omega)}^p+\|v\|^p_{L^1(\Omega)})\]
holds for all $v \in W^{1,p}(\Omega)^d$.
\lambdaabel{Korn}
\varepsilonnd{thm}
\section{Existence of approximate solution}
\quad In this section we construct a sequence of approximate solutions of
\varepsilonqref{main1}-\varepsilonqref{velocity} by
the Galerkin method and the phase field method. The proof is a suitable modification
of \cite{LinLiu} for the non-Newtonian setting even though we need to incorporate a
suitable smoothing of the interaction terms.
First we prepare a few definitions. We fix a sequence $\{\varepsilon_i\}_{i=1}^{\infty}$ with $\lambdaim_{i\rightarrow\infty}
\varepsilon_i=0$ and fix a radially symmetric non-negative function $\zeta\in C^{\infty}_c(\mathbb R^d)$ with ${\rm spt}\, \zeta\subset B_1(0)$ and
$\int\zeta\, dx=1$. For a fixed $0<\gamma<\varphirac12$ we define
\betaegin{equation}
\zeta^{\varepsilon_i}(x)=\varphirac{1}{\varepsilon_i^{\gamma}}\zeta\lambdaeft(\varphirac{x}
{\varepsilon_i^{\gamma/d}}\right).
\lambdaabel{zeta}
\varepsilonnd{equation}
We defined $\zeta^{\varepsilon_i}$ so that $\int \zeta^{\varepsilon_i}\, dx=1$,
$|\zeta^{\varepsilon_i}|\lambdaeq c(d)\varepsilon_i^{-\gamma}$ and $|\nabla\zeta^{\varepsilon_i}|
\lambdaeq c(d)\varepsilon_i^{-\gamma-\gamma/d}$.
For a given initial data $\Omega^+(0)\subset\Omega$ with $C^1$
boundary $\partial \Omega^+(0)$, we can approximate $\Omega^+(0)$ in $C^1$ topology by a sequence
of domains $\Omega^{i+}(0)$ with $C^3$ boundaries.
Let $d^{i}(x)$ be the signed distance
function to $\partial \Omega^{i+}(0)$ so that $d^{i}(x)>0$ on $\Omega^{i+}(0)$ and
$d^{i}(x)<0$ on $\Omega^{i-}(0)$. Choose $b^{i}>0$ so that $d^{i}$ is $C^3$
function on the $b^{i}$-neighborhood of $\partial\Omega^{i+}(0)$.
Now we associate $\{\varepsilon_i\}_{i=1}^{\infty}$ with $\Omega^{i+}(0)$ by re-labeling
the index if necessary so that $\lambdaim_{i\rightarrow\infty}\varepsilon_i/b^i=0$ and
$\lambdaim_{i\rightarrow\infty}\varepsilon_i^{j-1}|\nabla^j d^i|=0$ for
$j=2,\, 3$ on the $b^{i}$-neighborhood of $\partial\Omega^{i+}(0)$.
Let $h\in C^{\infty}(\mathbb R)$ be a function
such that $h$ is monotone increasing, $h(s)=s$ for $0\lambdaeq s\lambdaeq 1/4$
and $h(s)= 1/2$ for $1/2<s$, and define $h(-s)=-h(s)$ for $s<0$.
Then define
\betaegin{equation}
\varphi_0^{\varepsilon_i}(x)=\tauanh(b^i h(d^i(x)/b^i)/\varepsilon_i).
\lambdaabel{tanh}
\varepsilonnd{equation}
Note that we have $\varphi_0^{\varepsilon_i}\in
C^3(\Omega)$ and $\varepsilon_i^j|\nabla^j\varphi_0^{\varepsilon_i}|$ for $j=1,\, 2,\, 3$ are
bounded uniformly, independently of $i$.
The well-known property of phase field approximation shows that
\betaegin{equation}
\lambdaim_{i\rightarrow
\infty}\|\varphi_0^{\varepsilon_i}-(\chi_{\Omega^+(0)}-\chi_{\Omega^-(0)})\|_{L^1(\Omega)}=0,\hspace{.5cm}
\varphirac{1}{\sigma}\lambdaeft(\varphirac{\varepsilon_i|\nabla\varphi_0^{\varepsilon_i}|^2}{2}
+\varphirac{W(\varphi_0^{\varepsilon_i})}{\varepsilon_i}\right)\, dx\rightarrow {\mathcal H}^{d-1}\lambdafloor_{
\partial\Omega^+(0)}
\lambdaabel{tanhprop}
\varepsilonnd{equation}
as Radon measures. Here $\sigma=\int_{-1}^{+1}\sqrt{2W(s)}\, ds$.
For $V^{s,2}$ with $s>\varphirac{d}{2}+1$ let $\{\omega_i\}_{i=1}^{\infty}$ be a basis for $V^{s,2}$ such that it is orthonormal in $V^{0,2}$.
The choice of $s$ is made so that the Sobolev embedding theorem implies
$W^{s-1,2}(\Omega)\hookrightarrow L^{\infty}(\Omega)$ and thus $\nabla \omega_i \in L^{\infty}(\Omega)^{d^2}$.
Let $P_i:V^{0,2}\rightarrow V^{0,2}_i={\rm span}\,\{\omega_1,\omega_2,\cdots,\omega_i\}$ be the orthogonal projection.
We then project the problem \varepsilonqref{main1}-\varepsilonqref{velocity} to $V^{0,2}_i$ by utilizing the orthogonality in $V^{0,2}$.
Note that just as in \cite{LinLiu}, we approximate the mean
curvature term in \varepsilonqref{nsdist} by the appropriate
phase field approximation. We consider the following problem:
\betaegin{eqnarray}
\hspace{.3cm}
\varphirac{\partial u^{\varepsilon_i}}{\partial t}=P_i\lambdaeft({\rm div}\,\tauau(\varphi^{\varepsilon_i}, e(u^{\varepsilon_i}))-
u^{\varepsilon_i}\cdot\nabla u^{\varepsilon_i}-\varphirac{\varepsilon_i}{\sigma}{\rm div}\,((\nabla\varphi^{\varepsilon_i}\omegatimes\nabla\varphi^{\varepsilon_i})*\zeta^{\varepsilon_i})\right) & & {\rm on} \ \Omega\tauimes[0,\infty),\lambdaabel{appeq1}\\
u^{\varepsilon_i}(\cdot,t)\in V^{0,2}_i \qquad \qquad \qquad \qquad \qquad & & {\rm for} \ t\geq 0,\lambdaabel{appeq2}\\
\varphirac{\partial\varphi^{\varepsilon_i}}{\partial t}+(u^{\varepsilon_i}*\zeta^{\varepsilon_i})\cdot\nabla\varphi^{\varepsilon_i}=\mathcal{D}elta\varphi^{\varepsilon_i}-\varphirac{W'(\varphi^{\varepsilon_i})}{\varepsilon_i^2} \qquad \qquad \quad & & {\rm on} \ \Omega\tauimes [0,\infty),\lambdaabel{appeq3}\\
u^{\varepsilon_i}(x,0)=P_i u_0(x),\quad \varphi^{\varepsilon_i}(x,0)=\varphi_0^{\varepsilon_i}(x) \qquad \qquad \qquad & & {\rm on} \ \Omega.\lambdaabel{appeq4}
\varepsilonnd{eqnarray}
Here $*$ is the usual convolution.
We prove the following theorem.
\betaegin{thm}
For any $i\in {\mathbb N}$, $u_0\in V^{0,2}$
and $\varphi^{\varepsilon_i}_0$, there exists a weak solution $(u^{\varepsilon_i},\varphi^{\varepsilon_i})$ of
\varepsilonqref{appeq1}-\varepsilonqref{appeq4} such that $u^{\varepsilon_i} \in L^{\infty}([0,\infty);V^{0,2})\cap L^p_{loc}([0,\infty);V^{1,p})$,
$|\varphi^{\varepsilon_i}|\lambdaeq 1$,
$\varphi^{\varepsilon_i} \in L^{\infty}([0,\infty);C^3(\Omega))$ and
$\varphirac{\partial\varphi^{\varepsilon_i}}{\partial t}\in L^{\infty}([0,\infty);C^1(\Omega))$. \lambdaabel{globalexistence}
\varepsilonnd{thm}
We write the above system in terms of $u^{\varepsilon_i}=\sum_{k=1}^{i}c^{\varepsilon_i}_k(t)\omega_k(x)$ first. Since
\betaegin{gather*}
\lambdaeft(\varphirac{d}{dt}u^{\varepsilon_i},\,\omega_j\right)=\betaigg(\varphirac{d}{dt}\sum_{k=1}^i c^{\varepsilon_i}_k(t)\,\omega_k,\,\omega_j\betaigg)=\varphirac{d}{dt}c^{\varepsilon_i}_j(t),\\
(u^{\varepsilon_i}\cdot\nabla u^{\varepsilon_i},\,\omega_j)=\sum_{k,l=1}^i c_k^{\varepsilon_i}(t)c_l^{\varepsilon_i}(t)(\omega_k\cdot\nabla\omega_l,\,\omega_j),\\
\varepsilon_i({\rm div}\,((\nabla\varphi^{\varepsilon_i} \omegatimes\nabla\varphi^{\varepsilon_i})*\zeta^{\varepsilon_i}),\,\omega_j)
= \,-\varepsilon_i \int_{\Omega} (\nabla\varphi^{\varepsilon_i}\omegatimes\nabla\varphi^{\varepsilon_i})*\zeta^{\varepsilon_i}:
\nabla \omega_j \,dx,\\
\lambdaeft({\rm div}\,\tauau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i})),\,\omega_j\right)
= -\int_{\Omega}\tauau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i})):e(\omega_j)\,dx
\varepsilonnd{gather*}
for $j=1,\cdots,i$, \varepsilonqref{appeq1} is equivalent to
\betaegin{equation}
\betaegin{split}
\varphirac{d}{dt}c_j^{\varepsilon_i}(t)=& \,-\int_{\Omega}\tauau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i})):e(\omega_j)\,dx
-\sum_{k,l=1}^i c_k^{\varepsilon_i}(t)c_l^{\varepsilon_i}(t)(\omega_k\cdot\nabla\omega_l,\,\omega_j) \\
& +\varphirac{\varepsilon_i}{\sigma}\int_{\Omega}(\nabla\varphi^{\varepsilon_i}\omegatimes\nabla\varphi^{\varepsilon_i})*\zeta^{\varepsilon_i}:
\nabla \omega_j \,dx= \,A^{\varepsilon_i}_j(t)+B_{klj} c^{\varepsilon_i}_k(t)c^{\varepsilon_i}_l(t)+D^{\varepsilon_i}_j(t).\lambdaabel{appeq1-2}
\varepsilonnd{split}
\varepsilonnd{equation}
Moreover, the initial condition of $c_j^{\varepsilon_i}$ is
\[c^{\varepsilon_i}_j(0)=(u_0,\,\omega_j)\quad {\rm for} \ j=1,2,\deltaots,i.\]
We also set
\[E_0={\mathcal H}^{d-1}(\partial\Omega^+(0))+\varphirac12 \int_{\Omega}|u_0|^2\, dx\]
and note that
\betaegin{equation}
\varphirac{1}{\sigma}\int_{\Omega}\lambdaeft(\varphirac{\varepsilon_i|\nabla\varphi_0^{\varepsilon_i}|^2}{2}
+\varphirac{W(\varphi^{\varepsilon_i}_0)}{\varepsilon_i}\right)\,dx+\varphirac12\sum_{j=1}^i(c^{\varepsilon_i}_j(0))^2\lambdaeq E_0
+o(1)
\lambdaabel{eqeq}
\varepsilonnd{equation}
by \varepsilonqref{tanhprop} and by $P_i$ being an orthogonal projection.
We use the following lemma to prove Theorem \ref{globalexistence}.
\betaegin{lemma}
There exists a constant $T_0=T_0(E_0,i,\nu_0,p)>0$ such that \varepsilonqref{appeq1}-\varepsilonqref{appeq4} with \varepsilonqref{eqeq} has a weak solution $(u^{\varepsilon_i},\varphi^{\varepsilon_i})$ in $\Omega\tauimes[0,T_0]$
such that $u^{\varepsilon_i} \in L^{\infty}([0,T_0];V^{0,2})\cap L^p([0,T_0];V^{1,p})$, $|\varphi^{\varepsilon_i}|\lambdaeq 1$,
$\varphi^{\varepsilon_i} \in L^{\infty}([0,T_0];C^3(\Omega))$ and $\varphirac{\partial\varphi^{\varepsilon_i}}{\partial t}
\in L^{\infty}([0,T_0];C^1(\Omega))$.
\lambdaabel{localexistence}
\varepsilonnd{lemma}
{\it Proof.} Assume that we are given a function
$u(x,t)=\sum_{j=1}^i c_j^{\varepsilon_i}(t)\omega_j(x)\in C^{1/2}([0,T];V^{s,2})$ with
\betaegin{equation}
c^{\varepsilon_i}_j(0)=(u_0,\,\omega_j),\hspace{.5cm}
\max_{t\in[0,T]}\lambdaeft(\varphirac12\sum_{j=1}^i|c^{\varepsilon_i}_j(t)|^2\right)^{1/2}+
\sup_{0\lambdaeq t_1<t_2\lambdaeq T}\sum_{j=1}^i
\varphirac{|c_j^{\varepsilon_i}(t_1)-c_j^{\varepsilon_i}(t_2)|}{|t_1-t_2|^{1/2}}\lambdaeq \sqrt{2E_0}.\lambdaabel{leraycond}
\varepsilonnd{equation}
We let $\varphi (x,t)$ be the solution of the following parabolic equation:
\betaegin{equation}
\betaegin{split}
\varphirac{\partial\varphi}{\partial t}+(u*\zeta^{\varepsilon_i})\cdot\nabla\varphi=\mathcal{D}elta\varphi-\varphirac{W'(\varphi)}{\varepsilon_i^2},\\
\varphi(x,0)=\varphi^{\varepsilon_i}_0(x).
\varepsilonnd{split}\lambdaabel{acapprox}
\varepsilonnd{equation}
The existence of such $\varphi$ with $|\varphi|\lambdaeq 1$ is guaranteed by the standard theory of parabolic equations (\cite{Ladyzhenskaya}).
By \varepsilonqref{acapprox} and the Cauchy-Schwarz inequality, we can estimate
\betaegin{equation*}
\varphirac{d}{dt}\int_{\Omega}\lambdaeft(\varphirac{\varepsilon_i|\nabla\varphi|^2}{2}+\varphirac{W(\varphi)}{\varepsilon_i}\right)\,dx
\lambdaeq -\varphirac{\varepsilon_i}{2}\int_{\Omega} \lambdaeft(\mathcal{D}elta\varphi-\varphirac{W'(\varphi)}{\varepsilon_i^2}\right)^2\,dx+\varphirac{\varepsilon_i}{2}\int_{\Omega} \lambdaeft\{(u*\zeta^{\varepsilon_i})\cdot\nabla\varphi\right\}^2\,dx.
\varepsilonnd{equation*}
Since for any $t \in [0,T]$
\betaegin{equation*}
\|u*\zeta^{\varepsilon_i}\|^2_{L^{\infty}(\Omega)} \lambdaeq \varepsilon_i^{-2\gamma}\|u\|^2_{L^{\infty}(\Omega)}
\lambdaeq i\varepsilon_i^{-2\gamma}\max_{1\lambdaeq j \lambdaeq i}\|\omega_j(x)\|^2_{L^{\infty}(\Omega)}
\sum_{j=1}^i|c^{\varepsilon_i}_j(t)|^2 \lambdaeq c(i)E_0,
\varepsilonnd{equation*}
\betaegin{equation*}
\varphirac{d}{dt}\int_{\Omega}\lambdaeft(\varphirac{\varepsilon_i|\nabla\varphi|^2}{2}+\varphirac{W(\varphi)}{\varepsilon_i}\right)\,dx
\lambdaeq c(i) E_0\int_{\Omega}\varphirac{\varepsilon_i|\nabla\varphi|^2}{2}\,dx.
\varepsilonnd{equation*}
This gives
\betaegin{equation}
\sup_{0\lambdaeq t \lambdaeq T}
\varphirac{1}{\sigma}\int_{\Omega}\lambdaeft(\varphirac{\varepsilon_i|\nabla\varphi|^2}{2}+\varphirac{W(\varphi)}{\varepsilon_i}\right)\,dx \lambdaeq e^{c(i) E_0 T}E_0.\lambdaabel{energyest}
\varepsilonnd{equation}
Hence as long as $T\lambdaeq 1$,
\betaegin{equation}
|D_j^{\varepsilon_i}(t)| \lambdaeq c \|\nabla\omega_j\|_{L^{\infty}(\Omega)}\varphirac{1}{\sigma}\int_{\Omega}\int_{\Omega}\varepsilon_i|\nabla\varphi(y)|^2\zeta^{\varepsilon_i}(x-y)\,dydx
\lambdaeq c(i)e^{c(i) E_0}E_0\lambdaabel{Dest}
\varepsilonnd{equation}
by $\nabla\omega_j \in L^{\infty}(\Omega)^{d^2}$ and \varepsilonqref{energyest}.
Next we substitute the above solution $\varphi$ into the place of $\varphi^{\varepsilon_i}$, and solve \varepsilonqref{appeq1-2} with the initial condition $c^{\varepsilon_i}_j(0)=(u_0,\,\omega_j)$. Since $\tauau$
is locally Lipschitz with respect to $e(u)$, there is at least some short time $T_1$ such that \varepsilonqref{appeq1-2} has a unique solution $\tauilde{c}^{\varepsilon_i}_j(t)$ on $[0,T_1]$ with the initial condition
$\tauilde{c}_j^{\varepsilon_i}(0)=(u_0,\,\omega_j)$ for $1\lambdaeq j\lambdaeq i$. We show that the solution exists up to $T_0=T_0(i,E_0,p,\nu_0)$ satisfying \varepsilonqref{leraycond}.
Let $\tauilde{c}(t)=\varphirac12\sum_{j=1}^i|\tauilde{c}^{\varepsilon_i}_j(t)|^2$.
Then,
\betaegin{equation*}
\varphirac{d}{dt}\tauilde{c}(t)=
A^{\varepsilon_i}_j\tauilde{c}^{\varepsilon_i}_j+B_{klj}\tauilde{c}^{\varepsilon_i}_k\tauilde{c}^{\varepsilon_i}_l\tauilde{c}^{\varepsilon_i}_j+D_j^{\varepsilon_i}\tauilde{c}^{\varepsilon_i}_j.
\varepsilonnd{equation*}
By \varepsilonqref{taucond1} $A_j^{\varepsilon_i}\tauilde{c}^{\varepsilon_i}_j\lambdaeq 0$ hence
\betaegin{equation*}
\varphirac{d}{dt}\tauilde{c}(t) \lambdaeq c(i,E_0)(\tauilde{c}^{3/2}+\tauilde{c}^{1/2}).
\varepsilonnd{equation*}
Therefore,
\betaegin{equation}
\alpharctan\sqrt{\tauilde{c}(t)} \lambdaeq \alpharctan\sqrt{E_0}+2c(i,E_0) t.\lambdaabel{arc}
\varepsilonnd{equation}
We can also estimate $|dc_j^{\varepsilon_i}/dt|$ due to \varepsilonqref{appeq1-2}, \varepsilonqref{Dest},
\varepsilonqref{arc} and
\varepsilonqref{taucond2} depending only on $E_0,i,p,\nu_0$. Thus, by choosing $T_0$ small depending only on $E_0,i,p,\nu_0$ we have the existence of solution
for $t\in[0,T_0]$ satisfying \varepsilonqref{leraycond}.
We then prove the existence of a weak solution on $\Omega\tauimes [0,T_0]$ by using Leray-Schauder fixed point theorem (see \cite{Ladyzhenskaya}). We define
\[\tauilde{u}(x,t)=\sum_{j=1}^i\tauilde{c}^{\varepsilon_i}_j(t)\omega_j(x)\]
and we define a map $\mathcal{L}:u\mapsto \tauilde{u}$ as in the above procedure. Let
\betaegin{equation*}
\betaegin{split}V(T_0):=&\lambdaeft\{u(x,t)
=\sum_{j=1}^i c_j(t)\omega_j(x)\,;\,\,\max_{t\in[0,T_0]}\lambdaeft(\varphirac12\sum_{j=1}^i|c_j(t)|^2\right)^{1/2}\right.\\ &\lambdaeft.+
\sup_{0\lambdaeq t_1<t_2\lambdaeq T_0}\sum_{j=1}^i
\varphirac{|c_j(t_1)-c_j(t_2)|}{|t_1-t_2|^{1/2}}\lambdaeq \sqrt{2E_0},\,c_j(0)=(u_0,\,\omega_j),\,c_j\in C^{1/2}([0,T_0]) \right\}.
\varepsilonnd{split}
\varepsilonnd{equation*}
Then $V(T_0)$ is a closed, convex subset of $C^{1/2}([0,T_0];V^{0,2}_i)$ equipped with the norm
\[\|u\|_{V(T_0)}=\max_{t\in[0,T_0]}\lambdaeft(\varphirac12\sum_{j=1}^i|c_j(t)|^2\right)^{1/2}+
\sup_{0\lambdaeq t_1<t_2\lambdaeq T_0}\sum_{j=1}^i
\varphirac{|c_j(t_1)-c_j(t_2)|}{|t_1-t_2|^{1/2}}\]
and by the above argument $\mathcal{L}:V(T_0)\rightarrow V(T_0)$. Moreover by the Ascoli-Arzel\`a compactness theorem $\mathcal{L}$ is
a compact operator. Therefore by using the Leray-Schauder fixed point theorem, $\mathcal{L}$ has a fixed point $u^{\varepsilon_i}\in V(T_0)$. We denote by $\varphi^{\varepsilon_i}$ the solution of \varepsilonqref{appeq3} and \varepsilonqref{appeq4}. Then $(u^{\varepsilon_i}, \varphi^{\varepsilon_i})$ is a weak solution of \varepsilonqref{appeq1}-\varepsilonqref{appeq4} in $\Omega\tauimes [0,T_0]$. Note that we have the
required regularities for $\varphi^{\varepsilon_i}$ due to the regularity of
$u^{\varepsilon_i}*\zeta^{\varepsilon_i}$ in $x$ and by the standard parabolic regularity theory.
$
{\Box}$
\betaegin{thm}
Let $(u^{\varepsilon_i},\varphi^{\varepsilon_i})$ be the weak solution of \varepsilonqref{appeq1}-\varepsilonqref{appeq4}
with \varepsilonqref{eqeq} in $\Omega\tauimes[0,T]$. Then the following energy estimate holds:
\betaegin{equation}
\betaegin{split}
\int_{\Omega}\varphirac{1}{\sigma}&\lambdaeft(\varphirac{\varepsilon_i|\nabla\varphi^{\varepsilon_i}(\cdot,T)|^2}{2}+\varphirac{W(\varphi^{\varepsilon_i}(\cdot,T))}{\varepsilon_i}\right)+\varphirac{|u^{\varepsilon_i}(\cdot,T)|^2}{2}\,dx\\
&+\int_0^{T}\int_{\Omega}\varphirac{\varepsilon_i}{\sigma}\lambdaeft(\mathcal{D}elta\varphi^{\varepsilon_i}-\varphirac{W'(\varphi^{\varepsilon_i})}{\varepsilon_i^2}\right)^2+\nu_0|e(u^{\varepsilon_i})|^p\,dxdt \lambdaeq E_0+o(1).
\lambdaabel{localenergy1}
\varepsilonnd{split}
\varepsilonnd{equation}
Moreover for any $0\lambdaeq T_1<T_2<\infty$
\betaegin{equation}
\int_{T_1}^{T_2}\|u^{\varepsilon_i}(\cdot,t)\|_{W^{1,p}(\Omega)}^p\, dt\lambdaeq c_K
\{\nu_0^{-1}E_0+(T_2-T_1)E_0^{\varphirac{p}{2}}\}+o(1).
\lambdaabel{localenergysup}
\varepsilonnd{equation}
\lambdaabel{localenergy}
\varepsilonnd{thm}
{\it Proof.}
Since $(u^{\varepsilon_i},\varphi^{\varepsilon_i})$ is the weak solution of \varepsilonqref{appeq1}-\varepsilonqref{appeq4}, we derive
\betaegin{equation}
\betaegin{split}
& \varphirac{d}{dt}\int_{\Omega}\varphirac{1}{\sigma}\lambdaeft(\varphirac{\varepsilon_i|\nabla\varphi^{\varepsilon_i}|^2}{2}+\varphirac{W(\varphi^{\varepsilon_i})}{\varepsilon_i}\right)+\varphirac{|u^{\varepsilon_i}|^2}{2}\,dx\\
& =\int_{\Omega}-\varphirac{\varepsilon_i}{\sigma}\varphirac{\partial \varphi^{\varepsilon_i}}{\partial t}\lambdaeft(\mathcal{D}elta\varphi^{\varepsilon_i}-\varphirac{W'(\varphi^{\varepsilon_i})}{\varepsilon_i^2}\right)+\varphirac{\partial u^{\varepsilon_i}}{\partial t}\cdot u^{\varepsilon_i}\,dx\\
& =\int_{\Omega}-\varphirac{\varepsilon_i}{\sigma}\lambdaeft(\mathcal{D}elta\varphi^{\varepsilon_i}-\varphirac{W'(\varphi^{\varepsilon_i})}{\varepsilon_i^2}-(u^{\varepsilon_i}*\zeta^{\varepsilon_i})\cdot\nabla\varphi^{\varepsilon_i}\right)
\lambdaeft(\mathcal{D}elta\varphi^{\varepsilon_i}-\varphirac{W'(\varphi^{\varepsilon_i})}{\varepsilon_i^2}\right)\,dx\\
& +\int_{\Omega}\lambdaeft\{{\rm div}\,\tauau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i}))-u^{\varepsilon_i}\cdot\nabla u^{\varepsilon_i}
-\varphirac{\varepsilon_i}{\sigma}{\rm div}\,((\nabla\varphi^{\varepsilon_i}\omegatimes\nabla\varphi^{\varepsilon_i})*\zeta^{\varepsilon_i})\right\}\cdot u^{\varepsilon_i}\,dx=I_1+I_2.
\varepsilonnd{split}\lambdaabel{localenergy1cal}
\varepsilonnd{equation}
Since ${\rm div}\, (u^{\varepsilon_i}*\zeta^{\varepsilon_i})=({\rm div}\, u^{\varepsilon_i})*\zeta^{\varepsilon_i}=0$,
\betaegin{equation*}
\sigma I_1 = -\int_{\Omega}\varepsilon_i\lambdaeft(\mathcal{D}elta\varphi^{\varepsilon_i}-\varphirac{W'(\varphi^{\varepsilon_i})}{\varepsilon_i^2}\right)^2\,dx+\varepsilon_i\int_{\Omega}(u^{\varepsilon_i}*\zeta^{\varepsilon_i})\cdot\nabla\varphi^{\varepsilon_i}\mathcal{D}elta\varphi^{\varepsilon_i}\,dx.
\varepsilonnd{equation*}
For $I_2$, with \varepsilonqref{taucond1}
\betaegin{equation*}
\int_{\Omega}{\rm div}\,\tauau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i}))\cdot u^{\varepsilon_i}\,dx
=-\int_{\Omega}\tauau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i})):e(u^{\varepsilon_i})\,dx
\lambdaeq -\nu_0\int_{\Omega}|e(u^{\varepsilon_i})|^p\,dx.
\varepsilonnd{equation*}
Moreover the second term of $I_2$ vanishes by ${\rm div}\,u^{\varepsilon_i}=0$ and
\betaegin{equation*}
\betaegin{split}
&
-\int_{\Omega}\varepsilon_i {\rm div}\,(\nabla\varphi^{\varepsilon_i}\omegatimes\nabla\varphi^{\varepsilon_i}*\zeta^{\varepsilon_i})\cdot u^{\varepsilon_i}\,dx
= -\int_{\Omega}\varepsilon_i \lambdaeft(\nabla \varphirac{|\nabla\varphi^{\varepsilon_i}|^2}{2}+
\nabla \varphi^{\varepsilon_i}\mathcal{D}elta\varphi^{\varepsilon_i}\right)*\zeta^{\varepsilon_i} \cdot u^{\varepsilon_i}\,dx\\
& = -\varepsilon_i\int_{\Omega}(u^{\varepsilon_i}*\zeta^{\varepsilon_i})\cdot\nabla\varphi^{\varepsilon_i}\mathcal{D}elta\varphi^{\varepsilon_i}\,dx.
\varepsilonnd{split}
\varepsilonnd{equation*}
Hence \varepsilonqref{localenergy1cal} becomes
\betaegin{equation*}
\varphirac{d}{dt}\int_{\Omega}\varphirac{1}{\sigma}\lambdaeft(\varphirac{\varepsilon_i|\nabla\varphi^{\varepsilon_i}|^2}{2}+\varphirac{W(\varphi^{\varepsilon_i})}{\varepsilon_i}\right)+\varphirac{|u^{\varepsilon_i}|^2}{2}\,dx
\lambdaeq -\int_{\Omega}\varphirac{\varepsilon_i}{\sigma}\lambdaeft(\mathcal{D}elta\varphi^{\varepsilon_i}-\varphirac{W'(\varphi^{\varepsilon_i})}{\varepsilon_i^2}\right)^2
+\nu_0 |e(u^{\varepsilon_i})|^p\, dx.
\varepsilonnd{equation*}
Integrating with respect to $t$ over $t\in[0,T]$ and by \varepsilonqref{eqeq}, we obtain \varepsilonqref{localenergy1}. The proof of \varepsilonqref{localenergysup} follows from \varepsilonqref{localenergy1} and Theorem \ref{Korn}.
$
{\Box}$\\
{\it Proof of Theorem \ref{globalexistence}.}
For each fixed $i$ we have short-time existence on $[0,T_0]$ where $T_0$ depends only on $i,E_0,p,\nu_0$ at $t=0$. By Theorem \ref{localenergy} the energy at $t=T_0$ is again bounded by $E_0+o(1)$.
By repeatedly using Lemma \ref{localexistence}, Theorem \ref{globalexistence} follows.
$
{\Box}$\\
\section{Proof of main theorem}
\quad In this section we first prove that $\{\varphi^{\varepsilon_i}\}_{i=1}^{\infty}$ in
Section 3 and the associated surface energy measures $\{\mu_t^{\varepsilon_i}\}_{
i=1}^{\infty}$ converge subsequentially to $\varphi$ and $\mu_t$ which satisfy
the properties described in Theorem \ref{maintheorem}. Most of the
technical and essential ingredients have been proved in \cite{LST1}
and we only need to check the conditions to apply the results. We
then prove that the limit velocity field satisfies the weak non-Newtonian
flow equation, concluding the proof of Theorem \ref{maintheorem}.
First we recall the upper density ratio bound of the surface energy.
\betaegin{thm} (\cite[Theorem 3.1]{LST1})
Suppose $d\geq 2$, $\Omega={\mathbb T}^d$, $p>\varphirac{d+2}{2}$, $\varphirac12>\gamma\geq 0$,
$1\geq \varepsilon>0$ and $\varphi$ satisfies
\betaegin{eqnarray}
\varphirac{\partial \varphi}{\partial t}+u\cdot\nabla\varphi=\mathcal{D}elta\varphi-\varphirac{W'(\varphi)}{\varepsilon^2} \qquad \qquad \quad & & {\rm on} \ \Omega\tauimes [0,T],\lambdaabel{allen1}\\
\varphi(x,0)=\varphi_0(x) \qquad \qquad \qquad & & {\rm on} \ \Omega,\lambdaabel{allen2}
\varepsilonnd{eqnarray}
where $\nabla^i u,\, \nabla^j \varphi, \nabla^k \varphi_t\in C(\Omega\tauimes[0,T])$ for $0\lambdaeq i,\, k\lambdaeq 1$ and $0\lambdaeq j\lambdaeq 3$.
Let $\mu_t$ be the Radon measure on $\Omega$ defined by
\betaegin{equation}
\int_{\Omega}\partialhi(x)\, d\mu_t(x)=\varphirac{1}{\sigma}\int_{\Omega}\partialhi(x)\lambdaeft(\varphirac{\varepsilon|\nabla\varphi(x,t)|^2}{2}+\varphirac{W(\varphi(x,t))}{\varepsilon}\right)\, dx
\lambdaabel{dmu}
\varepsilonnd{equation}
for $\partialhi\in C(\Omega)$, where $\sigma=\int_{-1}^1
\sqrt{2 W(s)}\, ds$.
We assume also that
\betaegin{gather}
\sup_{\Omega}|\varphi_0|\lambdaeq 1\mbox{ and }\sup_{\Omega}\varepsilon^i|\nabla^i\varphi_0|\lambdaeq c_{1}\mbox{ for $1\lambdaeq
i\lambdaeq 3$},\lambdaabel{inibound}\\
\sup_{\Omega}\lambdaeft(\varphirac{\varepsilon|\nabla\varphi_0|^2}{2}-\varphirac{W(\varphi_0)}{\varepsilon}\right)\lambdaeq \varepsilon^{-\gamma},\lambdaabel{disbd}\\
\sup_{\Omega\tauimes[0,T]}\lambdaeft\{\varepsilon^{\gamma}|u|,\,
\varepsilon^{1+\gamma}|\nabla u|\right\}\lambdaeq c_{2}, \lambdaabel{uinfbound}\\
\int_0^T\|u(\cdot,t)\|^p_{W^{1,p}(\Omega)}\, dt\lambdaeq c_3.\lambdaabel{ubound}
\varepsilonnd{gather}
Define for $t\in [0,T]$
\betaegin{equation}
D(t)=\max\lambdaeft\{\sup_{x\in\Omega,\, 0<r\lambdaeq \varphirac12}\varphirac{1}{\omega_{d-1}r^{d-1}}
\mu_t(B_r(x)), 1\right\},\hspace{1.cm}D(0)\lambdaeq D_0.
\lambdaabel{dtdef}
\varepsilonnd{equation}
Then there exist $\varepsilonpsilon_1>0$ which depends only on
$d$, $p$, $W$, $c_1$, $c_2$, $c_3$, $D_0$, $\gamma$ and $T$,
and $c_4$ which depends only on $c_3$, $d$, $p$, $D_0$ and
$T$ such that for all $0<\varepsilon\lambdaeq \varepsilonpsilon_1$,
\betaegin{equation}
\sup_{0\lambdaeq t\lambdaeq T}D(t)\lambdaeq c_4.
\lambdaabel{fin1}
\varepsilonnd{equation}
\lambdaabel{mainmono}
\varepsilonnd{thm}
Using this we prove
\betaegin{prop}
For $\{\varphi^{\varepsilon_i}\}_{i=1}^{\infty}$ in Theorem \ref{globalexistence}, define
$\mu_t^{\varepsilon_i}$ as in \varepsilonqref{dmu} replacing $\varphi$ by $\varphi^{\varepsilon_i}$, and define
$D^{\varepsilon_i}(t)$ as in \varepsilonqref{dtdef} replacing $\mu_t$ by $\mu_t^{\varepsilon_i}$. Given
$0<T<\infty$, there exists $c_5$ which depends only on $E_0,\, \nu_0, \,
\gamma,\, D_0,\, T,\, d,\, p$ and $W$
such that
\betaegin{equation}
\sup_{0\lambdaeq t\lambdaeq T}D^{\varepsilon_i}(t)\lambdaeq c_5
\lambdaabel{key}
\varepsilonnd{equation}
for all sufficiently large $i$.
\lambdaabel{du}
\varepsilonnd{prop}
{\betaf Proof}. We only need to check the conditions of Theorem \ref{mainmono}
for $\varphi^{\varepsilon_i}$ and $\mu_t^{\varepsilon_i}$. Note that $u$ in \varepsilonqref{allen1}
is replaced by $u^{\varepsilon_i}*\zeta^{\varepsilon_i}$. We have $d\geq 2$, $\Omega={\mathbb T}^d$,
$p>\varphirac{d+2}{2}$, $\varphirac12>\gamma\geq 0$, $1\geq\varepsilon>0$ and \varepsilonqref{allen1} and
\varepsilonqref{allen2}. The regularity of functions is guaranteed in Theorem
\ref{globalexistence}. With an appropriate choice of $c_1$, \varepsilonqref{inibound}
is satisfied for all sufficiently large $i$ due to the choice of $\varepsilon_i$
in \varepsilonqref{tanh}. The sup bound \varepsilonqref{disbd} is satisfied with even 0 on
the right-hand side instead of $\varepsilon_i^{-\gamma}$. The bound for $u^{\varepsilon_i}*
\zeta^{\varepsilon_i}$ \varepsilonqref{uinfbound} is satisfied due to \varepsilonqref{zeta} and
\varepsilonqref{localenergy1}, and \varepsilonqref{ubound} is satisfied due to
\varepsilonqref{localenergysup}. Thus we have all the conditions, and Theorem \ref{mainmono}
proves the claim.
$
{\Box}$
We next prove
\betaegin{prop}
For $\{u^{\varepsilon_i}*\zeta^{\varepsilon_i}\}_{i=1}^{\infty}$ in Theorem \ref{globalexistence}, there
exist a subsequence (denoted by the same index) and the limit $u\in
L^{\infty}([0,\infty);V^{0,2})\cap L^p_{loc}([0,\infty); V^{1,p})$
such that for any $0<T<\infty$
\betaegin{equation}
u^{\varepsilon_i}*\zeta^{\varepsilon_i}\rightharpoonup u\mbox{ weakly in }L^p([0,T]; W^{1,p}(\Omega)^d),
\hspace{1.cm}u^{\varepsilon_i}*\zeta^{\varepsilon_i}\rightarrow u\mbox{ strongly in }L^2([0,T];L^2(\Omega)^d).
\lambdaabel{weak}
\varepsilonnd{equation}
\varepsilonnd{prop}
\textbf{Proof}.
Let $\psi \in V^{s,2}$ with $\|\psi\|_{V^{s,2}}\leq 1$. With \eqref{appeq1},
\eqref{appeq2} and integration by parts, we have
\begin{equation*}
\begin{split}
\left(\frac{\partial u^{\varepsilon_i}}{\partial t},\psi\right)&=
\left(\frac{\partial u^{\varepsilon_i}}{\partial t}, P_i\psi\right)
= \left(-u^{\varepsilon_i}\cdot \nabla u^{\varepsilon_i}+{\rm div}\,\tau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i}))
-\frac{\varepsilon_i}{\sigma}{\rm div}
(\nabla\varphi^{\varepsilon_i}\otimes \nabla\varphi^{\varepsilon_i})*\zeta^{\varepsilon_i}, P_i\psi\right) \\
&=\left(u^{\varepsilon_i}\otimes u^{\varepsilon_i}-\tau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i}))
+\frac{\varepsilon_i}{\sigma}
(\nabla\varphi^{\varepsilon_i}\otimes \nabla\varphi^{\varepsilon_i})*\zeta^{\varepsilon_i},\nabla P_i\psi\right).
\end{split}
\end{equation*}
Here we remark that
\[\|\nabla P_i\psi\|_{L^{\infty}(\Omega)}\leq c(d) \|P_i\psi\|_{W^{s,2}(\Omega)}\leq c(d)\|\psi\|_{W^{s,2}(\Omega)}=c(d)\|\psi\|_{V^{s,2}}\leq c(d)\]
by $s> \frac{d+2}{2}$ and properties of $P_i$ (see \cite{Lions} or
\cite[p.290]{Malek}).
Thus by \eqref{taucond2} and \eqref{localenergy1}, we obtain
\begin{equation*}
\left(\frac{\partial u^{\varepsilon_i}}{\partial t},\psi\right)\leq c(d,p,\nu_0)\left(1+E_0+\|u^{\varepsilon_i}
\|_{W^{1,p}(\Omega)}^{p-1}\right).
\end{equation*}
Again using \eqref{localenergy1} and integrating in time we obtain
\begin{equation}
\int_0^T\left\|\frac{\partial u^{\varepsilon_i}}{\partial t}\right\|_{(V^{s,2})^*}^{\frac{p}{p-1}}\,dt\leq c(d,p,E_0,\nu_0,T).
\label{utes}
\end{equation}
Now we use the Aubin--Lions compactness theorem \cite[p.57]{Lions} with
$B_0=V^{s,2}$, $B=V^{0,2}\subset L^2(\Omega)^d$, $B_1=(V^{s,2})^*$, $p_0=p$ and $p_1=\frac{p}{p-1}$.
Then there exists a subsequence still denoted by $\{u^{\varepsilon_i}\}_{i=1}^{\infty}$ such that
\begin{equation*}
u^{\varepsilon_i} \rightarrow u \quad {\rm in} \ L^p([0,T];L^2(\Omega)^d). \label{u-converge1}
\end{equation*}
Since we have a uniform $L^{\infty}([0,T];L^2(\Omega)^d)$ bound for $u^{\varepsilon_i}$,
the strong convergence also holds in $L^2([0,T];L^2(\Omega)^d)$.
Note that we also have proper norm bounds to extract weakly convergent
subsequences due to \eqref{localenergy1}. For each $T_n$
which diverges to $\infty$ as
$n\rightarrow\infty$, we choose a subsequence and, by choosing a
diagonal subsequence, we obtain a convergent subsequence satisfying \eqref{weak}
with $u^{\varepsilon_i}$ instead of $u^{\varepsilon_i}*\zeta^{\varepsilon_i}$. It is not difficult to
show at this point that the same convergence results hold for $u^{\varepsilon_i}*
\zeta^{\varepsilon_i}$ as in \eqref{weak}.
$\Box$
\textbf{Proof of main theorem}.
At this point, the rest of the proof concerning the existence of
the limit Radon measure $\mu_t$ and the limit $\varphi=\lim_{i\rightarrow
\infty}\varphi^{\varepsilon_i}$ and their respective properties described in Theorem
\ref{maintheorem} can be proved by an almost line-by-line identical
argument to that in \cite[Sections 4,\, 5]{LST1}.
The only difference is that the energy $E_0$ in \cite{LST1} depends also on
$T$, while in this paper $E_0$ depends only on the initial data due to
\eqref{localenergy1}. This allows us to have time-global
estimates such as $u\in L^{\infty}([0,\infty);V^{0,2})$ and
$\varphi\in L^{\infty}([0,\infty);BV(\Omega))$. The argument in \cite{LST1}
then completes the existence proof of Theorem \ref{maintheorem}
(b), (c) along with (iii)-(vi). We still need to prove (a), (i) and (ii).
Due to \eqref{utes}, \eqref{taucond2} and \eqref{localenergysup} we may extract a
further subsequence so that
\begin{equation}
\frac{\partial u^{\varepsilon_i}}{\partial t}
\rightharpoonup \frac{\partial u}{\partial t}\mbox{ weakly in }
L^{\frac{p}{p-1}}([0,T];(V^{s,2})^*),\hspace{.5cm}
\tau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i}))\rightharpoonup \hat{\tau}
\mbox{ weakly in }L^{\frac{p}{p-1}}([0,T];L^{\frac{p}{p-1}}(\Omega)^{d^2}).
\label{meascon1}
\end{equation}
For $\omega_j\in V^{s,2}$ ($j=1,\cdots$) and $h \in C^{\infty}_c((0,T))$ we have
\begin{equation*}
\int_{\Omega}{\rm div}
((\nabla\varphi^{\varepsilon_i}\otimes\nabla\varphi^{\varepsilon_i})*\zeta^{\varepsilon_i})\cdot h\omega_j\,
dx=\int_{\Omega}\left(\Delta\varphi^{\varepsilon_i}-\frac{W'(\varphi^{\varepsilon_i})}{\varepsilon_i^2}\right)
\nabla\varphi^{\varepsilon_i}\cdot h\omega_j*\zeta^{\varepsilon_i}\, dx
\end{equation*}
by integration by parts and ${\rm div}\,\omega_j=0$.
Thus the argument in \cite[p.212]{Lions} and the
similar convergence argument in \cite{LST1} show
\begin{equation}
\int_0^T\left\{\left(\frac{\partial u}{\partial t},h\omega_j\right)+\int_{\Omega}
(u\cdot\nabla u)\cdot h\omega_j+h\hat{\tau}:e(\omega_j)
\, dx\right\}dt=\int_0^T\int_{\Omega}H\cdot h\omega_j\, d\mu_t dt.
\label{meascon2}
\end{equation}
Again, by a similar argument using the
density ratio bound and Theorem
\ref{MZ}, one shows by the density argument and \eqref{meascon2} that
$\frac{\partial u}{\partial t}\in L^{\frac{p}{p-1}}([0,T];(V^{1,p})^*)$ and
\begin{equation}
\int_0^T \left\{\left(\frac{\partial u}{\partial t}, v\right)+\int_{\Omega}(u\cdot\nabla u)\cdot v+\hat{\tau}:e(v)
\, dx\right\}dt=\int_0^T\int_{\Omega}H\cdot v\, d\mu_t dt
\label{meascon3}
\end{equation}
for all $v\in L^p([0,T];V^{1,p})$.
We next prove
\begin{equation}
\int_0^T\int_{\Omega}\hat{\tau}:e(v)\, dxdt
=\int_0^T\int_{\Omega}\tau(\varphi,e(u)):e(v)\, dxdt
\label{last}
\end{equation}
for all $v\in C^{\infty}_c((0,T);{\mathcal{V}})$.
As in \cite[p.213 (5.43)]{Lions}, we may deduce that
\begin{equation}
\frac12 \|u(t_1)\|^2_{L^2(\Omega)}+\int_0^{t_1}\int_{\Omega}\hat{\tau}
:e(u)\, dxdt\geq \int_0^{t_1}\int_{\Omega}H\cdot u\, d\mu_t dt
+\frac12\|u(0)\|^2_{L^2(\Omega)}
\label{last1}
\end{equation}
for a.e.\ $t_1\in [0,T]$.
We set for any $v\in V^{1,p}$
\begin{equation}
A_i^{t_1}=\int_0^{t_1}\int_{\Omega}
(\tau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i}))-\tau(\varphi^{\varepsilon_i},e(v))):(e(u^{\varepsilon_i})-e(v))\, dxdt+
\frac12 \|u^{\varepsilon_i}(t_1)\|^2_{L^2(\Omega)}.
\label{last2}
\end{equation}
The property \eqref{taucond3} of $e(\cdot)$ shows that the first term of \eqref{last2}
is non-negative. We may further assume that $u^{\varepsilon_i}(t_1)$
converges weakly to $u(t_1)$ in $L^2(\Omega)^d$, thus we have
\begin{equation}
\liminf_{i\rightarrow\infty}A_i^{t_1}\geq\frac12 \|u(t_1)\|^2_{L^2(\Omega)}.
\label{last3}
\end{equation}
By \eqref{appeq1} we have
\begin{equation*}
\begin{split}
A_i^{t_1}=&\frac12\|u^{\varepsilon_i}(0)\|_{L^2(\Omega)}^2-\frac{\varepsilon_i}{\sigma}
\int_0^{t_1}\int_{\Omega}{\rm div}((\nabla\varphi^{\varepsilon_i}\otimes\nabla\varphi^{\varepsilon_i})
*\zeta^{\varepsilon_i})\cdot u^{\varepsilon_i}\\
&-\int_0^{t_1}\int_{\Omega}\tau(\varphi^{\varepsilon_i},e(u^{\varepsilon_i})):e(v)+
\tau(\varphi^{\varepsilon_i},e(v)):(e(u^{\varepsilon_i})-e(v))\, dxdt
\end{split}
\end{equation*}
which converges to
\begin{equation}
A^{t_1}=\frac12\|u(0)\|_{L^2(\Omega)}^2+\int_0^{t_1}\int_{\Omega}
H\cdot u\, d\mu_t dt-\int_0^{t_1}\int_{\Omega}\hat{\tau}:e(v)
+\tau(\varphi,e(v)):(e(u)-e(v))\, dxdt.
\label{last4}
\end{equation}
Here we used that $\varphi^{\varepsilon_i}$ converges to $\varphi$ a.e.\ on $\Omega\times
[0,T]$. By \eqref{last1}, \eqref{last3} and \eqref{last4}, we deduce that
\begin{equation*}
\int_0^{t_1}\int_{\Omega}(\hat{\tau}-\tau(\varphi,e(v))):
(e(u)-e(v))\, dxdt\geq 0.
\end{equation*}
Choosing $v=u+\epsilon\tilde{v}$, dividing by $\epsilon$
and letting $\epsilon\rightarrow 0$, we prove \eqref{last}.
Finally, \eqref{eneineq} follows from \eqref{last}, the
strong $L^1(\Omega\times[0,T])$ convergence of $\varphi^{\varepsilon_i}$,
the lower semicontinuity of the mean curvature square term
(see \cite{LST1}) and the energy equality appearing in Theorem \ref{localenergy}.
This concludes the proof of Theorem \ref{maintheorem}.
$\Box$
\begin{thebibliography}{99}
\bibitem{Abels1} H.~Abels,
{\it The initial value problem for the Navier-Stokes equations with a free surface in $L^q$-Sobolev spaces},
Adv.\ Diff.\ Eqns.\ \textbf{10} (2005), 45--64.
\bibitem{Abels2} H.~Abels,
{\it On generalized solutions of two-phase flows for viscous incompressible fluids},
Interface.\ Free Bound.\ \textbf{9} (2007), 31--65.
\bibitem{Abels-Roeger} H.~Abels, M.~R\"{o}ger, {\it Existence of weak solutions for a non-classical sharp interface model for a two-phase flow of viscous, incompressible fluids}, preprint.
\bibitem{Allard} W.~Allard,
\textit{On the first variation of a varifold},
Ann.\ of Math.\ \textbf{95} (1972), 417--491.
\bibitem{Beale1} J.~T.~Beale,
{\it The initial value problem for the Navier-Stokes equations with a free surface},
Comm.\ Pure.\ Appl.\ Math.\ \textbf{34} (1981), 359--392.
\bibitem{Beale2} J.~T.~Beale,
{\it Large-time regularity of viscous surface waves},
Arch.\ Ration.\ Mech.\ Anal.\ \textbf{84} (1984), 307--352.
\bibitem{Brakke} K.~Brakke,
\textit{The motion of a surface by its mean curvature},
Princeton University Press, Princeton, NJ, (1978).
\bibitem{Chen-Struwe} Y.~Chen, M.~Struwe, {\it Existence and partial regularity for the solutions to evolution
problems for harmonic maps}, Math. Z. \textbf{201} (1989), 83--103.
\bibitem{Evans} L.~C.~Evans,
\textit{Partial differential equations},
Graduate Studies in Math. AMS, (1998).
\bibitem{EvansGariepy} L.~C.~Evans, R.~F.~Gariepy,
\textit{Measure theory and fine properties of functions},
Studies in Advanced Math.\, CRC Press, (1992).
\bibitem{GigaTakahashi} Y.~Giga, S.~Takahashi,
{\it On global weak solutions of the nonstationary two phase Stokes flow},
SIAM J.\ Math.\ Anal.\ \textbf{25} (1994), 876--893.
\bibitem{Huisken} G.~Huisken, {\it Asymptotic behavior for
singularities of the mean curvature flow}, J. Diff. Geom.
\textbf{31} (1990), 285--299.
\bibitem{Hutchinson} J.~E.~Hutchinson, Y.~Tonegawa, {\it Convergence of phase interfaces in the van der Waals-Cahn-Hilliard theory}, Calc.\ Var.\ PDE\ \textbf{10} (2000), 49--84.
\bibitem{Ilmanen1} T.~Ilmanen,
\textit{Convergence of the Allen-Cahn equation to Brakke's motion by mean curvature},
J.\ Diff.\ Geom.\ \textbf{38} (1993), 417--461.
\bibitem{Ilmanen2} T.~Ilmanen,
\textit{Elliptic regularization and partial regularity for motion by mean curvature},
Mem.\ Amer.\ Math.\ Soc.\ \textbf{108} (1994), no.~520.
\bibitem{Kim} N.~Kim, L.~Consiglieri, J.~F.~Rodrigues,
{\it On non-Newtonian incompressible fluids with phase transitions},
Math.\ Meth.\ Appl.\ Sci.\ \textbf{29} (2006), 1523--1541.
\bibitem{Ladyzhenskaya} O.~A.~Ladyzhenskaya, N.~A.~Solonnikov, N.~N.~Uraltseva,
{\it Linear and Quasilinear Equations Of Parabolic Type},
Transl.\ Math.\ Monographs, Vol.~23, Amer.\ Math.\ Soc.\ (1968).
\bibitem{LinLiu} F.~H.~Lin, C.~Liu,
{\it Nonparabolic dissipative systems modeling the flow of liquid crystals},
Comm.\ Pure.\ Appl.\ Math.\ \textbf{48} (1995), 501--537.
\bibitem{Lions} J.~L.~Lions, {\it Quelques M\'{e}thodes de R\'{e}solution des
Probl\`{e}mes aux Limites Non Lin\'{e}aires}, Dunod, Paris.
\bibitem{LST1} C.~Liu, N.~Sato, Y.~Tonegawa, {\it On the existence of mean curvature flow with transport term}, Interface.\ Free Bound.\ \textbf{12} (2010), 251--277.
\bibitem{Liu} C.~Liu, N.~J.~Walkington,
{\it An Eulerian description of fluids containing visco-hyperelastic particles},
Arch.\ Ration.\ Mech.\ Anal.\ \textbf{159} (2001), 229--252.
\bibitem{Maekawa} Y.~Maekawa,
{\it On a free boundary problem for viscous incompressible flows},
Interface.\ Free Bound.\ \textbf{9} (2007), 549--589.
\bibitem{Malek} J.~M\'alek, J.~Ne\v{c}as, M.~Rokyta, M.~R\r{u}\v{z}i\v{c}ka,
{\it Weak and measure-valued solutions to evolutionary PDEs},
Appl.\ Math.\ Math.\ Comput.\ 13, Chapman \& Hall, London (1996).
\bibitem{Meyers} N.~G.~Meyers, W.~P.~Ziemer,
{\it Integral inequalities of Poincar\'e and Wirtinger type for BV functions},
Amer.\ J.\ Math.\ \textbf{99} (1977), 1345--1360.
\bibitem{Mugnai} L.~Mugnai, M.~R\"{o}ger, {\it Convergence of perturbed
Allen-Cahn equations to forced mean curvature flow}, preprint.
\bibitem{Nouri} A.~Nouri, F.~Poupaud,
{\it An existence theorem for the multifluid Navier-Stokes Problem},
J.\ Diff.\ Eqns.\ \textbf{122} (1995), 71--88.
\bibitem{Plotnikov} P.~I.~Plotnikov,
\textit{Generalized solutions to a free boundary problem of motion of a non-Newtonian fluid},
Siberian Math.\ J.\ \textbf{34} (1993), 704--716.
\bibitem{Roeger} M.~R\"oger, R.~Sch\"atzle,
\textit{On a modified conjecture of De Giorgi},
Math.\ Z.\ \textbf{254} (2006), 675--714.
\bibitem{Sato} N.~Sato,
\textit{A simple proof of convergence of the Allen-Cahn equation
to Brakke's motion by mean curvature},
Indiana Univ.\ Math.\ J.\ \textbf{57} (2008), 1743--1751.
\bibitem{Simon} L.~Simon,
\textit{Lectures on geometric measure theory},
Proc.\ Centre Math.\ Anal.\ Austral.\ Nat.\ Univ.\ \textbf{3} (1983).
\bibitem{Solonnikov1} V.~A.~Solonnikov,
\textit{Estimates of the solution of a certain initial-boundary value problem for a linear nonstationary system
of Navier-Stokes equations},
Zap.\ Nauchn.\ Sem.\ Leningrad.\ Otdel.\ Mat.\ Inst.\ Steklov.\ (LOMI) \textbf{59} (1976), 178--254, 257 (in Russian).
\bibitem{Solonnikov2} V.~A.~Solonnikov,
\textit{On the transient motion of an isolated volume of viscous incompressible fluid},
Math.\ USSR-Izv.\ \textbf{31} (1988), 381--405.
\bibitem{Soner} H.~M.~Soner,
\textit{Convergence of the phase-field equations to the Mullins-Sekerka problem with kinetic undercooling},
Arch.\ Ration.\ Mech.\ Anal. \textbf{131} (1995), 139--197.
\bibitem{Tonegawa} Y.~Tonegawa, {\it Integrality of varifolds in the singular limit of reaction-diffusion equations}, Hiroshima Math. J. \textbf{33}, (2003), 323--341.
\bibitem{Ziemer} W.~P.~Ziemer, \textit{Weakly differentiable functions}, Springer-Verlag (1989).
\end{thebibliography}
\end{document}
\begin{document}
\title{Reconstructing surface triangulations by their intersection matrices}
\author{Jorge Arocha\thanks{Instituto de Matem\'{a}ticas, Universidad Nacional Aut\'{o}noma de M\'{e}xico}, Javier Bracho\thanks{Instituto de Matem\'{a}ticas, Universidad Nacional Aut\'{o}noma de M\'{e}xico}, Natalia Garc\'{i}a-Col\'{i}n\thanks{Instituto de Matem\'{a}ticas, Universidad Nacional Aut\'{o}noma de M\'{e}xico}, Isabel Hubard\thanks{Instituto de Matem\'{a}ticas, Universidad Nacional Aut\'{o}noma de M\'{e}xico}}
\maketitle
\begin{abstract}
The \emph{intersection matrix} of a finite simplicial complex has as each of its entries the rank of the intersection of its respective simplices. We prove that such a matrix determines the triangulation of a closed connected surface up to isomorphism.\\
\textbf{Keywords:} triangulations, dual graph, closed surface.\\
\textbf{AMS classification:} 57Q15, 52A99.\\
\end{abstract}
\section{Introduction \& Motivation}
Within the theory of convex polytopes, the study of the combinatorial equivalence of $k$-skeleta of pairs of polytopes which are not themselves equivalent has been of interest; this phenomenon is referred to in the literature as ambiguity \cite{grunbaum1967convex}.
It is well known that for $k \geq \lfloor \frac{d}{2} \rfloor$ the $k$-skeleton of a convex polytope is not dimensionally ambiguous, this is, it defines the entire structure of its underlying $d$-polytope. However for $k < \lfloor \frac{d}{2} \rfloor$ the question is much more intricate.
One of the most interesting results in this direction is the solution to Perles' conjecture by P.~Blind and R.~Mani \cite{Blind1987} and, separately, by G.~Kalai \cite{kalai1988simple}, which states that the 1-skeleta of convex simple $d$-polytopes define their entire combinatorial structure. Or, in its dual version, that the dual graph (facet adjacency graph) of a convex simplicial $d$-polytope determines its entire combinatorial structure (also see \cite{ziegler1995lectures}).
In its simplest version this theorem states that a triangulation of a convex polyhedron is determined up to isomorphism by its dual graph. We were motivated by this assertion to explore possible generalizations of that theorem.
\section{Contribution}
A \emph{triangulated surface} is a simplicial complex whose underlying topological space is a connected $2$-manifold (without boundary). It is known that the dual graph of a triangulated space does not define it. In fact, there exist different triangulated surfaces having the same dual graph (see Goldberg snarks on \cite{mohar2004polyhedral}, pages 16-18). However, as building appropriate examples of the latter is not simple, this strongly suggests that more information is needed in order to reconstruct triangulations of surfaces (up to isomorphism). Here we exhibit exactly what additional information is needed, and argue in the conclusions that our hypotheses are optimal for these surfaces.
For a triangulated surface $S$ we will denote $V, E$ and $T$ as its sets of vertices, edges and triangles, respectively. We will say that two triangulations $S$ and $S'$ have the same intersection matrix if there is a bijective map, $f: T \rightarrow T'$, from the triangles of $S$ into the triangles of $S'$ such that for any two triangles $t_1, t_2 \in T$, the equality $| t_1\cap t_2|=| f(t_1)\cap f(t_2)|$ holds. Using this terminology we can now state our main result as follows:
\begin{theorem} \label{thm:main} Two triangulated surfaces which have the same intersection matrix are isomorphic.
\end{theorem}
This result will follow as a corollary of a more precise statement for which we need to introduce a few further definitions.
\section{Preliminaries}
Given two triangulations of a surface $S$ and $S'$, for convenience, we will say that a map $f: T \rightarrow T'$, between the triangles of $S$ and $S'$ which preserves their intersection matrix is an \emph{intersection preserving mapping}, additionally we will say that $f$ \emph{extends to an isomorphism} if there is a simplicial map $g:V \rightarrow V'$ which induces $f$. Not every intersection preserving map can be extended to an isomorphism. The triangulations of the projective plane shown in Figure \ref{fig:PT} are examples of such an occurrence.
\begin{figure}
\caption{The only two triangulated surfaces which have intersection preserving maps not extendable to isomorphisms.}
\label{fig:PT}
\end{figure}
The triangulation on the left hand side in the figure is the \emph{half-icosahedron} which we will denote $\mathcal{I}/2$, while the triangulation on the right hand side in the figure is a triangulation of the \emph{half-cube}, where each square facet is subdivided into four triangles by adding one additional vertex at its centre. This triangulation will be denoted as $\mathcal{TC}/2$. It can be easily checked that the intersection preserving map given by the labels of the triangles of each one of the triangulations into itself ($i \leftrightarrow i'$) is not extendable to an isomorphism.
In essence Theorem \ref{thm:main} holds because the half-icosahedron and triangulated half-cube are the only two examples of triangulations of closed surfaces which have intersection preserving mappings that cannot be extended to isomorphisms. More precisely:
\begin{theorem} \label{thm:main2} Let $S$ and $S'$ be two triangulated connected closed surfaces with no boundary and let $f: T \rightarrow T'$ be an intersection preserving map between their respective sets of triangles which does not extend to an isomorphism; then either $S \simeq S' \simeq \mathcal{I}/2$ or $S \simeq S' \simeq \mathcal{TC}/2$.
\end{theorem}
One of the characteristics of triangulations of a closed surface is that the neighbourhood of every vertex is a disk. Furthermore, the triangles incident to any vertex of such surface form the simplest of triangulations of a disk, namely an $n$-gon whose vertices are all linked by an edge to a central vertex in the centre of the $n$-gon. We start off by analysing the intersection patterns of such a structure, and find the two other objects which can have an intersection matrix equal to that of a triangulated disk.
\begin{definition} An $n$-shell is the abstract triangulation $\Delta$ such that its set of triangles $T_\Delta=\{t_0, t_1,\ldots, t_{n-1}\}$ satisfies $|t_i \cap t_{i+1 \mod n}|=2$ and $|t_i \cap t_{j}|=1$ for $|i-j|\geq2$ with $i,j \in \{0,\ldots, n-1\}$.
\end{definition}
\begin{lemma} \label{lem:disk} The vertex sets of the triangles in an $n$-shell, with $n\geq 3$, $T_\Delta=\{t_0, t_1,\ldots, t_{n-1}\}$ can be equivalent to the vertex sets of a triangulation of one of the following three objects;
\begin{enumerate}
\item a triangulated disk (i.e. an $n$-shell),\\
$t_{i}=\{a_{i \mod n}, a_{{i+1} \mod n}, x\}$ for all $0\leq i \leq n$, for any $n$;
\item a triangulation of a M\"{o}bius band with five triangles, \\
$t_{0}= \{a_{0}, a_{2}, a_{1}\}$,
$t_{1}= \{a_{1}, a_{3}, a_{2}\}$,
$t_{2}= \{a_{2}, a_{4}, a_{3}\}$,
$t_{3}= \{a_{3}, a_{0}, a_{4}\}$, and
$t_{4}= \{a_{4}, a_{1}, a_{0}\}$; or
\item a triangulation of a M\"{o}bius band with six triangles, \\
$t_{0}=\{a_{0}, a_{1}, a_{2}\}$, $t_{1}= \{a_{1}, a_{2}, a_{4}\}$,
$t_{2}= \{a_{2}, a_{3}, a_{4}\}$,
$t_{3}= \{a_{3}, a_{0}, a_{4}\}$,
$t_{4}= \{a_{0}, a_{5}, a_{4}\}$, and
$t_{5}= \{a_{5}, a_{2}, a_{0}\}$.
\end{enumerate}
\end{lemma}
The proof of this lemma consists of several parts and follows largely by a detailed analysis of the combinatorial structure of $n$-shells of triangles. We include it in detail in Appendix \ref{ap:lem}.
\section{Proof Theorem \ref{thm:main2}}
We now proceed to proving Theorem 2, using exhaustively the local and global implications of Lemma \ref{lem:disk}.
\begin{proof}
For each vertex $v \in V$, the set of vertices of the triangulation $S$, let $\Delta$ be the $n_{v}$-shell around $v$; by hypothesis the union of the triangles in $\Delta$ is necessarily a disk.
Note that if for all $v \in V$, the triangles $f(T_\Delta)$ in $S'$ also form a disk, then the mapping $h: V \rightarrow V'$, from the vertices of $S$ into the vertices of $S'$ such that $h(v)= \bigcap_{t \in T_\Delta} f(t)$ is a simplicial mapping which induces $f$. Hence we can assume that there is a vertex $v \in V$ such that the union of the triangles in $f(\Delta)$ is not a disk.
\emph{Case 1.} Suppose the union of the triangles $f(T_\Delta)$ is the $5$-triangulation of the M\"{o}bius band described in Lemma \ref{lem:disk}.
Let $T_\Delta= \{ t_{0}, t_{1}, t_{2}, t_{3}, t_{4}\}$, where $t_{i}=\{a_{i \mod 5}, a_{i+1 \mod 5}, x\}$ and $f(T_\Delta)=\{ t_{0}', t_{1}', t_{2}', t_{3}', t_{4}'\}$ where
$t_{0}'= \{a_{0}', a_{2}', a_{1}'\}$,
$t_{1}'= \{a_{1}', a_{3}', a_{2}'\}$,
$t_{2}'= \{a_{2}', a_{4}', a_{3}'\}$,
$t_{3}'= \{a_{3}', a_{0}', a_{4}'\}$, and
$t_{4}'= \{a_{4}', a_{1}', a_{0}'\}$.
Given that $S'$ is also a closed connected surface, then each of the simplices $ t_{0}', t_{1}', t_{2}', t_{3}', t_{4}'$ has got a triangle adjacent to its remaining free edge. Let $r'_{i}$ be the simplices such that $|r'_{i}\cap t'_{i}|=2$, then
$r_{0}'= \{a_{0}', a_{2}', x_{0}'\}$,
$r_{1}'= \{a_{1}', a_{3}', x_{1}'\}$,
$r_{2}'= \{a_{2}', a_{4}', x_{2}'\}$,
$r_{3}'= \{a_{3}', a_{0}', x_{3}'\}$, and
$r_{4}'= \{a_{4}', a_{1}', x_{4}'\}$. \\ This is
$r_{i}'= \{a_{i\mod 5}', a_{i+2 \mod 5}', x_{i}'\}$. It follows that, $|r_{i}' \cap t_{j}'|\geq 1$ for all $i\neq j$.
Note that the interior of each of the edges $\{a_{i\mod 5}', a_{i+1 \mod 5}'\}$ is in the interior of the M\"{o}bius band, thus this edges cannot be repeated in any further simplex in the complex. This implies that $x_{i} \not \in \{a_{0}', a_{1}', a_{2}', a_{3}', a_{4}'\}$, because, if this was the case, at least one of the edges $\{a_{i\mod 5}', a_{i+1 \mod 5}'\}$ would be a subset of $r'_{i}$. Then, $|r_{i}' \cap t_{j}'|= 1$.
Let $r_{i}=f^{-1}(r'_{i})$, then $|r_{i} \cap t_{i}|=2$ and $|r_{i} \cap t_{j}|= 1$ for all $i\neq j$. As $t_{i}=\{a_{i \mod 5}, a_{i+1 \mod 5}, x\}$ then $r_{i}=\{a_{i \mod 5}, a_{i+1 \mod 5}, x_{i \mod 5}\}$. Here $|r_{i\mod 5} \cap t_{i+1\mod 5}|\geq 1$ and $|r_{i\mod 5} \cap t_{i-1\mod 5}|\geq 1$ trivially, hence \\ $a_{i-1 \mod 5}, a_{i+2 \mod 5} \not \in r_{i}$. However, for $|r_{i\mod 5} \cap t_{i+2\mod 5}|=1$ and $|r_{i\mod 5} \cap t_{i-2\mod 5}|=1$ to be accomplished, necessarily $x_{i}=a_{i+3\mod 5}=a_{i-2\mod 5}$. That is, $r_{i}=\{a_{i \mod 5}, a_{i+1 \mod 5}, a_{i-2 \mod 5}\}$, hence the simplicial complex associated to $\bigcup_{i=0}^{4}r_{i}$ is a $5$-triangulation of a M\"{o}bius band, where $|r_{i} \cap r_{i+2 \mod 5}|=|r_{i} \cap r_{{i-2} \mod 5}|=2$, thus $T = \bigcup_{i=0}^{4}r_{i} \cup \bigcup_{i=0}^{4}t_{i}$ and the triangulations associated to $\bigcup_{i=0}^{4}r_{i}$ and $\bigcup_{i=0}^{4}t_{i}$ are a M\"{o}bius band and a disk, respectively, then it is straightforward to check that $S$ is the triangulation $\mathcal{I}/2.$
The latter also implies that $|r'_{i} \cap r'_{i+2 \mod 5}|=|r'_{i} \cap r'_{i-2 \mod 5}|=2$; then $x'_{i}=x'_{j}$ for all $i,j\in\{0,\ldots, 4\}$, so that $T' =\bigcup_{i=0}^{4}r'_{i} \cup \bigcup_{i=0}^{4}t'_{i}$, hence $S'$ is also the triangulation $\mathcal{I}/2$.
\emph{Case 2.} Suppose the union of the triangles $f(T_\Delta)$ is the $6$-triangulation of the M\"{o}bius band described in Lemma \ref{lem:disk}.
Let $T_\Delta= \{ t_{0}, t_{1}, t_{2}, t_{3}, t_{4}, t_{5}\}$, where $t_{i}=\{a_{i \mod 6}, a_{i+1 \mod 6}, x\}$ and $f(T_\Delta)=\{ t_{0}', t_{1}', t_{2}', t_{3}', t_{4}', t_{5}'\}$ where
$t'_{0}=\{a'_{0}, a'_{1}, a'_{2}\}$,
$t'_{1}= \{a'_{1}, a'_{2}, a'_{4}\}$,
$t'_{2}= \{a'_{2}, a'_{3}, a'_{4}\}$,
$t'_{3}= \{a'_{3}, a'_{0}, a'_{4}\}$,
$t'_{4}= \{a'_{0}, a'_{5}, a'_{4}\}$, and
$t'_{5}= \{a'_{5}, a'_{2}, a'_{0}\}$.
As $S'$ is a connected closed surface, then each of the simplices $\{ t_{0}', t_{1}', t_{2}', t_{3}', t_{4}', t_{5}'\}$ has got a triangle adjacent to its remaining free edge. Let $r'_{i}$ be the simplices such that $|r'_{i}\cap t'_{i}|=2$, then
$r_{0}'= \{a_{0}', a_{1}', x_{0}'\}$,
$r_{1}'= \{a_{1}', a_{4}', x_{1}'\}$,
$r_{2}'= \{a_{2}', a_{3}', x_{2}'\}$,
$r_{3}'= \{a_{3}', a_{0}', x_{3}'\}$,
$r_{4}'= \{a_{4}', a_{5}', x_{4}'\}$, and
$r_{5}'= \{a_{2}', a_{5}', x_{5}'\}$.
Here it follows that, $|r_{i}' \cap t_{j}'|\geq 1$ for all $i\neq j$, except for the pairs $i=0 \text{ and } j=2$, $i=1\text{ and }j=5$, $i=2\text{ and }j=4$, $i=3\text{ and }j=1$, $i=4\text{ and }j=0$ and $i=5\text{ and }j=3$; for these exceptions the intersection might be empty.
The latter implies that, if $r_{i}=f^{-1}(r'_{i})$, then $|r_{i} \cap t_{i}|=2$ and $|r_{i} \cap t_{j}|\geq 1$ for all $i\neq j$, except for the pairs $i=0 \text{ and } j=2$, $i=1\text{ and }j=5$, $i=2\text{ and }j=4$, $i=3\text{ and }j=1$, $i=4\text{ and }j=0$, and $i=5\text{ and }j=3$; for these exceptions the intersection might be empty.
As $t_{i}=\{a_{i \mod 6}, a_{i+1 \mod 6}, x\}$ then the vertex sets of the $r_{i}$'s are $r_{i}=\{a_{i \mod 6}, a_{i+1 \mod 6}, x_{i \mod 6}\}$. Note that $x_{0}' \not \in \{a'_{0}, a'_{1}, a'_{2}, a'_{4}, a'_{5}\}$ as the edges $\{a'_{0}, a'_{2}\}$, $\{a'_{0}, a'_{4}\}$, $\{a'_{0}, a'_{5}\}$, $\{a'_{1}, a'_{4}\}$ are edges whose interior is in the interior of the M\"{o}bius band. Thus we might have $x'_{0}=a'_{3}$; however, if that were the case
$|r_{0}' \cap r_{3}'|= 2$, and $|r_{0} \cap r_{3}|= 2$, but this is not possible. Then necessarily $x_{0}' \not \in \{a'_{0}, a'_{1}, a'_{2}, a'_{3}, a'_{4}, a'_{5}\}$.
Using an argument analogous to the one in the previous case, we deduce that for each $i$, $x_{i}' \not \in \{a'_{0}, a'_{1}, a'_{2}, a'_{3}, a'_{4}, a'_{5}\}$; so that $|r_{i}' \cap t_{j}'|= 1$ for all $i\neq j$, except for the pairs $i=0\text{ and }j=2$, $i=1\text{ and }j=5$, $i=2\text{ and }j=4$, $i=3\text{ and }j=1$, $i=4\text{ and }j=0$, and $i=5\text{ and }j=3$, for which the intersection is empty.
The above implies $|r_{i} \cap t_{i}|=2$ and $|r_{i} \cap t_{j}|= 1$ for all $i\neq j$, except for the pairs $i=0\text{ and } j=2$, $i=1\text{ and } j=5$, $i=2\text{ and } j=4$, $i=3\text{ and } j=1$, $i=4\text{ and } j=0$, and $i=5\text{ and } j=3$, for which the intersection is empty. Hence, in order to accomplish the intersection dimensions indicated by the map necessarily, $x_{0}=x_{1}= a_{4}$, $x_{2}=x_{3}= a_{0}$, $x_{4}=x_{5}= a_{2}$, thus;
$r_{0}=\{a_{0}, a_{1}, a_{4}\}$,
$r_{1}=\{a_{1}, a_{2}, a_{4}\}$,
$r_{2}=\{a_{2}, a_{3}, a_{0}\}$,
$r_{3}=\{a_{3}, a_{4}, a_{0}\}$,
$r_{4}=\{a_{4}, a_{5}, a_{2}\}$, and
$r_{5}=\{a_{0}, a_{5}, a_{2}\}$.
Therefore, the triangulation associated to $\bigcup_{i=0}^{5} r_{i}$ is a $6$-triangulation of a M\"{o}bius band and $T= \bigcup_{i=0}^{5} t_{i} \cup \bigcup_{i=0}^{5} r_{i}$. It is now straightforward to check that $S$ is equal to the triangulation $\mathcal{TC}/2$.
The implication for $S'$ is that $|r'_{0} \cap r'_{1}|=2,\; |r'_{1} \cap r'_{4}|=2, \; |r'_{4} \cap r'_{5}|=2, \; |r'_{5} \cap r'_{2}|= 2, \; |r'_{2} \cap r'_{3}|=2, \; |r'_{3} \cap r'_{0}|=2$, which in turn implies $x'_{i}=x'_{j}$ for all $i,j \in \{0,\ldots, 5\}$ and, further, $T'= \bigcup_{i=0}^{5} t'_{i} \cup \bigcup_{i=0}^{5} r'_{i}$, so that $S'$ is also equal to the triangulation $\mathcal{TC}/2$.
\end{proof}
\section {Proof Lemma \ref{lem:disk}} \label{ap:lem}
The proof will proceed by induction on the number of triangles on the $n$-shell. Before we proceed with the proof we need to define a special set of vertices present in every $n$-shell.
\begin{definition} The structural vertex list of an $n$-shell, $\Delta$ such that $T_\Delta=\{t_0, t_1,\ldots, t_{n-1}\}$, is the vertex set $\{a_{0}, a_{1}, \ldots, a_{n-1}, b_{0}, b_{1}, \ldots, b_{n-1}\} \subset \;_{n}\Pi^{2}_{0}$, where $a_{i}=t_{i}\setminus t_{i+1 \mod n }$ and $b_{i}=t_{i}\setminus t_{i-1 \mod n}$ for $0\leq i\leq n-1$.
If the $n$-shell is open (i.e. $|t_1 \cap t_{n-1}|=1$) then the $a_{i}$ are only defined for $0\leq i\leq n-2$ and the $b_{i}$ for $1\leq i\leq n-1$.
\end{definition}
Note that there might be some vertex repetition in the structural vertex list of any given n-shell; however, by definition, we can be certain that
\begin{equation} \label{eqn:begg}
a_{i}\neq b_{i} \text{ for all } 0 \leq i \leq n-1 \text{ fixed,}
\end{equation}
\begin{equation}
a_{i}\neq a_{i+1 \mod n} \text{\; and\; } b_{i}\neq b_{i+1 \mod n} \text{ for all } 0 \leq i \leq n-1.
\end{equation}
Moreover, by definition, neither $a_{i}$ nor $b_{i+1} \in \; t_{i}\cap t_{i+1}$ for any $0\leq i \leq n-1$ thus
\begin{equation} \label{eqn:end}
a_{i}\neq b_{i+1} \text{ for all } 1\leq i < n-1.
\end{equation}
For any $i$, observe that $t_{i-1 \mod n}=\{a_{i-1 \mod n}, a_{i}, x_{i}\}$, $t_{i}=\{a_{i}, b_{i}, x_{i}\}$ and $t_{i+1 \mod n}=\{b_{i}, b_{i+1 \mod n}, x_{i}\}$, where $x_{i}= t_{i-1}\cap t_{i} \cap t_{i+1}$.
In order to prove Lemma 1 we will study the repetition patterns of the structural vertex list and show that they characterize the different combinatorial types of $n$-shells, through the use of the following claims:
\begin{claim}There is no vertex repetition in the structural vertex list of any open 3-shell, $\{a_{i-1},a_{i},b_{i}, b_{i+1}\}$.
\end{claim}
By equations (1), (2) and (3) it only remains to be proven that $a_{i-1 \mod n} \neq b_{i+1 \mod n}$. If that were the case then $(t_{i-1 \mod n})_{0}=\{a_{i-1 \mod n}, a_{i}, x_{i}\}= (t_{i+1 \mod n})_{0}=\{b_{i}, b_{i+1 \mod n}, x_{i}\}$, forcing $n=3$.
\begin{claim}The vertices $a_{i}\neq a_{j}$ and $b_{i}\neq b_{j}$, for all $i, j \in \{0,\ldots,n-1\}$.
\end{claim}
First observe that the statement follows directly for $n$-shells if we prove it for open $n$-shells. The proof will follow by induction on the distance $|i-j|$ between the indices we are comparing.
Equation (2) proves the statement for $|i-j|=1$, while Claim 1 proves the statement for $|i-j|=2$. We assume the statement holds whenever $|i-j|\leq k$, for some $k\geq 2$, and let $|i-j|=k+1$.
First suppose $a_i=a_j$. Considering that $t_{i-1}=\{a_{i-1}, a_{i}, x_{i}\}$, $t_{i}=\{a_{i}, b_{i}, x_{i}\}$; and $t_{j-1}=\{a_{j-1}, a_{j}, x_{j}\}$, $t_{j}=\{a_{j}, b_{j}, x_{j}\}$, the intersection conditions $|t_{i-1}\cap t_{j-1}|= 1$, $|t_{i-1}\cap t_{j}|=1$, $|t_{i}\cap t_{j-1}|=1$ and $|t_{i}\cap t_{j}|=1$ are already satisfied by the single repetition $a_i=a_j$; hence the vertices $\{a_{i-1}, x_{i}, b_{i}, a_{j-1}, x_{j}, b_{j}, a_i=a_j\}$ are necessarily pairwise distinct.
Given that $t_{j+1}\cap t_{i-1} =\{a_{i-1}, a_{i}, x_{i}\}\cap \{b_{j}, b_{j+1}, x_{j}\}$ and $t_{j+1}\cap t_{i}=\{b_{j}, b_{j+1}, x_{j}\}\cap \{a_{i}, b_{i}, x_{i}\}$, and that each of these two intersections has cardinality one, necessarily $b_{j+1}=a_i$ or $b_{j+1}=x_i$. Also, as the intersection $t_{i+1}\cap t_{j} =\{a_{j}, b_{j}, x_{j}\}\cap \{b_{i}, b_{i+1}, x_{i}\}$ has cardinality one too, either $b_{i+1}=a_j$ or $b_{i+1}=x_j$. However, the repetitions in this paragraph and the previous one cannot both happen simultaneously while preserving all the cardinalities of the intersections.
The proof for $b_{i}\neq b_{j}$ is analogous.
\begin{claim} The structural vertex list of every four consecutive triangles, $t_i, t_{i+1},$ $t_{i+2}, t_{i+3}$, in an $n$-shell with $n \geq 5$ has exactly one repetition which is either $b_{i+1 }=a_{i+2}$ or $a_{i }=b_{i+3}$.
\end{claim}
By Claim 1 the lists $a_{i}, a_{i+1}, b_{i+1}, b_{i+2}$ and $a_{i+1}, a_{i+2}, b_{i+2}, b_{i+3}$ considered separately have no repetition among them and by Claim 2, all a's are different to the b's; thus, the only possible repetitions are $a_{i}=b_{i+3}$, or $b_{i+1}=a_{i+2}$, and they can't both occur simultaneously whilst maintaining the dimension of the intersections of the triangles.
\begin{claim} \label{claim:four} The vertex sets of four consecutive triangles $t_i, t_{i+1}, t_{i+2}, t_{i+3}$ in an $n$-shell are completely determined by the repetition of their structural vertex list, according to the following rules;
\begin{enumerate}[(a)]
\item if $b_{i+1 }=a_{i+2}$ then $t_{i}=\{a_{i}, a_{i+1}, x\}$,
$t_{i+1}=\{a_{i+1}, b_{i+1}=a_{i+2}, x\}$,
$t_{i+2}=\{a_{i+2}=b_{i+1}, b_{i+2}, x\}$ and
$t_{i+3}=\{b_{i+2}, b_{i+3}, x\}$;
\item if $a_{i}=b_{i+3}$ then $t_i=\{a_i=b_{i+3}, a_{i+1}, a_{i+2}\}$,
$t_{i+1}=\{a_{i+1}, b_{i+1}, a_{i+2}\}$
$t_{i+2}=\{b_{i+1}, b_{i+2}, a_{i+2}\}$ and
$t_{i+3}=\{b_{i+2}, b_{i+3}=a_i, b_{i+1}\}$.
\end{enumerate}
\end{claim}
In the first case (a) as, $t_i=\{a_i, b_{i}, x_{i}\}$ and $b_{i+1 }=a_{i+2}$, then
$t_{i+1}=\{a_{i+1}, b_{i+1}=a_{i+2}, x_{i+1}\}=\{a_{i+2}=b_{i+1}, a_{i+1}, x_{i+2}\}$ implies $x_{i+1}=x_{i+2}$ and the statement follows.
In the second case (b), if $a_{i}=b_{i+3}$ then we then have $t_i=\{a_i=b_{i+3}, a_{i+1}, x_{i+1}\}$
$t_{i+1}=\{a_{i+1}, b_{i+1}, x_{i+1}\}=\{a_i=b_{i+3}, a_{i+1}, x_{i+2}\}$
$t_{i+2}=\{b_{i+1}, b_{i+2}, x_{i+1}\}=\{a_{i+2}, b_{i+2}, x_{i+2}\}$ and
$t_{i+3}=\{b_{i+2}, b_{i+3}=a_i, x_{i+2}\}$. Then as $|t_i \cap t_{i+3}|=1$ all of $a_{i+1}, x_{i+1}, b_{i+2}, x_{i+2}$ are different. Then the two different expressions of $t_{i+1}$ and $t_{i+2}$ imply $b_{i+1}=x_{i+2}$ and $a_{i+2}=x_{i+1}$, respectively and the statement follows.
Figure \ref{fig:openshell} depicts the two possible repetitions in the structural vertex list of four triangles, as per Claim \ref{claim:four}; and the resulting sets of vertices of the triangles.
\begin{figure}
\caption{The two possible triangulations associated to an open $4$-shell.}
\label{fig:openshell}
\end{figure}
\begin{claim}The two occurring repetitions in the structural vertex list of the first five consecutive triangles of an open $n$-shell ($n\geq 5$) determine the repetition pattern of the structural vertex list of the whole $n$-shell, furthermore
the repetition pattern is one of the following:
\begin{enumerate}[(a)]
\item $b_{1}=a_{2}$ and $b_{2}=a_{3}$, and $b_{i}=a_{i+1}$ for all $i \in \{1, \ldots, n-3\}$;
\item $b_{1}=a_{2}$ and $a_{1}=b_{4}$, then $n \leq 6$ and when $n=6$, $b_{3}=a_{4}$;
\item $a_{0}=b_{3}$ and $b_{2}=a_{3}$, then $n \leq 6$ and when $n=6$, $a_{2}=b_{5}$;
\item $a_{0}=b_{3}$ and $a_{1}=b_{4}$, then $n \leq 4$.
\end{enumerate}
\end{claim}
First observe that no pair of repetitions, as allowed by Claim 3, among the groups $b_{1}=a_{2}$, $a_{2}=b_{5}$ and $b_{3}=a_{4}$, $a_{0}=b_{3}$, can ever happen simultaneously. For instance, if $b_{1}=a_{2}=b_{5}$ then two b's would be equal, in contradiction to Claim 2.
Hence two consecutive repetitions in the list can only possibly be those in the statement of the claim.
(a) For $n=5$, the statement holds trivially. The proof will follow by induction on the number of triangles $n$. Assume the statement holds for $n\leq k$, and let $5 < n= k+1$. By the induction hypothesis $b_{i}=a_{i+1}$ for all $i \in \{1, \ldots, k-3\}$ and by Claim 4, we can deduce $b_{k-2}=a_{k-1}$, thus the statement follows.
(b) For $n=5$, the statement holds trivially. Consider the first five simplices of the n-shell. Then by Claim 4
$t_{0}=\{a_{0}, a_{1}=b_4, x\}$,\\
$t_1=\{a_1=b_{4}, a_{2}=b_1, x\}=\{a_1=b_{4}, a_{2}=b_1, a_{3}\}$,\\
$t_{2}=\{a_{2}=b_1, b_{2}, x\}=\{a_{2}=b_1, b_{2}, a_{3}\}$,\\
$t_{3}=\{b_{2}, b_{3}, x\}=\{b_{2}, b_{3}, a_{3}\}$, and \\
$t_{4}=\{b_{3}, b_{4}=a_1, b_{2}\}$, so that $x=a_3$; then all intersection sizes are fulfilled, hence no other repetition can happen.
Consider now $t_2, t_3, t_4, t_5$ then $t_5=\{a_1=b_4, b_5, x_4\}$. Here $x_4$ is either equal to $a_4=b_3$ or $b_2$. In the first case, $b_5$ cannot be made equal to any other vertex in order to complete the dimension of the intersection of $t_5$ with $t_2$ whilst fulfilling all the other dimensions of intersection.
Hence $x_4=a_4=b_3$, all the dimensions of the intersections are fulfilled, and no other repetition can occur.
Finally, consider $t_6$ and its vertex set $t_6=\{b_5, b_6, x_5\}$, then $x_5$ can be either $b_4=a_1$ or $b_2$. However in both cases it is impossible to complete the dimensions of the pairwise intersections with the other triangles. Thus, we have proven that in this case $n\leq 6$, and the statement follows.
(c) Follows in an analogous manner to (b).
(d) If $n=5$, the statement follows trivially, and by Claim 4,
$t_0=\{a_0=b_3, a_1, a_2\}$, $t_1=\{ a_1, a_2, a_3\}$, $t_2=\{a_2, b_2, a_3\}$, $t_3=\{a_0=b_3, b_2, a_3\}$, $t_4=\{a_0=b_3, b_2, b_4\}$. However the dimension of the intersection of $t_4$ with $t_0$ and $t_1$ has not been fulfilled. The only possible manner for the respective two intersections to be fulfilled without altering the rest of the intersections is for $a_1=b_4$. But in this case $\dim(t_4 \cap t_0)=1$. Thus, we have proven that in this case $n\leq 4$, and the statement follows.
Using the previous claims the statement of the lemma follows easily. If $n\leq 4$, it can be easily verified using Claims 1 and 4.
For $n \geq 5$, clearly, case 1 in the statement of the lemma corresponds to case (a) in Claim 5, for any $n$.
It is also easy to check that there are no (closed) $5$-shells that correspond to the repetitions of the structural vertex list in cases (b) and (c) in Claim 5. However there is a unique (closed) $6$-shell corresponding to the repetition types of cases (b) and (c), this has the vertex set of case 3 in the statement of the lemma.
Finally, within the proof of case (d) in Claim 5 it was shown that there is only one (closed) $5$-shell with this type of repetition on its vertex list. This $5$-shell has vertex sets as per case 2 in the statement of the lemma. \qed
Figure \ref{fig:closedshell} depicts the three possible repetitions of the structural vertex list of a closed $n$-shell and the corresponding resulting sets of vertices for a triangulation with such repetitions; these correspond to the three triangulations listed in Lemma \ref{lem:disk}.
\begin{figure}
\caption{The three possible triangulations associated to an $n$-shell.}
\label{fig:closedshell}
\end{figure}
\section{Conclusion}
The proof of Theorem \ref{thm:main} follows directly from Theorem \ref{thm:main2}.
As mentioned in the introduction, it is known that the dual graph of a triangulated space does not define it and in fact, there exist different triangulated closed connected surfaces with no boundary having the same dual graph \cite{mohar2004polyhedral}. The reader might wonder if the intersection matrix of a triangulation characterizes a connected surface with a boundary. The answer to this question is negative and some examples of this are provided by the objects in Lemma \ref{lem:disk}.
However some of the surfaces in the lemma are non-orientable, thus one may further ask if the intersection matrix of a connected orientable surface defines it. Here, we conjecture that the answer to this question is positive, as all examples of connected orientable surfaces having the same intersection matrix which we have found are isomorphic, but, just as for the triangulations of the projective plane of Theorem \ref{thm:main2}, they do not have intersection preserving maps which extend to isomorphisms between them.
Further, one might wonder about generalizations of Theorem \ref{thm:main} to simplicial complexes of any rank. We have strong evidence to suggest that the statement of Theorem \ref{thm:main} can indeed be extended to any rank, provided that we allow for some complexes which have no intersection preserving maps that extend to isomorphisms between them, but are isomorphic nonetheless.
\end{document} |
\begin{document}
\title{A Generalized Axis Theorem for Cube Complexes}
\author{Daniel J. Woodhouse}
\email{daniel.woodhouse@mail.mcgill.ca}
\begin{abstract}
We consider a finitely generated virtually abelian group $G$ acting properly and without inversions on a CAT(0) cube complex $X$.
We prove that $G$ stabilizes a finite dimensional CAT(0) subcomplex $Y \subseteq X$ that is isometrically embedded in the combinatorial metric.
Moreover, we show that $Y$ is a product of finitely many quasilines.
The result represents a higher dimensional generalization of Haglund's axis theorem.
\end{abstract}
\maketitle
\section{Introduction}
A \emph{CAT(0) cube complex} $X$ is a cell complex that satisfies two properties: it is a geodesic metric space satisfying the CAT(0) comparison triangle condition, and each $n$-cell is isometric to $[0,1]^n$.
We will call this metric the \emph{CAT(0) metric} $\textup{\textsf{d}}_X$ and refer to~\cite{BridsonHaefliger} for a comprehensive account.
A \emph{hyperplane} $\Lambda \subseteq X$ is the subset of points equidistant between two adjacent vertices.
Despite the brevity of this definition, hyperplanes are better understood via their combinatorial definition, and the reader is urged to consult the literature; see~\cite{Sageev97}~\cite{HaglundSemiSimple}~\cite{WiseCBMS2012} for the required background.
There also exists an alternative metric on the $0$-cubes of $X$, that we will refer to as the \emph{combinatorial metric} $\textup{\textsf{d}}^c_X$, sometimes referred to as the \emph{$\ell^1$-metric}.
The combinatorial distance between two $0$-cubes is the length of the shortest combinatorial path in $X$ joining the $0$-cubes.
Equivalently, the combinatorial distance between two $0$-cubes is the number of hyperplanes in $X$ separating them.
We will always assume that a group $G$ acting on a CAT(0) cube complex preserves its cell structure and maps cubes isometrically to cubes.
A group $G$ acts without \emph{inversions} if the stabilizer of a hyperplane also stabilizes each complementary component.
The requirement that the action be without inversions is not a serious restriction as $G$ acts without inversions on the cubical subdivision.
A connected CAT(0) cube complex $X$ is a \emph{quasiline} if it is quasiisometric to $\ensuremath{\field{R}}$.
The \emph{rank} of a virtually abelian group commensurable to $\mathbb{Z}^n$ is $n$.
The goal of this paper will be the following theorem:
\begin{thmA}
Let $G$ be virtually $\mathbb{Z}^n$.
Suppose $G$ acts properly and without inversions on a CAT(0) cube complex $X$.
Then $G$ stabilizes a finite dimensional subcomplex $Y \subseteq X$ that is isometrically embedded in the combinatorial metric, and $Y \cong \prod_{i=1}^m C_i$, where each $C_i$ is a cubical quasiline and $m \geq n$.
Moreover, $\stab_G(\Lambda)$ is a codimension-1 subgroup for each hyperplane $\Lambda$ in $Y$.
\end{thmA}
\noindent
Note that $Y$ will not in general be a convex subcomplex.
\begin{cor}
Let $A$ be a finitely generated virtually abelian group acting properly on a CAT(0) cube complex $X$.
Then $A$ acts metrically properly on $X$.
\end{cor}
\begin{cor}
Let $G$ be a finitely generated group acting properly on a CAT(0) cube complex $X$.
Then virtually $\mathbb{Z}^n$ subgroups are undistorted in $G$.
\end{cor}
Let $g$ be an isometry of $X$, and let $x \in X$.
The \emph{displacement of $g$ at $x$}, denoted $\tau_x(g)$, is the distance $\textup{\textsf{d}}_X(x, gx)$.
The \emph{translation length} of $g$, denoted $\tau(g)$, is $\inf\{ \tau_x(g) \mid x \in X \}$.
Similarly, if $x$ is a $0$-cube of $X$, we can define the \emph{combinatorial displacement of $g$ at $x$}, denoted $\tau^c_x(g)$, as $\textup{\textsf{d}}^c_X(x, gx)$ and the \emph{combinatorial translation length}, denoted $\tau^c(g)$, is $\inf\{ \tau^c_x(g) \mid x \in X \}$.
Note that $\tau$, and $\tau^c$ are conjugacy invariant.
An isometry $g$ of a CAT(0) space is \emph{semisimple} if $\tau_x(g) = \tau(g)$ for some $x \in X$, and $G$ acts \emph{semisimply} on a CAT(0) space $X$ if each $g \in G$ is semisimple.
If a virtually $\mathbb{Z}^n$ group $G$ acts metrically properly by semisimple isometries on a CAT(0) space $X$, then the Flat Torus Theorem~\cite{BridsonHaefliger} provides a $G$-invariant, convex, flat $\mathbb{E}^n \subseteq X$.
A group acting on a CAT(0) cube complex does not, in general, have to do so semisimply.
See~\cite{AlgomKfirWajnrybWitowicz13} for examples of non-semisimple isometries in Thompson's group $F$ acting on an infinite dimensional CAT(0) cube complex.
Alternatively, in~\cite{Gersten94} a free-by-cyclic group $G$ is shown not to permit a semisimple action on a CAT(0) space.
Yet in~\cite{WiseGerstenRevisited} it is shown that $G$ does act freely on a CAT(0) cube complex.
Thus Theorem~\ref{thm:main} can be applied to such actions, whereas the classical Flat Torus Theorem cannot.
A virtually abelian subgroup is \emph{highest} if it is not virtually contained in a higher rank abelian subgroup.
If $G$ is a highest virtually abelian subgroup of a group acting properly and cocompactly on a CAT(0) cube complex $X$, then $G$ cocompactly stabilizes a convex subcomplex $Y$ which is a product of quasilines, as above~\cite{WiseWoodhouse15}.
However, this theorem fails without the highest hypothesis.
Moreover, most actions do not arise in the above fashion.
Despite the fact that the Flat Torus Theorem will not hold under the hypotheses of Theorem~\ref{thm:main}, we can deduce the following:
\begin{corA}
Let $G$ be virtually $\ensuremath{\field{Z}}^n$.
Suppose $G$ acts properly and without inversions on a CAT(0) cube complex $X$.
Then $G$ cocompactly stabilizes a subspace $F \subseteq X$ homeomorphic to $\mathbb{R}^n$ such that for each hyperplane $\Lambda \subseteq X$, the intersection $\Lambda \cap F$ is either empty or homeomorphic to $\mathbb{R}^{n-1}$.
\end{corA}
The initial motivation for Theorem~\ref{thm:main} and Corollary~\ref{cor:geometricFlat} was to resolve the following question posed by Wise.
Although we have not found a combinatorial flat, Corollary~\ref{cor:geometricFlat} is perhaps better suited to applications (see~\cite{Woodhouse15b}).
\begin{prob} \label{Problem:Wise}
Let $\mathbb{Z}^2$ act freely on a CAT(0) cube complex $Y$.
Does there exists a $\ensuremath{\field{Z}}^2$-equivariant map $F \rightarrow Y$ where $F$ is a square $2$-complex homeomorphic to $\ensuremath{\field{R}}^2$, and such that no two hyperplanes of $F$ map to the same hyperplane in $Y$?
\end{prob}
A \emph{combinatorial geodesic axis for $g$} is a $g$-invariant, isometrically embedded, subcomplex $\gamma \subseteq X$ with $\gamma \cong \mathbb{R}$.
Note that $\gamma$ realizes the minimal combinatorial translation length of $g$.
Theorem~\ref{thm:main} is a high dimensional generalization of Haglund's combinatorial geodesic axis theorem.
Haglund's proof involved an argument by contradiction, exploiting the geometry of hyperplanes.
We reprove the result in Section~\ref{sec:HaglundRevisited} by using the dual cube complex construction of Sageev.
The results are further support for Haglund's slogan ``in CAT(0) cube complexes the combinatorial geometry is as nice as the CAT(0) geometry''.
The following is an application of Theorem~\ref{thm:main}, and the argument is inspired by the solvable subgroup theorem~\cite{BridsonHaefliger}.
Note that since we do not require that the action of $G$ on a CAT(0) cube complex be semisimple the following is not covered by the solvable subgroup theorem.
\begin{cor}
Let $H$ be virtually $\mathbb{Z}^n$, and let $\phi : H \rightarrow H$ be an injection with $\phi \neq \phi^i$ for all $i >1$.
Then $G = \langle H ,t \mid t^{-1} h t = \phi(h): \; h\in H \rangle$ cannot act properly on a CAT(0) cube complex.
\end{cor}
\begin{proof}
Suppose that $G$ acts properly on a CAT(0) cube complex $X$.
After subdividing $X$ we can assume that $G$ acts without inversions.
As $H$ is finitely generated, there exists an $a$ in the finite generating set such that $\phi^i(a) \neq a$ for all $i \in \mathbb{N}$, otherwise $\phi^i = \phi$ for some $i$, contradicting our hypothesis.
Thus, $| \{\phi^i(a)\}| = \infty$.
By Theorem~\ref{thm:main} there is an $H$-equivariant isometrically embedded subcomplex $Y \subseteq X$ such that $Y \cong \prod_{i=1}^m C_i$ where each $C_i$ is a cubical quasiline.
As $Y$ is isometrically embedded in $X$ in the combinatorial metric, the combinatorial translation length $\tau^c(\phi^i(a))$ is the same in $Y$ as it is in $X$.
The set $\{\tau^c(\phi^i(a))\}_{i \in \mathbb{N}}$ must be unbounded since the action of $H$ on $Y$ is proper and $Y$ is locally finite.
However, since $\tau^c$ is conjugacy invariant in $G$, we conclude that $\tau^c(\phi^i(a)) = \tau^c(\phi^j(a))$ for all $i,j \in \mathbb{N}$.
Thus, we arrive at the contradiction that $\{\tau^c(\phi^i(a)) \}_{i\in\mathbb{N}}$ is both bounded and unbounded.
\end{proof}
However, we have the following example of a solvable group which does act freely on a CAT(0) cube complex.
\begin{exmp} \label{ex:solvableCubulated}
Let $H = \langle a_1, a_2, \ldots \mid [a_i , a_j] : i\neq j\rangle$.
Note that $H$ is the fundamental group of the nonpositively curved cube complex $Y$ obtained from a $0$-cube $v$, and $1$-cubes $e_1, e_2, e_3 \ldots$ with $n$-cubes inserted for every cardinality $n$ collection of $1$-cubes to create an $n$-torus.
One should think of $Y$ as an infinite cubical torus.
The oriented loop $e_i$ represents the element $a_i$.
Let $\phi : H \rightarrow H$ be the monomorphism such that $\phi(a_i) = a_{i+1}$.
Let $G = H \ast_\phi = \langle t, a_1, a_2, \ldots \mid [a_i, a_j] : i \neq j\;, t^{-1} a_i t = a_{i+1} \rangle$ be the associated ascending HNN extension.
Note that $G$ is generated by $a_1$ and $t$.
There is a graph of spaces $X$ obtained by letting $Y$ be the vertex space and $Y \times [0, 1]$ be the edge space and identifying $(v,1)$ and $(v,0)$ with $v$, and the $1$-cube $e_i \times \{1\}$ with $e_i$ and $e_i \times \{ 0 \}$ with $e_{i+1}$.
Note that $X$ is nonpositively curved, and therefore $G = \pi_1X$ acts freely on the CAT(0) cube complex $\widetilde X$, the universal cover of $X$.
\end{exmp}
{\bf Acknowledgements: } I would like to thank Daniel T. Wise, Mark F Hagen, Jack Button, Piotr Przytycki, and Dan Guralnik.
\section{Dual Cube Complexes}
Let $S$ be a set.
A \emph{wall} $\Lambda = \{ \ola{\Lambda}, \ora{\Lambda} \}$ in $S$ is a partition of $S$ into two disjoint, nonempty subsets.
The subsets $\ola{\Lambda}, \ora{\Lambda}$ are the \emph{halfspaces} of $\Lambda$.
A wall $\Lambda$ \emph{separates} $x,y\in S$ if they belong to distinct halfspaces of $\Lambda$.
Let $K \subseteq S$.
A wall $\Lambda$ \emph{intersects} $K$ if $K$ nontrivially intersects both $\ola{\Lambda}$ and $\ora{\Lambda}$.
Let $\mathcal{W}$ be a set of walls in $S$, then $(S,\mathcal{W})$ is a wallspace if for all $x,y \in S$, the number of walls separating $x$ and $y$ is finite.
If $\Lambda$ intersects $K$, then the \emph{restriction of $\Lambda$ to $K$}, is the wall in $K$ determined by $\restr{\Lambda}{K} = \{ \ola{\Lambda}\cap K, \ora{\Lambda} \cap K\}$.
In this paper duplicate walls are not permitted in $\mathcal{W}$.
Let $\mathcal{H}$ be the set of halfspaces corresponding to $\mathcal{W}$.
\begin{exmp} \label{exmp:CATCubeComplexes}
Let $X$ be a CAT(0) cube complex, and let $\Lambda \subseteq X$ be a hyperplane in $X$.
The complement $X - \Lambda$ has two components, therefore defining a wall in $X$ such that $\ola{\Lambda}$ is an open halfspace not containing $\Lambda$, and $\ora{\Lambda}$ is a closed halfspace containing $\Lambda$. Note that $\ola{\Lambda} \sqcup \ora{\Lambda} = X$.
Let $L(\Lambda)$ and $R(\Lambda)$ denote the maximal subcomplexes contained in $\ola{\Lambda}$ and $\ora{\Lambda}$ respectively.
Note that $L(\Lambda)$ and $R(\Lambda)$ are convex subcomplexes.
Let $\mathcal{W}$ be the set of walls determined by the hyperplanes in $X$.
Then $(X, \mathcal{W})$ is the wallspace associated to $X$.
Note that we are using $\Lambda$ to denote both the hyperplane and the wall corresponding to the hyperplane.
\end{exmp}
A function $c:\mathcal{W} \rightarrow \mathcal{H}$ is a \emph{$0$-cube} if $c[\Lambda] \in \{\ola{\Lambda}, \ora{\Lambda}\}$ and the following two conditions are satisfied:
\begin{enumerate}
\item \label{ax:intersection} For all $\Lambda_1, \Lambda_2 \in \mathcal{W}$ the intersection $c[\Lambda_1] \cap c[\Lambda_2]$ is nonempty.
\item \label{ax:finiteDisparity} For all $x \in S$, the set $\{ \Lambda \in \mathcal{W} \mid x \notin c[\Lambda] \}$ is finite.
\end{enumerate}
The \emph{dual cube complex} $C(S, \mathcal{W})$ is the connected CAT(0) cube complex obtained by letting the union of all $0$-cubes be the $0$-skeleton.
Two $0$-cubes $c_1 \neq c_2$ are endpoints of a $1$-cube if $c_1[\Lambda] = c_2[\Lambda]$ for all but precisely one $\Lambda \in \mathcal{W}$.
An $n$-cube is then inserted wherever there is the $1$-skeleton of an $n$-cube.
The hyperplanes in $C(S, \mathcal{W})$ are identified naturally with the walls in $\mathcal{W}$.
A proof of the fact that $C(S, \mathcal{W})$ is in fact a CAT(0) cube complex can be found in \cite{Sageev95}.
A point $x \in S$ determines a $0$-cube $c_x$ defined such that $x \in c_x[\Lambda]$ for all $\Lambda \in \mathcal{W}$.
Condition~\eqref{ax:intersection} holds immediately since $x \in c_x[\Lambda]$ for all $\Lambda \in \mathcal{W}$.
Condition~\eqref{ax:finiteDisparity} holds for $c_x$, since if $y \in S$ and a wall $\Lambda$ does not separate $x$ and $y$, then $y \in c_x[\Lambda]$; hence all but finitely many $\Lambda$ satisfy $y \in c_x[\Lambda]$.
Such $0$-cubes are called the \emph{canonical $0$-cubes}.
\begin{lem} \label{lem:hemi0}
Let $X$ be a CAT(0) cube complex.
Let $\mathcal{W}$ be a set of walls obtained from the hyperplanes in $X$.
Let $Z$ be a connected subcomplex of $X$, and let $\mathcal{W}_{Z} \subseteq \mathcal{W}$ be the subset of walls intersecting $Z$.
Let $\mathcal{V}$ be walls in $\mathcal{W}_{Z}$ restricted to $Z$.
Then $(Z, \mathcal{V})$ is a wallspace and $C(Z, \mathcal{V})$ embeds in $C(X, \mathcal{W})$ isometrically in the combinatorial metric.
\end{lem}
\begin{proof}
We first claim that the map $\mathcal{W}_Z \rightarrow \mathcal{V}$ is an injection.
Suppose that $\Lambda_1, \Lambda_2 \in \mathcal{W}_Z$ are distinct walls.
As $\Lambda_1, \Lambda_2$ intersects $Z$, and since $Z$ is connected, there are $1$-cubes $e_1, e_2$ in $Z$ that are dual to the hyperplanes corresponding to $\Lambda_1, \Lambda_2$.
Therefore, both $0$-cubes in $e_1$ belong in a single halfspace of $\restr{\Lambda_2}{Z}$, so $\restr{\Lambda_1}{Z} \neq \restr{\Lambda_2}{Z}$.
We construct a map $\phi: C(Z, \mathcal{V}) \rightarrow C(X, \mathcal{W})$ on the $0$-skeleton first.
Let $c$ be a $0$-cube in $C(Z, \mathcal{V})$.
We let $\phi(c) \in C(X, \mathcal{W})$ be the uniquely defined $0$-cube such that $\phi(c)[\Lambda] \supseteq c[\restr{\Lambda}{Z}]$ for $\restr{\Lambda}{Z} \in \mathcal{V}$, and $\phi(c)[\Lambda] \supseteq Z$ for $\Lambda \in \mathcal{W} - \mathcal{W}_{Z}$.
To verify that $\phi(c)$ is a $0$-cube, first observe that $\phi(c)[\Lambda_1] \cap \phi(c)[\Lambda_2]$ is nonempty since $\restr{\Lambda_1}{Z} \cap \restr{\Lambda_2}{Z} \subseteq X$.
Secondly, if $x \in X$ we need to show that $x \in \phi(c)[\Lambda]$ for all but finitely many $\Lambda \in \mathcal{W}$.
Choose $z \in Z$, then $z \in c[\restr{\Lambda}{Z}]$ for all $\restr{\Lambda}{Z} \in \mathcal{V} - \{\restr{\Lambda_1}{Z}, \ldots, \restr{\Lambda_k}{Z} \}$, hence $z \in \phi(c)[\Lambda]$ for all $\Lambda \in \mathcal{W}_{Z} - \{\Lambda_1, \ldots, \Lambda_k\}$.
Let $\{\Lambda_{k+1}, \ldots, \Lambda_{k + \ell} \}$ be the set of walls in $\mathcal{W}$ separating $x$ and $z$.
Then $x \in \phi(c)[\Lambda]$ for all $\Lambda \in \mathcal{W} - \{ \Lambda_1, \ldots \Lambda_{k+\ell}\}$.
The $0$-cubes are embedded since if $c_1 \neq c_2$, there exists $\restr{\Lambda}{Z} \in \mathcal{V}$ such that $c_1[\restr{\Lambda}{Z}] \neq c_2[\restr{\Lambda}{Z}]$, hence $\phi(c_1)[\Lambda] \neq \phi(c_2)[\Lambda]$.
If $c_1, c_2$ are adjacent $0$-cubes in $C(Z, \mathcal{V})$, then $c_1[\restr{\Lambda}{Z}] = c_2[\restr{\Lambda}{Z}]$ for all $\restr{\Lambda}{Z} \in \mathcal{V}$, with the exception of precisely one wall $\restr{\hat{\Lambda}}{Z}$.
Therefore, we can deduce that $\phi(c_1)[\Lambda] = \phi(c_2)[\Lambda]$ for all walls in $\mathcal{W}$, with the precise exception of $\hat{\Lambda}$.
Therefore, the $1$-skeleton of $C(Z, \mathcal{V})$ embeds in $C(X,\mathcal{W})$, which is sufficient for $\phi$ to extend to an embedding of the entire cube complex.
Consider $C(Z, \mathcal{V})$ as a subcomplex of $C(X, \mathcal{W})$.
The set of hyperplanes in $C(Z, \mathcal{V})$ embeds into the set of hyperplanes in $C(X, \mathcal{W})$.
To see that $C(Z, \mathcal{V})$ is an isometrically embedded subcomplex, let $z_1, z_2$ be $0$-cubes in $Z$ and $\gamma$ be a geodesic combinatorial path in $C(Z, \mathcal{V})$ joining them.
Each hyperplane dual to $\gamma$ in $C(Z,\mathcal{V})$ intersects $\gamma$ precisely once, and since the hyperplanes in $C(Z, \mathcal{V})$ inject to hyperplanes in $C(X, \mathcal{W})$, it is geodesic there as well.
\end{proof}
Given a wall $\Lambda$ associated to a hyperplane in $X$ we let $N(\Lambda)$ denote the \emph{carrier} of $\Lambda$, by which we mean the union of all cubes intersected by $\Lambda$.
The following Lemma describes what is called the \emph{restriction quotient} in~\cite{CapraceSageev2011}.
\begin{lem} \label{lem:hemi1}
Let $S$ be a set and let $\mathcal{W}$ be a set of walls of $S$.
Let $G$ be a group acting on $(S, \mathcal{W})$.
Let $\mathcal{V} \subseteq \mathcal{W}$ be a $G$-invariant subset.
Then there is a $G$-equivariant function $\phi:C(S, \mathcal{W})^0 \rightarrow C(S, \mathcal{V})^0$.
Moreover, $\phi^{-1}(z)$ is nonempty for all $0$-cubes $z$ in $C(S, \mathcal{V})$.
\end{lem}
\begin{proof}
Let $c$ be a $0$-cube in $C(S, \mathcal{W})$.
Let $\phi(c)[\Lambda] = c[\Lambda]$ for $\Lambda \in \mathcal{V}$.
It is immediate that $\phi$ is $G$-equivariant.
To verify $\phi(c)[\Lambda]$ is a $0$-cube in $C(S, \mathcal{V})$ first note that $\phi(c_1)[\Lambda_1] \cap \phi(c_2)[\Lambda_2] \neq \emptyset$ for all $\Lambda_1, \Lambda_2 \in \mathcal{V}$, since $c_1[\Lambda_1] \cap c_2[\Lambda_2] \neq \emptyset$ for all $\Lambda_1, \Lambda_2 \in \mathcal{W}$.
Secondly, for all $x \in S$ observe that $x \in \phi(c)[\Lambda]$ for all but finitely many $\Lambda \in \mathcal{V}$.
Indeed, this is true for all but finitely many $\Lambda \in \mathcal{W}$.
To see that $\phi^{-1}(z)$ is non-empty for all $0$-cubes $z$ in $C(S, \mathcal{V})$ we determine a $0$-cube $x$ in $C(S, \mathcal{W})$ such that $\phi(x) = z$.
Fix $s \in S$.
Let $x[\Lambda] = z[\Lambda]$ for $\Lambda \in \mathcal{V}$.
Suppose that $\Lambda \in \mathcal{W} - \mathcal{V}$.
If $\ora{\Lambda} \supseteq z[\Lambda']$ for some $\Lambda' \in \mathcal{V}$ let $x[\Lambda] = \ora{\Lambda}$.
Similarly if $\ola{\Lambda} \supseteq z[\Lambda']$.
Otherwise, if $\Lambda$ intersects $z[\Lambda']$ for all $\Lambda' \in \mathcal{V}$ then let $s \in x[\Lambda]$.
To verify that $x$ is a $0$-cube, consider the following cases to show $x[\Lambda_1] \cap x[\Lambda_2] \neq \emptyset$ for $\Lambda_1, \Lambda_2 \in \mathcal{W}$.
If $\Lambda_1, \Lambda_2 \in \mathcal{V}$ then $x[\Lambda_1] \cap x[\Lambda_2] = z[\Lambda_1] \cap z[\Lambda_2] \neq \emptyset$.
Suppose that $\Lambda_1 \in \mathcal{W} - \mathcal{V}$ and $x[\Lambda_1] \subseteq z[\Lambda_1']$ for some $\Lambda_1' \in \mathcal{V}$.
If $\Lambda_2 \in \mathcal{V}$, then $x[\Lambda_1] \cap x[\Lambda_2] \supseteq z[\Lambda_1'] \cap z[\Lambda_2] \neq \emptyset$.
If $\Lambda_2 \in \mathcal{W} - \mathcal{V}$ and $x[\Lambda_2] \subseteq z[\Lambda_2']$ for some $\Lambda_2' \in \mathcal{V}$ then $x[\Lambda_1] \cap x[\Lambda_2] \subseteq z[\Lambda_1'] \cap z[\Lambda_2'] \neq \emptyset$.
If $\Lambda_2$ intersects $z[\Lambda]$ for all $\Lambda \in \mathcal{V}$, then $x[\Lambda_1] \cap x[\Lambda_2] \supseteq z[\Lambda_1'] \cap x[\Lambda_2] \neq \emptyset$.
Finally if both $s \in x[\Lambda_1]$ and $ x[\Lambda_2]$, then their intersection will contain at least $s$.
Finally, we verify that for $s' \in S$ there are only finitely many $\Lambda \in \mathcal{W}$ such that $s' \notin x[\Lambda]$.
Suppose, by way of contradiction, that there is an infinite subset of walls $\{ \Lambda_1, \Lambda_2, \ldots \} \subseteq \mathcal{W}$ such that $s' \notin x[\Lambda_i]$ for all $i \in \ensuremath{\field{N}}$.
We can assume, by excluding at most finitely many walls, that each $\Lambda_i \in \mathcal{W} - \mathcal{V}$.
Similarly, by excluding finitely many walls, we can assume that $\Lambda_i$ does not separate $s$ and $s'$.
Therefore, $s \notin x[\Lambda_i]$ for $i \in \ensuremath{\field{N}}$.
Therefore, by construction of $x$, there exist $\Lambda_i' \in \mathcal{V}$ such that $z[\Lambda_i'] \subseteq x[\Lambda_i]$, which implies that $s' \notin z[\Lambda_i']$.
There are infinitely many distinct $\Lambda_i'$, as otherwise there is a $\Lambda' \in \mathcal{V}$ such that $z[\Lambda']\subseteq x[\Lambda_i]$ for infinitely many $i$, which would imply that infinitely many $\Lambda_i$ separate $s'$ from an element in the complement of $z[\Lambda']$.
Therefore, infinitely many distinct walls $\Lambda_i' \in \mathcal{V}$ have $s' \notin z[\Lambda_i']$, contradicting that $z$ is a $0$-cube in $C(S, \mathcal{V})$.
\end{proof}
\section{Minimal $\mathbb{Z}^n$-invariant convex subcomplexes}
The following is Theorem 2 from~\cite{Gerasimov97}. As this paper is written in Russian, we give a proof in Appendix~\ref{AppendixA} based on the work in~\cite{NibloRoller98} as well as stating the definition of codimension-1.
\begin{thm}[Gerasimov~\cite{Gerasimov97}]\label{thm:fixedCube}
Let $G$ be a finitely generated group that acts on a CAT(0) cube complex $X$ without a fixed point or inversions.
Then there is a hyperplane in $X$ that is stabilized by a codimension-1 subgroup of $G$.
\end{thm}
\noindent The goal of this section is to prove the following:
\begin{lem} \label{lem:minSubcomplex}
Let $G$ be a finitely generated group acting without fixed point or inversions on a CAT(0) cube complex $X$.
There exists a minimal, $G$-invariant, convex subcomplex $X_o \subseteq X$ such that $X_o$ contains only finitely many hyperplane orbits, and every $X_o$ hyperplane stabilizer is a codimension-1 subgroup of $G$.
\end{lem}
\begin{proof}
Since $G$ is finitely generated, by taking the convex hull of a $G$-orbit we obtain a $G$-invariant convex subcomplex $X_o \subseteq X$ containing finitely many $G$-orbits of hyperplanes.
Assume that $X_o$ is a minimal such subcomplex in terms of the number of hyperplane orbits.
Let $(X_o, \mathcal{W})$ be the wallspace obtained from the hyperplanes in $X_o$.
Suppose that $\stab_G(\Lambda)$ is not a codimension-1 subgroup of $G$ for some $\Lambda \in \mathcal{W}$.
Let $G \Lambda \subseteq \mathcal{W}$ be the $G$-orbit of $\Lambda$.
By Lemma~\ref{lem:hemi1} there is a $G$-invariant map $\phi: X_o^{0} \rightarrow C(X_o, G\Lambda)^{0}$.
Since $\stab_G(\Lambda)$ is not commensurable to a codimension-1 subgroup, Theorem~\ref{thm:fixedCube} implies that there is a fixed $0$-cube $x$ in $C(X_o, G\Lambda)$.
Lemma~\ref{lem:hemi1} then implies that $\phi^{-1}(x)$ is non-empty.
Assuming that $\phi^{-1}(x) \subseteq \ola{\Lambda}$, then the intersection $\bigcap_{g\in G}g L(\Lambda)$ contains a proper, convex, $G$-invariant subcomplex of $X_o$, with one less hyperplane orbit.
This contradicts the minimality of $X_o$.
\end{proof}
The following Corollary follows since all codimension-1 subgroups of a rank $n$ virtually abelian group are of rank $(n-1)$.
\begin{cor} \label{cor:abelianMinSubcomplex}
Let $G$ be a rank $n$, virtually abelian group acting without fixed point or inversions on a CAT(0) cube complex $X$.
Then there exists a minimal, $G$-invariant, convex subcomplex $X_o \subseteq X$ such that $X_o$ contains only finitely many hyperplane orbits, and every hyperplane stabilizer is a rank $(n-1)$ subgroup of $G$.
\end{cor}
\section{Proof of Main Theorem}
\begin{defn}
Regard $\ensuremath{\field{R}}$ as a CAT(0) cube complex whose $0$-skeleton is $\ensuremath{\field{Z}}$.
Let $g$ be an isometry of $X$.
A \emph{geodesic combinatorial axis} for $g$ is a $g$-invariant subcomplex homeomorphic to $\ensuremath{\field{R}}$ that embeds isometrically in $X$.
\end{defn}
\begin{defn}
Let $(M, d)$ be a metric space.
The subspaces $N_1, N_2 \subseteq M$ are \emph{coarsely equivalent} if each lies in an $r$-neighbourhood of the other for some $r>0$.
\end{defn}
\begin{thm} \label{thm:main}
Let $G$ be virtually $\mathbb{Z}^n$.
Suppose $G$ acts properly and without inversions on a CAT(0) cube complex $X$.
Then $G$ stabilizes a finite dimensional subcomplex $Y \subseteq X$ that is isometrically embedded in the combinatorial metric, and $Y \cong \prod_{i=1}^m C_i$, where each $C_i$ is a cubical quasiline and $m \geq n$.
Moreover, $\stab_G(\Lambda)$ is a codimension-1 subgroup for each hyperplane $\Lambda$ in $Y$.
\end{thm}
\begin{proof}
By Corollary~\ref{cor:abelianMinSubcomplex} there is a minimal, non-empty, convex subcomplex $X_o \subseteq X$ stabilized by $G$, containing finitely many hyperplane orbits, and $\stabilizer_G(\Lambda)$ is a rank $(n-1)$ subgroup of $G$, for each hyperplane $\Lambda \subseteq X_o$.
Let $S = \{ g_1 ,\ldots, g_r \}$ be a generating set for $G$.
Let $x \in X_o$ be a $0$-cube.
Let $\Upsilon$ be the Cayley graph of $G$ with respect to $S$.
Let $\phi : \Upsilon \rightarrow X_o$ be a $G$-equivariant map that sends vertices to vertices, and edges to combinatorial paths or vertices in $X_o$.
Let $Q = \phi(\Upsilon)$.
As $G$ acts properly on $X$, and cocompactly on $\Upsilon$, the graph $Q $ is quasiisometric to $G$.
Let $\mathcal{W}_Q$ be the set of hyperplanes intersecting $Q$, and let $(Q, \mathcal{W}_Q)$ be the associated wallspace.
By Lemma~\ref{lem:hemi0} we know that $C(Q, \mathcal{W}_Q)$ is an isometrically embedded subcomplex of $X_o$.
Fix a proper action of $G$ on $\mathbb{R}^n$, and let $q: Q \rightarrow \mathbb{R}^n$ be a $G$-equivariant quasiisometry.
Note that $\stabilizer_G(\Lambda)$ is a quasiisometrically embedded subgroup of $G$, for all $\Lambda \in \mathcal{W}_Q$.
Thus $q(\Lambda \cap Q)$ is coarsely equivalent to a codimension-1 affine subspace $H \subseteq \mathbb{R}^n$.
Moreover, $q(\ola{\Lambda} \cap Q)$ and $q(\ora{\Lambda} \cap Q)$ are coarsely equivalent to the halfspaces of $H$.
Let $n>0$.
Since there are finitely many orbits of hyperplanes in $X_o$, there are only finitely many commensurability classes of stabilizers.
Therefore, we may partition $\mathcal{W}_Q$ as the disjoint union $ \bigsqcup_{i=1}^m \mathcal{W}_i$ where each $\mathcal{W}_i$ contains all walls with commensurable stabilizers.
For each $\Lambda_i \in \mathcal{W}_i$ let $q(\Lambda_i \cap Q)$ be coarsely equivalent to a codimension-1 affine subspace $H_i \subseteq \mathbb{R}^n$, stabilized by $\stabilizer_G(\Lambda_i)$.
If $i\neq j$ then $H_i$ and $H_j$ are nonparallel affine subspaces, and therefore $\Lambda_i$ and $\Lambda_j$ will intersect in $Q$.
Therefore, every wall in $\mathcal{W}_i$ intersects every wall in $\mathcal{W}_j$ if $i \neq j$, and thus $C(Q, \mathcal{W}_Q) \cong \prod_{i=1}^m C(Q, \mathcal{W}_i)$.
Finally, we show that $C(Q, \mathcal{W}_i)$ is a quasiline for each $1 \leq i \leq m$.
As $G$ permutes the factors in $\prod_{i=1}^m C(Q, \mathcal{W}_i)$, there is a finite index subgroup $G' \leqslant G$ that preserves each factor.
For each $i$, the stabilizers $\stab_G(\Lambda)$ are commensurable for all $\Lambda \in \mathcal{W}_i$.
Therefore, there is a cyclic subgroup $Z_i$ that is not virtually contained in any $\stab_G(\Lambda)$ and thus acts freely on $C(Q, \mathcal{W}_i)$.
As the stabilizers of $\Lambda \in \mathcal{W}_i$ are commensurable, all $q(\Lambda \cap Q)$ will be quasi-equivalent to parallel codimension-1 affine subspaces of $\ensuremath{\field{R}}^n$, which implies that only finitely many $Z_i$-translates of $\Lambda$ can pairwise intersect.
As there are finitely many $Z_i$-orbits of $\Lambda$ in $\mathcal{W}_i$, there is an upper bound on the number of pairwise intersecting hyperplanes in $\mathcal{W}_i$.
Thus, there are finitely many $Z_i$-orbits of maximal cubes in $C(Q, \mathcal{W}_i)$, which implies that $C(Q, \mathcal{W}_i)$ is a CAT(0) cube complex quasiisometric to $\mathbb{R}$.
\end{proof}
We can now prove Corollary~\ref{cor:geometricFlat}.
\begin{cor} \label{cor:geometricFlat}
Let $G$ be virtually $\ensuremath{\field{Z}}^n$.
Suppose $G$ acts properly and without inversions on a CAT(0) cube complex $X$.
Then $G$ cocompactly stabilizes a subspace $F \subseteq X$ homeomorphic to $\mathbb{R}^n$ such that for each hyperplane $\Lambda \subseteq X$, the intersection $\Lambda \cap F$ is either empty or homeomorphic to $\mathbb{R}^{n-1}$.
\end{cor}
\begin{proof}
By Theorem~\ref{thm:main} there is a $G$-equivariant, isometrically embedded, subcomplex $Y \subseteq X$, such that $Y = \prod_{i=1}^m C_i$, where each $C_i$ is a quasiline, and $\stab_G(\Lambda)$ is a codimension-1 subgroup.
Considering $Y$ with the CAT(0) metric, note that $Y$ is a complete CAT(0) metric space in its own right, and $G$ acts semisimply on $Y$.
By the Flat Torus Theorem~\cite{BridsonHaefliger} there is an isometrically embedded flat $F\subseteq Y$.
Note that $F \subseteq X$ is not isometrically embedded.
As $\stab_G(\Lambda)$ is a codimension-1 subgroup of $G$ for each hyperplane $\Lambda$ in $X$, the intersection $\Lambda \cap F = (\Lambda \cap Y) \cap F$ is either empty or, as $F \subseteq Y$ is isometrically embedded, the hyperplane intersection is an isometrically embedded copy of $\ensuremath{\field{R}}^{n-1}$.
\end{proof}
\section{Haglund's Axis} \label{sec:HaglundRevisited}
The goal of this section is to reprove the following result of Haglund as a consequence of Corollary~\ref{cor:geometricFlat}.
\begin{thm}[Haglund~\cite{HaglundSemiSimple}] \label{thm:HaglundAxis}
Let $G$ be a group acting on a CAT(0) cube complex without inversions.
Every element $g \in G$ either fixes a $0$-cube of $X$, or stabilizes a combinatorial geodesic axis.
\end{thm}
\begin{proof}
As finite groups don't contain codimension-1 subgroups, Theorem~\ref{thm:fixedCube} implies that if $g$ is finite order then it fixes a $0$-cube.
Suppose that $g$ does not fix a $0$-cube; then $\langle g \rangle$ must act properly on $X$.
By Corollary~\ref{cor:geometricFlat}, there is a line $L \subset X$ stabilized by $\langle g \rangle$, that intersects each hyperplane at most once at a single point in $L$.
Let $\mathcal{W}_L$ be the set of hyperplanes intersecting $L$.
Note that the intersection points of the walls in $\mathcal{W}_L$ with $L$ is locally finite subset.
Fix a basepoint $p \in L$ that doesn't belong to a hyperplane intersecting $L$, and let $x$ be the canonical $0$-cube corresponding to $p$.
Let $\Lambda_1, \ldots, \Lambda_k$ be the set of hyperplanes separating $p$ and $gp$, and assume that $p \in \overleftarrow{\Lambda}_i$.
Reindex the hyperplanes such that $\ola{\Lambda}_1 \cap L \subseteq \ola{\Lambda}_2 \cap L \subseteq \cdots \subseteq \ola{\Lambda}_k \cap L$.
The ordering of the hyperplanes separating $p$ and $gp$ determines a combinatorial geodesic joining $x$ and $gx$ of length $k$, where the $i$-th edge is a $1$-cube dual to $\Lambda_i$.
This can be extended $\langle g \rangle$-equivariantly, to obtain a combinatorial geodesic axis $L_c$, since each hyperplane intersects $L_c$ at most once.
\end{proof}
\appendix
\section{Codimension-1 Subgroups} \label{AppendixA}
\begin{defn} \label{defn:codim1}
Let $G$ be a finitely generated group.
Let $\Upsilon$ denote the Cayley graph of $G$ with respect to some finite generating set.
A subgroup $H \leqslant G$ is \emph{codimension-1} if $H \backslash \Upsilon$ has more than one end.
Let $\oplus$ denote the operation of symmetric difference.
A subset $A \subseteq G$ is \emph{$H$-finite} if $A \subseteq HF$ where $F$ is some finite subset of $G$.
We will use the following equivalent formulation (see~\cite{Scott77}) of codimension-1:
A subgroup $H \leq G$ is a codimension-1 subgroup if there exists some $A \subseteq G$ such that
\begin{enumerate}
\item \label{item:codim1:1} $A = HA$,
\item \label{item:codim1:2} $A$ is \emph{$H$-almost invariant}, that is to say that $A \oplus Ag$ is $H$-finite for any $g \in G$.
\item $A$ is \emph{$H$-proper}, that is to say that neither $A$ nor $G-A$ is $H$-finite.
\end{enumerate}
\end{defn}
We will reprove the following theorem from~\cite{Gerasimov97} using techniques from~\cite{NibloRoller98}.
\begin{thm}
Let $G$ be a finitely generated group acting on a CAT(0) cube complex $X$ without edge inversions or fixing a $0$-cube.
Then the stabilizer of some hyperplane in $X$ is a codimension-1 subgroup of $G$.
\end{thm}
\begin{proof}
Suppose that no hyperplane stabilizer is a codimension-1 subgroup of $G$. We will find a $0$-cube fixed by $G$.
Let $\mathcal{H}$ denote the set of hyperplanes in $X$.
We can assume that $X$ has finitely many $G$-orbits of hyperplanes after possibly passing to the convex hull of a single $0$-cube orbit in $X$.
If $x,y$ are $0$-cubes in $X$, then let $\Delta(x,y) \subseteq \mathcal{H}$ denote the hyperplanes separating $x$ and $y$.
Note that $$\textup{\textsf{d}}_X^c(x,y) = | \Delta(x,y) |.$$
Let $\Lambda_1, \ldots, \Lambda_n$ be a minimal set of representatives of those orbits.
Let $$H_i =\stab_G(\Lambda_i) \; \textrm{ and } \; A_i = \{ g \in G \mid gx_0 \in \overleftarrow{\Lambda}_i \}. $$
We can verify that $A_i$ satisfies the first two criteria in Definition~\ref{defn:codim1}.
(\ref{item:codim1:1}): It is immediate that $A_i = H_iA_i$, as $G$ doesn't invert the hyperplanes in $X$.
(\ref{item:codim1:2}): Let \emph{xor} denote the exclusive or. For $f\in G$ we can deduce that $A_i \oplus A_if$ is $H_i$-finite:
\begin{align*}
g \in A_i \oplus A_i f & \iff gx_0 \in \overleftarrow{\Lambda}_i \textrm{ xor } gf^{-1}x_0 \in \overleftarrow{\Lambda}_i \\
& \iff x_0 \in g^{-1}\overleftarrow{\Lambda}_i \textrm{ xor } f^{-1}x_0 \in g^{-1}\overleftarrow{\Lambda}_i \\
& \iff g\in G \textrm{ such that } g^{-1}\Lambda_i \textrm{ separates $x_0$ and $f^{-1}x_0$. }
\end{align*}
As $(X, \mathcal{H})$ is a wallspace, there are only finitely many $g \in G$ such that $g^{-1}\Lambda_i$ separates $x_0$ and $f^{-1}x_0$.
If $g_1 \Lambda_i, \ldots, g_k\Lambda_i$ are the translates then
$$A_i \oplus A_if = \{g_1, \ldots, g_k\} H_i$$
which implies that $A_i$ is $H_i$-almost invariant.
Therefore, $A_i$ cannot be $H_i$-proper, for any $i$, as we have assumed that none of the $H_i$ are codimension-1.
This means that $A_i$ is $H_i$-finite so
$A_i \subseteq H_iF_i$ where $F_i \subseteq G$ is finite.
\begin{claim}
$\textup{\textsf{d}}_X(x_0, fx_0) < 2 \max_i(|F_i|)$ for all $f \in G$.
\end{claim}
\begin{proof}
\begin{align*}
g\Lambda_i \in \Delta(x_0, fx_0) & \iff x_0[g\Lambda_i] \neq fx_0[g\Lambda_i] \\
& \iff x_0 \in g \overleftarrow{\Lambda}_i \textrm{ xor } fx_0 \in g \overleftarrow{\Lambda}_i \\
& \iff g^{-1}x_0 \in g \overleftarrow{\Lambda}_i \; \textrm{ xor } \; g^{-1}fx_0 \in g \overleftarrow{\Lambda}_i \\
& \iff g^{-1} \in A_i \; \textrm{ xor } \; g^{-1} \in A_if^{-1}\\
& \iff g^{-1} \in A_i \oplus A_if^{-1}
\end{align*}
As the final set is covered by $2|F_i|$ translates of $H_i$, we can deduce that there are at most $2|F_i|$ hyperplanes in $\Delta(x_0, fx_0)$.
\end{proof}
Thus, we can conclude that the $G$-orbit of $x_0$ is a bounded set.
If $G$ has a finite orbit in $X$, then the convex hull of the orbit is a compact, finite dimensional, complete CAT(0) cube complex, and we can apply Corollary II.2.8 (1) from~\cite{BridsonHaefliger} to find a fixed point $p$.
As $p$ is in the interior of some $n$-cube that is fixed by $G$, and since $G$ doesn't invert hyperplanes we can deduce that $G$ fixes a $0$-cube in that cube.
If the $G$-orbits in $X$ are infinite, then their convex hull may not be complete, so the above argument will not hold.
Let $\mathcal{C}(\mathcal{H})$ denote the \emph{connected cube}, a graph with vertices given by functions $c: \mathcal{H} \rightarrow \{0,1\}$ with finite support, and edges that join a pair of distinct vertices if and only if they differ on precisely one hyperplane.
Fix a $0$-cube $x_0$.
Then there is an embedding $$\phi: X^1 \hookrightarrow \mathcal{C}(\mathcal{H})$$
that maps the $0$-cube $x$ to $c_x$ where
\[ c_x(\Lambda) = \begin{cases}
1 & \textrm{if } \; x[\Lambda] \neq x_0[\Lambda] \\
0 & \textrm{if } \; x[\Lambda] = x_0[\Lambda] \\
\end{cases}
\]
\noindent A hyperplane $\Lambda \in \mathcal{H}$ \emph{separates} two vertices $c_1,c_2$ in $\mathcal{C}(\mathcal{H})$ if $c_1(\Lambda) \neq c_2(\Lambda)$.
Note that $\Lambda$ separates $0$-cubes $x,y$ in $X$ if and only if it separates $\phi(x)$ and $\phi(y)$.
Therefore, we can define $\Delta(c_1, c_2)$ for vertices in $\mathcal{C}(\mathcal{H})$ and conclude that if $x,y$ are $0$-cubes in $X$ then $\Delta(x,y) = \Delta( \phi(x), \phi(y) )$.
This implies that $\phi$ is an isometric embedding in the combinatorial metric.
We will show that a bounded orbit in $X$ implies there is a fixed $0$-cube in $\mathcal{C}(\mathcal{H})$ and then argue that we can go one step further and find a fixed $0$-cube in $X$.
Let $\ell^2(\mathcal{H})$ be the Hilbert Space of square summable functions $s: \mathcal{H} \rightarrow \mathbb{R}$.
There is an embedding $\rho: \mathcal{C}(\mathcal{H}) \rightarrow \ell^2(\mathcal{H})$ given by
\[
\rho(c)(\Lambda) = c[\Lambda]
\]
It is straightforward to verify that $\| \rho(c_1) - \rho(c_2) \|^2 = \textup{\textsf{d}}_{\mathcal{C}(\mathcal{H})}(c_1, c_2)$.
There is a $G$-action on $\ell^2(\mathcal{H})$ such that if $s \in \ell^2(\mathcal{H}), \Lambda \in \mathcal{H}, g \in G$ then
\[
g s(\Lambda) = \begin{cases}
s(g^{-1}\Lambda) & \textrm{if $c_{x_0}(g^{-1} \Lambda) = c_{x_0}(\Lambda)$} \\
1 - s(g^{-1}\Lambda) & \textrm{if $c_{x_0}(g^{-1}\Lambda) \neq c_{x_0}(\Lambda)$} \\
\end{cases}
\]
It is again straightforward to verify that this action is by isometries, and that $\rho$ is $G$-equivariant.
As $G{x_0}$ is bounded, so is $G(\rho \circ \phi(x_0))$.
It then follows that $G$ has a fixed point in $\ell^2(\mathcal{H})$ (\cite{NibloRoller98} gives a proof and also cites Lemma 3.8 in~\cite{HarpeValette89}).
Let $s: \mathcal{H} \rightarrow \mathbb{R}$ be the fixed point.
For all $g \in G$ we can deduce that $s(g \Lambda)$ is either $s(\Lambda)$ or $1 - s(\Lambda)$.
Therefore $s$ can only take two values on the hyperplanes in a single $G$-orbit.
As $s$ has to be square summable the two values have to be $0$ and $1$, and $s$ can only take the value $1$ on finitely many hyperplanes.
Thus, $s$ is the image of a point $c$ in $\mathcal{C}(\mathcal{H})$.
Let $c \in \mathcal{C}(\mathcal{H})$ be a $G$-invariant vertex which minimizes the distance to the image of $X^1$ in $\mathcal{C}(\mathcal{H})$.
Let $Z$ be a $G$-orbit of $0$-cubes in $X$ such that $\phi(Z)$ realize the minimal distance from $c$.
Let $\mathcal{V}$ be the set of hyperplanes that intersect $\{c \} \cup Z$.
Every hyperplane in $ \mathcal{V}$ must intersect $Z$ otherwise if $\mathcal{F} \subseteq \mathcal{V}$ is the finite, $G$-invariant subset of hyperplanes separating $c$ from $Z$ we can define a $0$-cube $c'$ such that
$$c'(\Lambda) = \begin{cases}
c(\Lambda) & \textrm{ if $\Lambda \notin \mathcal{F}$} \\
1 - c(\Lambda') & \textrm{ if $\Lambda \in \mathcal{F}$} \\
\end{cases} $$
and deduce that $c'$ is $G$-invariant and is $|\mathcal{F}|$ closer to $Z$ than $c$.
Let $z_0, z_1, z_2, \ldots$ an enumeration of $0$-cubes in $Z$.
Each hyperplane separating $z_0$ and $z_1$ must lie in either $\Delta(z_0, c)$ or $\Delta( z_1, c)$.
As $z_0$ is minimal distance in $X$ from $c$, the edges in $X$ incident to $z_0$ must be dual to hyperplanes not in $\Delta(z_0, c)$, and instead belongs to $\Delta(z_1,c)$.
Therefore, the hyperplane $\Lambda_0 \in \mathcal{V}$ dual to the first edge in a combinatorial geodesic joining $z_0$ to $z_1$ must lie in $\Delta(z_1,c)$.
Similarly, there exists a hyperplane $\Lambda_1$ dual to the first edge of the combinatorial geodesic in $X$ joining $z_1$ to $z_2$ that belongs to $\Delta(z_2,c)$ but not $\Delta(z_1,c)$.
Note that $\Lambda_1$ cannot intersect $\Lambda_0$ in $X$, otherwise $\Lambda_0$ would be dual to an edge incident to $z_1$, which would imply that there exists a $0$-cube in $X$ adjacent to $z_1$ that is closer to $c$.
Therefore $\Lambda_0, \Lambda_1$ separates $z_0$ from $z_2$ in $X$.
Iterating this argument produces a sequence of disjoint hyperplanes $\Lambda_0, \Lambda_1, \Lambda_2, \ldots$ such that $\Lambda_0, \ldots, \Lambda_k$ separates $z_0$ from $z_{k+1}$ in $X$.
This contradicts the hypothesis that $Z$ is a bounded set in $X$.
\end{proof}
\iffalse
The action of $G$ on $\mathcal{H}$ induces an action on $\mathcal{C}(\mathcal{H})$ such that $g c(\Lambda) = c(g^{-1} \Lambda)$.
\begin{lem}
The embedding $\phi$ is $G$-equivariant.
\end{lem}
\begin{proof}
As $gx[\Lambda] = x[g^{-1}\Lambda]$ we can deduce that
\[
c_{gx}[\Lambda] = \begin{cases}
1 & \textrm{if } \; x[g^{-1} \Lambda] \neq x_0[\Lambda] \\
0 & \textrm{if } \; x[g^{-1} \Lambda] = x_0[\Lambda] \\
\end{cases}
\]
Which coincides with $(gc_x)[\Lambda]$ so $g\phi(x) = \phi(gx)$.
\end{proof}
Suppose that $X$ has finitely many orbits of hyperplanes. Let $\Lambda_1, \ldots, \Lambda_n$ be a minimal set of representatives of those orbits.
Let $H_i =\stab_G(\Lambda_i)$ and $$A_i = \{ g \in G \mid gx_0 \in \overleftarrow{\Lambda}_i \} $$
Then
\begin{enumerate}
\item It is immediate that $A_i = H_iA_i$, as $G$ doesn't invert the hyperplane.
\item
\item If none of the $H_i$ are codimension-1 then $A_i$ is not $H_i$ proper, for all $i$.
This means that we can assume that $A_i$ is $H_i$-finite so
$A_i \subseteq H_iF_i$ where $F_i \subseteq G$ is finite.
\begin{claim}
$\textup{\textsf{d}}_X(x_0, fx_0) < 2 \textrm{Max}(|F_i|)$ for all $f \in G$.
\end{claim}
\begin{proof}
\begin{align*}
g\Lambda_i \in \Delta(x_0, fx_0) & \iff x_0[g\Lambda_i] \neq fx_0[g\Lambda_i] \\
& \iff x_0 \in g \overleftarrow{\Lambda}_i \textrm{ xor } fx_0 \in g \overleftarrow{\Lambda}_i \\
& \iff g^{-1}x_0 \in g \overleftarrow{\Lambda}_i \textrm{ xor } g^{-1}fx_0 \in g \overleftarrow{\Lambda}_i \\
& \iff g^{-1} \in A_i \textrm{xor} g^{-1} \in Af^{-1}\\
& \iff g^{-1} \in A_i + A_if^{-1}
\end{align*}
As the final set is covered by $2|F_i|$ translates of $H_i$ we can deduce that there are only $2|F_i|$ hyperplanes in $\Delta(x_0, fx_0)$.
\end{proof}
Thus we can conclude that the $G$-orbit of $x_0$ is a bounded set.
If $G$ has a finite orbit, then we can apply Corollary 2.8 (1) from \cite{BridsonHaefliger} on the convex hull of these points to find a fixed point $p$.
As $p$ is in the interior of some $n$-cube that is fixed by $G$, and since $G$ doesn't invert hyperplanes we can deduce that $G$ fixes a $0$-cube in that cube.
If the orbits are infinite, the following argument of Niblo and Roller will show that this implies there is a fixed $0$-cube in $\mathcal{C}(\mathcal{H})$.
We will then argue that we can go one step further and find a fixed $0$-cube in $X$.
\end{enumerate}
\fi
\end{document} |
\begin{document}
\title{Classical Origin of the Spin of Relativistic Pointlike Particles\\ and Geometric interpretation of Dirac Solutions}
\author{S. Savasta and O. Di Stefano}
\affiliation{Dipartimento di Fisica della Materia e
Tecnologie Fisiche Avanzate, Universit\`{a} di Messina Salita
Sperone 31, I-98166 Messina, Italy}
\begin{abstract}
{
Spin of elementary particles is the only kinematic degree of freedom not having classical correspondence.
It arises when seeking the finite-dimensional representations of the Lorentz group, which is the only symmetry group of relativistic quantum field theory acting on multiple-component quantum fields non-unitarily.
We study linear transformations acting on the space of spatial and proper-time velocities rather than on coordinates.
While ensuring relativistic invariance, they avoid these two exceptions: they describe the spin degree of freedom of a pointlike particle yet at a classical level and form a compact group, hence with unitary finite-dimensional representations.
Within this approach changes of the velocity modulus and direction can be accounted for by rotations of two independent unit vectors.
Dirac spinors just provide the quantum description of these rotations.}
\end{abstract}
\pacs{11.30.-j,11.30.Cp, 11.10.-z,03.65.-w}
\maketitle
\section{Introduction}
Quantum spin differs from the other quantum observables as position, momentum, energy, angular momentum etc.,
for the absence of classical correspondence. Pauli described it
in his paper on the exclusion principle \cite{Pauli} as a classical nondescribable two-valuedness.
Accordingly in many textbooks about quantum theory, spin is referred to as a non-classical degree of freedom.
Moreover spin cannot be regarded as related to some internal symmetry like hypercharge since it originates from spacetime symmetries i.e. from finite-dimensional representations of the homogeneous Lorentz group.
It is the only kinematic degree of freedom not having classical correspondence, although it is worth mentioning that some composite classical dynamical models with additional variables are able to reproduce after quantization the Dirac electron theory \cite{CS1,CS2}.
More recently a geometric origin of the spin angular momentum has been suggested \cite{Newman}.
The fact that the relativistic Dirac theory automatically includes the effects of spin leads to the conclusion that spin is a quantum relativistic effect. Nevertheless this conclusion is not generally accepted. Weinberg (Ref.\ \cite{weinberg} Chapter 1) wrote: {\em \dots it is difficult to agree that there is anything fundamentally wrong with the relativistic equation for zero spin that forced the development of the Dirac equation -- the problem simply is that the electron happens to have spin $\hbar /2$, not zero.}
Historically, Paul Dirac found the Klein-Gordon equation physically unsatisfactory \cite{Dirac}, thus he sought a relativistically invariant wave equation of first order in time satisfying a Schr\"{o}dinger-like wave equation of the form
\begin{equation}
i\partial_t \psi = \hat H \psi\, .
\label{Sl}\end{equation}
In order to have a more symmetric relativistic wave equation in the 4-momentum components, Dirac sought an equation that, being linear in the time derivative, is also linear in the space derivatives, so that $\hat H$ takes the form,
\begin{equation}
\hat H = {\bm \alpha} \cdot {\bf \hat p} + \beta m \, ,
\label{DH}\end{equation}
with ${\bm \alpha}$ and $\beta$ being independent on spacetime and 4-momentum.
The condition that Eq.\ (\ref{DH}) provides the correct relationship between energy and momentum
\begin{equation}
E^2 = {\bf p}^2+m^2\, ,
\label{Ec}\end{equation}
requires that ${\bm \alpha}$ and $\alpha_4\equiv \beta$ obey the anticommutation rules $\left \{ \alpha_i, \alpha_j \right \}= 2 \delta_{ij}$ ($i=1,4$). Dirac found that a set of $4 \times 4$ matrices satisfying this relation provides the lowest order representation of the four $\alpha_i$. They can be expressed in terms of Pauli matrices $\rho_i$ and $\sigma_i$ belonging to two different Hilbert spaces: ${\alpha}_i = \rho_1 \sigma_i$ ($i=1,3$) and $\beta=\rho_3$. Inserting Eq.\ (\ref{DH}) into (\ref{Sl}) the Dirac equation involving a four-component wavefunction is obtained.
Richard Feynman in his Nobel lecture wrote: {\em Dirac obtained his equation for the
description of the electron by an almost purely mathematical proposition. A
simple physical view by which all the contents of this equation can be seen is
still lacking}.
Dirac's Hamiltonian seems not to have a direct correspondence with the classical relativistic Hamiltonian of the free pointlike particle
\begin{equation}
H = \sqrt{{\bf p}^2+m^2}\, ,
\label{Ecsr}\end{equation}
in contrast to the nonrelativistic Schr\"{o}dinger-like wave equation which can be derived directly from the Hamiltonian after the quantum operator replacement. Analogously the Klein-Gordon equation can be derived directly from the relativistic relationship (\ref{Ec}).
Actually, in 1928, (the same year of Dirac's exceptional achievement) Breit \cite{Breit} provided the lacking correspondence principle, recalling that another way of writing Eq.\ (\ref{Ecsr}) is
\begin{equation}
H = {\bf \dot x} \cdot {\bf p} + m\sqrt{1- \left| {\bf \dot x} \right|^2}\, \, ,
\label{H_Br}\end{equation}
which is the form at which one usually arrives first in the derivation of the Hamiltonian function as $H = p_i q_i -L$, where $L$ is the Lagrangian function.
Dividing Eq.\ (\ref{Ec}) by $E$ and recalling that ${\bf \dot x}= {\bf p}/E$ and that $m/E = \sqrt{1- \left| {\bf \dot x} \right|^2}$, the relativistic invariance of Eq.\ (\ref{H_Br}) is easily verified.
Eq.\ (\ref{H_Br}) has the same structure as the Dirac Hamiltonian. In particular the Dirac equation can be derived from it after the following additional replacements: ${\bf \dot x} \to {\bm \alpha}$, and $\sqrt{1- \left| {\bf \dot x} \right|^2} \to \beta$. The correspondence can be unequivocally proved calculating
the expectation values of the Dirac matrices by using the solutions of the Dirac equation for a particle of definite momentum ${\bf p}$,
\begin{eqnarray}
&&\langle {\bm \alpha} \rangle = {\bf p}/E = {\bf \dot x}\, ,\nonumber \\
&&\langle {\beta} \rangle = m/E = \sqrt{1- \left| {\bf \dot x} \right|^2}\, .\nonumber
\end{eqnarray}
It may be surprising that the few-line derivation of the classical origin of Dirac equation by Breit was scarcely exploited in the literature on the interpretation of the Dirac equation.
Moreover, even more surprisingly, it is not described in textbooks on QFT to our knowledge (see e.g. \cite{weinberg,peskin,maggiore,mandl,hey}). Only ${\bm \alpha}$ is identified as the velocity operator from the Heisenberg equation
$d{\bf \hat x}/dt = [{\bf \hat x}, \hat H]$ and/or from the current density operator \cite{Dirac,weinberg,peskin,hey}.
We believe that such a simple and profound result has been hidden by the relentless quest for explicit covariance.
Ordinary velocity is not considered in special relativity as a fundamental variable; it is not a 4-vector or part of it. Moreover it could be argued that Eq.~(\ref{H_Br}) is not a proper classical Hamiltonian since it depends on velocity.
Nevertheless it correctly expresses the energy of a free relativistic particle,
$
E = {\bf \dot x} \cdot {\bf p} + m\sqrt{1- \left| {\bf \dot x} \right|^2}
$. Moreover the Dirac Hamiltonian depends explicitly on the velocity operator ${\bm \alpha}$, hence
one should not be surprised if its classical counterpart does depend on the velocity of the particle.
Dirac equation is generally presented in its explicit covariant form
\begin{equation}
(i \gamma^\mu p_\mu - m) \psi =0\, ,
\end{equation}
with $\gamma^0= \beta$ and $\gamma^i=\gamma_0 \alpha_i$ ($i=1,3$).
In contrast to ${\bm \alpha}$ and $\beta$, the matrices $\gamma^\mu$
transform as a 4-vector under the application of the spinor representation of a Lorentz transformation: $\Lambda_{\frac{1}{2}}^{-1} \gamma^\mu \Lambda_{\frac{1}{2}} =\Lambda^{\mu}_{\nu}\gamma^{\nu}$,
where
\begin{equation}
\Lambda_{\frac{1}{2}} = \exp({-\frac{i}{2} \omega_{\mu \nu} S_{1/2}^{\mu \nu}})\,
\end{equation}
is the spinor representation of a Lorentz transformation, being $S_{1/2}^{\mu \nu} =(i/4)[\gamma_\mu, \gamma_\nu]$ the generators, and $\omega_{\mu \nu}$ an antisymmetric matrix defining the specific transformation.
The homogeneous Lorentz group, denoted as $O(3,1)$, is the subgroup of Poincar\'{e} transformations describing rotations and boosts.
It is defined as the group of linear coordinate transformations
$ x^{\mu} \to x^{'\mu} = \Lambda^{\mu}_{\nu}x^{\nu}$
which leave invariant the {\em proper time} interval $d \tau$.
Elements with det$\Lambda=1$ (called proper Lorentz transformations) form a subgroup denoted $SO(3,1)$.
In order to study the transformation properties of multiple-component quantum fields,
it is necessary to look for the finite-dimensional representations of $SO(3,1)$ as the spinor representation $\Lambda_{\frac{1}{2}}$.
Although the $SO(3,1)$ algebra can be written as the algebra of $SU(2) \times SU(2)$, the group $SO(3,1)$ is non-compact.
Hence it has no faithful, finite-dimensional representations that are unitary, whereas the group $SU(2) \times SU(2)$ does.
The homogeneous Lorentz group is thus the only group of relativistic QFT acting on multiple-component quantum fields non-unitarily \cite{Fuchs}.
This rather surprising fact (at least apparently) conflicts with an important theorem proved by Wigner in 1931 (see Ref.\ \cite{weinberg} Chapter 2) which tells us that any symmetry operation on quantum states must be induced by a unitary (or anti-unitary) transformation. The conflict is overcome, either by regarding the field not as a multicomponent quantum wavefunction but as a classical field \cite{peskin}, or by pointing out that the fundamental group is not the (homogeneous) Lorentz group but the
Poincar$\acute{\text{e}}$ group \cite{weinberg,maggiore}.
Independently on the point of view, one consequence is that the Hermitean conjugate $\psi^\dag$ of the (four-component) spinor field $\psi$ does not have the inverse transformation property of $\psi$ as requested by quantum mechanics. The solution is to define $\bar \psi = \psi^\dag \gamma^0$ called the {\em Dirac conjugate} of $\psi$, being $\gamma^0$ the {\em time} Dirac matrix \cite{weinberg,peskin,maggiore,mandl,hey}.
The Lorentz-invariant Dirac Lagrangian can thus be written as
\begin{equation}
{\cal L}_{\text{Dirac}} =\bar \psi(i \gamma^\mu \partial_\mu - m) \psi\, .
\label{DiracL1}\end{equation}
It is quite curious that, by exploiting the relationship between the ${\bm \alpha}$ and ${\bm \gamma}$ matrices, it can be expressed without the need for the Dirac conjugate and in terms of the Dirac Hamiltonian and hence of the matrices ${\bm \alpha}$ and $\beta$:
\begin{equation}
{\cal L}_{\text{Dirac}} =\psi^\dag( i \partial_t - \hat H) \psi\, .
\label{DiracL2}\end{equation}
As discussed above, Breit found an expression for the classical Hamiltonian of a relativistic pointlike particle linear in the momentum and the mass parameter, which is the classical correspondent of Dirac's Hamiltonian \cite{Breit}. Eq.\ (\ref{H_Br}) also depends on the ordinary velocity and on $1/(dt/d \tau) =\sqrt{1- \left| {\bf \dot x} \right|^2}$.
This correspondence suggests that these velocity variables and their symmetry properties play a fundamental role.
Let us consider an inertial reference frame $S$ including a clock placed at rest measuring the time $t$.
Let us consider a pointlike particle moving with a velocity ${\bf \dot x}$ relative to the inertial system $S$.
We will indicate by the vector ${\bf x}$ the position of the particle with respect to $S$.
Let us indicate by $\tau$ the time measured by a clock moving with the particle.
The proper time interval $d \tau$ is the time interval measured by a clock fixed in the reference frame $S'$ which sees the particle at rest.
In this paper we investigate the very simple transformation properties of the ordinary velocity ${\bf \dot x}$ and of $1/(dt/d \tau)$.
In analogy to the ordinary velocity components, defined as $ \dot x_i \equiv d x_i/ dt$, we can regard
the fourth relevant variable as the proper time speed: $\dot \tau$.
Specifically, regarding the time of the particle (like the position) as a function of time, we can invert obtaining $\dot \tau \equiv d \tau / dt = 1/(dt/d \tau)$. $\dot \tau$ describes the rate of ticking of particle's clock with respect that of the reference frame clock \cite{nota}.
Eq.\ (\ref{H_Br}) can thus be written as
\begin{equation}
H = {\bf \dot x} \cdot {\bf p} + \dot \tau\, m\, .
\label{E2}\end{equation}
We show that the relativistic linear transformations, acting on the space of velocities rather than on coordinates, display some relevant advantages as compared to the homogeneous Lorentz group: (i) they are able to describe the spin additional degree of freedom of a pointlike particle yet at a classical relativistic level; (ii)
they form a compact group hence with unitary finite-dimensional representations; (iii)
they describe antiparticles at a classical level as a direct consequence of rotation symmetry.
In addition we show that these linear transformations, acting on rates of change with respect to the time coordinate $t$ of the reference frame, attribute to the latter a special role as required by quantum mechanics. Hence the proposed symmetry group holds promise for a better reconciliation between relativity and quantum mechanics.
\section{SO(4) spacetime transformations}
The velocity of the particle and particle-time speed satisfy the following relationship,
\begin{equation}
{\bf \dot x}^2 + {\dot \tau}^2 = 1\, ,
\label{epstein}\end{equation}
independently of the inertial reference frame.
Linear transformations which leave invariant the norm in a four-dimensional (4D) Euclidean space (here defined by $(\dot x_1,\dot x_2,\dot x_3,\dot \tau)$\, ) constitute the group SO(4). This group is not simple and has the same algebra as $SO(3,1)$, namely that of $SU(2) \times SU(2)$, but (in contrast to $SO(3,1)$) is compact.
The kinematics of a relativistic pointlike particle can be easily understood in terms of these variables. Eq.\ (\ref{epstein}) can be viewed as the norm of a unit 2D vector, which for later convenience we express as a 3D unit vector lying on the ${\bf i}{\bf k}$ plane: ${\bf r} \equiv(\pm\left| {\bf \dot x} \right|,0,{\dot \tau})$.
Fig.\ 1 displays one such kinematic vector with positive components.
If the particle is at rest with respect to the reference frame, the vector lies on the ${\bf k}$ axis (${\dot \tau} = 1$); moreover, $\dot \tau$ decreases with particle-speed increasing in the way predicted by special relativity (time dilation).
\begin{figure}
\caption{Representation of the unit vector ${\bf r}
\label{fig1f}
\end{figure}
The unit vector ${\bf r}$, besides ${\dot \tau}$, is able only to describe the signed modulus of the particle velocity $\pm\left| {\bf \dot x} \right|$. The particle velocity is actually a 3D vector and the direction of ${\bf \dot x}$ can be accounted for by one additional 3D unit vector ${\bf s}$ providing just the direction of ${\bf \dot x}$.
Hence the motion state of a particle can be described by a specific couple of unit vectors ${\bf r}$ and ${\bf s}$:
\begin{eqnarray}
{\dot \tau} &=& {r}_3 \nonumber\\
{\bf \dot x}&=& {r}_1\, {\bf s}\, .
\end{eqnarray}
Within this approach changes of the particle velocity
respect to an inertial frame can be accounted for by rotations of ${\bf r}$ in the kinematic plane (changes of the modulus) and rotations of ${\bf s}$ (changes of the direction).
${\bf s}$ can be transformed according to arbitrary 3D rotations around an arbitrary 3D unit vector ${\bf n}$.
$s_i \to s_i' = [{R}_{\bf n}(\theta)]_{ij}\, s_j$ ($\theta$ labels the angle of rotation about ${\bf n}$). Physical kinematic states ${\bf r}$ admit only 2D rotations about the ${{\bf j}}$-axis: $r_i \to r_i' = [R'_{{\bf j}}(\phi)]_{ij}\, r_j$.
According to these rotation symmetries, states obtained rotating
${\bf r}$, and ${\bf s}$ should be considered as possible states. In particular
${\bf r}$ also describes
kinematic states in the second and third quadrant with ${\dot \tau} < 0$.
These states provide a classical description of antiparticles. This point will appear more evident after quantization.
From the point of view of classical (not quantum) special relativity, the description in terms of ${\bf s}$ and ${\bf r}$ appears to be redundant: a given velocity is described by two different states. For example a given velocity along direction ${\bf d}= {\bf \dot x}/ \left| {\bf \dot x} \right|$ can be described by the unit vectors
${\bf s}_\uparrow = {\bf d}$ and ${\bf r}_\uparrow = (\sin \theta ,0,\cos \theta)$ with $\theta = \arcsin \left| {\bf \dot x} \right|$, or equivalently by the unit vectors ${\bf s}_\downarrow =-{\bf d}$
and ${\bf r}_\downarrow = (\sin \theta' ,0,\cos \theta')$ with $\theta' = -\theta$.
Thus a given physical velocity and the corresponding proper-time speed are described by
two states. This twofold degeneracy recalls quantum spin, which Pauli described in his paper on the exclusion principle \cite{Pauli} as a {\em classical nondescribable two-valuedness}. This two-valuedness can be described in terms of the helicity variable $h = ({\bf s} \cdot {\bf p})/p$.
In the following we will demonstrate that this classical twofold degeneracy is the classical correspondent of the helicity states determined by quantum spin.
It is worth pointing out that, although the present approach describes a spin-like degree of freedom yet at a classical level, the interaction of a classical particle with the electromagnetic field does not appear to be affected by this additional degree of freedom, in contrast to what happens after quantization.
\begin{figure}
\caption{(Color online). Unit vectors ${\bf s}
\label{tintegrated ky=0}
\end{figure}
Figure\ 2 provides a clear geometric interpretation of the different kind of kinematic states: the first quadrant contains spin up particles, the second one spin up antiparticles, the third spin down antiparticles and the fourth quadrant spin down particles.
If we want to realize these transformation properties on quantum states, we have to find the representations of
the v-group acting on complex Hilbert spaces. The algebra of the v-group (as that of $SO(3,1)$) can be written as the algebra of $SU(2) \times SU(2)$.
As it is well known from non-relativistic quantum mechanics, the rotation group has finite representations
that are unitary with its generators represented by
angular momentum matrices. There is one irreducible representation of $SU(2)$ for each finite dimension $N =2j +1$ with $j$ integer or half-integer.
Since the v-group is the tensor product of two independent rotations, the corresponding quantum generators will be a pair of independent angular momentum matrices.
Let us consider transformations on elementary complex Hilbert spaces, namely two level systems. In this case the generators are represented by pairs of Pauli matrices ${\bm \sigma}$ and ${\bm \rho}$ acting on two different 2D Hilbert spaces. These matrices have the same transformation properties of their classical counterparts ${\bf r}$ and ${\bf s}$. E.g.:
\begin{equation}
D^{r \dag}_{{\bf j}}(\phi)\, \rho_i\, D^r_{{\bf j}}(\phi) = [R^r_{{\bf j}}(\phi)]_{ij}\, \rho_j\, ,
\end{equation}
where $D^r_{{\bf j}}(\phi) = \exp{\left( -i \rho_2\, \phi/{2}\right)}$.
As a consequence their expectation values transform as classical vectors. They are the quantum analog of classical unit vectors.
Thus, in order to combine rotation symmetry with quantum mechanics we can associate Pauli matrices to classical unit vectors. Applying this concept to the v-group above described we obtain:
\begin{eqnarray}
{\dot \tau} &=& {r}_3 \to \rho_3 \otimes I = \beta \nonumber\\
{\bf \dot x}&=& {r}_1\, \hat{\bf s} \to \rho_1 \otimes {\bm \sigma} = {\bm \alpha}\, ,
\label{corr1}\end{eqnarray} where $I$ is the identity operator in the $s$ space describing the direction of velocity.
It turns out that ${\bm \alpha}$ and $\beta$ are the well-known Dirac matrices in the standard representation.
${\bm \alpha}$ is the velocity vector operator, and Eq.\ (\ref{corr1}) allows us to identify $\beta$ as the proper-time speed operator.
Also higher order angular-momentum operators transform as classical vectors.
However it is worth noticing that expectation values of Pauli matrices have an additional unique property:
they obey the following relationship: $\sum_i \langle \sigma_i \rangle^2 =1$, implying
$\langle {\bm \alpha} \rangle^2 + \langle \beta \rangle^2=1$ (compare with Eq.\ (\ref{epstein})). This property ensures a closer adherence of quantum expectation values
to classical values. For example, working with a 3D complex Hilbert space ($j=1$), there would be quantum states displaying both space and proper time speeds equal to zero: $\langle J_3 \rangle = \langle J_1 \rangle = 0$ in clear contrast with the behaviour of classical relativistic particles. This observation attributes a unique role to spin $1/2$ particles as the quantum correspondents of classical pointlike particles. The study of higher order ($N >2$) representations is left for future work, we expect that $j=1$ representations describe vector fields.
\section{Geometric description of Dirac solutions}
In order to better point out our correspondence principle for the Dirac equation, we start from Eq.\ (\ref{E2}) describing the energy of a classical free pointlike particle.
It is formally a linear function of momentum and depends on the components of the two unit vectors ${\bm r}$ and ${\bm s}$: $E = {\bf p} \cdot {{\bf s}}\; r_1 + m\, r_3$.
If the symmetry properties of these unit vectors have any physical relevance, we should be able to quantize Eq.\ (\ref{E2}) by replacing the spatial and proper time velocities with the corresponding quantum operators according to (\ref{corr1}). Performing also the usual operator replacements
$E \to i {\partial}_t$ and ${\bf p} \to -i {\partial}_{\bf x}$,
the Dirac equation is indeed obtained:
\begin{equation}
i {\partial}_t \psi(t,{\bf x}) = \left(-i {\bm \alpha} \cdot {\partial}_{\bf x} + \beta m \right) \psi(t,{\bf x}) \equiv \hat H \psi(t,{\bf x})\, ,
\label{Dirac}\end{equation}
where $\psi$ is a four component wave function.
This alternative derivation of the Dirac equation based on the symmetry properties of kinematic rates
demonstrates that the symmetry properties of the spatial and proper time velocities play a deep role.
\begin{figure}
\caption{(Color online). Geometric representation of Dirac spinors on the ${\bf i}
\end{figure}
Let us look at the solutions of Dirac equation of the usual form
$
\psi = e^{-i E t}\, e^{i {\bf p} \cdot {\bf x}} \left| \eta, \zeta \right>
$,
where $\eta$ and $\zeta$ are states belonging to the two 2D Hilbert spaces (s and r) where $\sigma_i$ and $\rho_j$ act respectively. We assume that only ${\bf p}$ is determined and seek for $\eta$, $\zeta$, and $E$ solving the eigenvalue problem obtained after inserting the above solution into Eq.\ (\ref{Dirac}):
\begin{equation}
{\cal H} \left| \eta \right>_{s} \left| \zeta \right>_{r} = E \left| \eta \right>_{s} \left| \zeta \right>_{r}
\label{eigen}\end{equation}
with ${\cal H} = \rho_1\; {\bm \sigma} \cdot {\bf p} + m\, \rho_3$.
The states in $s$ that are eigenstates of ${\bm \sigma} \cdot {\bf \tilde p}$, where ${\bf \tilde p} = {\bf p}/p$:
\begin{equation}
{\bm \sigma} \cdot {\bf \tilde p} \left|{\bf \tilde p}, \pm \right>_{s} =
\pm \left|{\bf \tilde p}, \pm \right>_{s}\, ,
\end{equation}
are eigenstates of ${\cal H}$. Inserting these eigenkets into Eq.\ (\ref{eigen}) and multiplying from the left for the same eigenbras, we are left with a pair of equations involving only states in $r$:
\begin{equation}
(\pm p \rho_1 + m\, \rho_3) \left| \zeta \right>_{r} = E \left| \zeta \right>_{r}\, ,
\end{equation}
which more compactly can be written as
\begin{equation}
{\bf s}_{\pm} \cdot {\bm \rho} \left| \zeta \right>_{r} = {\cal E} \left| \zeta \right>_{r}\, ,
\end{equation}
with ${\cal E} = E/\sqrt{p^2 + m^2}$ and
\begin{equation}
{\bf s}_{\pm}= \frac{1}{ \sqrt{p^2 + m^2}} (\pm p,0,m) = (r_1,0,r_3) \, .
\end{equation}
The four Dirac solutions for a given ${\bf p}$ can thus be written as:
\[ \psi = e^{-i {\cal E} \sqrt{p^2+m^2}\, t} e^{i {\bf p} \cdot {\bf x}} \left|{\bf \tilde p}, k \right>_{s} \left|{\bf s}_{k}, {\cal E} \right>_{r}
\]
with ${\cal E},k = \pm 1$.
They have a precise and simple geometric meaning that can be easily visualized on the Bloch sphere (Fig.\ 2). Positive energy solutions $\left|{\bf s}_{\pm}, + \right>_{r}$ are given by eigenstates of ${\bf s}_{\pm} \cdot {\bm \rho}$ with ${\cal E} =1$ describing the system aligned along the unit vectors ${\bf s}_{\pm}$
in the first and fourth quadrant. Negative energy solutions $\left|{\bf s}_{\pm}, - \right>_{r}$, corresponding to antiparticles, are given by eigenstates of ${\bf s}_{\pm} \cdot {\bm \rho}$ with ${\cal E} =-1$ and describe the system aligned along directions in the second and third quadrant which are opposite to ${\bf s}_{\pm}$. The results displayed in Fig.\ 2 unequivocally show the correspondence between the classical and quantum descriptions of spin and antiparticles here proposed.
In particular solutions of the Dirac equation with negative energy (corresponding to antiparticles) display $\langle \beta \rangle < 0$, so the claim that classical states with
$\dot \tau =r_3 < 0$ are the classical correspondent of antiparticles is fully justified.
We finally observe that the present approach describes the transformation from an inertial reference frame to another by rotations, which after quantization imply the rotation on the Bloch sphere of the states $\left| \eta \right>_{s} \left| \zeta \right>_{r} \to \left| \eta' \right>_{s} \left| \zeta' \right>_{r}$. The correct coordinate dependence $\phi' = \phi (t',{\bf x}')$ of the wave function corresponding to the rotated spinors $\phi (t',{\bf x}') \left| \eta' \right>_{s} \left| \zeta' \right>_{r}$ can be uniquely determined by solving the Dirac eigenvalue equation with $\phi (t',{\bf x}')$ as the unknown function. So doing, (Lorentz) coordinate transformations can be recovered a posteriori in the spirit of a background-free theory.
\section{Discussion and Outlook}
We have presented new relativistic linear
transformations involving the ordinary velocity and the proper-time rate of change with respect to the time-coordinate of the frame of reference. According to them changes of the velocity modulus
and direction can simply be accounted for by rotations of two
independent unit vectors. Dirac spinors just provide the quantum
description of these rotations.
Within this approach antiparticles
result from rotation symmetry. Moreover these transformations are
able to describe the spin additional degree of freedom of a
pointlike particle yet at a classical relativistic level. In
contrast to the homogeneous Lorentz group, they form a compact group
hence with unitary finite-dimensional representations as all other
symmetry groups in QFT. Hence the present approach is promising
towards a better reconciliation of relativity and quantum mechanics.
The approach here described provides a direct geometric visualization of Dirac spinors. It sacrifices explicit covariance by making explicit rotation symmetry, which nevertheless is the fundamental symmetry on which the algebra of the Lorentz group is based.
A surprising feature of these results, requiring further
investigations, is that they have been obtained by regarding the
position-coordinate and proper-time as functions of the
time-coordinate of the reference frame, thus attributing to the
latter a special role as required by quantum mechanics. In so doing
these transformations put on the same footing $d{\bf x}$ and $d
\tau$, suggesting for the mass parameter $m$ the role of momentum
operator conjugate to $\tau$ as $-i {\bm \nabla}$ to ${\bf x}$. The
symmetric structure of Eq.\ (\ref{E2}) enforces this suggestion.
This would imply an internal time- energy uncertainty principle $\Delta \tau\, \Delta m$,
in agreement with the evidence of a gedanken experiment proposed by Aharonov and Reznik \cite{Ahranov}.
Further investigations are also required to understand the physical
meaning of the ${\bf j}$ axis in the $r$ space and hence of the
operator $\rho_2$ which is the generator of speed changes. It turns
out that all the kinematic states in special relativity are
described by unit vectors on the ${\bf i}{\bf k}$ plane and
consequently Dirac eigenstates are made of $r$ states on the
${\bf i}{\bf k}$ plane of the Bloch sphere. We envisage that
states outside the ${\bf i}{\bf k}$ plane play a role when
taking into account gravity.
\acknowledgments
We would like to thank B.\ Lucini and O.\ M.\ Marag\`{o} for helpful discussions and suggestions.
\end{document} |
\begin{document}
\title{Isomorphy classes of $k$-involutions of $G_2$}
\begin{abstract}
Isomorphy classes of $k$-involutions have been studied for their correspondence with symmetric $k$-varieties, also called generalized symmetric spaces. A symmetric $k$-variety of a $k$-group $G$ is defined as $G_k/H_k$ where $\theta:G \to G$ is an automorphism of order $2$ that is defined over $k$ and $G_k$ and $H_k$ are the $k$-rational points of $G$ and $H=G^{\theta}$, the fixed point group of $\theta$, respectively. This is a continuation of papers written by A.G. Helminck and collaborators \cite{He00}, \cite{DHW06}, \cite{DHW-}, \cite{HW02} expanding on his combinatorial classification over certain fields. Results have been achieved for groups of type $A$, $B$ and $D$. Here we begin a series of papers doing the same for algebraic groups of exceptional type.
\end{abstract}
\section{Introduction}
The problem of identifying all isomorphy classes of symmetric $k$-varieties is described by Helminck in \cite{He94}. There he notes that isomorphy classes of symmetric $k$-varieties of algebraic groups and isomorphy classes of their $k$-involutions are in bijection. In the following we provide a classification of isomorphy classes of $k$-involutions for the split type of $G_2$ over certain fields. \
The main result of this paper is an explicit classification of $k$-involutions of the split form of $G_2$ where $k=\mathbb{R},\mathbb{C},\mathbb{Q}, \mathbb{Q}_p,$ and $\mathbb{F}_q$, where $q>2$. We do this by finding explicit elements of $\Aut(G_2)$, where $G_2=\Aut(C)$ and $C$ is always the split octonion algebra over a given field of characteristic not $2$. \
The results from this paper rely most heavily on the works of Jacobson \cite{Ja58} on composition algebras, Lam's presentation of quadratic forms \cite{La05}, and Helminck et. al. on symmetric spaces and $k$-involutions of algebraic groups.
A \emph{$k$-involution} is an automorphism of order exactly 2, that is defined over the field $k$. The isomorphy classes of these $k$-involutions are in bijection with the quotient spaces $G_k/H_k$, where $G_k$ and $H_k$ are the $k$-rational points of the groups $G$ and $H=G^{\theta}=\{g\in G \ | \ \theta(g)=g \}$ respectively, these quotient spaces will be called \emph{symmetric $k$-varieties} or \emph{generalized symmetric spaces}. \
The group of characters and root space associated with a torus $T$ are denoted by $X^*(T)$ and $\Phi(T)$ respectively. We will also denote by
\[ A_{\theta}^- = \{ a \in A \ | \ \theta(a) = a^{-1} \}^{\circ}, \]
and by
\[ I_k(A_{\theta}^-) = \{ a \in A_{\theta}^- \ | \ \theta \circ \mathcal{I}nn(a) \text{ is a $k$-involution} \}. \]
In the following we introduce a characterization of $k$-involutions of an algebraic group of a specific type given by Helminck. The full classification can be completed with the classification of the following three types of invariants, \cite{He00},
\begin{enumerate}[(1)]
\item classification of admissible involutions of $(X^*(T),X^*(A), \Phi(T), \Phi(A))$, where $T$ is a maximal torus in $G$, A is a maximal $k$-split torus contained in $T$
\item classification of the $G_k$-isomorphy classes of $k$-involutions of the $k$-anisotropic kernel of $G$
\item classification of the $G_k$-isomorphy classes of $k$-inner elements $a\in I_k(A_{\theta}^-)$.
\end{enumerate}
For this paper we do not consider $(2)$ since our algebraic groups will be $k$-split. We mostly focus on (3) and refer to (1) when appropriate, though Helminck has provided us with a full classification of these \cite{He00}. \
The main result is an explicit description of the $k$-inner elements up to isomorphy, which completes the classification of $k$-involutions for the split group of type $G_2$. Each admissible involution of $(X^*(T), X^*(A), \Phi(T), \Phi(A) )$ can be lifted to a $k$-involution $\theta$ of the algebraic group. This lifting is not unique. The involutions that induce the same involution as $\theta$ when restricted to \\ $(X^*(T), X^*(A), \Phi(T), \Phi(A) )$ are of the form $\theta \circ \mathcal{I}nn(a)$ where $a \in A_{\theta}^-$. This set of elements such that $\theta \circ \mathcal{I}nn(a)$ is a $k$-involution form the set of \emph{$k$-inner elements} associated with the involution $\theta$ and are denoted $I_k(A_{\theta}^-)$. \
Yokota wrote about $k$-involutions, $\theta$, and fixed point groups, $G^{\theta}$, for algebraic groups of type $G_2$ for $k=\mathbb{R}, \mathbb{C}$. In fact the elements of $G_2=\Aut(C)$ we call $\mathcal{I}_{t_{(\pm 1, \pm 1)}}$ correspond to the $\gamma$ maps in \cite{Yo90}, which are a conjugation with respect to complexification at different levels within the octonion algebra taken over $\mathbb{R}$ or $\mathbb{C}$ \cite{Yo90} . We found these using different methods than in \cite{Yo90}, and show the correspondence. \
In section 3.1 we find $k$-involutions that come from conjugation by elements in a maximal $k$-split torus and using a result of Jacobson show they are isomorphic in Proposition \ref{tconj}. This will take care of all cases where $\Aut(C)= G_2$ is taken over a field whose structure permits a split octonion algebra and only split quaternion algebras, namely $k=\mathbb{C}$ and $\mathbb{F}_p$ when $p>2$. \
Over other fields there is the possibility of division quaternion algebras, and this fact using Proposition \ref{jake} gives us another isomorphy class of $k$-involutions when we take $k=\mathbb{R}, \mathbb{Q}$ and $\mathbb{Q}_p$. In section 3.2 we find the $k$-involution $\theta$ for which our maximal $k$-split torus is a maximal $(\theta,k)$-split torus and in Lemma \ref{nonsplitq} we find a representative for the only other possible isomorphy class of $k$-involutions over these fields. \
In section 3.3 we summarize the main results of the paper in Theorem \ref{maintheorem} and give the full classification of $k$-involutions when $k=\mathbb{C}, \mathbb{R}, \mathbb{Q}_p$ and $\mathbb{F}_q$ when $p \geq 2$ and $q>2$. We finish up by giving descriptions of the fixed point groups of isomorphy classes of $k$-involutions.
\subsection{Preliminaries and recollections}
Most of our notation is borrowed from \cite{Sp98} for algebraic groups, \cite{He00} for $k$-involutions and generalized symmetric spaces, \cite{La05} for quadratic forms and \cite{SV00} for octonion and quaternion algebras. \
The letter $G$ is reserved for an arbitrary reductive algebraic group unless it is $G_2$, which is specifically the automorphism group of an octonion algebra. When we refer to a maximal torus we use $T$ and any subtorus is denoted by another capital letter, usually $A$. Lowercase Greek letters are field elements and other lowercase letters denote vectors. We use $Z(G)$ to denote the center of $G$, $Z_G(T)$ to denote the centralizer of $T$ in $G$, and $N_G(T)$ to denote the normalizer of $T$ in $G$. \
By $\Aut(G)$ we mean the automorphism group of $G$, and by $\Aut(C)$ we mean the linear automorphisms of the composition algebra $C$. The group of inner automorphisms are denoted $\mathcal{I}nn(G)$ and the elements of $\mathcal{I}nn(G)$ are denoted by $\mathcal{I}_g$ where $g\in G$ and $\mathcal{I}_g(x) = gxg^{-1}$. \
We define a \emph{$\theta$-split torus}, $A$, of an involution, $\theta$, as a torus $A \subset G$ such that $\theta(a)=a^{-1}$ for all $a\in A$. We call a torus \emph{$(\theta, k)$-split} if it is both $\theta$-split and $k$-split. \
Let $A$ be a $\theta$-stable maximal $k$-split torus such that $A_{\theta}^-$ is a maximal $(\theta,k)$-split torus. By \cite{HW02} there exists a maximal $k$-torus $T \supset A$ such that $T_{\theta}^- \supset A_{\theta}^-$ is a maximal $\theta$-split torus. The involution $\theta$ induces an involution $\theta \in \Aut(X^*(T), X^*(A), \Phi(T), \Phi(A))$. It was shown by Helminck \cite{He00} that such an involution is unique up to isomorphy. For $T\supset A$ a maximal $k$-torus, an
\[ \theta \in \Aut(X^*(T), X^*(A), \Phi(T), \Phi(A)) \] is \emph{admissible} if there exists an involution $\tilde{\theta} \in \Aut(G,T,A)$ such that $\tilde{\theta}|_T = \theta$, $A_{\theta}^-$ is a maximal $(\theta, k)$-split torus, and $T_{\theta}^-$ is a maximal $\tilde{\theta}$-split torus of $G$. \
This will give us the set of $k$-involutions on $G$ that extend from involutions on the group of characters, $X^*(T)$.\
As for the $k$-inner elements they are defined as follows; if $\theta$ is a $k$-involution and $A_{\theta}^-$ is a maximal $\theta$-split torus then the elements of the set,
\[ I_k(A_{\theta}^-) = \left\{ a \in A_{\theta}^- \ \big| \ \left(\theta \circ \mathcal{I}_a \right)^2 = \id, \ \left(\theta \circ \mathcal{I}_a\right)(G_k)=G_k \right\}, \]
are called \emph{$k$-inner elements} of $\theta$. Some compositions $\theta \circ \mathcal{I}_a$ will not be isomorphic in the group $\Aut(G)$ for different $a\in I_k(A_{\theta}^-)$, though they will project down to the same involution of the group of characters of a maximal torus fixing the characters associated with a maximal $k$-split subtorus for all $a\in I_k(A_{\theta}^-)$. \
\subsection{Split octonion algebra}
Throughout this paper we use $N$ to be a quadratic form of a composition algebra and $\langle \ , \ \rangle$ to be the bilinear form associated with $N$. The capital letters $C$ and $D$ denote composition algebras and composition subalgebras respectively. The composition algebras we will refer to always have an identity, $e$. There is an anti-automorphism on a composition algebra that resembles complex conjugation denoted by, $\bar{ \ }$, which will have specific but analogous definitions depending on the dimension of the algebra. \
If $\{ e, a, b, ab\}$ is a basis for $D$, a quaternion algebra such that $a^2 = \alpha$ and $b^2 = \beta$, then we denote its quadratic form by, $\left( \frac{\alpha,\beta}{k} \right) \cong \langle 1, -\alpha, -\beta, \alpha\beta \rangle$. We will often refer to a quaternion algebra over a field $k$ by the $2$-Pfister form notation of its quadratic form. Since a composition algebra is completely determined by its quadratic form and its center, $ke$, there is no risk of ambiguity. By $k$ we are referring to an arbitrary field and by using the blackboard bold $\mathbb{F}$ we refer to a specific field. We consider only fields that do not have characteristic $2$. \
We always consider the octonion algebra as a doubling of the split quaternions thought of as $M_2(k)$, the $2 \times 2$ matrices over our given field. The octonion algebras we consider are an ordered pair of these with an extended multiplication that will be described in the next section.
For the following results we refer you to \cite{SV00} for the proofs. In these upcoming results $V$ is a vector space over a field, $k$, $\ch(k)\neq 2$, equipped with a quadratic form $N:V \to k$ and the associated bilinear form $\langle \ , \ \rangle:V\times V \to k$, such that
\begin{enumerate}
\item $N(\alpha v) = \alpha^2 N(v)$,
\item $\langle v,w \rangle = N(v+w) - N(v) - N(w)$,
\end{enumerate}
where, $v,w \in V$ and $\alpha \in k$.
A \emph{composition algebra}, $C$, will be a vector space with identity, $e$, and $N$ and $\langle \ , \ \rangle$ as above such that $N(xy)=N(x)N(y)$. Note that $ke$ is a composition algebra in a trivial way.
\newtheorem{doubling}[subsubsection]{Proposition, \cite{SV00}}
\begin{doubling}
When $C$ is a composition algebra with $D\subset C$ a finite dimensional subalgebra of $C$ with $C\neq D$, then we can choose $a \in D^{\perp}$ with $N(a) \neq 0$, then $D \oplus Da$ is a composition algebra. The product, quadratic form, and complex conjugation are given by
\begin{enumerate}
\item $(x+ya)(u+va) = (xu + \alpha \bar{v}y) + (vx + y\bar{u})a$, \text{ for } $x,y,u,v \in D$, $\alpha \in k^*$
\item $N(x+ya) = N(x) - \alpha N(y)$
\item $\overline{x+ya} = \bar{x} - ya$.
\end{enumerate}
The dimension of $D\oplus Da$ is twice the dimension of $D$ and $\alpha = -N(a)$.
\end{doubling}
We often use this decomposition above in the results that follow, and a theorem of Adolf Hurwitz gives us the possible dimensions of such algebras.
\newtheorem{hurwitz}[subsubsection]{Theorem, (A. Hurwitz), \cite{SV00}}
\begin{hurwitz}
\label{hurwitz}
Every composition algebra can be obtained from iterations of the doubling process starting from $ke$. The possible dimensions of a composition algebra are $1,2,4$, or $8$. A composition algebra of dimension $1$ or $2$ is commutative and associative, a composition algebra of dimension $4$ is associative and not commutative, a composition algebra of dimension $8$ is neither commutative nor associative.
\end{hurwitz}
\newtheorem{doublesplit}[subsubsection]{Corollary, \cite{SV00}}
\begin{doublesplit}
Any doubling of a split composition algebra is again a split composition algebra.
\end{doublesplit}
There are $2$ general types of composition algebras. If there are no zero divisors we call the composition algebra a \emph{division algebra}, and otherwise we call it a \emph{split algebra}. It follows from the definition that a composition algebra is determined completely by its norm, and we have the following theorem.
\newtheorem{splitcomp}[subsubsection]{Theorem, \cite{SV00}}
\begin{splitcomp}
\label{splitcomp}
In dimensions $2,4,$ and $8$ there is exactly one split composition algebra, over a given field $k$, up to isomorphism.
\end{splitcomp}
We can take as a split quaternion algebra, $D$, over a field $k$ to be $M_2(k)$, the $2\times 2$ matrices over $k$. Multiplication in $D$ will be typical matrix multiplication, our quadratic form will be given by
\[ N\left(
\begin{bmatrix}
x_{11} & x_{12} \\
x_{21} & x_{22}
\end{bmatrix}
\right)
=
\text{det} \left(
\begin{bmatrix}
x_{11} & x_{12} \\
x_{21} & x_{22}
\end{bmatrix}
\right)
= x_{11} x_{22} - x_{12}x_{21},
\]
and \emph{bar involution} will be given by
\[
\overline{
\begin{bmatrix}
x_{11} & x_{12} \\
x_{21} & x_{22}
\end{bmatrix}
}
=
\begin{bmatrix}
x_{22} & -x_{12} \\
-x_{21} & x_{11}
\end{bmatrix}.
\]
Elements of our split octonion algebra will have the form
\[ (x,y) =\left(
\begin{bmatrix}
x_{11} & x_{12} \\
x_{21} & x_{22}
\end{bmatrix},
\begin{bmatrix}
y_{11} & y_{12} \\
y_{21} & y_{22}
\end{bmatrix}
\right).
\]
Since all split octonion algebras over a given field are isomorphic, we can take $\alpha =1$ in our composition algebra doubling process. The multiplication, quadratic form, and octonion conjugation are given by the following:
\begin{align}
(x,y)(u,v) &= (xu + \bar{v}y, vx + y \bar{u}), \\
N\big( (x,y) \big) &= \text{det}(x) - \text{det}(y), \\
\overline{(x,y)} &= (\bar{x},-y),
\end{align}
with $x,y,u,v \in M_2(k)=D$. The basis of the underlying vector space is taken to be $\left\{ (E_{ij}, 0) , ( 0, E_{ij}) \right\}_{i,j=1,2}$, where the $E_{ij}$ are the standard basis elements for $2 \times 2$ matrices, and so our identity element in $C$ is $e = (E_{11} + E_{22},0)$.
\section{Automorphisms of $G_2$}
\subsection{Some results on $G_2$}
It is well known that the automorphism group of a $k$-split octonion algebra, $C$, over a field, $k$, is a $k$-split linear algebraic group of type $G_2$ over $k$. We can compute a split maximal torus for $\Aut(C)$, where $C= \big( M_2(k), M_2(k) \big)$ as above. Here we collect some known results, and again we refer to \cite{SV00}.
\newtheorem{maximaltorus}[subsubsection]{Theorem}
\begin{maximaltorus}
The following statements concerning $G= \Aut(C)$ are true;
\begin{enumerate}
\item There is a maximal $k$-split torus $T \subset G$ of the form
\[ T = \left\{ \diag(1,\beta \gamma, \beta^{-1} \gamma^{-1}, 1, \gamma^{-1}, \beta, \beta^{-1}, \gamma ) \ \big| \beta, \gamma \in k^* \right\}. \]
\item The center of $\Aut(C)$ contains only the identity.
\item For any composition algebra, $C$, over $k$, the only nontrivial subspaces of $C$ left invariant by $\Aut(C)$ are $ke$ and $e^{\perp}$.
\item All automorphisms of $\Aut(C)$ are inner automorphisms.
\end{enumerate}
\end{maximaltorus}
\subsection{$k$-involutions of $G$}
\newtheorem{kinv}[subsubsection]{Remark}
\begin{kinv}
If $\theta \in \mathcal{I}nn(G)$ and $\theta = \mathcal{I}_t$ is a $k$-involution then $t^2 \in Z(G)$.
\end{kinv}
Since groups of type $G_2$ have a trivial center, the problem of classifying $k$-involutions for $\Aut(C)$, where $C$ is a split octonion algebra, is the same as classifying the conjugacy classes of elements of order $2$ in $\Aut(C)$ that preserve the $k$-structure of $\Aut(C)$. \
\newtheorem{torusinv}[subsubsection]{Remark}
\begin{torusinv}
\label{torusinv}
The involutions that are of the form $\mathcal{I}_t$ where
\[ t_{(\beta, \gamma)} \in T = \left\{ \diag(1,\beta \gamma, \beta^{-1} \gamma^{-1}, 1, \gamma^{-1}, \beta, \beta^{-1}, \gamma ) \ \big| \beta, \gamma \in k^* \right\} \]
have $(\beta, \gamma) = (1,-1),(-1,1)$ or $(-1,-1)$.
\end{torusinv}
\begin{proof}
We set $\diag(1,\beta \gamma, \beta^{-1} \gamma^{-1}, 1, \gamma^{-1}, \beta, \beta^{-1}, \gamma )^2 = \id$ and exclude $(\beta, \gamma)=(1,1)$ which corresponds to the identity map.
\end{proof}
Using the above statement and the following result of Jacobson we can show that all $k$-involutions given by conjugation by elements coming from the maximal $k$-split torus $T$ are isomorphic.
\newtheorem{jake}[subsubsection]{Proposition, \cite{Ja58}}
\begin{jake}
\label{jake}
Let $C$ be an octonion algebra over $k$, then the conjugacy class of quadratic elements, $t\in G=\Aut(C)$ such that $t^2=\id$ are in bijection with the isomorphism classes of quaternion subalgebras of $C$.
\end{jake}
In particular if $t \in \Aut(C)$ has order $2$, then it leaves some quaternion subalgebra $D$ elementwise fixed giving us the eigenspace corresponding to the eigenvalue $1$. Then $D^{\perp}$ is the eigenspace corresponding to the eigenvalue $-1$. If $gtg^{-1}=s$ for some $g\in G$, then $s$ has order $2$ and $g(D)=D'$, $D'$ a quaternion subalgebra elementwise fixed by $s$, and $D\cong D'$.
\newtheorem{qsubalg}[subsubsection]{Corollary}
\begin{qsubalg}
Let $C$ be an octonion algebra over $k$ and $D$ and $D'$ quaternion subalgebras of $C$. If $s,t \in G = \Aut(C)$ are elements of order $2$ and $s,t$ fix $D,D'$ elementwise respectively, then $s \cong t$ if and only if $D \cong D'$ over $k$.
\end{qsubalg}
\newtheorem{conjint2}[subsubsection]{Corollary}
\begin{conjint2}
\label{conjint2}
For $s,t \in \Aut(C)$, $\mathcal{I}_t \cong \mathcal{I}_s$ if and only if $s$ and $t$ leave isomorphic quaternion subalgebras invariant.
\end{conjint2}
We can begin looking for elements of order $2$ in the $k$-split maximal torus we have computed, which in our case will be $t \in T$, $t^2 = \id$. Solving the following equation
\[ \diag(1,\beta \gamma, \beta^{-1} \gamma^{-1}, 1, \gamma^{-1}, \beta, \beta^{-1}, \gamma )^2 = \id, \]
we obtain the elements
\[ t_{(\beta,\gamma)} \text{ where } (\beta ,\gamma) = (\pm 1,\pm 1), \text{ and } (\beta,\gamma) \neq (1,1). \]
\newtheorem{normalconjlemma}[subsubsection]{Lemma}
\begin{normalconjlemma}
\label{normalconjlemma}
$\mathcal{I}_g \mathcal{I}_{\varepsilon}\mathcal{I}_g^{-1} = \mathcal{I}_{g\varepsilon g^{-1}}$
\end{normalconjlemma}
\begin{proof}
We can apply the left hand side to an element $y\in G$,
\begin{align*}
\mathcal{I}_g\mathcal{I}_{\varepsilon}\mathcal{I}_g^{-1} (y) &= g \big( \varepsilon (g^{-1} y g) \varepsilon^{-1} \big) g^{-1} \\
&= (g \varepsilon g^{-1}) y (g \varepsilon g^{-1})^{-1} \\
&= \mathcal{I}_{(g \varepsilon g^{-1})}(y).
\end{align*}
\end{proof}
\newtheorem{normalconj}[subsubsection]{Proposition}
\begin{normalconj}
If $\varepsilon_1^2=\varepsilon_2^2 = \id$ and $\varepsilon_1, \varepsilon_2 \in T$ a maximal torus of $G$ when $Z(G)=\{ \id \}$, then $\mathcal{I}_{\varepsilon_1} \cong \mathcal{I}_{\varepsilon_2}$ if and only if $\varepsilon_1 = n \varepsilon_2 n^{-1}$ for some $n \in N_G(T)$.
\end{normalconj}
\begin{proof}
If $n\varepsilon_2 n^{-1} = \varepsilon_1$ for $n\in N_G(T)$ then $\mathcal{I}_{\varepsilon_1} \cong \mathcal{I}_{\varepsilon_2}$ via the isomorphism $\mathcal{I}nn \left( \mathcal{I}_n \right)$.
Now we let $\mathcal{I}_{\varepsilon_1} \cong \mathcal{I}_{\varepsilon_2}$, and so there exists a $g\in G$ such that $\mathcal{I}_{\varepsilon_1}=\mathcal{I}_g \big(\mathcal{I}_{\varepsilon_2} \big) \mathcal{I}_g^{-1}$,
then we have by Lemma \ref{normalconjlemma},
\[ (g \varepsilon_2 g^{-1})^{-1}\varepsilon_1 y = y (g \varepsilon_2 g^{-1})^{-1}\varepsilon_1, \]
for all $y\in G$ and so $(g \varepsilon_2 g^{-1})^{-1}\varepsilon_1 \in Z(G) = \{\id \}$. Thus we have that $\varepsilon_1^{-1} = (g \varepsilon_2 g^{-1})^{-1}$, so $\varepsilon_1 = g \varepsilon_2 g^{-1}$. So now notice $S = g T g^{-1}$ is a maximal torus containing $\varepsilon_1$. The group $Z_G(\varepsilon_1)$ contains $S$ and $T$, so there exists an $x\in Z_G(\varepsilon_1)$ such that $x S x^{-1} = T$. We know that $S = gTg^{-1}$ so
\[ xg T g^{-1}x^{-1} = xg T (xg)^{-1} = T, \]
which shows that $xg \in N_G(T)$. We notice that
\begin{align*}
\mathcal{I}_{xg}\mathcal{I}_{\varepsilon_2} \mathcal{I}_{xg}^{-1} &= \mathcal{I}_{ xg \varepsilon_2 (xg)^{-1} } \\
&= \mathcal{I}_{ xg \varepsilon_2 g^{-1}x^{-1} } \\
&= \mathcal{I}_{x\varepsilon_1 x^{-1}} \\
&= \mathcal{I}_{\varepsilon_1},
\end{align*}
and so, by the argument above, $(xg)\varepsilon_2(xg)^{-1} = \varepsilon_1$.
\end{proof}
Using the previous proposition it is possible to find elements $n,m \in N_G(T)$ such that
\[ t_{(-1,-1)} = n \left(t_{(-1,1)}\right) n^{-1} = m \left(t_{(1,-1)}\right) m^{-1}. \]
It is also possible to show, and perhaps more illustrative, that they leave isomorphic quaternion subalgebras invariant, and thus by Corollary \ref{conjint2} provide us with isomorphic $k$-involutions.
\newtheorem{tconj}[subsubsection]{Proposition}
\begin{tconj}
\label{tconj}
$t_{(-1,-1)} \cong t_{(-1,1)} \cong t_{(1,-1)}.$
\end{tconj}
\begin{proof}
Let $G=\Aut(C)\supset T = \left\{ \diag(1,\beta \gamma, \beta^{-1} \gamma^{-1}, 1, \gamma^{-1}, \beta, \beta^{-1}, \gamma ) \ \big| \ \beta, \gamma \in k^* \right\}$, then $G$ is an algebraic group over the field $k$, $\ch(k)\neq 2$. The automorphism $t_{(-1,-1)}$ leaves the split quaternion subalgebra $(M_2(k),0)$ elementwise fixed. \
The element of order $2$,
\[ t_{(1,-1)} = \diag(1,-1,-1,1,-1,1,1,-1), \]
leaves the quaternion subalgebra,
\[
k
\underbrace{
\left(
\begin{bmatrix}
1 & \\
& 1 \\
\end{bmatrix},
0
\right)
}_{e}
\bigoplus
k
\underbrace{
\left( 0,
\begin{bmatrix}
& 1 \\
-1 & \\
\end{bmatrix}
\right)
}_{a}
\bigoplus
k
\underbrace{
\left( 0,
\begin{bmatrix}
& 1 \\
1 & \\
\end{bmatrix}
\right)
}_{b}
\bigoplus
k
\underbrace{
\left(
\begin{bmatrix}
1 & \\
& -1 \\
\end{bmatrix},
0 \right)
}_{ab}
,
\]
elementwise fixed. Notice that $(b-a)(e+ab)=(0,0)$, and so the quaternion subalgebra is split.
And
\[ t_{(-1,1)} = \diag(1,-1,-1,1,1,-1,-1,1), \]
leaves the quaternion algebra
\[
k
\underbrace{
\left(
\begin{bmatrix}
1 & \\
& 1 \\
\end{bmatrix},
0
\right)
}_{e}
\bigoplus
k
\underbrace{
\left(
\begin{bmatrix}
1 & \\
& -1 \\
\end{bmatrix},
0 \right)
}_{a}
\bigoplus
k
\underbrace{
\left( 0,
\begin{bmatrix}
1 & \\
& 1 \\
\end{bmatrix}
\right)
}_{b}
\bigoplus
k
\underbrace{
\left( 0,
\begin{bmatrix}
1 & \\
& -1 \\
\end{bmatrix}
\right)
}_{ab},
\]
elementwise fixed. Notice that $(ab + b)(e + a) = (0,0)$, and so the quaternion subalgebra is split. Since over a given field $k$ every split quaternion subalgebra is isomorphic, we have that $t_{(-1,-1)} \cong t_{(-1,1)} \cong t_{(1,-1)}$.
\end{proof}
\newtheorem{cortconj}[subsubsection]{Corollary}
\begin{cortconj}
\label{cortconj}
$\mathcal{I}_{t_{(-1,-1)}} \cong \mathcal{I}_{t_{(-1,1)}} \cong \mathcal{I}_{t_{(1,-1)}}.$
\end{cortconj}
So from now on we refer to a representative of the congruence class containing $\mathcal{I}_{t_{(-1,-1)}}$, $\mathcal{I}_{t_{(-1,1)}}$, and $\mathcal{I}_{t_{(1,-1)}}$ as $\mathcal{I}_t$, when there is no ambiguity.
\newtheorem{splitinv}[subsubsection]{Lemma}
\begin{splitinv}
There is only one isomorphy class of $k$-involutions when $k$ is a finite field of order greater than $2$, the field of complex numbers, a $p$-adic field with $p>2$, or a complete, totally imaginary algebraic number field.
\end{splitinv}
\begin{proof}
In these cases only split quaternion algebras exist, \cite{O'M63}, \cite{SV00}.
\end{proof}
In \cite{Yo90} Yokota discusses the maps $\gamma, \gamma_C$, and $\gamma_H$ and shows that they are isomorphic, and that they are also isomorphic to any composition of maps between them. In his paper he defines a conjugation coming from complexification. In particular we can look at $\gamma_H$, which is the complexification conjugation on the quaternion level of an octonion algebra over $\mathbb{R}$. If we take $u + vc \in H \oplus Hc$ where $u,v\in H$ and $c\in H^{\perp}$ his map is
\[ \gamma_H(u+vc) = u-vc, \]
which in our presentation of the octonion algebra would look like,
\[ \gamma_H \left(
\begin{bmatrix}
u_{11} & u_{12} \\
u_{21} & u_{22}
\end{bmatrix},
\begin{bmatrix}
v_{11} & v_{12} \\
v_{21} & v_{22}
\end{bmatrix} \right)
= \left(
\begin{bmatrix}
u_{11} & -u_{12} \\
-u_{21} & u_{22}
\end{bmatrix},
\begin{bmatrix}
v_{11} & -v_{12} \\
-v_{21} & v_{22}
\end{bmatrix} \right),
\]
and corresponds to our map $\mathcal{I}_{t_{(-1,1)}}$.
\subsection{Maximal $\theta$-split torus}
Rather than trying to find a maximal $\theta$-split torus, where $\theta \cong \mathcal{I}_t$, and then computing its maximal $k$-split subtorus, we find a $k$-involution $\theta$ that splits our already maximal $k$-split torus of the form
\[ T = \left\{ \diag(1,\beta \gamma, \beta^{-1} \gamma^{-1}, 1, \gamma^{-1}, \beta, \beta^{-1}, \gamma ) \ \big| \beta, \gamma \in k^* \right\}. \]
It is straightforward to check that
\[
s=
\begin{bmatrix}
& & & 1 \\
& & 1 & \\
& 1 & & \\
1 & & &
\end{bmatrix}
\bigoplus
\begin{bmatrix}
& & & 1 \\
& & 1 & \\
& 1 & & \\
1 & & &
\end{bmatrix}
, \]
is an element of $\Aut(C)$, where $C$ is the split octonion algebra described above over a field $k$, $\ch(k) \neq 2$. It is immediate that $T$ is an $\mathcal{I}_s$-split torus.
\newtheorem{maxsplit}[subsubsection]{Proposition}
\begin{maxsplit}
$T$ is a maximal $(\mathcal{I}_s,k)$-split torus.
\end{maxsplit}
\begin{proof}
Notice first that if $t \in T$ that $\mathcal{I}_s(t)=t^{-1}$, and next that $T$ is $k$-split and is a maximal torus.
\end{proof}
\newtheorem{scongt}[subsubsection]{Proposition}
\begin{scongt}
\label{scongt}
$\mathcal{I}_s \cong \mathcal{I}_t$
\end{scongt}
\begin{proof}
The element $s$ is an automorphism of order $2$ of $C$, our split octonion algebra described above, that leaves the following quaternion algebra fixed elementwise,
\[
k
\underbrace{
\left(
\begin{bmatrix}
1 & \\
& 1 \\
\end{bmatrix},
0
\right)
}_{e}
\bigoplus
k
\underbrace{
\left(
\begin{bmatrix}
& 1 \\
1 & \\
\end{bmatrix},
0 \right)
}_{a}
\bigoplus
k
\underbrace{
\left( 0,
\begin{bmatrix}
1 & \\
& 1 \\
\end{bmatrix}
\right)
}_{b}
\bigoplus
k
\underbrace{
\left( 0,
\begin{bmatrix}
& 1 \\
1 & \\
\end{bmatrix}
\right)
}_{ab}.
\]
Notice that $(b+ab)(e+a+b+ab)=0$, and so the quaternion subalgebra is split.
\end{proof}
\subsection{Another isomorphy class of $k$-involutions over certain fields}
We have seen that our maximal torus $T=T_{\mathcal{I}_s}^-$, and so we can look at elements of $T$ for $k$-inner elements of $\mathcal{I}_s$ that will give us new conjugacy classes over fields for which quaternion division algebras can exist. The fields we are interested in include the real numbers, 2-adics, and rationals.
\newtheorem{nonsplitq}[subsubsection]{Lemma}
\begin{nonsplitq}
\label{nonsplitq}
For $C$ a split octonion algebra over a field $k=\mathbb{R}, \mathbb{Q}_2, \mathbb{Q}$,
\[ s\cdot t_{(1,-1)} \in \Aut(C), \]
leaves a quaternion division subalgebra elementwise fixed.
\end{nonsplitq}
\begin{proof}
The element $s\cdot t_{(1,-1)} \in \Aut(C)$ leaves the following quaternion subalgebra elementwise fixed,
\[
k
\underbrace{
\left(
\begin{bmatrix}
1 & \\
& 1 \\
\end{bmatrix},
0
\right)
}_{e}
\bigoplus
k
\underbrace{
\left( 0,
\begin{bmatrix}
1 & \\
& -1 \\
\end{bmatrix}
\right)
}_{a}
\bigoplus
k
\underbrace{
\left( 0,
\begin{bmatrix}
& 1 \\
1 & \\
\end{bmatrix}
\right)
}_{b}
\bigoplus
k
\underbrace{
\left(
\begin{bmatrix}
& 1 \\
-1 & \\
\end{bmatrix},
0 \right)
}_{ab}.
\]
All basis elements are such that $x \bar{x} = 1$, and so have a norm isomorphic to the 2-Pfister form $\left( \frac{-1,-1}{k} \right)$, where $k=\mathbb{R}, \mathbb{Q}_2, \mathbb{Q}$, which corresponds to a quaternion division algebra over each respective field. Moreover, over $k=\mathbb{R}$ or $\mathbb{Q}_2$ there is only one quaternion division algebra up to isomorphism.
\end{proof}
\newtheorem{nonsplitqQp}[subsubsection]{Lemma}
\begin{nonsplitqQp}
\label{nonsplitqQp}
For $C$ a split octonion algebra over a field $k=\mathbb{Q}_p$ with $p>2$, the element $s \cdot t_{(-N_p,-pN_p^{-1})}$ leaves a quaternion division algebra elementwise fixed.
\end{nonsplitqQp}
\begin{proof}
The element $s \cdot t_{(-N_p,-pN_p^{-1})}$ leaves the following quaternion subalgebra elementwise fixed,
\[
k
\underbrace{
\left(
\begin{bmatrix}
1 & \\
& 1 \\
\end{bmatrix},
0
\right)
}_{e}
\bigoplus
k
\underbrace{
\left( 0,
\begin{bmatrix}
& -N_p \\
1 & \\
\end{bmatrix}
\right)
}_{a}
\bigoplus
k
\underbrace{
\left( \begin{bmatrix}
& p \\
1 & \\
\end{bmatrix},0
\right)
}_{b}
\bigoplus
k
\underbrace{
\left(0,
\begin{bmatrix}
N_p & \\
& -p \\
\end{bmatrix}
\right)
}_{ab},
\]
with $\mathbb{Q}_p^*/ (\mathbb{Q}_p^*)^2 = \{ 1, p, N_p, pN_p\}$. This algebra is isomorphic to $\left( \frac{p,N_p}{\mathbb{Q}_p} \right)$, which is a representative of the unique isomorphy class of quaternion division algebras for a given $p$.
\end{proof}
\newtheorem{maintheorem}[subsubsection]{Theorem}
\begin{maintheorem}
\label{maintheorem}
Let $\theta = \mathcal{I}_s$ and $G=\Aut(C)$ where $C$ is a split octonion algebra over a field $k$, then
\begin{enumerate}
\item when $k=\mathbb{R}, \mathbb{Q}, \mathbb{Q}_2$; $\theta$ and $\theta \circ \mathcal{I}_{ t_{(1,-1)} }$ are representatives of $2$ isomorphy classes of $k$-involutions of $G$. For $k=\mathbb{R}$ or $\mathbb{Q}_2$ these are the only isomorphy classes, but this is not the case for $k=\mathbb{Q}$.
\item when $k=\mathbb{Q}_p$ and $p>2$, we have two isomorphy classes of $k$-involutions.
\item when $k=\mathbb{C}$ and $\mathbb{F}_p$ with $p>2$; there is only one isomorphy class of $k$-involutions of $G$.
\end{enumerate}
\end{maintheorem}
\begin{proof}
For part (1) we only need to notice that over the fields $\mathbb{R}$, $\mathbb{Q}$ and $\mathbb{Q}_2$ that $\theta$ and $\theta \circ \mathcal{I}_{t_{(1,-1)}}$ leave nonisomorphic subalgebras elementwise fixed and so by Proposition \ref{jake}, Corollary \ref{cortconj}, and Proposition \ref{scongt} they are not isomorphic. By Theorem \ref{splitcomp} and Lemma \ref{nonsplitq} these are the only $2$ quaternion subalgebras up to isomorphism, so there are no other possible isomorphy classes.
Part (2) follows from Theorem \ref{splitcomp} and Lemma \ref{nonsplitqQp}.
For part (3) by Theorem \ref{splitcomp}, Proposition \ref{tconj}, and Corollary \ref{cortconj} we have the result.
\end{proof}
\subsection{Fixed point groups}
In order to compute the fixed point groups of each $k$-involution we first look at how such elements of $\Aut(C)$ act on $C$.
\newtheorem{fixedlemma}[subsubsection]{Lemma}
\begin{fixedlemma}
\label{fixedlemma}
Let $t\in \Aut(C)=G$ such that $t^2 = \id$ and $D \subset C$ the quaternion algebra elementwise fixed by $t$ then $f \in G^t := G^{\mathcal{I}_t} = \{g \in G \ | \ \mathcal{I}_t(g)=g \}$ if and only if $f$ leaves $D$ invariant.
\end{fixedlemma}
\begin{proof}
($\Rightarrow$): Let $D \subset C$ be fixed elementwise by $t$ and $f\in G^t$, then $\mathcal{I}_t(f)=f$. Now let $c\in C$ be any element of the octonion algebra containing $D$, then we can write $C = D \oplus D^{\perp}$ and $c=a+b$ where $a \in D$ and $b \in D^{\perp}$. Since $t$ is a $k$-involution it has only $\pm 1$ as eigenvalues and $t(a+b) = a - b$. Furthermore,
\begin{align*}
\mathcal{I}_t(f)(c) &= tft^{-1}(c) \\
&=tft(c) \\
&=tft(a+b) \\
&=tft(a)+tft(b) \\
&=tf(a)+f(b),
\end{align*}
and since $\mathcal{I}_t(f)(c) = f(c)=f(a+b)$ for all $c\in C$,
\[ f(a)+f(b) = tf(a) + f(b). \]
From this we can conclude that $tf(a) = f(a)$ so $f(a) \in D$. \
($\Leftarrow$): If we assume, conversely, that $D \subset C$ is the subalgebra fixed elementwise by $t$ and $f(D)=D$ then $f(D^{\perp}) = D^{\perp}$, and
\begin{align*}
tft^{-1}(c) &= tft(a+b) \\
&= tft(a)+tft(b) \\
&= tf(a) - tf(b) \\
&= f(a) + f(b) \\
&= f(c),
\end{align*}
for all $c\in C$ and we have the result.
\end{proof}
Every involution in $\Aut(C)$ leaves some quaternion algebra, $D$, elementwise fixed. Now we need only to see which automorphisms of $C$ leave $D$ invariant. In every case we consider, if $D \subset C$ is the fixed quaternion algebra, then $C= D\oplus D^{\perp}$ with respect to $N$. The automorphisms of $C$ that leave $D$ invariant, denoted $\Aut(C,D)$, are of the form,
\[ s(x+ya) = s(x) +s(y)s(a), \]
where $s \in \Aut(C,D)$; $x,y \in D$ and $a\in D^{\perp}$ such that $N(a) \neq 0$. Since $s$ leaves $D$ invariant we can further see that
\[ s(y) \in D, \text{ and } s(a) \in D^{\perp} \text{ such that } N(s(a)) \neq 0, \]
and we can write
\[ s(x+ya) = s_{dp}(x+ya) = dxd^{-1} + (pdyd^{-1}) a, \]
where $d, p \in D$ and $N(d)\neq 0$, $N(p)=1$. For more details see \cite{SV00}.
First let us consider the case where $D$ is a split quaternion algebra. In this case $D \cong M_2(k)$ and $d\in \GL _2(k)$ and $p \in \SL_2(k)$.
\newtheorem{fixsplit}[subsubsection]{Proposition}
\begin{fixsplit}
When $t \in \Aut(C)$ is an involution and leaves $D \subset C$, a split quaternion subalgebra, elementwise fixed, then
\[ G^t := G^{\mathcal{I}_t} \cong \PGL_2(k) \times \SL_2(k). \]
\end{fixsplit}
\begin{proof}
Consider the map
\[ \psi: \GL _2(k) \times \SL_2(k) \to \Aut(C,D) \text{, where } (d,p) \mapsto s_{dp}. \]
This map is surjective, and its kernel is given by $\ker(\psi) = \{ (\alpha \cdot e , e ) \ | \ \alpha \in k^*\}$.
\end{proof}
In the case where the involution $t$ leaves a quaternion division algebra $D$ invariant, we have the same initial set up, i.e., if $s\in \Aut(C,D)$ then
\[ s(x+ya) = s_{dp}(x+ya) = d x d^{-1} + (pdyd^{-1}) a, \]
where $N(d) \neq 0$ and $N(p)=1$; however, $D \not\cong M_2(k)$: it is isomorphic to Hamilton's quaternions over $k$. In this case $N(d) \neq 0$ only tells us that $d\neq 0 \in D$.
\newtheorem{fixdiv}[subsubsection]{Proposition}
\begin{fixdiv}
When $t \in \Aut(C)$ is an involution and leaves $D \subset C$, a quaternion division algebra, elementwise fixed, then $G^t \cong \SO(D_0,N) \times \Sp(1)$ where $D_0 = ke^{\perp}$.
\end{fixdiv}
\begin{proof}
The condition $N(p) = p \bar{p} = 1$ tells us that $p \in \Sp(1)$, the group of $1 \times 1$ symplectic matrices over $D$. If we consider the surjective homomorphism
\[\psi: D^* \times \Sp(1) \to \Aut(C,D) \text{ where } (d,p) \mapsto s_{dp},\]
and $D^*=D-\{0\}$ is the group consisting of the elements of $D$ having inverses, its kernel, $\ker(\psi) = \{ (\alpha \cdot e,e) \ | \ \alpha \in k^* \}$. So we have
\[ D^*/Z(D^*) \times \Sp(1) \cong \mathcal{I}nn(D^*) \times \Sp(1). \]
Jacobson tells us, \cite{Ja58}, that all automorphisms of $D^*$ are inner and leave the identity fixed. Also included in \cite{Ja58} is that $\mathcal{I}nn(D^*) \cong \SO(D_0,N)$, where $D_0 \subset D$ such that $D_0 = e^{\perp}$, the three dimensional subspace, and $\SO(D_0,N)$ is the group of rotations of $D_0$ with respect to $N|_{D_0}$.
\end{proof}
\section{Concluding remarks}
\subsection{$k=\mathbb{Q}$}
Isomorphy classes of quaternion algebras over $k$ are given by equivalence classes of $2$-Pfister forms $\left(\frac{\alpha,\beta}{k}\right)$ over $k$ corresponding to the quadratic form,
\[ N(x) = x_0^2 + (-\alpha)x_1^2 + (-\beta)x_2^2 + (\alpha\beta)x_3^2, \]
while octonion algebras depend on $3$-Pfister forms, $\left( \frac{\alpha,\beta,\gamma}{k} \right)$ with quadratic form,
\[ N(x) = x_0^2 + (-\alpha)x_1^2 +(-\beta)x_2^2 + (\alpha\beta)x_3^2 + (-\gamma)x_4^2 + (\alpha\gamma)x_5^2 + (\beta\gamma)x_6^2 + (-\alpha\beta\gamma)x_7^2. \]
It is not difficult to show that for a prime $p$, such that $p \equiv 3 \pmod 4$,
\[ \left( \frac{ -1, p }{\mathbb{Q}} \right) \]
is a division algebra. Further, for $p$ and $q$ distinct primes both congruent to $3 \pmod 4$,
\[ \left( \frac{ -1, p }{\mathbb{Q}} \right) \not\cong \left( \frac{ -1, q }{\mathbb{Q}} \right), \]
see \cite{Pi82} Exercise 1.7.4.
\newtheorem{rationalex}[subsubsection]{Example}
\begin{rationalex}
Let $C = \left(M_2(\mathbb{Q}),M_2(\mathbb{Q}) \right)$. We can find an involution of $\Aut(C)$ leaving $D \cong \left( \frac{ -1, p }{\mathbb{Q}} \right)$ elementwise fixed by first constructing a basis for $D$. If we pick
\[ a = \left( 0,
\begin{bmatrix}
& 1 \\
1 &
\end{bmatrix}
\right)
\text{ and }
b = \left(
\begin{bmatrix}
& p \\
1 &
\end{bmatrix},
0 \right), \]
then $a^2 = -1$ and $b^2 = p$. Then the other basis elements of $D$ are $e= (\id,0)$ and
\[ ab = \left( 0,
\begin{bmatrix}
-1 & \\
& -p
\end{bmatrix}
\right). \]
Let $p \equiv 3 \pmod 4$, then $D$ is a division algebra and
\[
s_p = \begin{bmatrix}
& & & 1 \\
& & p & \\
& p^{-1} & & \\
1 & & &
\end{bmatrix}
\bigoplus
\begin{bmatrix}
& & & p^{-1} \\
& & 1 & \\
& 1 & & \\
p & & &
\end{bmatrix} \in \Aut(C), \]
leaves $D$ elementwise fixed. Notice $s_p = s \cdot t_{(p,1)}$. There is a $\mathbb{Q}$-involution for each $p \equiv 3 \pmod 4$, of which there are infinitely many.
\end{rationalex}
Over $\mathbb{Q}$ there are only $2$ octonion algebras, $\left( M_2(\mathbb{Q}), M_2(\mathbb{Q}) \right)$ and a division algebra. When we consider quaternion algebras over $\mathbb{Q}$ we get one split quaternion algebra, but we get exactly one division algebra for each unique set of an even number of real or finite places of $\mathbb{Q}$, \cite{O'M63} and \cite{SV00}. \
\subsection{$k$ is an algebraic number field}
When the quaternion or octonion algebra is taken over an algebraic number field, two quadratic forms are equivalent if and only if they are equivalent over all local fields $k_{\nu}$, where $\nu$ varies over all places of $k$, which is a result of Hasse's Theorem \cite{Se73}. Hasse's Theorem also tells us that the number of possible anisotropic quadratic forms corresponding to nonisomorphic octonion algebras is bounded above by $2^r$, where $r$ is the number of real places of $k$ \cite{SV00}.
\end{document} |
\begin{document}
\annalsline{158}{2003}
\received{January 24, 2002}
\startingpage{323}
\def\end{document}{\end{document}}
\font\tenrm=cmr10
\def\ritem#1{\item[{\rm #1}]}
\input amssym.def
\input amssym.tex
\def\hensp#1{\quad\hbox{#1}\quad }
\catcode`\@=11
\font\twelvemsb=msbm10 scaled 1100
\font\tenmsb=msbm10
\font\ninemsb=msbm10 scaled 800
\newfam\msbfam
\textfont\msbfam=\twelvemsb \scriptfont\msbfam=\ninemsb
\scriptscriptfont\msbfam=\ninemsb
\def\hexnumber@\msbfam{\hexnumber@\msbfam}
\def\Bbb{\relax\ifmmode\let\next\Bbb@\else
\def\next{\errmessage{Use \string\Bbb\space only in math
mode}}\fi\next}
\def\Bbb@#1{{\Bbb@@{#1}}}
\def\Bbb@@#1{\fam\msbfam#1}
\catcode`\@=12
\catcode`\@=11
\font\twelveeuf=eufm10 scaled 1100
\font\teneuf=eufm10
\font\nineeuf=eufm7 scaled 1100
\newfam\euffam
\textfont\euffam=\twelveeuf \scriptfont\euffam=\teneuf
\scriptscriptfont\euffam=\nineeuf
\def\hexnumber@\euffam{\hexnumber@\euffam}
\def\frak{\relax\ifmmode\let\next\frak@\else
\def\next{\errmessage{Use \string\frak\space only in math
mode}}\fi\next}
\def\frak@#1{{\frak@@{#1}}}
\def\frak@@#1{\fam\euffam#1}
\catcode`\@=12
\title{Global existence and convergence for a\\ higher order flow in conformal
geometry}
\shorttitle{Global existence}
\author{Simon Brendle}
\institutions{Princeton University, Princeton, NJ\\
{\eightpoint {\it E-mail address\/}: brendle@math.princeton.edu
}}
\newcommand{\hbox{\rm tr}\,}{\hbox{\rm tr}\,}
\section{Introduction}
An important problem in conformal geometry is the construction of
conformal metrics for which a certain curvature quantity equals a prescribed function, e.g. a
constant. In two dimensions, the uniformization theorem assures the
existence of a conformal metric with constant Gauss curvature. Moreover,
J.\ Moser \cite{Mo} proved that for every positive function $f$ on $S^2$
satisfying $f(x) = f(-x)$ for all $x \in S^2$ there exists a conformal metric on
$S^2$ whose Gauss curvature is equal to $f$.
A natural
conformal invariant in dimension four is $$ Q =
-\frac{1}{6} \, (\Delta R - R^2 + 3 \, |{\rm Ric}|^2),$$
where $R$ denotes the scalar curvature and ${\rm Ric}$ the Ricci tensor.
This formula can also be
written in the form $$ Q = -\frac{1}{6} \, (\Delta R - 6 \, \sigma_2(A)),$$
where $$ A = {\rm Ric} - \frac{1}{6} \, Rg$$ is the Schouten tensor of $M$ and
$$ \sigma_2(A) = \frac{1}{2} \, (\hbox{\rm tr}\, A)^2 - \frac{1}{2} \, |A|^2$$ is the
second elementary symmetric polynomial in its eigenvalues.
Under a conformal change of the metric $$ g = e^{2w} g_0,$$ the quantity $Q$
transforms according to $$ Q = e^{-4w} (Q_0 + P_0 w),$$ where $P_0$ denotes the
Paneitz operator with respect to $g_0$. The Gauss-Bonnet-Chern theorem asserts that
$$ \int_M Q \, dV + \int_M \frac{1}{4} \, |W|^2 \, dV = 8\pi^2 \chi(M).$$
Since the Weyl tensor $W$ is conformally invariant, it follows
that the expression $$ \int_M Q \, dV$$ is conformally invariant, too.
The quantity $Q$ plays an important role in four-dimensional conformal geometry;
see \cite{BCY}, \cite{CY1}, \cite{CGY}, \cite{Gu1} (note that our notation differs slightly from that
in \cite{BCY}, \cite{CY1}). Moreover, the Paneitz operator plays a similar role as the
Laplace operator in dimension two; compare \cite{BCY}, \cite{CY1},
\cite{CGY}, \cite{DMA1}, \cite{DMA2}. We also
note that the Paneitz operator is of considerable interest in mathematical
physics, see \cite[\S\,IV.4]{Co}.
T.\ Branson, S.-Y.\ A.\ Chang and P.\ Yang \cite{BCY} studied metrics for which the
curvature quantity $Q$ is constant. Since $$ \int_M Q \, dV$$ is conformally
invariant, these metrics minimize the functional $$ \int_M Q^2 \, dV$$ among all
conformal metrics with fixed volume. In addition, these metrics are critical points of the
functional $$ E_1[w] = \int_M 2 \, w \, P_0 w \, dV_0 + \int_M 4 \, Q_0 \, w \, dV_0 -
\int_M Q_0 \, dV_0 \,
\log \bigg ( \int_M e^{4w} \, dV_0 \bigg ),$$ where $g_0$ denotes a fixed
metric on $M$ and $g = e^{2w} g_0$.
According to the results in \cite{BCY}, one can construct conformal metrics of constant $Q$-curvature by minimizing the
functional $E_1[w]$ provided that the Paneitz operator is weakly positive and the integral of the $Q$-curvature on
$M$ is less than that on the standard sphere $S^n$. In dimension four, M. Gursky \cite{Gu2} proved that both conditions are
satisfied if
$$
Y(g_0)\ge 0
$$
and $$\int_M Q_0\, dV_0\ge 0,
$$
and $M$ is not conformally equivalent to the standard sphere $S^4$.
C.\ Fefferman and R.\ Graham \cite{FG1}, \cite{FG2}
established the existence of a conformally invariant self-adjoint
operator with leading term $(-\Delta)^{\frac{n}{2}}$
in all even dimensions $n$. Moreover, there is a
curvature quantity which transforms according to
$$ Q = e^{-nw} (Q_0 + P_0 w)$$ for $$ g = e^{2w} g_0.$$ This implies that
the expression $$ \int_M Q \, dV$$ is
conformally invariant. Hence, a metric with $Q = \hbox{\rm constant}$ minimizes
the functional $$ \int_M Q^2 \, dV$$ among all
conformal metrics with fixed volume. Finally, the analogue of the functional
$E_1[w]$ is given by
$$ E_1[w] = \int_M \frac{n}{2} \, w \, P_0 w \, dV_0 + \int_M n \, Q_0 \, w \, dV_0 -
\int_M Q_0 \, dV_0 \,
\log \bigg ( \int_M e^{nw} \, dV_0 \bigg ).$$
Our aim is to construct conformal metrics for which
the curvature quantity $Q$ is a constant multiple of a prescribed positive
function $f$ on $M$. This equation is the Euler-Lagrange equation for
the functional
$$ E_f[w] = \int_M \frac{n}{2} \, w \, P_0 w \, dV_0 + \int_M n \, Q_0 \, w \, dV_0 -
\int_M Q_0 \, dV_0 \,
\log \bigg ( \int_M e^{nw} \, f \, dV_0 \bigg ).$$
We construct critical points of the functional $E_f[w]$ using
the gradient flow for $E_f[w]$. A similar method was used by
R.\ Ye \cite{Ye} to prove Yamabe's theorem for locally conformally flat
manifolds. K.\ Ecker and G.\ Huisken \cite{EH}
used a variant of mean curvature flow to construct hypersurfaces with
prescribed mean curvature in cosmological spacetimes.
The flow of steepest descent for the functional $E_f[w]$ is given by
$$ \frac{\partial}{\partial t} g =
- \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \, g.$$ Here,
$\overline{Q}$ and $\overline{f}$ denote the mean values of $Q$ and $f$ respectively,
i.e.\ $$ \int_M (Q - \overline{Q}) \, dV = 0\quad\hbox{ and }\quad \int_M (f - \overline{f}) \, dV = 0.$$
This evolution equation preserves the conformal structure of $M$. Moreover,
since $$ \int_M \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \, dV
= \int_M \Big ( \overline{Q} - \frac{\overline{Q} \, \overline{f}}{\overline{f}} \Big ) \, dV = 0,$$
the volume of $M$
remains constant. From this it follows that $\overline{Q}$ is constant in
time. If we write $g = e^{2w} g_0$ for a fixed metric $g_0$, then
the evolution equation takes the form
$$ \frac{\partial}{\partial t} w = -\frac{1}{2} \, e^{-nw} \, P_0 w
- \frac{1}{2} \, e^{-nw} \, Q_0 + \frac{1}{2} \, \frac{\overline{Q} \, f}{\overline{f}}, $$
where $P_0$ denotes the Paneitz operator with respect to $g_0$.
Therefore, the function $w$ satisfies a
quasilinear parabolic equation of order $n$ involving the critical Sobolev exponent.
Moreover, the reaction term is nonlocal, since $\overline{f}$ involves values of $w$ on the
whole of $M$.
\proclaim{Theorem}\label{convergence.1}
Assume that the Paneitz operator $P_0$ is weakly positive with kernel consisting
of the constant functions. Moreover{\rm ,} assume that
$$ \int_M Q_0 \, dV_0 < (n-1)! \, \omega_n.$$ Then the evolution equation $$ \frac{\partial}{\partial t} g
= - \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \, g$$ has a
solution which is defined for all times and converges to a metric with $$ \frac{Q}{f} =
\frac{\overline{Q}}{\overline{f}}.$$
\endproclaim
On the standard sphere $S^n$, we have
$$ \int_M Q \, dV = (n-1)! \, \omega_n\, ;$$ hence Theorem \ref{convergence.1}
cannot be applied. In fact, the conclusion of Theorem \ref{convergence.1}
fails for $M = S^n$. To see this, one can consider the Kazdan-Warner identity
$$ \int_{S^n} \langle \nabla_0 Q,\nabla_0 x_j \rangle \, e^{nw} \, dV_0 = 0\, ;$$
see \cite{CY1}. If
$f$ is an increasing function of $x_j$, then $$ \int_{S^n} \langle \nabla_0 Q,
\nabla_0 x_j \rangle \, e^{nw} \, dV_0 > 0.$$ Consequently, there is no conformal
metric on $S^n$ satisfying $$ \frac{Q}{f} =
\frac{\overline{Q}}{\overline{f}}.$$
Nevertheless, the conclusion of Theorem \ref{convergence.1} holds if
$f(x) = f(-x)$ and $w(x) = w(-x)$ for all
$x \in S^n$. This is a generalization of Moser's theorem \cite{Mo}.
\proclaim{Theorem}\label{convergence.2}
Suppose that $M = {\bf RP}^n$. Then the evolution equation $$ \frac{\partial}{\partial t} g
= - \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \, g$$ has a
solution which is defined for all times and converges to a metric with $$ \frac{Q}{f} =
\frac{\overline{Q}}{\overline{f}}.$$
\endproclaim
Combining Theorem \ref{convergence.1} with M.\ Gursky's result \cite{Gu2} gives
\proclaim{Theorem}\label{convergence.3}
Suppose that $M$ is a compact manifold of dimension four satisfying
$$ Y(g_0) \geq 0\hensp{and} \int_M Q_0 \, dV_0 \geq 0.$$
Moreover{\rm ,} assume that $M$ is not conformally equivalent to the standard
sphere~$S^4$. Then the evolution equation $$ \frac{\partial}{\partial t} g
= - \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \, g$$ has a
solution which is defined for all times and converges to a metric with $$ \frac{Q}{f} =
\frac{\overline{Q}}{\overline{f}}.$$
\endproclaim
Finally, we prove a compactness theorem for conformal metrics on $S^n$.
In two dimensions, the corresponding result was first established by X. Chen
\cite{Ch1} (see also \cite{St}).
\proclaim{Proposition}\label{compactness.result}
Let $g_k = e^{2w_k} \, g_0$ be a sequence of conformal metrics on $S^n$ with fixed
volume such that $$ \int_{S^n} Q_k^2 \, dV_k \leq C.$$
Assume that for every point $x \in S^n$ we have
$$ \lim_{r \to 0} \lim_{k \to \infty} \int_{B_r(x)} |Q_k| \, dV_k < \frac{1}{2}
\, (n-1)! \, \omega_n.$$
Then the sequence $w_k$ is uniformly bounded in $H^n$.
\endproclaim
The evolution equation can be viewed as a generalization of the Ricci flow on
compact surfaces. In dimension four, the quantity $Q$ plays a similar role as the
Gauss curvature in dimension two. Moreover, the energy
functional $E_1[w]$ corresponds to
the Liouville energy studied by B.\ Osgood, R.\ Phillips and
P.\ Sarnak in \cite{OPS}.
It was shown by R.\ Hamilton \cite{Ha} and B.\ Chow \cite{Ch2} that every
solution of the Ricci flow on a compact surface exists for all time
and converges exponentially to a
metric with constant Gauss curvature. A different approach
was introduced by X.\ Chen \cite{Ch1} in his work on the Calabi flow. Similar methods were used by M.\ Struwe
\cite{St} to establish global existence and exponential convergence for the Ricci flow on compact surfaces, and by X.
Chen and G. Tian [7] to prove convergence of the K\"ahler-Ricci flow on K\"ahler-Einstein surfaces. For the Ricci flow,
the situation is more complicated since the Calabi energy is not decreasing along the flow. H.\ Schwetlick \cite{Sc}
used similar arguments to deduce global existence and convergence for a natural
sixth order flow on surfaces. The approach used \pagebreak in \cite{Ch1} and \cite{St}
is based on integral estimates and does not rely on the maximum principle. These
ideas are also useful in our situation. This is due to the fact that the
equation studied in this paper has higher order, hence the maximum principle is not available.
In Section 2 we derive the evolution equation for the conformal factor and the
curvature quantity $Q$. In
Section 3 we show that the solution is bounded in $H^{\frac{n}{2}}$. In Sections 4 and 5 we
show that the solution exists for all time, and in Section 6 we prove that the
evolution equation converges to a stationary solution. Finally, the proof of
Proposition \ref{compactness.result} is carried out in Section 8.
The author would like to thank S.-Y.\ A.\ Chang and J.\ Viaclovsky for
helpful comments.
\section{The evolution equations for $w$ and $Q - \frac{\overline{Q} \,
f}{\overline{f}}$}
Since the evolution equation preserves the conformal structure, we may write $g = e^{2w} \, g_0$
for a fixed metric $g_0$ and some real-valued function $w$. Then we have the
formula $$ Q = e^{-nw} \, (Q_0 +
P_0w),$$ where $P_0$ denotes the Paneitz operator with respect
to the metric $g_0$. Hence, the function $w$ obeys the evolution equation
$$ \frac{\partial}{\partial t} w = -\frac{1}{2} \, e^{-nw} \, P_0 w
- \frac{1}{2} \, e^{-nw} \, Q_0 + \frac{1}{2} \,
\frac{\overline{Q} \, f}{\overline{f}}.$$
Differentiating both sides with respect to $t$, we obtain
$$ \frac{\partial}{\partial t} \Big ( Q - \frac{\overline{Q} \, f}{\overline
{f}} \Big ) = -\frac{1}{2} \, P \Big ( Q
- \frac{\overline{Q} \, f}{\overline{f}} \Big ) + \frac{n}{2} \, Q \Big (
Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) + \frac{\overline{Q} \,
f}{\overline{f} \, ^2} \, \frac{d}{dt} \overline{f},$$ where $P = e^{-nw} \, P_0$ is the Paneitz
operator with respect to the metric $g$. It follows from the evolution
equation for $w$ that
$$ \frac{d}{dt} \overline{f} = -\int_M \frac{n}{2} \, f \, \Big ( Q - \frac
{\overline{Q} \, f}{\overline{f}} \Big ) \, dV.$$
This implies
\begin{eqnarray*}
\frac{\partial}{\partial t} \Big ( Q - \frac{\overline{Q} \, f}{\overline
{f}} \Big ) &=& -\frac{1}{2} \, P \Big ( Q
- \frac{\overline{Q} \, f}{\overline{f}} \Big ) + \frac{n}{2} \, Q \Big (
Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )
\\[5pt] &&- \frac{n}{2} \, \frac{\overline{Q} \, f}{\overline{f}} \int_M \frac{f}{\overline{f}} \,
\Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \,
dV, \end{eqnarray*}
where $P$ denotes the Paneitz operator with respect to
the \pagebreak metric $g$.
\section{Boundedness of $w$ in $H^{\frac{n}{2}}$}
We consider the functional
$$ E_f[w] = \int_M \frac{n}{2} \, w \, P_0 w \, dV_0 + \int_M n \, Q_0 \, w \, dV_0 -
\int_M Q_0 \, dV_0 \,
\log \bigg ( \int_M f \, e^{nw} \, dV_0 \bigg ).$$
Since $P_0$ is self-adjoint,
\begin{eqnarray*}
\frac{d}{dt} E_f[w]
&=& \int_M \frac{n}{2} \, \frac{\partial}{\partial t} w \, P_0 w \, dV_0
+ \int_M \frac{n}{2} \, w \, P_0 \frac{\partial}{\partial t} w \, dV_0
+ \int_M n \, Q_0 \, \frac{\partial}{\partial t} w \, dV_0
\\ &&- \int_M n \, \frac{\overline{Q} \, f}{\overline{f}} \, \frac{\partial}{\partial
t} w \, dV \\
&=& \int_M n \, P_0 w \, \frac{\partial}{\partial t} w \, dV_0
+ \int_M n \, Q_0 \, \frac{\partial}{\partial t} w \, dV_0
- \int_M n \, \frac{\overline{Q} \, f}{\overline{f}} \, \frac{\partial}{\partial
t} w \, dV \\
&=& \int_M n \, Q \, \frac{\partial}{\partial t} w \, dV
- \int_M n \, \frac{\overline{Q} \, f}{\overline{f}} \, \frac{\partial}{\partial
t} w \, dV \\
&=& \int_M n \, \Big ( Q - \frac{\overline{Q} \, f}{\overline
{f}} \Big ) \, \frac{\partial}{\partial t} w \,
dV.
\end{eqnarray*}
Since the time derivative of $w$ is given by $$ \frac{\partial}{\partial t} w =
-\frac{1}{2} \, \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ),$$ we obtain
$$ \frac{d}{dt} E_f[w] = -\int_M \frac{n}{2} \, \Big ( Q - \frac{\overline{Q}
\, f}{\overline{f}} \Big )^2 \, dV.$$
In particular, the functional $E_f[w]$ is decreasing under the evolution
equation. This implies
$$ E_f[w] \leq C.$$
In the first step, we consider the case $$ \int_M Q_0 \, dV_0 < 0.$$
Using Jensen's inequality we obtain
$$ \log \bigg ( \int_M e^{n(w - \overline{w})} \, dV_0 \bigg ) \geq -C.$$
This implies
\begin{eqnarray*}
E_f[w] &\geq& \int_M \frac{n}{2} \, w \, P_0 w \, dV_0 + \int_M n \, Q_0 \, w \, dV_0 \\
&&- \int_M Q_0 \, dV_0 \,
\log \bigg ( \int_M e^{nw} \, dV_0 \bigg ) - C \end{eqnarray*}
\begin{eqnarray*}
&=& \int_M \frac{n}{2} \, w \, P_0 w \, dV_0 + \int_M n
\, Q_0 \, (w - \overline{w}) \, dV_0 \\
&&- \int_M Q_0 \, dV_0 \,
\log \bigg ( \int_M e^{n(w-\overline{w})} \, dV_0 \bigg ) - C \\
&\geq&\int_M \frac{n}{2} \, w \, P_0 w \, dV_0 + \int_M n \, Q_0 \, (w - \overline{w}) \, dV_0
- C \\
&\geq &2\delta \int_M \big ( (-\Delta_0)^{\frac{n}{4}} w \big )^2 \, dV_0 + \int_M n \, Q_0 \, (w -
\overline{w}) \, dV_0 - C \\
&\geq& \delta \int_M \big ( (-\Delta_0)^{\frac{n}{4}} w \big )^2 \, dV_0 - C.
\end{eqnarray*}
In the second step, we consider the case
$$ 0 \leq \int_M Q_0 \, dV_0 < (n - 1)! \, \omega_n.$$
Since the Paneitz operator $P_0$ is self-adjoint and weakly positive,
it has a square root $P_0^{\frac{1}{2}}$. Moreover, the kernel of
$P_0^{\frac{1}{2}}$ coincides with the kernel of $P_0$, which
consists of the constant functions. Thus, we conclude that
$$ w(y) - \overline{w} = \int_M P_0^{\frac{1}{2}} w(z) \, H(y,z) \, dV_0(z)$$
for a suitable function $H(y,z)$. The leading term in the asymptotic expansion of
the kernel $H(y,z)$ coincides with that of the Green's function for the operator
$(-\Delta)^{\frac{n}{4}}$ in ${\bf R}^n$. Hence, we can apply
an inequality of D. Adams (see \cite[Theorems~1 and 2]{Ad}). This
implies
$$ \int_M e^{\frac{2^n \pi^n n}{\omega_{n-1}} \, \frac
{(w - \overline{w})^2}{\int_M (P_0^{\frac{1}{2}} w)^2 \, dV_0}} \, dV_0 \leq C,$$
hence
$$ \int_M e^{\frac{2^n \pi^n n}{\omega_{n-1}} \, \frac
{(w - \overline{w})^2}{\int_M w \, P_0 w \, dV_0}} \, dV_0 \leq C.$$
Since $$ \omega_{n-1} \omega_n = \frac{2^{n+1} \pi^n}{(n - 1)!},$$
we obtain $$ \int_M e^{n(w - \overline{w})} \, dV_0
\leq C \, e^{\int_M \frac{n}{2(n-1)! \, \omega_n} \, w \, P_0 w \, dV_0}.$$
From this it follows that
\begin{eqnarray*}
E_f[w] &\geq& \int_M \frac{n}{2} \,
w \, P_0 w \, dV_0 + \int_M n \, Q_0 \, w \, dV_0 \\
&&- \int_M Q_0 \, dV_0 \,
\log \bigg ( \int_M e^{nw} \, dV_0 \bigg ) - C \end{eqnarray*}
\begin{eqnarray*}
&=& \int_M \frac{n}{2} \, w \, P_0 w \, dV_0 + \int_M n \, Q_0 \, (w - \overline{w}) \, dV_0 \\
&&- \int_M Q_0 \, dV_0 \,
\log \bigg ( \int_M e^{n(w-\overline{w})} \, dV_0 \bigg ) - C \\
&\geq& \bigg ( 1 - \frac{\int_M Q_0 \, dV_0}{(n-1)! \, \omega_n} \bigg ) \, \int_M \frac
{n}{2} \, w \, P_0 w
\, dV_0 + \int_M n \, Q_0 \, (w - \overline{w}) \, dV_0 - C \\
&\geq &2\delta \int_M \big ( (-\Delta_0)^{\frac{n}{4}} w \big )^2 \, dV_0 + \int_M n \, Q_0 \, (w -
\overline{w}) \, dV_0 - C \\
&\geq &\delta \int_M \big ( (-\Delta_0)^{\frac{n}{4}} w \big )^2 \, dV_0 - C.
\end{eqnarray*}
Since $E_f[w]$ is bounded from above, we conclude that
$$ \int_M \big ( (-\Delta_0)^{\frac{n}{4}} w \big )^2 \, dV_0 \leq C;$$
hence
$$ \|w - \overline{w}\|_{H^{\frac{n}{2}}} \leq C.$$
Using an inequality of N. Trudinger, we obtain
$$ \int_M e^{\alpha(w - \overline{w})} \, dV_0 \leq C$$
for all real numbers $\alpha$.
In particular, we have
$$ \int_M e^{n(w - \overline{w})} \, dV_0 \leq C.$$
Since $ \int_M e^{nw} \, dV_0 = 1,$ we conclude that
$ e^{-n\overline{w}} \leq C;$ hence
$ -C \leq \overline{w} \leq C.$
This implies
$ \|w\|_{H^{\frac{n}{2}}} \leq C$
and
$ \int_M e^{\alpha w} \, dV_0 \leq C$
for all real numbers $\alpha$.
Since the functional $E_f[w]$ is bounded from below, we finally obtain
$$ \int_0^T \int_M \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^2 \, dV \, dt \leq C.$$
\section{Boundedness of $w$ in $H^n$ for $0 \leq t \leq T$}
Let $T$ be a fixed, positive real number. We claim that
$$ \|w\|_{H^n} \leq C$$
for all $0 \leq t \leq T$. For the sake of brevity, we put
\begin{eqnarray*} v
&=& -\frac{1}{2} \, e^{\frac{nw}{2}} \, \Big ( Q - \frac{\overline{Q}
\, f}{\overline{f}} \Big ) =
e^{\frac{nw}{2}} \, \frac{\partial}{\partial t} w \\ &=& -\frac{1}{2} \,
e^{-\frac{nw}{2}} \, P_0 w
- \frac{1}{2} \, e^{-\frac{nw}{2}} \, Q_0
+ \frac{1}{2} \, e^{\frac{nw}{2}} \, \frac{\overline{Q} \,
f}{\overline{f}}. \end{eqnarray*}
This implies
$$ \frac{\partial}{\partial t} w = e^{-\frac{nw}{2}} \, v\hensp{ and }
P_0 w = -2 \, e^{\frac{nw}{2}} \, v - Q_0 + e^{nw} \, \frac{\overline{Q} \, f}{\overline{f}}.$$
From this it follows that
\begin{eqnarray*}
\frac{d}{dt} \bigg ( \int_M (P_0 w)^2 \, dV_0 \bigg )
&= &-\int_M 4 \, (e^{\frac{nw}{2}} \, v) \, P_0(e^{-\frac{nw}{2}} \, v) \, dV_0 \\
&&- \int_M 2 \, Q_0 \, P_0(e^{-\frac{nw}{2}} \, v) \, dV_0 \\
&&+ \int_M \frac{2 \, \overline{Q}}{\overline{f}} \, (e^{nw} \, f) \, P_0(e
^{-\frac{nw}{2}} \, v) \, dV_0. \end{eqnarray*}
This implies
\begin{eqnarray*}
\frac{d}{dt} \bigg ( \int_M (P_0 w)^2 \, dV_0 \bigg )
&=& -\int_M 4 \, (-\Delta_0)^{\frac{n}{4}}(e^{\frac{nw}{2}} \, v) \,
(-\Delta_0)^{\frac{n}{4}}(e^{-\frac{nw}{2}} \, v) \, dV_0 \\
&&- \int_M 2 \, P_0 Q_0 \, (e^{-\frac{nw}{2}} \, v) \, dV_0 \\
&&+ \int_M \frac{2 \, \overline{Q}}{\overline{f}} \, (-\Delta_0)^{\frac{n}{4}}
(e^{nw} \, f) \, (-\Delta_0)^{\frac{n}{4}}
(e^{-\frac{nw}{2}} \, v) \, dV_0 \\ &&+ \hbox{ \rm lower order terms}. \end{eqnarray*}
Here, we adopt the convention that
$$ (-\Delta_0)^{m+\frac{1}{2}} = \nabla_0 \, (-\Delta_0)^m$$
for all integers $m$ (see \cite{Ad}). The right-hand side involves derivatives
of $v$ and $w$ of order at most $\frac{n}{2}$. Moreover, the total number of
derivatives is at most $n$. Therefore, we obtain
\begin{eqnarray*}
\frac{d}{dt} \bigg ( \int_M (P_0 w)^2 \, dV_0 \bigg )
&=& -\int_M 4 \, \big ( (-\Delta_0)^{\frac{n}{4}} v \big )^2 \, dV_0 \\ &&+ C
\sum_{k_1, \ldots, k_m} \int_M |\nabla_0^{k_1} v| \cdot
|\nabla_0^{k_2} v| \cdot |\nabla_0^{k_3} w| \cdots |\nabla_0^{k_m} w| \, dV_0 \\ &&+ C
\sum_{l_1, \ldots, l_m} \int_M |\nabla_0^{l_1} v| \cdot
|\nabla_0^{l_2} w| \cdots |\nabla_0^{l_m} w| \, e^{\alpha w} \, dV_0.
\end{eqnarray*}
The first sum is taken over all $m$-tuples $k_1, \ldots, k_m$ with $m \geq 3$
satisfying the conditions
\begin{eqnarray*}
0\le k_i\le \frac{n}{2} && \hbox{for } 1\le i\le 2,\\
1\le k_i\le \frac{n}{2}&&\hbox{for }3\le i\le m,
\end{eqnarray*}
and
$$
k_1+\cdots + k_m \le n.
$$
To estimate this term, we choose real numbers $p_1, \ldots, p_m \in [2,\infty[$ such that
\begin{eqnarray*}
k_i \leq \frac{n}{p_i} &&\hbox{ for } \quad 1 \leq i \leq 2,\\
\frac{n}{p_i} < k_i &&\hbox{ for } \quad 3
\leq i \leq m
\end{eqnarray*} and $$ \frac{1}{p_1} +\cdots +\frac{1}{p_m} = 1.$$
Moreover, we define real numbers $\theta_1, \ldots, \theta_m$ by
$$ \theta_i = \frac{k_i - \frac{n}{p_i} + \frac{n}{2}}{\frac{n}{2}} \in [0,1]\quad \hbox{
for }1 \leq i \leq 2$$ and
$$ \theta_i = \frac{k_i - \frac{n}{p_i}}{\frac{n}{2}} \in \hspace{1.2mm} ]0,1[ \quad \hbox{ for }\quad 3 \leq i \leq
m.$$ Then we have
$ \theta_1 +\cdots +\theta_m \leq 2$;
hence
$$ \theta_3 +\cdots +\theta_m \leq (1-\theta_1) + (1-\theta_2).$$
Since $w$ is bounded in $H^{\frac{n}{2}}$, this implies
\begin{eqnarray*}
&&-\int_M 2 \, \big ( (-\Delta_0)^{\frac{n}{4}} v \big )^2 \, dV_0
\\ &&+ C \sum_{k_1, \ldots, k_m} \int_M |\nabla_0^{k_1} v| \cdot
|\nabla_0^{k_2} v| \cdot |\nabla_0^{k_3} w| \cdots |\nabla_0^{k_m} w| \, dV_0
\\ &\leq &-\|v\|_{H^{\frac{n}{2}}}^2 + C
\sum_{k_1, \ldots, k_m} \|\nabla_0^{k_1} v\|_{L^{p_1}} \cdot
\|\nabla_0^{k_2} v\|_{L^{p_2}} \cdot \|\nabla_0^{k_3} w\|_{L^{p_3}}
\cdots \|\nabla_0^{k_m} w\|_{L^{p_m}} \\
&\leq& -\|v\|_{H^{\frac{n}{2}}}^2 + C
\sum_{k_1, \ldots, k_m} \|v\|_{H^{k_1 - \frac{n}{p_1} + \frac{n}{2}}} \\&&\cdot
\|v\|_{H^{k_2 - \frac{n}{p_2} + \frac{n}{2}}} \cdot
\|w\|_{H^{k_3 - \frac{n}{p_3} + \frac{n}{2}}}
\cdots \|w\|_{H^{k_m - \frac{n}{p_m} + \frac{n}{2}}} \\
&\leq &-\|v\|_{H^{\frac{n}{2}}}^2 + C
\sum_{k_1, \ldots, k_m} \|v\|_{L^2}^{(1-\theta_1)+(1-\theta_2)} \,
\\
&&\cdot \|v\|_{H^{\frac{n}{2}}}^{\theta_1+\theta_2}
\, \|w\|_{H^{\frac{n}{2}}}^{(1-\theta_3)+\cdots +(1-\theta_m)} \,
\|w\|_{H^n}^{\theta_3+\cdots +\theta_m} \\
&\leq& -\|v\|_{H^{\frac{n}{2}}}^2 + C
\sum_{k_1, \ldots, k_m} \|v\|_{L^2}^{(1-\theta_1)+(1-\theta_2)} \,
\|v\|_{H^{\frac{n}{2}}}^{\theta_1+\theta_2}
\, \|w\|_{H^n}^{\theta_3+\cdots +\theta_m} \\
&\leq& C
\sum_{k_1, \ldots, k_m} \|v\|_{L^2}^2 \,
\|w\|_{H^n}^{\frac{2(\theta_3+\cdots +\theta_m)}{(1-\theta_1)+(1-\theta_2)}} \\
&\leq& C \, \|v\|_{L^2}^2 \, (\|w\|_{H^n}^2 + 1).
\end{eqnarray*}
The second sum is taken over all $m$-tuples $l_1, \ldots, l_m$ with $m \geq 1$
satisfying the conditions
\begin{eqnarray*}
&&0\le l_1\le \frac{n}{2},\\
&&1\le l_i\le \frac{n}{2}\qquad \hbox{for }2\le i\le m
\end{eqnarray*}
and
$$
l_1+\cdots +l_m\le n.
$$
To estimate this term, we choose real numbers $q_1, \ldots, q_m \in [2,\infty[$ such that
$$
l_1\leq\frac{n}{q_1},\qquad \frac{n}{q_i} <l_i\enspace\hbox{ for } \enspace 2 \leq
i \leq m$$
and $$
\frac{1}{2} \leq \frac{1}{q_1} +\cdots +\frac{1}{q_m} < 1.
$$
Moreover, we define real numbers $\rho_1, \ldots, \rho_m$ by
$$ \rho_1 = \frac{l_1 - \frac{n}{q_1} + \frac{n}{2}}{\frac{n}{2}} \in [0,1] $$
and
$$ \rho_i = \frac{l_i - \frac{n}{q_i}}{\frac{n}{2}} \in \hspace{1.2mm} ]0,1[\quad\hbox{
for }2 \leq i \leq m.$$ Then we have
$ \rho_1 +\cdots +\rho_m \leq 2$;
hence
$ \rho_2 +\cdots +\rho_m \leq 2 - \rho_1.$
Since $w$ is bounded in $H^{\frac{n}{2}}$, this implies
\begin{eqnarray*}
&&-\int_M 2 \, \big ( (-\Delta_0)^{\frac{n}{4}} v \big )^2 \, dV_0
+ C \sum_{l_1, \ldots, l_m} \int_M |\nabla_0^{l_1} v| \cdot
|\nabla_0^{l_2} w| \cdots |\nabla_0^{l_m} w| \, e^{\alpha w} \, dV_0
\\ &\leq& -\|v\|_{H^{\frac{n}{2}}}^2 + C
\sum_{l_1, \ldots, l_m} \|\nabla_0^{l_1} v\|_{L^{q_1}} \cdot
\|\nabla_0^{l_2} w\|_{L^{q_2}}
\cdots \|\nabla_0^{l_m} w\|_{L^{q_m}} \\
&\leq& -\|v\|_{H^{\frac{n}{2}}}^2 + C
\sum_{l_1, \ldots, l_m} \|v\|_{H^{l_1 - \frac{n}{q_1} + \frac{n}{2}}}
\cdot \|w\|_{H^{l_2 - \frac{n}{q_2} + \frac{n}{2}}}
\cdots \|w\|_{H^{l_m - \frac{n}{q_m} + \frac{n}{2}}} \\
&\leq& -\|v\|_{H^{\frac{n}{2}}}^2 + C
\sum_{l_1, \ldots, l_m} \|v\|_{L^2}^{1-\rho_1} \,
\|v\|_{H^{\frac{n}{2}}}^{\rho_1} \, \|w\|_{H^{\frac{n}{2}}}^{(1-\rho_2)
+\cdots +(1-\rho_m)} \,
\|w\|_{H^n}^{\rho_2+\cdots +\rho_m} \\
&\leq& -\|v\|_{H^{\frac{n}{2}}}^2 + C
\sum_{l_1, \ldots, l_m} \|v\|_{L^2}^{1-\rho_1} \, \|v\|_{H^{\frac{n}{2}}}^{\rho_1} \, \|w\|_{H^n}^{\rho_2+\cdots +\rho_m} \\
&\leq &C
\sum_{l_1, \ldots, l_m} \|v\|_{L^2}^{\frac{2-2\rho_1}{2-\rho_1}} \,
\|w\|_{H^n}^{\frac{2(\rho_2+\cdots +\rho_m)}{2-\rho_1}} \\
&\leq& C \, (\|v\|_{L^2}^2 + 1) \, (\|w\|_{H^n}^2 + 1).
\end{eqnarray*}
Thus, we conclude that
$$ \frac{d}{dt} \bigg ( \int_M (P_0 w)^2 \, dV_0 \bigg )
\leq C \, (\|v\|_{L^2}^2 + 1) \, (\|w\|_{H^n}^2 + 1).$$
From the positivity of $P_0$ it follows that
$$ \|w\|_{H^n}^2 \leq C \int_M (P_0 w)^2 \, dV_0.$$
Moreover, the function $v$ satisfies
$$ \|v\|_{L^2}^2 = \int_M \frac{1}{4} \, e^{nw} \, \Big (
Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^2 \, dV_0 = \int_M
\frac{1}{4} \, \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^2 \, dV.$$
Therefore, we obtain
$$
\frac{d}{dt} \bigg ( \int_M (P_0 w)^2 \, dV_0 + 1 \bigg )
\leq C \, \bigg ( \int_M (P_0 w)^2 \, dV_0 +
1 \bigg ) \, \bigg ( \int_M
\Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^2 \, dV + 1 \bigg ).
$$ Since
$$ \int_0^T \int_M \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^2 \,
dV \, dt \leq C,$$ we deduce that
$$ \int_M (P_0 w)^2 \, dV_0 \leq C\quad\hbox{ for all }\quad 0 \leq t \leq T.$$
This implies $$ \|w\|_{H^n} \leq C$$ for all $0 \leq t \leq T$.
Using the Sobolev inequality, we obtain
$$ |w| \leq C\quad\hbox{ for all }\quad 0 \leq t \leq T.$$
\section{Boundedness of $w$ in $H^{2k}$ for $0 \leq t \leq T$}
We now establish bounds for the higher order derivatives:
\begin{eqnarray*}
\frac{d}{dt} \bigg ( \int_M |(-\Delta_0)^k w|^2 \, dV_0 \bigg )
&\leq& -\int_M e^{-nw} \, |(-\Delta_0)^{k+\frac{n}{4}} w|^2 \, dV_0
\\ &&+ C \sum_{k_1, \ldots, k_m} \int_M |\nabla_0^{k_1} w| \cdots |\nabla_0^{k_m}
w| \, dV_0\,;
\end{eqnarray*}
hence
\begin{eqnarray*}
\frac{d}{dt} \bigg ( \int_M |(-\Delta_0)^k w|^2 \, dV_0 \bigg )
&\leq &-\frac{1}{C} \int_M |(-\Delta_0)^{k+\frac{n}{4}} w|^2 \, dV_0
\\ &&+ C \sum_{k_1, \ldots, k_m} \int_M |\nabla_0^{k_1} w| \cdots |\nabla_0^{k_m}
w| \, dV_0.
\end{eqnarray*}
Here, the sum is taken over all $m$-tuples $k_1, \ldots, k_m$, with $m \geq 3$,
which satisfy the conditions
$$ 1 \leq k_i \leq 2k + \frac{n}{2}\quad\hbox{ and }\quad k_1 + \cdots + k_m \leq 4k + n.$$
We now choose real numbers $p_1, \ldots, p_m \in [2,\infty[$ such that
$$ k_i \leq 2k + \frac{n}{p_i}\hensp{and}\frac{1}{p_1} + \cdots + \frac{1}{p_m} = 1.$$
Moreover, we define real numbers $\theta_1, \ldots, \theta_m$ by
$$ \theta_i ={\rm max} \left\{ \frac{k_i-\frac{n}{p_i}-\frac{n}{2}}{2k-\frac{n}{2}} ,0\right\}.$$
Since $m \geq 3$, we can choose $p_1,\ldots ,p_m\in [2,\infty[$ such that
$$ \theta_1 + \cdots + \theta_m < 2.$$
From this it follows that
\begin{eqnarray*}
\frac{d}{dt} \|w\|_{H^{2k}}^2
&\leq& -\frac{1}{C} \, \|w\|_{H^{2k+\frac{n}{2}}}^2
+ C \sum_{k_1, \ldots, k_m} \|\nabla_0^{k_1} w\|_{L^{p_1}} \cdots
\|\nabla_0^{k_m} w\|_{L^{p_m}} \\
&\leq& -\frac{1}{C} \, \|w\|_{H^{2k+\frac{n}{2}}}^2
+ C \sum_{k_1, \ldots, k_m} \|w\|_{H^{k_1-\frac{n}{p_1}+\frac{n}{2}}} \cdots
\|w\|_{H^{k_m-\frac{n}{p_m}+\frac{n}{2}}} \\
&\leq& -\frac{1}{C} \, \|w\|_{H^{2k+\frac{n}{2}}}^2
+ C \sum_{k_1, \ldots, k_m} \|w\|_{H^n}^{(1-\theta_1) + \cdots + (1-\theta_m)}
\, \|w\|_{H^{2k+\frac{n}{2}}}^{\theta_1 + \cdots + \theta_m} \\
&\leq& -\frac{1}{C} \, \|w\|_{H^{2k+\frac{n}{2}}}^2
+ C \sum_{k_1, \ldots, k_m} \|w\|_{H^{2k+\frac{n}{2}}}^{\theta_1 + \cdots + \theta_m} \\
&\leq &-\frac{1}{C} \, \|w\|_{H^{2k+\frac{n}{2}}}^2 + C \\
&\leq &-\frac{1}{C} \, \|w\|_{H^{2k}}^2 + C
\end{eqnarray*}
for all $0 \leq t \leq T$. Thus, we conclude that $$ \|w\|_{H^{2k}} \leq C\hensp{
for all }0 \leq t \leq T.
$$
Therefore, the evolution equation has a
solution which is defined for all time.
\section{Convergence}
For the sake of brevity, we put
$$ y(t) = \int_M \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^2 \, dV $$
and we show that $$ y(t) \to 0\hensp{for}t \to \infty.
$$
Let $\varepsilon$ be an arbitrary positive
number. We choose $t_0 \geq 0$ such that $ y(t_0) \leq \varepsilon.$ We claim
that $ y(t) \leq 3\varepsilon$ for all $t \geq t_0$. Otherwise, we define
$$ t_1 =
\inf \{t \geq t_0: y(t) \geq 3\varepsilon\}.$$ This implies
$ y(t) \leq 3\varepsilon$ for all $t_0 \leq t \leq t_1$.
From this it follows that $$ \int_M e^{-nw} \, (Q_0 + P_0 w)^2 \, dV_0 \leq C$$ for
all $t_0 \leq t \leq t_1$. Moreover, it follows from results
in Section 3 that
$$ \int_M e^{3nw} \, dV_0 \leq C\hensp{for all} t_0 \leq t \leq t_1.$$
Using H\"older's inequality, we obtain
$$
\int_M |Q_0 + P_0 w|^{\frac{3}{2}} \, dV_0 \leq \bigg ( \int_M e^{-nw} \, (Q_0 + P_0 w)^2 \, dV_0 \bigg
)^{\frac{3}{4}} \,
\bigg ( \int_M e^{3nw} \, dV_0 \bigg )^{\frac{1}{4}}. $$
From this it follows that
$$ \int_M |P_0 w|^{\frac{3}{2}} \, dV_0 \leq C\hensp{for all } t_0 \leq t \leq t_1.$$ Using the Sobolev inequality, we
obtain
$$ |w| \leq C\hensp{for all}t_0 \leq t \leq t_1.
$$
We have shown in Section 2 that the function $Q - \frac{\overline{Q} \,
f}{\overline{f}}$ satisfies the evolution equation
\begin{eqnarray*}
\frac{\partial}{\partial t} \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}}
\Big ) &= &-\frac{1}{2} \, P \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}}
\Big ) + \frac{n}{2} \, Q \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )
\\ &&- \frac{n}{2} \, \frac{\overline{Q} \, f}{\overline{f}} \int_M \frac{f}{\overline{f}} \,
\Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \,
dV, \end{eqnarray*}
where $P$ denotes the Paneitz
operator with respect to the metric $g$. From this it follows that
\begin{eqnarray*}
\frac{d}{dt} \bigg ( \int_M \Big (
Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^2 \, dV \bigg )
&= &-\int_M \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big
) \, P \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \, dV
\\ &&+ \int_M \frac{n}{2} \, \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^3 \, dV
\\ &&+ \int_M n \, \frac{\overline{Q} \, f}{\overline{f}} \,
\Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^2 \, dV
\\ &&- n \, \overline{Q} \, \bigg ( \int_M \frac{f}{\overline{f}} \,
\Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \, dV
\bigg )^2.
\end{eqnarray*}
Using the Gagliardo-Nirenberg inequality, we can bound
$$ \Big \| Q - \frac{\overline{Q} \, f}{\overline{f}} \Big \|_{L^3}
\leq C \, \Big \| Q - \frac{\overline{Q} \, f}{\overline{f}}
\Big \|_{L^2}^{\frac{2}{3}}
\, \Big \| Q - \frac{\overline{Q} \,
f}{\overline{f}} \Big \|_{H^{\frac{n}{2}}}^{\frac{1}{3}},$$
where the norms are taken with respect to the background metric $g_0$. This implies
\begin{eqnarray*}
&&\int_M
\Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^3 \, dV_0 \\
&&\qquad \leq C \, \bigg ( \int_M \Big ( Q - \frac{\overline{Q} \, f}{\overline
{f}} \Big )^2 \, dV_0 \bigg ) \,
\bigg ( \int_M \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \,
P_0 \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \, dV_0 \bigg
)^{\frac{1}{2}}. \end{eqnarray*}
Since $w$ is uniformly bounded for $t_0 \leq t \leq t_1$, we obtain
\begin{eqnarray*}
&&\int_M
\Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^3 \, dV \\
&&\qquad \leq C \, \bigg ( \int_M \Big ( Q - \frac{\overline{Q} \, f}{\overline
{f}} \Big )^2 \, dV \bigg ) \,
\bigg ( \int_M \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \,
P \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \, dV \bigg
)^{\frac{1}{2}}. \end{eqnarray*}
Thus, we conclude that
\begin{eqnarray*}
\frac{d}{dt} \bigg ( \int_M \Big ( Q - \frac{\overline
{Q} \, f}{\overline{f}} \Big )^2 \, dV \bigg ) &\leq& C \,
\bigg ( \int_M \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^2
\, dV \bigg )^2\\
&& + C \, \bigg ( \int_M
\Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big )^2 \, dV \bigg );
\end{eqnarray*}
hence $$ \frac{d}{dt} y(t) \leq C \, y(t)^2 + C \, y(t).$$
Therefore, we obtain
$$ 2\varepsilon \leq y(t_1) - y(t_0) \leq C \int_{t_0}^{t_1} y(t) \, dt.$$
If we choose $t_0$ large enough, then we have
$$ C \int_{t_0}^\infty y(t) \, dt \leq \varepsilon.$$ Hence, we obtain $ 2\varepsilon
\leq \varepsilon $ which is a contradiction.
Thus, we conclude that $$ y(t) \to 0\hensp{ for }t \to \infty.
$$
From this it follows that
$$ |w| \leq C\hensp{for all} t \geq 0.
$$
Moreover, we have
$$ \int_M e^{-nw} \, (Q_0 + P_0 w)^2 \, dV_0 \leq C$$ for all $t \geq 0$.
From this it follows that
$$ \int_M (Q_0 + P_0 w)^2 \, dV_0 \leq C;$$ hence $$ \|w\|_{H^n} \leq C$$ for all $t
\geq 0$. Arguing as above, we obtain $$ \|w\|_{H^k} \leq C\hensp{for all} t \geq 0.$$
It remains to show that the flow converges to a metric
satisfying $$ \frac{Q}{f} = \frac{\overline{Q}}{\overline{f}}.$$
The evolution equation $$ \frac{\partial}{\partial t} g =
- \Big ( Q - \frac{\overline{Q} \, f}{\overline{f}} \Big ) \, g$$
is the gradient flow for the functional $$ E_f[w] = \int_M \frac{n}{2} \, w \, P_0 w \, dV_0 + \int_M n \, Q_0 \, w \, dV_0 -
\int_M Q_0 \, dV_0 \,
\log \bigg ( \int_M e^{nw} \, f \, dV_0 \bigg ).$$ Since the functional
$E_f[w]$ is real analytic, the assertion follows from a general result
of L.\ Simon \cite{Si}.
\section{The case $M = {\bf RP}^n$}
In this section, we consider the special case $M = {\bf RP}^n$.
We normalize the metric such that
the volume of $M$ is equal to $\frac{1}{2} \, \omega_n$ and
the mean value of the function $Q$ is equal to $(n-1)!$.
By Theorem \ref{convergence.1}, the flow converges to a limit metric $g$ satisfying
$$ \frac{Q}{f} = \frac{(n-1)!}{\overline{f}}.$$
In particular, for every positive function $f$ on ${\bf RP}^n$, there exists a metric $g$ on
${\bf RP}^n$ such that
$$ \frac{Q}{f} = \frac{(n-1)!}{\overline{f}}.$$
We now consider the case $f = 1$. In this case, the limit metric $g$ satisfies
$Q = (n-1)!$. It follows from a result of S.-Y.\ A.\ Chang and P.\
Yang \cite{CY2}
(see also C.\ S.\ Lin's paper \cite{Li})
that the limit metric is the standard metric on~${\bf RP}^n$.
We claim that the flow converges exponentially. To show this, we denote by $g_0$
the standard metric on ${\bf RP}^n$. Then the conformal factor satisfies the
evolution equation $$ \frac{\partial}{\partial t} w = -\frac{1}{2} \, e^{-nw} \,
P_0 w + \frac{1}{2} \, (n-1)! \, (1 - e^{-nw}).$$
Linearizing this equation, we obtain $$ \frac{\partial}{\partial t} w = -\frac{1}{2}
\, P_0 w + \frac{1}{2} \, n! \, w.$$
The Paneitz operator on ${\bf RP}^n$ is given by
$$ P_0 = \prod_{k=1}^{\frac{n}{2}} (-\Delta_0 + (k-1)(n-k)).$$
The first eigenvalue of the Laplace operator $-\Delta_0$ on ${\bf RP}^n$ is
strictly greater than $n$. Hence, the first eigenvalue of the Paneitz operator $P_0$ is strictly greater than $n!$.
Therefore, the first eigenvalue of
the linearized operator
is strictly less than $0$. Thus, we conclude that the flow
converges exponentially to the standard metric on ${\bf RP}^n$.
\section{A compactness result for conformal metrics on $S^n$}
In this section, we give a proof for Proposition \ref{compactness.result}.
Let $g_k = e^{2w_k} \, g_0$ be a sequence of conformal metrics on $S^n$ with fixed
volume such that $$ \int_{S^n} Q_k^2 \, dV_k \leq C.$$ Since
$$ Q_k = e^{-nw_k} \, (Q_0 + P_0w_k),$$ we obtain
$$ \int_{S^n} e^{-nw_k} \, (Q_0 + P_0 w_k)^2 \, dV_0 \leq C.$$
Moreover, we have $$ \int_{S^n} |Q_k| \, dV_k \leq C.$$
Hence $$ \int_{S^n} |P_0 w_k| \, dV_0 \leq C.$$
Finally, we have
$$ \lim_{r \to 0} \lim_{k \to \infty} \int_{B_r(x)} |Q_k| \, dV_k < \frac{1}{2}
\, (n-1)! \, \omega_n.$$
This implies
$$ \lim_{r \to 0} \lim_{k \to \infty} \int_{B_r(x)} |P_0 w_k| \, dV_0 <
\frac{1}{2} \, (n-1)! \, \omega_n.$$
Choosing $r$ sufficiently small, we obtain
$$ \lim_{k \to \infty} \int_{B_r(x)} |P_0 w_k| \, dV_0 < \frac{1}{2} \, (n-1)! \,
\omega_n.$$
Let $$ I_k = \int_{B_r(x)} |P_0 w_k| \, dV_0.$$
We now use the formula
$$ w_k(y) - \overline{w}_k =
\int_{S^n} P_0 w_k(z) \, K(y,z) \, dV_0(z).$$
This implies
$$ np (w_k(y) - \overline{w}_k) \leq \int_{B_r(x)} np \, |P_0 w_k(z)| \,
|K(y,z)| \, dV_0(z) + C$$
for all $y \in B_{\frac{r}{2}}(x)$. Using Jensen's inequality, we obtain
$$ e^{np (w_k(y) - \overline{w}_k)} \leq \frac{C}{I_k} \int_{B_r(x)} |P_0
w_k(z)| \, e^{npI_k \, |K(y,z)|} \, dV_0(z)$$
for all $y \in B_{\frac{r}{2}}(x)$. Since
$$ \lim_{k \to \infty} I_k < \frac{1}{2} \, (n-1)! \, \omega_n,$$ we can find a real number $p > 1$
such that $$ \lim_{k \to \infty} pI_k < \frac{1}{2} \, (n-1)! \, \omega_n.$$
We now use an asymptotic formula of the
function $K(y,z)$ for $|y - z| \to 0$. To derive this formula, we use the
identity $$ (-\Delta)^{\frac{n}{2}} \log |y - z| = -2^{n-2} \, \big ( \big ( \frac
{n-2}{2} \big )! \big )^2 \, \omega_{n-1} \, \delta(y - z).$$
This implies
$$ (-\Delta)^{\frac{n}{2}} \log |y - z| = -\frac{1}{2} \, (n-1)! \, \omega_n
\, \delta(y - z).$$ Therefore, we obtain
$$ \frac{1}{2} \, (n-1)! \, \omega_n \, K(y,z) \sim -\log |y - z|;$$
hence $$ e^{\frac{1}{2} \, (n-1)! \, \omega_n \, |K(y,z)|} \sim
\frac{1}{|y-z|}.$$
From this it follows that $$ \int_{S^n} e^{npI_k \, |K(y,z)|} \, dV_0(y)
\leq C.$$ Since
$$ \frac{1}{I_k} \int_{B_r(x)} |P_0 w_k| \, dV_0 = 1,$$ we conclude that
$$ \int_{B_{\frac{r}{2}}(x)}
e^{np (w_k(y) - \overline{w}_k)} \, dV_0(y) \leq C.$$
Covering $S^n$ with finitely many balls $B_{\frac{r}{2}}(x)$, we
obtain $$ \int_{S^n} e^{np(w_k - \overline{w}_k)} \, dV_0 \leq C$$ for some
$p > 1$. In particular, we have $$ \int_{S^n} e^{n(w_k - \overline{w}_k)} \, dV_0 \leq
C.$$ Since $ \int_{S^n} e^{nw_k} \, dV_0 = 1,$ we conclude that
$ e^{-n\overline{w}_k} \leq C;$
hence
$ -C \leq \overline{w}_k \leq C.$
This implies
$$ \int_{S^n} e^{npw_k} \, dV_0 \leq C.$$
By H\"older's inequality,
$$\int_{S^n} |Q_0 + P_0 w_k|^{\frac{2p}{p+1}} \, dV_0 \leq \bigg ( \int_{S^n} e^{-nw_k} \, (Q_0 + P_0 w_k)^2 \, dV_0
\bigg )^{\frac{p}{p+1}} \,
\bigg ( \int_{S^n} e^{npw_k} \, dV_0 \bigg )^{\frac{1}{p+1}}. $$
From this it follows that
$$ \int_{S^n} |P_0 w_k|^{\frac{2p}{p+1}} \, dV_0 \leq C.$$
Using the Sobolev inequality, we obtain
$ |w_k| \leq C.$ Thus, we conclude that $$ \int_{S^n} |P_0 w_k|^2 \, dV_0
\leq C.$$ Therefore, the sequence $w_k$ is uniformly bounded in $H^n$.
\begin{references}
\bibitem{Ad}
\name{D.\ Adams}, A sharp inequality of J.\ Moser for higher order derivatives,
{\it Ann.\ of Math\/}.\ {\bf 128} (1988), 385--398.
\bibitem{BCY}
\name{T.\ Branson, S.-Y.\ A.\ Chang}, and \name{P.\ Yang}, Estimates and extremals
for zeta function determinants on four-manifolds, {\it Comm.\ Math.\
Phys\/}.\ {\bf 149} (1992), 241--262.
\bibitem{CY1}
\name{S.-Y.\ A.\ Chang} and \name{P.\ Yang}, Extremal metrics of zeta function
determinants on\break $4$-manifolds, {\it Ann.\ of Math\/}.\ {\bf 142} (1995),
171--212.
\bibitem{CY2}
\bibline, On uniqueness of solutions of $n$th order
differential equations in conformal geometry, {\it Math.\ Res.\
Lett\/}.\ {\bf 4} (1997), 91--102.
\bibitem{CGY}
\name{S.-Y.\ A.\ Chang, M.\ Gursky}, and \name{P.\ Yang}, An
equation of Monge-Amp\`ere type in conformal geometry, and four-manifolds of
positive Ricci curvature, {\it Ann. of Math\/}.\ {\bf 155} (2002),
709--787.
\bibitem{Ch1}
\name{X.\ Chen}, Calabi flow in Riemann surfaces revisited: a new point of view,
{\it Internat.\ Math.\ Res.\ Notices\/} (2001), no.\ 6, 275--297.
\bibitem{CT} \name{X. Chen} and \name{G. Tian}, Ricci flow on K\"ahler-Einstein surfaces, {\it Invent.\ Math.\/} {\bf
147} (2002), 487--544.
\bibitem{Ch2}
\name{B.\ Chow}, The Ricci flow on the $2$-sphere, {\it J.\ Differential
Geom\/}.\ {\bf 33} (1991), 325--334.
\bibitem{Ch3}
\name{P.\ Chru\'sciel}, Semi-global existence and convergence of solutions of the
Robinson-Trautman
($2$-dimensional Calabi) equation, {\it Comm.\ Math.\ Phys\/}.\ {\bf
137} (1991), 289--313.
\bibitem{Co}
\name{A.\ Connes}, {\it Noncommutative Geometry\/}, Academic Press, San Diego,
CA (1994).
\bibitem{DMA1}
\name{Z.\ Djadli, A.\ Malchiodi}, and \name{M.\ Ahmedou}, Prescribing a fourth order
conformal invariant on the standard sphere -- Part I: a perturbation result,
{\it Commun.\ Contemp.\ Math.\/} {\bf 4} (2002), 375--408.
\bibitem{DMA2}
\bibline, Prescribing a fourth order
conformal invariant on the standard sphere, Part II: blow-up analysis and
applications, preprint (2000).
\bibitem{EH}
\name{K.\ Ecker} and \name{G.\ Huisken}, Parabolic methods for the construction of
spacelike slices of prescribed mean curvature in cosmological spacetimes,
{\it Comm.\ Math.\ Phys\/}.\ {\bf 135} (1991), 595--613.
\bibitem{FG1}
\name{C.\ Fefferman} and \name{C.\ R.\ Graham}, Conformal invariants, in {\it The
Mathematical Heritage of
\'Elie Cartan\/} (Lyon, 1984),
{\it Ast\'erisque\/} ({\bf 1985\/}), 95--116.
\bibitem{FG2}
\bibline, $Q$-curvature and Poincar\'e metrics,
{\it Math.\ Res.\ Lett\/}.\ {\bf 9} (2002), 139--151.
\bibitem{Gu1}
\name{M.\ Gursky}, The Weyl functional, de Rham cohomology, and
K\"ahler-Einstein metrics, {\it Ann.\ of Math\/}.\ {\bf 148} (1998),
315--337.
\bibitem{Gu2}
\name{M.\ Gursky}, The principal eigenvalue of a conformally invariant
differential operator, with an application to semilinear elliptic PDE,
{\it Comm.\ Math.\ Phys\/}.\ {\bf 207} (1999), 131--143.
\bibitem{Ha}
\name{R.\ Hamilton}, The Ricci flow on surfaces, in {\it Mathematics and
General Relativity\/} (Santa Cruz, CA, 1986), {\it Contemp.\ Math\/}.\
{\bf 71} (1988), 237--262.
\bibitem{Li}
\name{C.\ S.\ Lin}, A classification of solutions of a conformally invariant
fourth order equation in ${\bf R}^n$, {\it Comment.\ Math.\ Helv\/}.\
{\bf 73} (1998), 206--231.
\bibitem{Mo}
\name{J.\ Moser}, On a nonlinear problem in differential geometry, in
{\it Dynamical Systems\/}\break
(M.\ Peixoto, ed.), 273--280, Academic Press, New York (1973).
\bibitem{OPS}
\name{B.\ Osgood, R. Phillips}, and \name{P. Sarnak}, Extremals of determinants of
Laplacians,\break {\it J.\ Funct.\ Anal\/}.\ {\bf 80} (1988), 148--211.
\bibitem{Si}
\name{L.\ Simon}, Asymptotics for a class of nonlinear evolution equations, with
applications to
geometric problems, {\it Ann.\ of Math\/}.\ {\bf 118} (1983), 525--571.
\bibitem{Sc}
\name{H.\ Schwetlick}, Higher order curvature flows on surfaces, preprint
(2001).
\bibitem{St}
\name{M. Struwe}, Curvature flows on surfaces, preprint (2000).
\bibitem{Ye}
\name{R.\ Ye}, Global existence and convergence of Yamabe flow,
{\it J.\ Differential Geom\/}.\ {\bf 39} (1994), 35--50.
\end{references}
\end{document} |
\begin{document}
\title{The Calderon projection over $C^*$--algebras}
\author{Paolo Antonini\thanks{
Projet Alg\'ebres d'op\'erateurs,
Universit\'e Paris Diderot, paolo.anton@gmail.com}}
\maketitle
\begin{abstract}We construct the Calderon projection on the space of Cauchy data for a twisted Dirac operator on a compact manifold with boundary acting on a bundle of finitely generated $C^*$--Hilbert modules. In particular an invertible double is constructed in the Mishchenko--Fomenko setting, generalizing the classical result.
\end{abstract}
\section{Introduction}
The introduction of $C^*$- algebras in index theory and differential geometry initiated by Mishchenko, Fomenko, Connes, Kasparov and Moscovici
\cite{connes,mf,kas} led to a number of applications and new insights including the establishment of the Novikov conjecture for a large class of manifolds \cite{cm} or the Connes--Skandalis general index theorem for foliations \cite{connesskandalis}. After this appearance, primary and secondary invariants of elliptic operators gained a promotion.
Higher indices belong to the $K$--theory of a ground $C^*$--algebra while the ordinary (numerical) ones are called lower indices.
Higher invariants contain refined information and gain stability properties from the cohomological character of the $K$-theory of operator algebras.
For example it is well known that the $C^*$--algebraic index class of the signature operator is homotopy invariant.
While the theory of elliptic operators which are invariant under the action
of $C^*$- algebras is nowadays well founded for closed manifolds, less is known for structures with boundary. There are at least two methods to deal with geometric operators on a manifold with boundary, doing analysis on the incomplete manifold following the paradigm of elliptic boundary value problems, or following the Melrose $b$--philosophy by looking at the associated complete manifold with cylindrical ends.
The two points of view should not be intended complementary or opposite but integrating one each other. This is clear from the beginning and pointed out in the seminal paper by Atiyah Patodi and Singer \cite{aps}. The interplay complete/incomplete becomes essential when dealing with the topological properties of signature operator which was the original motivation of A.P.S.
The cylindrical case in the higher setting was studied by Piazza, Leichtnam, Schick, Lott and Wahl \cite{lp1,lp2,charlotte1}. In the incomplete case the literature is still lacking. Since the spectrum of the boundary operator is no more discrete there are, in general, no A.P.S. boundary conditions in strict sense. The formulation of global elliptic boundary value problems relies on the notion of noncommutative spectral section.
In this paper we generalize the classical theory constructing a basic tool for the investigation of boundary value problems of Dirac operators acting on sections of bundles of finitely generated Hilbert modules over a $C^*$ algebra (typically, in the applications, the $C^*$--algebra of the fundamental group).
We show the existence of a nice operator called the Calderon projection. It is an order zero pseudodifferential operator in the Mishchenko--Fomenko calculus on the boundary projecting on the space of the smooth Cauchy datas. In the classical situation the whole theory is mastered by the property of this operator. Indeed not only the definition of ellipticity for boundary value problems is expressed in terms of the Calderon projection but also the index of the Fredholm realization can be computed as the relative index of the projection and the boundary condition. We postpone applications to a future paper.
\subsubsection*{Acknowledgments}
It is a pleasure to thank Paolo Piazza for having proposed this line of research, Georges Skandalis, Francesca Arici and Charlotte Wahl for interesting discussions.
\section{Review of the classical theory}
\subsection{Unique continuation property}
\begin{ddef}
One says that an operator $A$ on a smooth connected manifold $M$ (also with boundary) has the \emph{unique continuation property} (U.C.P.) if every solution $s$ of
$$As=0$$ which vanishes on an open set also vanishes everywhere.
\end{ddef}
It is well known that all the Dirac type operators on a manifold enjoy the U.C.P. There is a huge amount of literature on the subject. We limit ourselves to cite the exhaustive book \cite{Boos} and the recent paper
\cite{Lesch}. The crucial property of Dirac type operators $D$, which moreover distinguishes them among first order ones, is the product form:
\begin{equation}
\label{productform}
D=G(y,u)(\partial_u+B_u),
\end{equation}
for a locally deformed Riemannian structure. This is true a fortiori on a manifold with boundary \cite{aps}, with product metric. The tangential piece $B_u$ has an \emph{elliptic} and selfadjoint part $1/2(B_u+B_u^*)$.
For a Dirac type operator the Green formula reads:
\begin{equation}
\label{green}
\langle D s_1, s_2 \rangle - \langle s_1, D^* s_2 \rangle = - \int_{\partial M} \langle c(v)(s_1 |_{\partial M}), s_2 |_{\partial M} \rangle.
\end{equation}
Here $c(v)$ denotes Clifford multiplication by the inward normal unit vector to $\partial M$.
Let us emphasize that on a manifold with boundary, if the metric and all the geometric data defining the Dirac operator are product type near the boundary, then equation \eqref{productform} simplifies and the operator writes as
\begin{equation}
\label{prodformbnd}
D=G(y)(\partial_u+B),
\end{equation}
in a collar neighborhood $N$ of the boundary, with $G$ unitary and $B$ selfadjoint, both independent on the normal variable $u$. In this case the unique continuation property near the boundary immediately follows from elementary harmonic analysis by expanding the solution in the form
\begin{equation}\label{spectral}
s(u,y)=\sum_{\lambda}f_{\lambda}(u)\varphi_{\lambda}(y)
\end{equation} where $\{\varphi_{\lambda}(y)\}_{\lambda}$ is a spectral resolution of $B$ over the boundary. The expansion \eqref{spectral} also plays a crucial role in the original proof of the Atiyah--Patodi--Singer index formula, in particular it establishes the equivalence of the A.P.S. pseudodifferential elliptic boundary value problem with the natural $L^2$ theory on the corresponding manifold with an attached cylinder.
The typical application of this principle (from \eqref{green}) is the following result.
\begin{teo}Let $X=X_+ \cup X_-$ be a connected partitioned manifold with $X_+\cap X_-=\partial X_{\pm}=Y.$ Then there are no smooth \emph{ghost solutions}, i.e.\ solutions $s$ of $As=0$ such that $s_{|Y}=0.$
\end{teo}
We rapidly review the proof of the classical unique continuation property. If $s$ is zero on an open set $V$ which is properly contained in $M$ one choose some $x_0\in \partial V$ and a point $p\in V$ at distance $r$ from $x_0$ such that the ball $B(p,r)$ is contained in $V$.
One shows that $$s_{|B(p,r+T/2)}=0\quad \textrm{for some }T>0$$
In turn this follows from the Carleman estimate \cite{Boos}, holding for every arbitrary sufficiently big $R>0$
\begin{equation}
\boxed{
R\int_{u=0}^{T}\int_{\mathbb{S}^{n-1}_{p,u}}e^{R(T-u)^2}\|v(u,y)\|^2\,dy\,du\leq C\int_{u=0}^T\int_{\mathbb{S}^{n-1}_{p,u}}\|Av(u,y)\|^2\,dy\,du}
\end{equation} with $v(u,y):=\varphi(u)s(u,y)$ for a smooth cut-off function $\varphi$ such that $\varphi_{|u<8T/10}=1$ and $\varphi_{|u>9T/10}=0.$
The unique continuation property for Dirac operators also holds in a $C^*$ algebraic setting:
\begin{teo}
Let $X$ be a compact manifold equipped with a Clifford module bundle $\mathcal{E}$ endowed with compatible connection and $\mathcal{A}$ a bundle of finitely generated projective Hilbert modules over a unital $C^*$ algebra\footnote{we assume our $C^*$ algebras to be complex; however everything can be formulated for real $C^*$ algebras under small modifications} $A$. One forms the twisted Dirac operator
$$D:\Gamma(X;\mathcal{E}\otimes \mathcal{A})\longrightarrow \Gamma(X;\mathcal{E}\otimes \mathcal{A}).$$
If a smooth section $s$ satisfies $Ds=0$ and vanishes on an open set $\Omega \subset X$ then $s=0$.
\end{teo}
\begin{proof}It immediately follows from the fundamental estimate of Xie and Yu \cite{Xie}:
$$\|s\|\leq C_1(\Omega)\|s_{|\Omega}\|+C_2(\Omega)\|Ds\|.$$
\end{proof}
In this section we briefly recall, for the convenience of the reader, the properties of the standard Calderon projector.
There is a lot of excellent literature on the subject \cite{Boos,Lesch,Lesch2,calderon}. We refer to it.
\subsection{The invertible double construction}
\label{double}
Let us assume for simplicity that $X$ is a compact \emph{even} dimensional manifold with boundary $\partial X =Y$. This assumption will allow us to make use of the chiral notation. We denote by $D^+:\Gamma(X;\mathcal{E}^+)\longrightarrow \Gamma(X;\mathcal{E}^-)$ the positive part of a chiral Dirac operator on $X$. Assume that all structures are product near the boundary. Then, $D^+$ is in product form \eqref{prodformbnd} with unitary Green form $G$ and selfadjoint tangential part $B$.
One can construct the doubled manifold $\widetilde{X}:=X_1 \cup_{c(du)}X_2$ attaching a reversed copy of $X$ by the Clifford multiplication along the boundary. More precisely, let $X_1=X$ and denote by $X_2:=-X$ the manifold with opposite orientation. We attach a cylinder $\partial Y \times (-\varepsilon,0]$ to $X_1$ and $[0, \varepsilon) \times \partial Y$ to $X_2$, glue together the two resulting manifolds and send $\varepsilon$ to zero.
The operators $G$ and $B$ anticommute, since $D$ is (formally) selfadjoint. Moreover, the unitary map sending a section $f \in \Gamma ([0,\varepsilon) \times \partial X)$ to $Gf \in \Gamma((-\varepsilon,0]\times \partial X )$ conjugates $D$ and $-D$.
We can glue together $D^+$ and $-D^+$ into a new operator
\[\widetilde{D^+} := D^+ \cup_G -D^+\]
using $G$ as a clutching function.
Let us introduce the corresponding bundles, $\mathcal{E}^+$ over $X_1$ and $\mathcal{E}^-$ over $X_2$, using the identification given by Clifford multiplication. Then we obtain the bundles of spinors of positive and of negative chirality
\[ \widetilde{\mathcal{E}}^+ := \mathcal{E}^+ \cup_{c(v)}\mathcal{E}^- \qquad \widetilde{\mathcal{E}}^- := \mathcal{E}^- \cup_{c(v)} \mathcal{E}^+.\]
\begin{figure}[htbp]
\centering
\includegraphics[width=4in]{double}
\caption{The doubled manifold.}
\label{fig:double}
\end{figure}
So both the bundles and the operator extend to a doubled operator. The operator is invertible due to the unique continuation property; the inverse is of course pseudodifferential of order $-1$.
An analogous construction is performed for the negative part of the chiral Dirac operator $D^-$ to get the total Dirac operator $\widetilde{D}$.
\subsection{The Calderon projector}
\subsubsection{Spaces of Cauchy data}
The following definition holds both for the total Dirac operator $D$ and for its chiral component $D^+$.
\begin{ddef}
Let $D: \Gamma(M, E) \rightarrow \Gamma(M, F)$
be a Dirac type operator over a partitioned manifold $X= X_1 \sqcup X_2$, $ X_1 \cap X_2 = Y$.
We define the spaces of Cauchy data $H_{1,2}$ to be
\begin{equation}
H_{i}(D) := \lbrace u|_{Y} \ \vert \ u \in \Gamma (E) , \ Du =0 \text{ on } X_{i} \rbrace \quad i=1,2.
\end{equation}
\end{ddef}
These are the spaces of the traces on the boundary of smooth solutions in $X_1$ or $X_2$.
It is easy to see from the Green formula that the spaces of Cauchy data $H_1(\widetilde{D})$ and $H_2(\widetilde{D})$ intersect only in the zero section and are $L^2$ orthogonal.
Let $r_{1,2}$ denote the restriction maps, $r_{i}(f_1, f_2) = f_{i}$, and $\gamma_0^{\pm}$ the trace map.
Composing the adjoint $\gamma^*$ of the trace map $\gamma_-$ with the inverse of the operator, and restricting to $X_i$ one forms the Poisson operator:
\[K_{i}:= r_i(\widetilde{D})^{-1}(\gamma_0^-)^*G .\]
The Calderon projector $\mathcal{C}_i$ is defined as the trace to the boundary composed with the Poisson operator.
Since there are non--trivial regularity issues a lot of work has to be done to show that these traces make sense and the limits in a collar of the restrictions converge in $L^2$. The delicate analysis carried out in \cite{Boos,calderon} that we shall repurpose next for $C^*$- bundles shows that $\mathcal{C}^+$ is a pseudodifferential idempotent projection on $H_1$ along $H_2$.
The principal symbol of the Calderon projector is the same of the A.P.S. boundary condition i.e. the projection on the space of eigenvalues of the principal symbol of $D^+$ with positive real part. Indeed from this coincidence one can develop a theory of global elliptic boundary value problems. The boundary conditions must satisfy an assumption on the principal symbol which is formulated in terms of the principal symbol of $\mathcal{C}^+$. One of the most important results is the possibility of expressing the index of the boundary value problems in terms of purely boundary operators. More precisely if $R$ is such a boundary condition, then the $L^2$--realization of $D^+$ i.e. the unbounded operator $D_R^+$ acting on
$$\operatorname{Dom}(D_R^+):=\{u\in H^1(X):\,R(u_{|Y})=0\}$$ is Fredholm and
$$\operatorname{ind}(D^+_R)=i(R,\mathcal{C}_+).$$ At the right-hand side we find the relative index of two projections \cite{Boos}. We plan to develop a similar formula in the context of $C^*$- bundles in a forthcoming paper \cite{noi}.
\section{Sobolev modules}
Let $X$ be a compact Riemannian manifold with boundary $Y:=\partial X$, let $\widetilde{X}$ the double manifold. Given a unital $C^*$- algebra $A$, we define bundles $\mathcal{A},\,\, \widetilde{\mathcal{A}}$ of finitely generated projective Hilbert $A$--modules over $X$ and $\widetilde{X}$ respectively.
For $k\in \mathbb{N}$ the Hilbert--Sobolev modules of sections are defined as a discrete chain of topological $C^*$--Hilbert $A$-modules \cite{Va}; in particular there is no preferred Hilbertian product on the $\mathcal{H}^k$'s---except for $\mathcal{H}^0$---but an admissible class of products such that the induced Banach topologies are the same and give rise to the same space of adjointable functionals.
Admissible Hilbert products can be defined in coordinate patches for $X,\,\widetilde{X}$, using the notion of weak derivatives ($L^2$--derivatives) or, for $\widetilde{X}$, using powers of the Laplacian \cite{Va}.
Let $\mathbb{H}^n:=\{x \in \mathbb{R}^n:\, x_n\geq 0\}$ denote the half space and let $V$ be a Hilbert $A$-module. For real $s$ the Hilbert module $\mathcal{H}^s(\mathbb{R}^n;V)$ can be defined by the Fourier transform
$$\mathcal{H}^s(\mathbb{R}^n;V)=\{f \in \mathcal{H}^0(\mathbb{R}^n;V):\, (1+|\xi|^2)^{s/2}\hat{f}(\xi)\in \mathcal{H}^0(\mathbb{R}^n;V)\}$$ where
$$\hat{f}(\xi):=\frac{1}{(2\pi)^{\frac{n}{2}}}\int e^{-i \xi \cdot x}f(x) dx.$$
More generally, following Schwartz \cite{Schwartz}, we can define (tempered) distributions with values in $V$. These will be useful next.
We have a continuous embedding with dense range $$J_{\mathbb{R}^n}:\mathcal{H}^1(\mathbb{R}^n;V)\longrightarrow \mathcal{H}^0(\mathbb{R}^n;V).$$
The Hilbert--module adjoint is easily described in terms of the Fourier transform. If $h\in \mathcal{H}^0(\mathbb{R}^n;V)$ then
$$\widehat{J_{\mathbb{R}^n}^*h}=\dfrac{\hat{h}(\xi)}{1+|\xi|^2}\, \textrm{ i.e. }J_{\mathbb{R}^n}^*h=\Delta^{-1}h\,.$$ From this formula we see that $J^*_{\mathbb{R}^n}$ is surjective on the Schwartz sections and has dense range.
Since we are dealing only with $L^2$ and $H^1$ we can define a continuous extension operator by reflection $\ell_i:\mathcal{H}^i(\mathbb{H}^n;V)\longrightarrow \mathcal{H}^i(\mathbb{R}^n;V)$, $i=0,1$, having a more simplified expression than the classical one (\cite{Boos}):
\begin{equation}\label{ext}
\ell_i f:=\begin{cases}
f(y,t) & t\geq 0,
\\
-f(y,-t) & t<0.
\end{cases}
\end{equation}
It has an obvious section which is the restriction map. It is adjointable with adjoint map $\ell_i^*:g\longmapsto g(y,t)+g(y,-t)$.
We have a commutative diagram,
$$\xymatrix{\mathcal{H}^1(\mathbb{R}^n;V)\ar[rr]^{J_{\mathbb{R}^n}}& &\mathcal{H}^0(\mathbb{R}^n;V)\ar[d]^{r_0}\\\mathcal{H}^1(\mathbb{H}^n;V)\ar[rr]_{J_{\mathbb{H}^n}}\ar[u]^{\ell_1}& &\mathcal{H}^0(\mathbb{H}^n;V)}$$ then $r_0$ is the extension by zero and
$$J^*_{\mathbb{H}^n}=\ell_1^*\circ J^*_{\mathbb{R}^n}\circ r_0^*.$$ Now introduce in $\mathbb{R}^n$ the operators $\Lambda_{\pm}:=\mp \partial_{x_n}+\sqrt{1+\Delta_{(n-1)}}$. They are isomorphisms from $\mathcal{H}^s$ to $\mathcal{H}^{s-1}$, $\Lambda_+^*=\Lambda_-$ and $\Lambda_+\Lambda_-=\Delta_{(n)}=\Delta$, the positive Laplace--Beltrami operator. It is well known that for every $t$ the operator $\Lambda_+^t$ preserves the space of distributions supported in the half space $\mathbb{H}^n_-$ \cite{Boos} and $\Lambda_-$ preserves distributions supported in $\mathbb{H}^n_+$. Now applying this together with the property $\star \Lambda_+ \star=\Lambda_+$ for $\star:f\longmapsto -f(-t)$ one sees that, when restricted to Schwartz sections, the map $\ell_1^*$ is simply the restriction and $J_{\mathbb{R}}^*$ can be inverted. In other words $J_{\mathbb{R}}^*$ has dense range.
In particular by the standard Friedrichs method \cite{Ta} $\mathcal{H}^1(\mathbb{H}^n)$ sitting dense in $\mathcal{H}^0(\mathbb{H}^n)$ is the domain of a positive selfadjont operator $D$ and the complex interpolation procedure can be carried on to define the intermediate Hilbert--Sobolev modules
$$\mathcal{H}^{\theta}(\mathbb{H}^n;V):=[\mathcal{H}^0(\mathbb{H}^n;V),\mathcal{H}^1(\mathbb{H}^n;V)]_{\theta}=\mathcal{D}(D^{\theta}),\quad 0<\theta<1.$$
Negative order spaces are defined by duality as usual.
These results are transported on a Riemannian manifold with boundary using coordinates, local trivializations and a partition of unity.
\section{The Calderon projection}
\subsection{Invertible double}
Following the recent work \cite{Xie} and the classical theory \cite{Boos, Lesch} we carry on the construction of the invertible double of the Dirac operator coupled with $C^*$- Hilbert module bundles.
We do everything in even dimension just for notational simplicity. All the results of this section extend to odd dimensions in a trivial manner.
So let $X_1$ be an even dimensional compact Riemannian manifold with boundary $Y$. Let $\mathcal{E}_1$ be a graded Clifford bundle over $X_1$ and $\mathcal{A}$ a bundle of finitely generated projective Hilbert $A$-modules for a (real or complex) unital $C^*$ algebra $A$. Assume the metric and the Clifford structure are product near the boundary. For simplicity, we will also assume that the connection and metric on the twisting bundles to be in product form.
We denote by $X_2:=-X_1$ the manifold with opposite orientation and corresponding Clifford bundle $\mathcal{E}_2$. The bundles $\mathcal{E}_1\otimes \mathcal{A}$ and $\mathcal{E}_2\otimes \mathcal{A}$ are glued together by the Clifford multiplication $c(v)$ where $v:=d/du$ is the inward unit normal vector near the boundary of $X_1$. We decorate with a tilde the resulting doubled bundle and manifold, i.e.\ $\widetilde{X}:=X_1 \cup_{c(v)}X_2$ and
$$\widetilde{\mathcal{E}}\otimes \widetilde{\mathcal{A}}:=\big(\mathcal{E}_1^{\pm}\cup_{c(v)}\mathcal{E}_2^{\mp}\big)\otimes \mathcal{A}.$$
A section of $\widetilde{\mathcal{E}}^+\otimes \widetilde{\mathcal{A}}$ can be identified with a pair $(s_1,s_2)$ where $s_1\in \Gamma(\mathcal{E}_1^+\otimes \mathcal{A})$, $s_2\in \Gamma(\mathcal{E}_2^-\otimes \mathcal{A})$ such that near the boundary $$s_2=c(v)s_1.$$
We have two graded Dirac operators
$$D_i^{\pm}:\Gamma(X_i;\mathcal{E}_i^\pm \otimes \mathcal{A})\longrightarrow \Gamma(X_i;\mathcal{E}_i^\mp\otimes \mathcal{A})$$ and a resulting double operator
$\widetilde{D}$ on $\widetilde{X}$,
$$\widetilde{D}^{\pm}(s_1,s_2):=(D^{\pm}_1s_1,D_2^{\mp}s_2).$$ It is a bounded operator between the corresponding Hilbert--Sobolev modules
$$
\begin{pmatrix}0 & \widetilde{D}^- \\ \widetilde{D}^+ & 0\end{pmatrix}:\mathcal{H}^1(\widetilde{X};\widetilde{\mathcal{E}}^+\otimes \mathcal{A})\oplus \mathcal{H}^1(\widetilde{X};\widetilde{\mathcal{E}}^-\otimes \mathcal{A})\longrightarrow \mathcal{H}^0(\widetilde{X};\widetilde{\mathcal{E}}^+\otimes \mathcal{A})\oplus \mathcal{H}^0(\widetilde{X};\widetilde{\mathcal{E}}^-\otimes \mathcal{A})
$$
\begin{teo}
(\textbf{Invertible double construction}).
The operator $$\widetilde{D}^+:\mathcal{H}^1(\widetilde{X};\widetilde{\mathcal{E}}^+\otimes \widetilde{\mathcal{A}})\longrightarrow \mathcal{H}^0(\widetilde{X};\widetilde{\mathcal{E}}^-\otimes \widetilde{\mathcal{A}})$$ is invertible with bounded inverse
$$(\widetilde{D}^+)^{-1}:\mathcal{H}^0(\widetilde{X};\widetilde{\mathcal{E}}^-\otimes \widetilde{\mathcal{A}})\longrightarrow \mathcal{H}^1(\widetilde{X};\widetilde{\mathcal{E}}^+\otimes \widetilde{\mathcal{A}}).$$
\end{teo}
\begin{proof}First of all the operator $\widetilde{D}^+$ is bounded below because the entire $\widetilde{D}$ is bounded below by Theorem 5.1 in \cite{Xie}:
$$\|\sigma\|\leq C\|\widetilde{D}\sigma\|,\quad \sigma\in \Gamma(\widetilde{X};\widetilde{\mathcal{E}}\otimes \widetilde{\mathcal{A}}) .$$
Then zero is isolated in the spectrum of $(\widetilde{D}^+)^*\widetilde{D}^+$ by the John Roe convergence transfer principle (Proposition 1.13 in \cite{roe}). Indeed the original proof works word by word in the context of Hilbert modules. Then by Lemma 3.2 in the Appendix and by the Mishchenko Lemma we know that $\operatorname{Ran}(\widetilde{D}^+)$ is closed and orthocomplemented:
$$\mathcal{H}^0(\widetilde{X};\widetilde{E}^-\otimes \widetilde{\mathcal{A}})=\operatorname{Ker}\widetilde{D}^-\oplus^{\bot} \operatorname{Ran}\widetilde{D}^+.$$ It is sufficient to show that $\operatorname{Ker}\widetilde{D}^-$ is zero. This follows as in the classical situation by the unique continuation property. Indeed let $(s_1,s_2)$ be such a solution. We are going to show that it vanishes on $Y$. Indeed $D^+s_1=0=D^-s_2$ hence
$$0=\langle D^+s_1,s_2\rangle_A-\langle s_1,D^-s_2\rangle_A=-\int_{Y}\langle c(v)(s_1)_{|Y},(s_2)_{|Y}\rangle_A\, dy=\langle (s_2)_{|Y}, (s_2)_{|Y}\rangle_A.$$
It follows exactly by the classical argument (Lemma 9.2 in \cite{Boos}) that setting
\begin{equation}\label{uep}
\widetilde{s}:=\begin{cases}
s_1 & \text{on } X_1,
\\
0 & \text{on } X_2,
\end{cases}
\end{equation} gives a weak solution and by elliptic regularity a strong solution. This solution must be zero by the unique continuation property.
\end{proof}
\begin{rem}The invertible double construction remains valid if the manifold is no longer compact but the scalar curvature is positive and bounded from below outside a compact set \cite{Xie}.
\end{rem}
Of course the inverse $(\widetilde{D}^+)^{-1}$ is an order $-1$ pseudodifferential operator in the Mishchenko--Fomenko calculus. To see that just take a parametrix $Q$,
$$Q\widetilde{D}^+=1+K$$ with $K$ a smoothing operator. Then
$$(\widetilde{D}^{+})^{-1}=Q-K(\widetilde{D}^{+})^{-1}.$$
\subsection{The Calderon projection}
Define the space of Cauchy data along $Y$ by
$$H_{i}(\widetilde{D}^+):=\{u_{|Y}: u\in \Gamma
(\widetilde{\mathcal{E}}^+\otimes \mathcal{A})
,\quad \widetilde{D}^+u=0 \text{ in } X_{i}\}, \quad i=1,2.$$
The closure of the Cauchy data spaces in $\mathcal{H}^{s-1/2}$ will be denoted by $H_{1,2}(\widetilde{D}^+,s).$
It is easy to see from the Green formula that these spaces intersect only in the zero section.
We denote, for $s>1/2$, by $\operatorname{Ker}_{1,2}(\widetilde{D}^+,s)$ the closure in $\mathcal{H}^{s-1/2}$ of the kernel of $\widetilde{D}^+$ in $X_{1,2}$.
By the unique continuation property there are no solutions in these spaces with support contained in the interior or identically vanishing on the boundary. Let $r_1$ be the operator of restriction of sections from $\widetilde{X}$ to $X_1$ and $\gamma_{t}$ the trace map which restricts a section to the slice which is distant $t$ from the boundary. It is continuous and adjointable from the Sobolev modules $\mathcal{H}^s$ to $\mathcal{H}^{s-1/2}$ for $s>1/2$.
In particular we have the trace to the boundary
$\gamma_0^-:\mathcal{H}^s(\widetilde{X};\widetilde{\mathcal{E}}^-\otimes \widetilde{\mathcal{A}})\longrightarrow \mathcal{H}^{s-1/2}(Y;\widetilde{\mathcal{E}}^-\otimes \widetilde{\mathcal{A}}_{|Y}).$
It is known that $1/2$ is the critical regularity for the trace to the boundary indeed the space of smooth sections supported in the interior is dense in $\mathcal{H}^{1/2}$. Instead solutions of the Dirac operators of any regularity have traces.
\begin{teo}\label{traccia}For every positive real $s$ (actually for every $s$) the trace map $\gamma$ is well defined as a map $$\gamma:\operatorname{Ker}_{1,2}(\widetilde{D}^+,s)\longrightarrow \mathcal{H}^{s-1/2}(Y;\widetilde{\mathcal{E}}\otimes \widetilde{\mathcal{A}}_{|Y}),$$ in other words
$$\gamma(f)=\lim_{t \rightarrow 0}\gamma_{t}(f).$$
\end{teo}
\begin{proof}
Thanks to the complex interpolation procedure defined above, the classical proof \cite{Boos} repeats word by word.
\end{proof}
Now define the \emph{Poisson operator}
$$K_1:=r_1 (\widetilde{D}^+)^{-1}(\gamma_0^-)^*G:\Gamma(Y;\widetilde{\mathcal{E}}^+\otimes \widetilde{\mathcal{A}}_{|Y})\longrightarrow \Gamma(X_1\setminus Y;\widetilde{\mathcal{E}}^+\otimes \widetilde{\mathcal{A}}_{|X_1\setminus Y}).$$
\begin{teo}
The Poisson operator $K_1$ extends to a continuous surjective map from $\mathcal{H}^{s-1/2}(Y;\widetilde{\mathcal{E}}^+\otimes \widetilde{\mathcal{A}}_{|Y})$ to the $s$--kernel $\operatorname{Ker}_{1}(\widetilde{D}^+,s)$ and induces by restriction a bijection from the Cauchy data space to the kernel:
$$K_1:H_{1}(\widetilde{D}^+,s)\longrightarrow \operatorname{Ker}_{1}(\widetilde{D}^+,s).$$
\end{teo}
\begin{proof}
First of all, as a consequence of Theorem \ref{traccia}, $K_1$ maps $\mathcal{H}^s$ continuously to $\mathcal{H}^{s+1/2}$ for $s\geq 0$. The Cauchy data space property is a straightforward computation as in \cite{Boos}.
\end{proof}
Now the Calderon projection is finally defined as
$$\mathcal{C}_{+}:=\gamma^+ K_1.$$ It maps $\mathcal{H}^{s}(Y;\widetilde{\mathcal{E}}^+\otimes \widetilde{\mathcal{A}}_{|Y})$ continuously to itself for every $s$, $s\geq 0$. The classical proof adapts to show that, when restricted to $\mathcal{H}^0$, it is a projection (idempotent) on the space of Cauchy data $H_1$ along $H_2$. Similar statements hold reversing the roles of the left/right side of the doubled manifold.
\begin{teo}The Calderon projection is a pseudodifferential operator of order zero in the Mishchenko--Fomenko calculus on the boundary.
\end{teo}
\begin{proof}The proof in \cite{Boos} works in the Mishchenko--Fomenko framework. Due to its relevance we give a sketch. It is basically based on the main feature of the pseudodifferential calculus, i.e.\ the composition formula for symbols.
Localizing the problem, the nontrivial step is the investigation of the limit
$$\lim_{t \rightarrow 0}\gamma_{t}M_{\varphi_1}(\widetilde{D}^+)^{-1}M_{\varphi_2}\gamma_0^*,$$ where the $M_{\varphi_i}$ are multiplications by cutoff functions with non-disjoint supports meeting the boundary and we have suppressed the suffix ${\pm}$ on the trace operators. The operator $M_{\varphi_1}(\widetilde{D}^+)^{-1}M_{\varphi_2}$ is pseudodifferential with total symbol admitting an asymptotic expansion
$$\sum_{k=1}^{\infty}c_{-k},\quad c_{-1}=a_1^{-1}=\sigma_1(\widetilde{D}^+)^{-1}.$$ Then if the $\mathcal{C}_{-k}=M_{\varphi_1}c_{-k}(x,D)M_{\varphi_2}$ are the homogeneous pseudodifferential operators given by the symbols $c_{-k}$, then we can write for every $k_0$
$$M_{\varphi_1}(\widetilde{D}^+)^{-1}M_{\varphi_2}=T+\underbrace{\sum_{k<k_0}\mathcal{C}_{-k}}_{\textrm{errors}}$$ with $T$ an operator of order $-k_0$; then only the investigation of the errors is needed. The point is a precise information contained in the symbol of $\mathcal{C}_{-k}\gamma_0^*$. Indeed for a test section $g$,
\begin{equation}\label{cald1}
\mathcal{C}_{-k}\gamma_0^*g(y,t)=\lim_{m\rightarrow \infty}(2\pi)^{-n}\int_{\mathbb{R}^{n-1}}e^{iy\cdot \eta}\hat{g}(\eta)\bigg{[}\int_{-\infty}^{\infty}
e^{it\tau}\hat{\alpha}(\tau/m)c_{-k}(y,t;\eta,\tau)d\tau\bigg{]}d\eta
\end{equation} for a bump function $\alpha$ on $\mathbb{R}$ supported in $(-1,-1/2)$ with $\int \alpha=1$. The decisive step is the replacement of the $\mathbb{R}$ integral in \eqref{cald1} by an integral over a finite path $\Gamma(\eta)$ contained in $\Im(\tau)\geq 0$. This will provide uniform boundedness of the integrals, permitting the passage of the limit inside the integral,
\begin{equation}\label{cald2}
\mathcal{C}_{-k}\gamma_0^*g(y,t)=(2\pi)^{-n}\int_{\mathbb{R}^{n-1}}e^{iy\cdot \eta}\hat{g}(\eta)\bigg{[}\int_{\Gamma(\eta)}
e^{it\tau}c_{-k}(y,t;\eta,\tau)d\tau\bigg{]}d\eta.
\end{equation}
The choice of $\Gamma(\eta)$
depends on the invertibility properties of the complex extension in $\tau$ of $a_1(y,t;\eta,\tau)$. In our case, the operator is a twisted Dirac operator and the principal symbol is the Clifford multiplication $a_1(\xi)= c(\xi)\otimes \mathbb{1}_{\mathcal{A}}$, so all the arguments in \cite{Boos} remain valid. More precisely, for $\eta$ in the cosphere bundle there is a compact set $\mathcal{Z}\subset \mathbb{C}$ not intersecting the real axis such that $a_1$ is invertible for $\tau$ in the complement of $\mathcal{Z}$. Divide $\mathcal{Z}$ into the pieces $\mathcal{Z}^{\pm}$ with positive/negative imaginary part.
The positive part is contained in a circle of radius $R$.
The contour is
$$\Gamma(\eta):=\partial\big{(}\{\tau:|\tau|\leq \max(1,R)\}\cap \{\Im \tau\geq 0\}\big{)}.$$
\begin{figure}[htbp]
\centering
\includegraphics[width=4in]{pat}
\end{figure}
Once the integral is changed into a contour integral one has dominated convergence, with the integrands dominated by a common polynomial in $|\tau|$; then one can pass the limit inside the integral, giving the uniform convergence of $\gamma_{t}\mathcal{C}_{-k}\gamma_0^*g$ in $L^2$. It also follows that the operator $\lim_{t \rightarrow 0}\gamma_{t}\mathcal{C}_{-k}\gamma_0^*$ is pseudodifferential of order $1-k$ with total symbol
$$p_{-k}(y,\eta):=\frac{1}{2\pi} \int_{\Gamma(\eta)}c_{-k}(y,0;\eta,\tau)d\tau$$ belonging to the standard homogeneous symbol class.
\end{proof}
We can as well compute the principal symbol of the Calderon projection. Remember the product form near the boundary
$$\widetilde{D}^+=G(y)(\partial_u+B)$$ with selfadjoint boundary operator $B$, itself a twisted Dirac operator with twisted symbol $b(x,y)\otimes \mathbb{1}_{\mathcal{A}}:\widetilde{\mathcal{E}^+}_y\otimes \mathcal{A}_y\rightarrow \widetilde{\mathcal{E}^+}_y\otimes \mathcal{A}_y .$
Then
\begin{eqnarray}
\nonumber
\sigma_1(\mathcal{C}_+)(y,\eta)=\sigma_1(\lim \gamma_{t}\mathcal{C}_{-1}\gamma_0^*G)(y,\eta)&=&\frac{1}{2\pi}\int_{\Gamma(\eta)}a_1^{-1}(0,y;\tau,\eta)d\tau \cdot G(y)\\ \nonumber&=&\frac{1}{2\pi} \int_{\Gamma(\eta)}\{G(y)(i\tau+b(x,y)\otimes \mathbb{1}_{\mathcal{A}})\}^{-1}\cdot G(y)d\tau\\ \nonumber
&=&-\frac{1}{2\pi} \int_{\Gamma(\eta)}\{
ib(x,y)\otimes \mathbb{1}_{\mathcal{A}}-\tau\}^{-1}d\tau\\
\nonumber
&=&q_+(x,y)\otimes \mathbb{1}_{\mathcal{A}}
,
\end{eqnarray}
where $q_+(x,y)$ is the spectral projection of $b(x,y)$ on the space associated to the eigenvalues with positive real part.
Of course $\mathcal{C}_+$ is not selfadjoint, i.e.\ it is not an orthogonal projection in the Hilbert module of $L^2$--sections on the boundary. Its range as a bounded operator in $L^2$ is closed and complementable, being the range of an adjointable (since pseudodifferential) idempotent. In the applications, especially dealing with spectral sections and global boundary value problems, one can replace the operator with its orthogonalization.
\begin{lem}The operator $F:=\mathcal{C}_+\mathcal{C}_+^*+(1-\mathcal{C}_+^*)(1-\mathcal{C}_+)$ acting on the Hilbert module of $L^2$--sections on the boundary is invertible. The operator $$\mathcal{C}_+^\bot:=\mathcal{C}_+\mathcal{C}_+^*F^{-1}$$ is the orthogonal projection on the image of $\mathcal{C}_+$. It is a pseudodifferential operator of order zero with the same principal symbol as $\mathcal{C}_+$.
\end{lem}
\begin{proof}
Since the range of the Calderon projection is complementable we have the splitting
$$\mathcal{H}=\operatorname{range}(\mathcal{C}_+)\oplus \operatorname{range}(\mathcal{C}_+)^{\bot}=\operatorname{range}(\mathcal{C}_+)\oplus \operatorname{Ker}(\mathcal{C}_+^{*}).$$
The operator $F$ can be written as
$$F=\underbrace{\mathcal{C}_+\mathcal{C}_+^*}_{G}+\underbrace{(1-\mathcal{C}_+^*)(1-\mathcal{C}_+)}_T$$ where $G$ is the partial isometry $\operatorname{range}(\mathcal{C}_+)\longrightarrow \operatorname{range}(\mathcal{C}_+^*)$ and $T$ is the partial isometry on the complements.
The rest of the proof is standard.
\end{proof}
\appendix
\section{Hilbert Modules}
We recall here some results on the theory of operators on Hilbert $C^*$--modules. For more details and proofs the reader is referred to \cite{lance, Skand, Wo}.
\subsection{Basic Definitions}
Let $A$ be a $C^*$-algebra. We denote by $P^+(A)$ the set of all positive elements in the $C^*$-algebra, i.e. those $a \in A$ satisfying one of the following equivalent conditions:
\begin{itemize}
\item $a$ has positive spectrum, i.e. $\sigma(a) \subset [0, +\infty)$;
\item $a=bb^*$ for some $b \in A$;
\item $a=h^2$ for some Hermitian $h\in A$.
\end{itemize}
We denote by $\mathcal{P}(A)$ the category of \emph{finitely generated projective modules} over $A$.
\begin{ddef}
A topological $A$-module $M$ is called a \emph{Hilbert $A$-module} if it is equipped with a continuous map
\[\begin{aligned}
M\times M &\rightarrow A\\
(x,y) &\mapsto \langle x , y \rangle_A
\end{aligned}\]
satisfying the conditions
\begin{enumerate}
\item $\langle x, x \rangle_A \geq 0$ for any $x \in M$, i.e. $\langle x, x \rangle_A \in P^+(A)$;
\item $\langle x, x \rangle_A =0$ if and only if $x=0$;
\item $\langle x, y \rangle_A = \langle y, x \rangle_A^*$ for all $x, y \in M$;
\item $\langle x, ya \rangle_A =\langle x, y \rangle_A\, a$ for all $x, y \in M$ and $a \in A$;
\end{enumerate}
and the module $M$ is a Banach space with respect to the norm induced by the inner product $\| x \|^2_A :=\| \langle x, x \rangle_A \|_A$, where $\| \cdot \|_A$ denotes the $C^*$-norm on $A$.
\end{ddef}
The map $\langle \cdot, \cdot \rangle_A$ is called a Hermitian $A$-inner product. When no confusion arises we will omit the subscript.
We will say that two elements $x$ and $y$ in $M$ are orthogonal if $\langle x, y \rangle =0 $, and we will write $x \perp y$. Orthogonality for arbitrary sets may be defined similarly.
If $N, L$ are two closed submodules in $M$, and $N\oplus L = M$, $N \perp L$, then $N$ is called the $A$-orthogonal complement of $L$ in $M$ (and viceversa).
Any free $A$-module $A^n$ and any projective module $P \in \mathcal{P}(A)$ can be equipped with a structure of Hilbert $A$-module.
Let $P \in \mathcal{P}(A)$. We consider the set $\ell_2(P)$ of infinite sequences
\[(x_1, \dots, x_i, \dots) = x \quad x_i \in P, \quad i=1, \dots\]
such that the series $\sum_{i} \langle x_i , x_i \rangle_A$ converges in the algebra $A$. For any two elements $x, y \in \ell_2(P)$ we define
\[\langle x, y \rangle = \sum_{i=1}^{\infty} \langle x_i, y_i \rangle_A .\]
The space $\ell_2(P)$ is a Hilbert $A$-module with respect to this inner product.
\begin{lem}
Any free $A$-module admits a unique (up to isomorphism) $A$-inner product.
\end{lem}
\begin{teo}
Suppose that $\ell_2(A)= M \oplus N$, where $M, N$ are closed submodules of $\ell_2(A)$ and $N$ has a finite number of generators. Then $N$ is a projective module.
\end{teo}
\subsection{Operators in Hilbert Modules}
Let $M, N$ be Hilbert modules.
\begin{ddef}
A mapping $T: M \rightarrow N$ such that for some mapping $T^*: N \rightarrow M$ the relation
\begin{equation}
\langle u, Tv \rangle = \langle T^* u, v \rangle \qquad \mbox{holds for all} \ u \in N, v \in M,
\end{equation}
is called an operator from $M$ to $N$. We denote the space of such operators by $\mathcal{L}(M,N)$.
\end{ddef} Note that for mappings between $C^*$-modules, the conditions of linearity and boundedness do not always imply the existence of an adjoint.
The subspace $\mathcal{K}(M,N) \subset \mathcal{L}(M,N)$ of compact operators is defined as the norm closure of the space generated by operators of rank 1, i.e. operators of the form \[\theta_{x,y} z:= x \langle y, z \rangle \qquad x \in N, \quad y, z \in M.\]
Obviously, \[(\theta_{x,y})^* = \theta_{y,x} \qquad \theta_{x,y}\theta_{u,v} = \theta_{x \langle y, u \rangle, v}=\theta_{x, v \langle u, y \rangle}.
\]
For $M=N$, the space $\mathcal{K}(M) := \mathcal{K}(M,M)$ is a $C^*$-ideal in $\mathcal{L}(M)$.
\begin{lem}
Let $T\in \mathcal{L}(M,N)$.
\begin{enumerate}
\item If $T$ is surjective, $TT^*$ is invertible in $\mathcal{L}(N)$ and $M=\operatorname{Ker}T\oplus \operatorname{Ran}T^*.$
\item If $T$ is bijective then so is $T^*$, $T^{-1}\in \mathcal{L}(N,M)$ and $(T^{-1})^*=(T^*)^{-1}.$
\end{enumerate}
\end{lem}
\begin{lem}Let $T\in \mathcal{L}(M,N).$ The following conditions are equivalent:
\begin{itemize}
\item $\operatorname{Ran}T$ is closed in $N$,
\item $\operatorname{Ran}T^*$ is closed in $M$,
\item $0$ is isolated in the spectrum of $T^*T$,
\item $0$ is isolated in the spectrum of $TT^*$.
\end{itemize}
\end{lem}
\begin{lem}[Mishchenko]
Let $M,N$ be Hilbert $A$--modules and $T\in \mathcal{L}(M,N)$ an operator with closed range. Then $\operatorname{Ker}T$ is complemented in $M$, $\operatorname{Ran}T$ is complemented in $N$ and
$$N=\operatorname{Ker}T^*\oplus \operatorname{Ran}T.$$
\end{lem}
\begin{proof}Let $N_0:=\operatorname{Ran}T$ and $T_0:M\longrightarrow N_0$ the operator whose action coincides with the action of $T$. By the open mapping theorem $T_0(B_1(M))$ (the image of the unit ball) contains some ball of radius $\delta>0$ in $N_0$. Therefore for every $y\in N_0$ there is an $x\in M$ such that $T_0x=y$ and $\|x\|\leq \delta^{-1}\|y\|.$
Then $\|T^*_0y\|^2=\|\langle y, T_0T^*_0y\rangle \|\leq \|y\|\cdot \|T_0T_0^* y\|$ hence
$$\|y\|^2=\|\langle T_0x,y\rangle \|=\|\langle x,T_0^*y\rangle\|\leq \|x\|\cdot \|T^*_0y\|\leq \delta^{-1}\|y\|^{3/2}\|T_0T_0^*y\|^{1/2},$$
i.e. $$\|y\|\leq \delta^{-2}\|T_0T_0^*y\|,\quad y\in N_0.$$ Now since $0$ is not in the spectrum of $T_0T_0^*$ we have that $T_0T_0^*$ is invertible and for every $z\in M$ there exists $w\in N_0$ such that $T_0z=T_0T_0^*w.$ Then $z-T_0^*w \in \operatorname{Ker}T$ and
$$z=(z-T_0^*w)+T_0^*w\in \operatorname{Ker}T+\operatorname{Ran}T_0^*.$$
Since $\operatorname{Ran}T_0^*$ is obviously orthogonal to
$\operatorname{Ker}T$, it is a complement for
$\operatorname{Ker}T$.
This completes the proof of orthogonal complementability for $\operatorname{Ker}T$. Now pass to
$\operatorname{Ran}T$. Since
$M=\operatorname{Ker}T\oplus \operatorname{Ran}T_0^*$,
the submodule
$\operatorname{Ran}T_0^*$ is closed. Note that
$\operatorname{Ran}T_0^*=\operatorname{Ran}T^*$ so one can apply the previous argument to $T^*$ instead of $T$, which gives the orthogonal decomposition
$$N=\operatorname{Ker}T^*\oplus \operatorname{Ran}T.$$
\end{proof}
\`e nd{document} |
\begin{document}
\title{Quantum circuits for maximally entangled states}
\author{Alba Cervera-Lierta}
\email{a.cervera.lierta@gmail.com}
\affiliation{Barcelona Supercomputing Center (BSC).}
\affiliation{Dept. F\'{i}sica Qu\`{a}ntica i Astrof\'{i}sica, Universitat de Barcelona, Barcelona, Spain.}
\author{Jos\'{e} Ignacio Latorre}
\affiliation{Dept. F\'{i}sica Qu\`{a}ntica i Astrof\'{i}sica, Universitat de Barcelona, Barcelona, Spain.}
\affiliation{Nikhef Theory Group, Science Park 105, 1098 XG Amsterdam, The Netherlands.}
\affiliation{Center for Quantum Technologies, National University of Singapore, Singapore.}
\author{Dardo Goyeneche}
\affiliation{Dept. F\'{i}sica, Facultad de Ciencias B\'{a}sicas, Universidad de Antofagasta, Casilla 170, Antofagasta, Chile.}
\date{\today}
\begin{abstract}
We design a series of quantum circuits that generate absolute maximally entangled (AME) states to benchmark a quantum computer. A relation between graph states and AME states can be exploited to optimize the structure of the circuits and minimize their depth. Furthermore, we find that most of the provided circuits obey majorization relations for every partition of the system and every step of the algorithm. The main goal of the work consists in testing efficiency of quantum computers when requiring the maximal amount of genuine multipartite entanglement allowed by quantum mechanics, which can be used to efficiently implement multipartite quantum protocols.
\end{abstract}
\maketitle
\section{Introduction}
There is a need to set up a thorough benchmarking strategy for quantum computers. Devices that operate in very different platforms are often characterized by the number of qubits they offer, their coherence time and the fidelities of one- and two-qubit gates. This is somewhat misleading, as the performance of circuits is far below what is expected when the amount of genuine multipartite entanglement contained in the state is relatively high.
There exist several figures of merit that try to quantify the success performance of a quantum device. Methods such as randomized benchmarking \cite{RB}, state and process tomography \cite{SPT} and gateset tomography \cite{GT1,GT2} are used to quantify gate fidelities. However, they are only useful for few-qubit experiments and fail when used to evaluate the performance of larger circuits \cite{fail1,fail2}. In that sense, IBM proposed a metric to be used in arbitrarily large quantum circuits called \textit{quantum volume} \cite{volume}. This figure takes into account several circuit variables like number of qubits, connectivity and gate fidelities. The core of the protocol is the construction of arbitrary circuits formed by one- and two-qubit gates that are complex enough to reproduce a generic $n$-qubit operation. One can expect to generate high entanglement in this kind of circuits. Even so, we should certify that this amount of entanglement will be large enough to perform some specific tasks that, precisely, demand high entanglement. A further relevant reference concerns the volumetric framework for quantum computer benchmarks \cite{BY19}.
Two reasonable ways to test quantum computers are the following: \emph{(i)} implement a protocol based on maximally entangled states, \emph{(ii)} solve a problem that is hard for a classical computer. These two ways are linked by the fact that quantum advantage requires large amounts of entanglement, so that classical computers are unable to carry out the demanded task even when a sophisticated technique, e.g. tensor networks \cite{TN}, is considered. We believe that item \emph{(i)} is fully doable with the current state of the art of quantum computers, at least for a small number of qubits. On the other hand, item \emph{(ii)} is much more challenging, as classical computers efficiently work with a large number of bits.
Quantum correlations depend on the delicate balance of the coefficients of the wave function. It is natural to expect that quantum computers will have to be very refined to achieve such a good description of multipartite correlations along the successive action of gates. Entanglement is at the heart of quantum efficiency \cite{Jozsa}. Again, if a quantum computer is not able to generate faithful large entanglement, it will remain inefficient.
A fundamental factor in quantum computing is the ability to generate large entangled states, such as area law violating states \cite{E13}. However, such ability has to be accomplished by a sufficiently large coherence time for such multipartite maximally entangled states. Note that GHZ-like states are highly entangled and useful to test violate qubit Bell inequalities \cite{Mermin90}, but even more entangled are the Absolute Maximally Entangled (AME) states \cite{FFPP08,HCLR12,GALRZ15}, which are maximally entangled in every bipartition of the system.
Following this line of thought, along this work we explore techniques to efficiently construct quantum circuits for genuinely multipartite maximally entangled states. Some preliminary results reflect the difficulty to deal with quantum computers. For instance, the amount of Bell inequality violation rapidly decreases with the number of qubits considered \cite{Dani}. Also, the exact simulation of an analytically solvable model in a quantum computer significantly differs from the expected values, even when considering four qubits and less than thirty basic quantum gates \cite{Ising}. These examples illustrate the difference between gate fidelity and circuit fidelity, the second one being much harder to improve.
We shall present quantum circuits required to build multipartite maximally entangled states. This proposal differs from bosonic sampling method, where large amounts of entanglement are faithfully reproduced by classical simulations \cite{Boson}. AME states find applications in multipartite teleportation \cite{HC13} and quantum secret sharing \cite{HCLR12,HC13}. Along our work, maximally entangled states will be exclusively used to test the strength of multipartite correlations in quantum computers. Another possible use, not discussed by us, could be generate quantum advantage with respect to a classical computer or, ideally, to achieve quantum supremacy.
We describe a benchmark suite of quantum circuits, where each one should deliver an AME state. The circuits provided were designed to minimize the number of required gates under the presence of restricted connectivity of qubits. Some of them consider individual systems composed by more than two internal levels each, which sometimes can be effectively reduced to qubits. In general, we consider simple and compact circuits, illustrating the way in which multipartite entanglement is generated step by step along the circuit. We have also paid attention to a simple criterion of majorization of the entropy of reductions, which basically implies that multipartite entanglement, quantified by the averaged entropy of reductions, monotonically increases for all partitions.
This work is organized as follows: In Section \ref{sec:Review}, we review the basic properties of AME states and show explicit examples. In Section \ref{sec:circuits}, we present the quantum circuits that generate AME states by using the properties of graph states. We also propose the simulation of AME states having local dimension larger than 2 by using qubits instead of qudits. In Section \ref{sec:major}, we analyze the entanglement majorization criteria in the proposed circuits and find further optimal circuits for experimental implementation, by imposing a majorization arrow in terms of entanglement. In Section \ref{sec:implementation}, we implement GHZ and AME states for five-qubit systems in IBM quantum computers, and quantify the state preparation quality by testing maximal violation of suitably chosen Bell inequalities. Finally, in Section \ref{sec:conclusions} we discuss and summarize the main results of the paper.
\section{Review of AME states}\label{sec:Review}
The study of AME states have become an intensive area of research along the last years due to both theoretical foundations and practical applications. In this section we briefly review the current state of the art of the field. For a more extensive review on AME states, see e.g. Ref. \cite{GALRZ15}.
\subsection{General properties of AME states}
AME states, also known in some references as \emph{maximally multipartite entangled states}, are $n$-qudit quantum states with local dimension $d$ such that every reduction to $\lfloor n/2\rfloor$ parties is maximally mixed, where $\lfloor\,\cdot\,\rfloor$ is the floor function. Such states are maximally entangled when considering the average entropy of reductions as a measure of multipartite entanglement. That is, when the average Von Neumann entropy $S(\rho)=-\mathrm{Tr}[\rho\log\rho]$, taken over all reductions to $\lfloor n/2\rfloor$ parties, achieves the global maximum value $S(\rho)=\lfloor n/2\rfloor$, where the logarithm is taken in base $d$. For instance, Bell states and GHZ states are AME states for bipartite and tripartite systems, respectively, for any number of internal levels $d$.
The existence of AME states for $n$ qudit systems composed by $d$ levels each, denoted AME($n,d$), is a hard open problem in general \cite{HESG18}. This problem is fully solved for any number of qubits: an AME($n,2$) exists only for $n=2,3,5,6$ \cite{S04,HGS17,AME42,AME72}. Among all existing AME states, there is one special class composed by minimal support states. These states are defined as follows: an AME($n,d$) state has minimal support if it can be written as the superposition of $d^{\lfloor n/2\rfloor}$ fully separable orthogonal pure states. Here, we consider superposition at the level of vectors, in such a way that the linear combination of pure states always produce another pure state. For example, generalized Bell states for two-qudit systems and generalized GHZ states for three-qudit systems have minimal support. It is simple to show that all coefficients of every AME state having minimal support can be chosen to be identically equal to $d^{-\lfloor n/2\rfloor/2}$, i.e. identical positive numbers. By contrast, AME states having non-minimal support require to be composed by non-trivial phases in its entries in order to have all reduced density matrices proportional to the identity. In other words, non-minimal support AME states require destructive interference.
AME states connect to several mathematical tools. It is known that
AME states composed by $n$ parties and having minimal support, e.g. AME(2,2), AME(3,2) and AME(4,3), are one-to-one related to a special class of maximum distance separable (MDS) codes \cite{HP03}, index unity orthogonal arrays \cite{GZ14}, permutation multi-unitary matrices when $n$ is even \cite{GALRZ15} and to a set of $m=n-\lfloor n/2\rfloor$ mutually orthogonal Latin hypercubes of size $d$ defined in dimension $\lfloor n/2\rfloor$ \cite{GRDZ18}.
On the other hand, AME states inequivalent to minimal support states, e.g. AME(5,2) or AME(6,2), are equivalent to quantum error correction codes \cite{S04}, quantum orthogonal arrays \cite{GRDZ18}, non-permutation multiunitary matrices \cite{GALRZ15} and $m=n-\lfloor n/2\rfloor$ mutually orthogonal quantum Latin hypercubes of size $d$ defined in dimension $\lfloor n/2\rfloor$ \cite{GRDZ18}.
AME states define an interesting mathematical problem itself but also they define attractive practical applications. These include quantum secret sharing \cite{HCLR12,HC13}, open destination quantum teleportation \cite{HC13} and quantum error correcting codes \cite{S04}, the last one being a fundamental ingredient for building a quantum computer.
\subsection{Explicit expressions of AME states}
The simplest AME($n,d$) states, denoted $\Omega_{n,d}$, having minimal support are the Bell and GHZ states
\begin{eqnarray}
\Omega_{2,d}=\frac{1}{\sqrt{d}}\sum_{i=0}^{d-1}|ii\rangle,
\end{eqnarray}
and
\begin{eqnarray}
\Omega_{3,d}=\frac{1}{\sqrt{d}}\sum_{i=0}^{d-1}|iii\rangle,
\end{eqnarray}
respectively. These states are AME for any number of internal levels $d\geq2$. That is, every single particle reduction in both states $\Omega_{2,d}$ and $\Omega_{3,d}$, produces the maximally mixed state. On the other hand, it is not obvious to prove that there is no AME state for $n=4$ qubits \cite{AME42}. The AME(5,2) state \cite{LMPZ96} can be written as
\begin{equation}
|\Upsilon_{5,2}\rangle = \frac{1}{4\sqrt{2}} \sum_{i=1}^{32} c_i |i\rangle ,
\end{equation}
where the 5-digits binary decomposition of $i$ should be considered inside the ket and
\begin{align}
c_i=\{&1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, \nonumber\\
& -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1\}.
\end{align}
By using local unitary operations, the same state can be reduced to any of the following states \cite{HCLR12,Cleve99},
\begin{align}
\ket{0_{L1}}= \frac{1}{4}
(& \ket{00000}+\ket{10010}+\ket{01001}+\ket{10100} \nonumber\\
&+ \ket{01010}-\ket{11011}-\ket{00110}-\ket{11000} \nonumber\\
&- \ket{11101}-\ket{00011}-\ket{11110}-\ket{01111} \nonumber\\
&- \ket{10001}-\ket{01100}-\ket{10111}+\ket{00101}),\nonumber\\
\ket{1_{L1}}= \frac{1}{4}
(& \ket{11111}+\ket{01101}+\ket{10110}+\ket{01011} \nonumber\\
&+ \ket{10101}-\ket{00100}-\ket{11001}-\ket{00111} \nonumber\\
&- \ket{00010}-\ket{11100}-\ket{00001}-\ket{10000} \nonumber\\
&- \ket{01110}-\ket{10011}-\ket{01000}+\ket{11010}).
\end{align}
For $n=6$, an AME(6,2) state \cite{BPBZCP07} can be constructed from the above AME(5,2) states $|0_{L1}\rangle$ and $|1_{L1}\rangle$ as
\begin{align}
\ket{\Omega_{6,2}}=& \frac{1}{\sqrt{2}}\left(\ket{0}\ket{0_{L1}}+\ket{1}\ket{1_{L1}}\right) \nonumber\\
=\frac{1}{4}\big(&\ket{000}(\ket{+-+}+\ket{-+-})- \nonumber\\
&\ket{001}(\ket{+--}-\ket{-++})+ \nonumber\\
&\ket{010}(\ket{++-}-\ket{--+})- \nonumber\\
&\ket{011}(\ket{+++}+\ket{---})- \nonumber\\
&\ket{100}(\ket{+++}-\ket{---})- \nonumber\\
&\ket{101}(\ket{++-}+\ket{--+})- \nonumber\\
& \ket{110}(\ket{+--}+\ket{-++})- \nonumber\\
&\ket{111}(\ket{+-+}-\ket{-+-})\big),
\end{align}
where $|\pm\rangle=\left(|0\rangle\pm|1\rangle\right)/\sqrt{2}$. This exemplifies that local unitaries can be used to find versions of an AME state with a reduced support. Similarly, an AME(5,2) state having eight real coefficients can be found by combining the two states
\begin{align}
|0_{L2}\rangle &=\frac{1}{2}(|00000\rangle + |00011\rangle + |01100\rangle -|01111\rangle),\\
|1_{L2}\rangle &=\frac{1}{2}(|11010\rangle + |11001\rangle + |10110\rangle - |10101\rangle)
\end{align}
in the following way \cite{GALRZ15}:
\begin{equation}
|\Omega_{5,2}\rangle=\frac{1}{\sqrt{2}}\left(|0_{L2}\rangle+|1_{L2}\rangle\right).
\end{equation}
It can be shown that neither the five- nor six-qubit AME states have minimal support.
For systems composed by $n>3$ parties and $d>2$ internal levels it is not simple to construct AME states. The AME(4,3) state \cite{AMEGraph} is defined as follows
\begin{align}
|\Omega_{4,3}\rangle &= \frac{1}{3}\sum_{i,j=0}^{2}|i\rangle|j\rangle|i+j\rangle|i+2j\rangle \nonumber\\
&=\frac{1}{3}\big(| 0000 \rangle +| 0111 \rangle +| 0222 \rangle \nonumber\\
&+| 1012 \rangle +| 1120 \rangle +| 1201 \rangle \nonumber\\
&+ |2021 \rangle +| 2102 \rangle +|2210 \rangle \big) .
\label{eq:AME43}
\end{align}
In a similar way, we can derive the AME(6,4) state \cite{GZ14}:
\begin{align}
\label{eq:AME64}
|\Omega_{6,4}\rangle =\frac{1}{8} & \big( \ket{000000}+ \ket{111100}+ \ket{222200}+ \ket{333300}+ \nonumber\\
&\ket{321010}+ \ket{230110}+ \ket{103210}+ \ket{012310}+ \nonumber \\
&\ket{132020}+ \ket{023120}+ \ket{310220}+ \ket{201320}+ \nonumber\\
& \ket{213030}+ \ket{302130}+ \ket{031230}+ \ket{120330}+ \nonumber \\
&\ket{231001}+ \ket{320101}+ \ket{013201}+ \ket{102301}+ \nonumber\\
&\ket{110011}+ \ket{001111}+ \ket{332211}+ \ket{223311}+ \nonumber\\
&\ket{303021}+ \ket{212121}+ \ket{121221}+ \ket{030321}+ \nonumber\\
& \ket{022031}+ \ket{133131}+ \ket{200231}+ \ket{311331}+ \nonumber\\
&\ket{312002}+ \ket{203102}+ \ket{130202}+ \ket{021302}+ \nonumber\\
&\ket{033012}+ \ket{122112}+ \ket{211212}+ \ket{300312}+ \nonumber \\
&\ket{220022}+ \ket{331122}+ \ket{002222}+ \ket{113322}+ \nonumber\\
& \ket{101032}+ \ket{010132}+ \ket{323232}+ \ket{232332}+ \nonumber \\
&\ket{123003}+ \ket{032103}+ \ket{301203}+ \ket{210303}+ \nonumber\\
&\ket{202013}+ \ket{313113}+ \ket{020213}+ \ket{131313}+ \nonumber \\
&\ket{011023}+ \ket{100123}+ \ket{233223}+ \ket{322323}+ \nonumber\\
&\ket{330033}+ \ket{221133}+ \ket{112233}+ \ket{003333}\big) \, .
\end{align}
This state is formed by $4^3=64$ equally superposed orthogonal states, so it is an AME state of minimal support.
\section{Quantum Circuits to construct AME states}\label{sec:circuits}
As mentioned above, AME states can be constructed in different ways. For our purpose, we consider graph states formalism \cite{HEB04}. Graph states are represented by an undirected graph, where each vertex corresponds to a $|+\rangle$ state
and each edge with a Control-Z (CZ) gate. We can easily construct the quantum circuit for a graph state by considering a simple rule, as we will see later. In addition, a graph can be transformed into another ---equivalent one--- by applying local unitary operations \cite{LC}. This kind of transformation modifies the number of edges of a graph but not its entanglement properties. This property could allow us to adapt the circuit to different quantum chip architectures, in order to reduce as much as possible the number of gates required to physically implement the state.
Although graph states can be defined in any local dimension $d$, quantum computers can only implement qubit quantum circuits. Nonetheless, we can simulate AME states having larger local dimension $d$ by using qubits. That is, by mapping each qudit state into a multi-qubit state and by adapting $d$-dimensional gates into non-local qubit gates, as we explain in Subsection \ref{subsec:AMEfromcircuits}.
\subsection{Graph States}
Graph states are $n$-partite pure quantum states constructed from an undirected graph composed by $n$ vertices $\mathcal{V}=\{v_{i}\}$ connected by edges $\mathcal{E}=\{e_{ij}=\{v_{i},v_{j}\}\}$. Each graph has an associated adjacency matrix $A$, whose entries satisfy $A_{ij}=1$ if an edge $e_{ij}$ exists and $A_{ij}=0$ otherwise. Self-interactions are forbidden, meaning that the diagonal entries of $A$ vanish.
A graph state for $n$ qudits can be constructed as follows \cite{HEB04,AMEGraph}:
\begin{equation}\label{graphstate}
|G\rangle=\prod_{i<j}^{n}\mathrm{CZ}_{ij}^{A_{ij}}(F_{d}|\bar{0}\rangle)^{\otimes{n}},
\end{equation}
where
\begin{equation}
\mathrm{CZ}_{ij}=\sum_{k,l=0}^{d-1}\omega^{kl}|\bar{k}\rangle\langle \bar{k}|_{i}\otimes|\bar{l}\rangle\langle \bar{l}|_{j},
\label{eq:CZgate}
\end{equation}
is the generalized controlled-$Z$ gate, $\omega=e^{2\pi i/d}$ and
\begin{equation}
F_{d}=\frac{1}{\sqrt{d}}\sum_{k,l=0}^{d-1}\omega^{kl}|\bar{k}\rangle\langle \bar{l}|,
\label{eq:Fgate}
\end{equation}
is the Fourier qudit gate. From now on, we distinguish between qubit and qudit states by writing a bar over symbols associated with qudit states, e.g. $|\bar{0}\rangle$, keeping the usual notation with no bar for qubits, e.g. $|0\rangle$.
Following the above definition, the explicit construction of a graph state from its corresponding graph is simple. First, each vertex corresponds with the qudit state $|\bar{\psi}_{0}\rangle = F_{d}|\bar{0}\rangle$, and second, each edge corresponds with a CZ gate applied between two vertices. For instance, consider the quantum circuit generating the AME(5,2) state, see Figure \ref{Fig:AME52}. For qubits, note that $F_{2}$ gate is actually the Hadamard gate. Preparation of a qubit graph state (\ref{graphstate}) is equivalent to initialize all qubits in the state $|+\rangle=(|0\rangle+|1\rangle)/\sqrt{2}$ and then apply CZ gates between the qubits, according to the chosen graph.
\begin{figure}
\caption{Quantum circuit to generate AME(5,2) (a) and its corresponding graph (b).}
\label{Fig:AME52a}
\label{Fig:AME52b}
\label{Fig:AME52}
\end{figure}
Note that after applying the Fourier gate $F_d$ over the initial state $|\bar{0}\rangle^{\otimes n}$ we obtain a state with all basis elements, in the computational basis decomposition. Then, since the CZ gates only introduce relative phases between these elements, the final state of a graph contains a superposition of the $d^n$ elements of the computational basis.
Graph states can also be described by using stabilizer states \cite{stab}. They find application in quantum error correcting codes \cite{QEC} and one-way quantum computing \cite{oneway}. A graphical interpretation of entanglement in graph states is provided in Ref.\cite{AMEGraph} and multipartite entanglement properties in qubit graph states, as well as its optimal state preparation, has been studied in Ref. \cite{HEB04,Cabello,OptimalGraph}.
\subsection{AME states from graph states}
We can write an AME state by using its corresponding graph, as described above. This is a particular form of an AME state having maximal support, as we have the superposition of all elements of the computational basis.
We are interested in finding optimal AME graph states, in the sense of having the minimum number of edges and coloring index \cite{Cabello}. The smaller the number of edges the smaller the number of operations required to generate AME states. The coloring index is related to the number of operations that can be performed in parallel, so it is proportional to the circuit depth. It is worth mentioning that graph AME states are hard to construct in general, especially for large values of local dimension $d$ and number of parties $n$. Fortunately, there are suitable tools useful to simplify the construction of graphs for specific values of $d$ and $n$ \cite{AMEGraph}.
The first interesting property is that some graph states can be constructed in any dimension $d$. The simpler cases are given by the generalized Bell ($n=2$) and generalized GHZ states ($n=3$). The graph states of $n=5$ and $n=6$, shown in Figures \ref{Fig:AME5d} and \ref{Fig:AME6d} respectively, produce AME states in any prime dimension $d$. The $n=4$ graph state of Figure \ref{Fig:AME4d} also fulfills this property for every prime dimension $d\geq 3$.
\begin{figure}
\caption{Quantum circuit to generate an AME(5,d) state by using qubits instead of qudits. The corresponding graph is the same as the one in Fig. \ref{Fig:AME52b}.}
\label{Fig:AME5d}
\end{figure}
\begin{figure}
\caption{Graph state that generates an AME(4,$d$) state for any prime dimension $d\geq 3$ (b) and its corresponding circuit (a) by using qubits instead of qudits.}
\label{Fig:AME4da}
\label{Fig:AME4db}
\label{Fig:AME4d}
\end{figure}
\begin{figure}
\caption{Graph state that generates an AME(6,$d$) state (b) and its corresponding circuit (a) by using qubits instead of qudits. The number of qubits needed to represent each qudit is $m=\lceil\log_{2} d\rceil$.}
\label{Fig:AME6da}
\label{Fig:AME6db}
\label{Fig:AME6d}
\end{figure}
\begin{figure}
\caption{Quantum circuit producing the AME(4,4) state with qubits (a) and its corresponding graph (b). Parties $A$, $B$, $C$ and $D$ are maximally entangled between them but not the qubits inside each party.
Notice that this circuit does not correspond to an AME(8,2) state, since this AME state does not exist.}
\label{Fig:AME44_circuita}
\label{Fig:AME44_circuitb}
\label{Fig:AME44_circuit}
\end{figure}
For a non-prime local dimension there exist some methods to find AME graph states \cite{AMEGraph}. One of those consists of taking the prime factorization $d=d_{1}d_{2}\cdots d_{m}$ and looking for every AME($n,d_{i}$) state. The AME($n,d$) is just given by the tensor product of the AME($n,d_{i}$) states, followed by a suitable relabeling of symbols. When the prime factorization of $d$ includes a power of some factor, we can construct an AME state by artificially defining each party, i.e. by using qudits in lower dimension $m<d$ and then performing the suitable set of CZ gates between the $m$ level qudit systems. For instance, this method can be used to find the AME(4,4) state from qubits instead of ququarts (qudits with $d=4$ levels each), as we illustrate in Figure \ref{Fig:AME44_circuit}. The real local dimension of each party, $d=4$, is achieved by grouping qubits in pairs \cite{AMEGraph}.
\subsection{AME states circuits using qubits}\label{subsec:AMEfromcircuits}
As we have seen above, Bell and GHZ states together with the graphs from Figures \ref{Fig:AME5d} to \ref{Fig:AME44_circuit} serve to construct AME($n,d$), for $n=2-6$ and prime number of internal levels $d\geq 3$, and also the AME(4,4) state. Moreover, we can use a combination of these graphs to construct AME states of greater levels $d$.
The construction of a qubit quantum circuit from a graph state is straightforward since we just have to perform Hadamard gates on all qubits initialized at the $|0\rangle$ state and CZ gates, according to graph edges. These quantum gates are commonly used in current quantum devices, e.g. in quantum computing \cite{IBM}. However, in order to implement an AME state for $d>2$ internal levels we require a qudit quantum computer, i.e. a machine performing quantum operations beyond binary quantum computation. The construction of such a device is much more challenging than the current quantum computers and, therefore, performing such an experiment becomes really hard. Here, we propose to simulate AME states having larger local dimension by using qubits instead of qudits. To do so, we translate the local dimension $d$ into a multiqubit dimensional space. For instance, to transform a ququart system $d=4$ into a two qubit system $m=2$ we consider the following identification
\begin{equation}
|\bar{0}\rangle\equiv |00\rangle, \quad |\bar{1}\rangle\equiv |01\rangle, \quad |\bar{2}\rangle\equiv |10\rangle, \quad|\bar{3}\rangle\equiv |11\rangle.
\label{eq:ABC}
\end{equation}
For $d>4$, we need to increase the number of qubits accordingly, i.e. we need $m=\lceil\log_{2} d\rceil$ qubits to describe each $d$-level system, where $\lceil\,\cdot\,\rceil$ denotes the ceiling function.
Since we have the graphs for these states, the challenge is to simulate the effect of the generalized CZ gate \eqref{eq:CZgate} and the Fourier gate \eqref{eq:Fgate} with qubit gates. To be precise, we are not interested in the exact Fourier gate but in generating the state $|\bar{\psi}_{0}\rangle = F_{d}|\bar{0}\rangle$. For that purpose, we will look for an initialization gate $U_{d}^{in}$ that acts on qubits in the state $|0\rangle$ and obtains the $|\psi_{0}\rangle$ state, i.e. the state $|\bar{\psi}_{0}\rangle$ written in terms of qubits according to the mapping established by Eq.\eqref{eq:ABC}.
When local dimension $d$ is a power of 2, the state $|\bar{\psi}_{0}\rangle$ can be easily generated by using Hadamard gates only. In particular, for $d=4$ we have
\begin{align}
|\bar{\psi}_{0}\rangle &=F_{4}|\bar{0}\rangle=\frac{1}{2}\left(|\bar{0}\rangle+|\bar{1}\rangle+|\bar{2}\rangle+|\bar{3}\rangle\right)\nonumber \\
|\psi_{0}\rangle & = U_{4}^{in}|00\rangle = (H\otimes H)|00\rangle \nonumber\\ &=\frac{1}{2}\left(|00\rangle+|01\rangle+|10\rangle+|11\rangle\right).
\end{align}
Despite $F_{4}\neq U_{4}^{in}=(H\otimes H)$, the tensor product unitary transformation is suitable, as we just want to obtain the state $|\bar{\psi}_{0}\rangle$ with qubits.
For $d=3$ the state $|\bar{\psi}_{0}\rangle$ can be obtained from the gate $U_{3}^{in}$, defined in Figure \ref{Fig:F3}:
\begin{align}
|\bar{\psi}_{0}\rangle &= F_{3}|\bar{0}\rangle=\frac{1}{\sqrt{3}}\left(|\bar{0}\rangle+|\bar{1}\rangle+|\bar{2}\rangle\right)\nonumber \\
|\psi_{0}\rangle &=U_{3}^{in}|00\rangle = \frac{1}{\sqrt{3}}\left(|00\rangle+|01\rangle+|10\rangle\right).
\end{align}
\begin{figure}
\caption{Quantum circuit to obtain $|\bar{\psi}
\label{Fig:F3}
\end{figure}
In general, the circuit producing the state $|\psi_{0}\rangle$ is hard to find, except when $d$ is a power of 2, as explained above. On the contrary, a circuit implementing the generalized CZ gate is simpler since this gate only introduces a phase in some qudit states and we can reproduce this effect by using controlled-Phase gates, i.e. CPh($\theta)=|00\rangle\langle 00|+|01\rangle\langle 01|+|10\rangle\langle 10|+e^{i\theta}|11\rangle\langle 11|$.
Figure \ref{Fig:CZ3} shows the required circuit to implement the generalized CZ gate for qutrits with qubits. We need four qubits and four CPh gates to achieve the expected result of this gate. The quantum circuit required to implement the generalized CZ gate for ququarts is shown in Figure \ref{Fig:CZ4}. Only three gates are needed here: two qubit CZ gates and a controlled-S gate, which is a CPh with $\theta=\pi/2$.
At this point, all ingredients to construct the AME states for qubits and to simulate AME states with local dimension $d>2$ have been introduced. Figures \ref{Fig:AME5d} and \ref{Fig:AME6d} can be used to simulate any AME(5,$d$) and AME(6,$d$) state with qubits, providing $U_d^{in}$ and CZ gates. Similarly, Figure \ref{Fig:AME4d} can be used to simulate any AME(4,$d$) state for prime dimension $d\geq3$. Finally, Figure \ref{Fig:AME44_circuit} shows explicitly the circuit and the graph required to obtain the AME(4,4) state.
\begin{figure}
\caption{Generalized CZ gate for qutrits, $d=3$, performed with four qubits. First two CPh gates and last two CPh gates can be implemented in parallel, so the circuit depth is just 2 CPh gates.}
\label{Fig:CZ3}
\end{figure}
\begin{figure}
\caption{Generalized CZ gate for ququarts, $d=4$, performed with four qubits. First gate is a controlled-S gate, which is actually a $CPh$ gate with $\theta=\pi/2$. Last two CZ gates can be implemented in parallel, so the circuit depth is just 2 gates.}
\label{Fig:CZ4}
\end{figure}
\subsection{AME states circuits of minimal support}
Since AME states of minimal support have connections with error correcting codes, it could be interesting to find the corresponding quantum circuits to generate them.
For qutrits, the AME(4,3) state of Eq.\eqref{eq:AME43} has minimal support.
The quantum circuit that generates this state is shown in Figure \ref{Fig:AME43} \cite{GALRZ15}. The quantum gates required to construct this circuit are the Fourier transform gate for qutrits $F_{3}$ and the C$_3$--adder gate
\begin{equation}
\overbar{\mathrm{C}}_{3}|\overbar{i}\rangle|\overbar{j}\rangle=|\overbar{i}\rangle|\overbar{i+j}\rangle,
\end{equation}
which is the generalization of CNOT gate for qutrits. It is represented with the CNOT symbol with the superscript 3, see Figure \ref{Fig:AME43}.
\begin{figure}
\caption{Quantum circuit required to generate the state
$|\Omega_{4,3}
\label{Fig:AME43}
\end{figure}
The simulation of the state $F_{3}|\bar{0}\rangle$ by using qubits has been already explained in the previous subsection.
The construction of the C$_{3}$--adder gate is more cumbersome and we leave the details to the Appendix \ref{app:C3}. The strategy that we use consists in using controlled gates that allow us to perform the sums separately for each control state. If the control qutrit is in the state $|\bar{0}\rangle$ we should apply the identity, so that no gates are needed in this case. If the control qutrit is prepared in the state $|\bar{1}\rangle$, i.e. $|01\rangle$, then we should implement CNOT and Toffoli gates (CCNOT) that take the second qubit as a control qubit, i.e. the second pair of qubits is not affected when the first two are prepared in a different state. Similarly, if the qutrit state is $|\bar{2}\rangle$, i.e. $|10\rangle$, we should search for a sequence of CNOT and CCNOT gates that implement the corresponding sums by using as a control qubit the first qubit.
The resulting circuit is depicted in Figure \ref{Fig:C3adder_approx}, where we have used approximate CCNOT gates described in Figure \ref{Fig:Toffaprox}, CCNOT$_{a}$ and CCNOT$_{b}$, instead of usual CCNOT gates in order to reduce significantly the circuit depth \cite{Barenco}. This circuit is divided into two sectors, each one performing the C$_{3}$--adder gate if the control qutrit is $|\bar{1}\rangle$, the first 3 gates, or $|\bar{2}\rangle$, the last 3 gates. None of those gates affects the qubit state if the control qutrit is in the $|\bar{0}\rangle$ state.
\begin{figure}
\caption{C$_{3}
\label{Fig:C3adder_approx}
\end{figure}
\begin{figure}
\caption{Approximations of CCNOT gate. They introduce a change of sign in some states, in particular $\mathrm{CCNOT}
\label{Fig:Toffaprox}
\end{figure}
Clearly, gate C$_{3}$ is the responsible for the growth of circuit depth. However, we can implement the first two adders by using two CNOT gates each one taking advantage that the target qutrit state is $|\bar{0}\rangle$, i.e. qubits are prepared in the state $|00\rangle$.
The final circuit required to simulate the state $|\Omega_{4,3}\rangle$ with qubits is shown in Figure \ref{Fig:AMEqubits}, where CZ gates are framed because they are only necessary if we are implementing the CCNOT$_a$ gate.
\begin{figure*}
\caption{Circuit for the construction of the AME(4,3) state by using two qubits to represent each qutrit. The controlled-Z gates (framed with dots), are only necessary when we use the approximation of Toffoli gate CCNOT$_a$.}
\label{Fig:AMEqubits}
\end{figure*}
\section{Entanglement majorization}\label{sec:major}
Majorization has deep implications in quantum information theory \cite{Nielsen}. In particular, quantum algorithms obey a majorization arrow, which means that majorization could be at the core of their efficiency \cite{LatorreMartin,OLM02}. Following this idea, we wonder whether the above quantum circuits designed to construct AME states obey majorization. If not, it is interesting to ask whether more efficient circuits obeying majorization exist.
Let $\mathbf{a},\mathbf{b}\in \mathbb{R}^{d}$ be vectors having entries ordered in decreasing order, namely $\mathbf{a}^{\downarrow}$ and $\mathbf{b}^{\downarrow}$ with $a_{i}^{\downarrow}\geq a_{i+1}^{\downarrow}$, and similarly for $\mathbf{b}^{\downarrow}$. We say that $\mathbf{a}$ \emph{majorizes} $\mathbf{b}$, i.e. $\mathbf{a}\succ\mathbf{b}$, iff
\begin{eqnarray}
\sum_{i=1}^{k}a_{i}^{\downarrow}\geq\sum_{i=1}^{k}b_{i}^{\downarrow} \quad \mathrm{for} \ k=1,\cdots, d,
\label{eq:majorization}
\end{eqnarray}
and $\sum_{i=1}^{d}a_{i}=\sum_{i=1}^{d}b_{i}$.
First, we should choose a set of parameters to study if they majorize at each step during the computation, i.e. after the application of each CZ gate. Since all circuits start with a product state and finish with a maximally entangled state in all bipartitions, a natural choice will be the eigenvalues of the reduced density matrices. At some step $s$ during the computation, the circuit has generated a quantum state with density matrix $\rho_{s}$. We then compute the reduced density matrix for each of its bipartitions into two subsystems, $A$ and $B$, i.e. $\rho_{A}^{s}=\mathrm{Tr}_{B}\rho_{s}$, and diagonalize this matrix to obtain its eigenvalues $\mathbf{\lambda^s}=\{\lambda_{i}^{s}\}$. We will establish that this circuit obeys majorization iff $\mathbf{\lambda^s}\succ\mathbf{\lambda^{s+1}}$, i.e.
\begin{equation}
\sum_{i=1}^{k}\left(\lambda_{i}^{\downarrow}\right)^{s} \geq \sum_{i=1}^{k}\left(\lambda_{i}^{\downarrow}\right)^{s+1} \ \mathrm{for} \ k=1,\cdots ,d^{m}-1 \ \forall A, s,
\end{equation}
where $m=n-\floor{n/2}$ is the number of qudits in the $A$ bipartition. We do not consider the last summation $k=d^m$ because the eigenvalues of a density matrix are normalized to unity. Since there are $\left(\begin{array}{c}n\\ \floor{n/2} \end{array}\right)$ bipartitions, this analysis leads to a total number of $\left(\begin{array}{c}n\\ \floor{n/2} \end{array}\right)(d^{m}-1)$ inequalities to fulfill.
\begin{figure}
\caption{Majorization in AME(4,4) state circuit of Fig. \ref{Fig:AME44_circuit}
\label{fig:majS}
\label{fig:maj}
\label{Fig:major}
\end{figure}
We can apply less strict tests by looking at the majorization of other figures of merit to quantify bipartite entanglement, for instance the von Neumann entropy or the purity, which in terms of $\lambda_{i}$ are defined as $S=-\sum_{i}\lambda_{i}\log_{d}\lambda_{i}$ and $\gamma=\sum_{i}\lambda_{i}^2$ respectively. The entropy is a concave function and the purity a convex function of the $\lambda_{i}$, so we can apply Karamata's inequality \cite{Karamata} to prove that
\begin{eqnarray}
\mathbf{\lambda^s}\succ\mathbf{\lambda^{s+1}} &\Rightarrow & S^{s}\leq S^{s+1} \label{eq:majS}\nonumber\\
&\Rightarrow & \gamma^{s}\geq \gamma^{s+1}. \label{eq:majP}
\end{eqnarray}
Thus, we can first do one of these less restrictive tests. If the above inequalities are not fulfilled in every step, then there is no majorization in eigenvalues.
As an example, Figure \ref{Fig:major} shows the majorization of the AME(4,4) state of Figure \ref{Fig:AME44_circuit} in terms of entropy and eigenvalues of the reduced density matrix for each bipartition. The circuit majorizes since the entropy never decreases and the eigenvalues never increase at each step. At the end of the computation, all bipartitions have reached the maximum value $S=2\log_{2}4=4$ when all eigenvalues are identical, meaning that the reduced density matrices are proportional to the identity, as expected for an AME state.
After analyzing the circuit to construct the state $|\Omega_{4,3}\rangle$ written in Figure \ref{Fig:AME43}, we found that it does not majorize, i.e. when the fourth C$_3$--adder is applied, the entropy of one of the bipartitions decreases before reaching the maximum value after the application of the last C$_3$--adder gate. For this reason, we conclude that this circuit is not optimal, being possible to obtain an AME(4,3) state with minimal support from a smaller number of gates. In particular, we found many equivalent circuits that can obtain this kind of state with only four C$_3$--adder gates. An example is shown in Figure \ref{Fig:AME43_optim}.
Notice that, in this example, two C$_3$--adders are applied in parallel, which reduces significantly the circuit depth, specially if we want to simulate this AME with qubits.
\begin{figure}
\caption{Quantum circuit to obtain an AME(4,3) of minimal support. This circuit has been found after applying a majorization test in circuit of Fig. \ref{Fig:AME43}
\label{Fig:AME43_optim}
\end{figure}
We found that circuits for AME($n,d$) states majorize up to $n=6$ and $d=4$, with the exception of AME(6,2) and AME(6,4). In these two cases, only one bipartition does not majorize, which shows the high optimality of the entanglement power of the circuits proposed.
One can use this majorization criterion to find optimal entangling circuits based on graph states. For instance, if we are interested in entangling eight parties of our circuit, we can construct a greedy algorithm that finds such a circuit by imposing entanglement majorization. Moreover, we can restrict this algorithm to the given chip architecture, making it suitable for the experimental implementation.
\section{Experimental implementation}\label{sec:implementation}
The experimental implementation of an AME state is a highly demanding task for a quantum computer. It requires the consideration of some figure of merit in order to test the quality of preparation state. For qubit AME states of bi-partite and three-partite systems one can consider Mermin Bell inequalities as a figure of merit, as they are maximally violated by these states \cite{Mermin90}. On the other hand, for AME(5,2), AME(6,2) and any qubit graph state in general, there exist Bell inequalities maximally violated by these states \cite{GTHB05}. Besides Bell inequalities, one can also implement a quantum tomography protocol to reconstruct
the state, being the fidelity of state reconstruction the figure of merit.
This kind of protocol typically requires a quadratic number of measurement outcomes, as a function of the dimension of the Hilbert space \cite{WF89,RBSC04,S06}. However, this number can be reduced to scale linearly with the dimension when \emph{a priori} information is available, e.g. when the state is nearly pure \cite{GCEGXLD15}.
As a first attempt to test the quality of implementation of AME states in quantum computers we considered a very simple test: check whether probability outcomes associated to a measurement in the computational basis are similar to theoretical probabilities. This is not a refined test, as complex phases of entries also play a crucial role. However, a suitable behavior of probabilities along a single projective measurement is a first indication that the state could be successfully prepared.
We have run two different circuits to generate the AME(5,2) state in two quantum computers: the ibmqx4 device from IBM \cite{IBMspec} and the Acorn device from Rigetti Computing \cite{Acorn}. Due to connectivity restrictions, it is not possible to implement the simplest quantum circuits predicted by graph states. For instance, the ibmqx4 chip needs at least one extra CZ gate, as shown in Figure \ref{Fig:IBM_topo}. We were able to generate an AME(5,2) state composed of five entangling gates and taking into account the restricted connectivity. The circuit is shown in Figure \ref{Fig:AME52_IBM}. For the Rigetti device, we were not able to find a circuit composed of five entangling gates, so we had to adapt the AME(5,2) graph state to the restricted connectivity by using SWAP gates.
\begin{figure}
\caption{Left graph shows the ibmqx4 connectivity. After applying LU operations, one can transform this graph into the linear graph, which belongs to a different graph state class than the one that includes the AME state \cite{HEB04}
\label{Fig:IBM_topo}
\end{figure}
\begin{figure}
\caption{Circuit to generate an AME(5,2) state on the ibmqx4 quantum computer provided by IBM. We optimized the circuit according to the connectivity restriction, in the sense of minimizing the number of entangling gates. The minimal circuit depth achieved cannot be reproduced when considering the graph AME state.
}
\label{Fig:AME52_IBM}
\end{figure}
The AME(5,2) state of Figure \ref{Fig:AME52_IBM} is given by
\begin{align}\label{AME52Fig16}
|\mathrm{AME}_{5,2}\rangle=\frac{1}{2\sqrt{2}}&\big(|00000\rangle + |00011\rangle + |01101\rangle + |01110\rangle+ \nonumber\\
& |10101\rangle + |10110\rangle + |11000\rangle + |11011\rangle\big).
\end{align}
The theoretical probability $P_{ijklm}$ of obtaining each element of the 5-qubit computational basis $|ijklm\rangle$ shown in (\ref{AME52Fig16}) is $1/8 = 0.125$. The results obtained after running the circuit of Figure \ref{Fig:AME52_IBM} in the ibmqx4 device, when considering 8192 shots, are the following:
\begin{equation}
\begin{array}{cc}
P_{00000} = 0.105, &\hspace{0,5cm} P_{00011} = 0.058, \\ P_{01101} = 0.038, & \hspace{0,5cm}P_{01110} = 0.128, \\
P_{10101} = 0.035, &\hspace{0,5cm} P_{10110} = 0.135, \\ P_{11000} = 0.084, &\hspace{0,5cm} P_{11011} = 0.052,
\end{array}
\end{equation}
where $|\psi\rangle$ is the real quantum state generated by the quantum device.
It seems that only three basis elements are well reproduced, namely $|10110\rangle$, $|01110\rangle$ and $|00000\rangle$. In addition, two detected probabilities are not related to the AME(5,2) state (\ref{AME52Fig16}), namely $P_{00010} = 0.050$ and $P_{00110}=0.042$. These imprecise results do not allow us to efficiently implement the adaptive tomographic method presented in Ref. \cite{GCEGXLD15}, as it requires a faithful identification of the highest weights when measuring along the computational basis.
The results with the Acorn chip from Rigetti Computing were even worse, not allowing us to distinguish the results from white-noise state preparation. A possible explanation of the failure is related to the large circuit depth due to the consideration of SWAP gates.
The above results illustrate the difficulty of successfully implementing AME states on currently existing quantum computers. The large amount of genuine entanglement required by these states implies a fast decoherence process, reflected even in a simple measurement in the computational basis. Our experiment reveals that there are two possible factors involved: \emph{i}) although the quantum circuit of Figure \ref{Fig:AME52_IBM} looks simple, a fast decoherence process occurs due to the high amount of multipartite entanglement required; \emph{ii}) the difficulty to successfully implement the challenging state is due to physical limitations of the chip.
Additionally, we implemented the GHZ state in the 5-qubit IBM quantum computer ibmqx4, in order to test violation of the 5-qubit Mermin Bell inequality
\begin{align}
M_5=&-(a_1a_2a_3a_4a_5) +(a_1a_2a_3a'_4a'_5 + a_1a_2a'_3a_4a'_5 \nonumber\\
&+ a_1a'_2a_3a_4a'_5 +a'_1a_2a_3a_4a'_5 + a_1a_2a'_3a'_4a_5 \nonumber\\
&+ a_1a'_2a_3a'_4a_5 +a'_1a_2a_3a'_4a_5 + a_1a'_2a'_3a_4a_5 \nonumber\\
&+ a'_1a_2a'_3a_4a_5+ a'_1 a'_2 a_3 a_4 a_5 ) \nonumber\\
&-(a_1a'_2a'_3a'_4a'_5 + a'_1a_2a'_3a'_4a'_5 + a'_1a'_2a_3a'_4a'_5\nonumber\\
&+a'_1a'_2a'_3a_4a'_5 + a'_1a'_2a'_3a'_4a_5) ,
\end{align}
where $a_j$ and $a'_k$ denote two dichotomic observables for five quantum observers \cite{Mermin90}. The theoretical state achieving the maximal violation of the inequality is the GHZ state depicted in Figure \ref{Fig:GHZ5}. This inequality has a classical value $C=4$ and a quantum value $Q=16$. Optimal settings are given by $a_j=\sigma_x$ and $a'_k=\sigma_y$, for $j,k=1,\dots,5$. Despite the shortness of the circuit shown in Figure \ref{Fig:GHZ5}, the strong correlations demanded by genuine entanglement imply a fast decoherence process, reflected in a reduction of the strength of violation of the inequality. Nonetheless, the experimentally achieved violation $Q_{exp}=6.90\pm0.01$ is large enough to confirm the genuine non-local nature of the 5-qubit quantum computer ibmqx4.
\begin{figure}
\caption{Quantum circuit required to prepare the 5 qubit GHZ state, restricted to the architecture imposed by the 5-qubit IBM quantum computer ibmqx4. It is worth to mention that the experiment has been implemented in December 2017. Nowadays, the restricted architecture of the computer ibmqx4 has changed.}
\label{Fig:GHZ5}
\end{figure}
\section{Discussion and conclusions}\label{sec:conclusions}
Quantum computing is a challenging field of research in quantum mechanics that could change the way we do computations in the future. The ultimate goal of a quantum computer is to coherently control a relatively large number of qubits in such a way that a multipartite quantum protocol can be successfully implemented, despite the inherent decoherence of quantum information. It is natural to expect that the quantum over classical advantage in computing is directly related to the amount of quantum correlations existing in the involved qubits. It is thus a remarkably important task to understand the behavior of quantum computers when multipartite correlations take extreme values, e.g. when the state of the system is a genuinely multipartite maximally entangled state.
In this work, we studied the simplest possible ways to implement genuinely multipartite maximally entangled quantum states, so-called absolutely maximally entangled (AME) states, in order to test the strength of quantum correlations in quantum computers. We explicitly showed a collection of quantum circuits required to implement such states in some simple scenarios composed of a few qubit systems. For higher dimensional Hilbert spaces, where AME states of qubits do not exist, we considered qudit AME states, where every qudit was artificially generated by considering a group of qubits, see Section \ref{sec:circuits}. For instance, the lack of the AME state for 8 qubit systems can be somehow compensated by considering the AME state of 4 ququarts, where every ququart is composed of two qubits. In this way, pairs of qubits are maximally correlated with three complementary pairs of qubits, thus exhibiting a maximal amount of quantum entanglement in a sense, see Figure \ref{Fig:AME44_circuit}.
One of the main problems when trying to prepare a multipartite quantum circuit over a quantum computer having a restricted architecture is the circuit depth. This is so because some bipartite quantum operations --like CNOT-- are forbidden for some pairs of qubits, as they cannot communicate directly. This physical limitation considerably extends the length of quantum circuit, as typically one has to consider swap operations to complement the lack of communication. In order to deal with this problem, we designed a tool that finds the optimal quantum circuit required to efficiently implement AME states based on entropic majorization of reductions, see Section \ref{sec:major}. As an interesting observation, optimal quantum circuits for AME states typically admit monotonically increasing entropies of reductions, implying that those states can be efficiently generated with our algorithm in a few steps, see Figure \ref{Fig:major}. In other words, our algorithm finds the minimal number of local and non-local quantum gates required to implement those AME states, taking into account the restrictions imposed by the architecture of a real quantum chip.
As a further step, we implemented the GHZ state of 5 qubits over a 5-qubit quantum computer provided by IBM, where we optimized the circuit according to the restrictions imposed by the architecture. The figure of merit to quantify the quality of the state preparation was the violation of the 5-qubit Mermin Bell inequality \cite{Mermin90}, which is maximally violated by the GHZ state. We achieved the experimental non-local value $6.90\pm0.01$, whereas the classical value is $C=4$ and the quantum value is $Q=16$. This result demonstrates the genuine non-local nature of the quantum computer ibmqx4 designed by IBM, which improves a previously achieved quantum value $4.05\pm0.06$ \cite{Dani}. These negative results reflect that the current state of the art of the considered quantum computers is not yet ready to fully exploit the strongest quantum correlations existing in 5 and 6 qubit quantum computers. Nonetheless, we remark that some protocols involving a partial amount of multipartite quantum entanglement have been successfully implemented in quantum computers for a few \cite{D16,M16} and large \cite{B16,B17,Z17} number of qubits.
\begin{appendix}
\section{C$_3$--adder gate construction}\label{app:C3}
To construct the C$_3$--adder gate with qubits we should find a sequence of gates that perform the following operations:
\begin{align}
\begin{array}{ll}
\mathrm{C}_{3}|00\rangle |00\rangle= |00\rangle |00\rangle, & \mathrm{C}_{3}|01\rangle |00\rangle= |01\rangle |01\rangle, \\
\mathrm{C}_{3}|00\rangle |01\rangle= |00\rangle |01\rangle, & \mathrm{C}_{3}|01\rangle |01\rangle= |01\rangle |10\rangle, \\
\mathrm{C}_{3}|00\rangle |10\rangle= |00\rangle |10\rangle, & \mathrm{C}_{3}|01\rangle |10\rangle= |01\rangle |00\rangle, \\
& \\
\mathrm{C}_{3}|10\rangle |00\rangle= |10\rangle |10\rangle, & \\
\mathrm{C}_{3}|10\rangle |01\rangle= |10\rangle |00\rangle, & \\
\mathrm{C}_{3}|10\rangle |10\rangle= |10\rangle |01\rangle. &
\end{array}
\label{eq:C3}
\end{align}
As a result, besides CNOT gates, we will also need CCNOT gates. Three-qubit gates are difficult to implement experimentally, so we should decompose them in terms of one- and two-qubit gates. The exact decomposition of the CCNOT gate has a depth of 12 gates.
However, we can instead use an approximate decomposition which differs from the exact one by some non-zero phase shifts of the quantum states \cite{Barenco}. In particular, we can use the approximate CCNOT gates shown in Figure \ref{Fig:Toffaprox}. The only changes that those gates introduce with respect to the exact CCNOT gate are
\begin{align}
\mathrm{CCNOT}_{a}|101\rangle &= -|101\rangle,\nonumber \\
\mathrm{CCNOT}_{b}|100\rangle &= -|100\rangle.
\end{align}
This translates into the use of a controlled-Z gate in the first approximation to obtain the desired result after applying the gate sequence to construct the C$_3$--adder. The sign introduced by the CCNOT$_{b}$ gate is canceled after this sequence, so the circuit remains the same as if exact CCNOT gates were used.
We can save even more gates. Notice that the first two C$_{3}$--adders of the AME circuit of Figure \ref{Fig:AME43} are implemented on qutrits in the state $|\bar{0}\rangle$. Let us write this explicitly. After the Fourier transform on qutrit 1, the circuit applies the C$_{3}$--adder on qutrit 3:
\begin{multline}
(\bar{\mathrm{C}}_{3})_{_{13}}\left[\frac{1}{\sqrt{3}}\left(|\bar{0}\rangle+|\bar{1}\rangle+|\bar{2}\rangle\right)_{1}\otimes|\bar{0}\rangle_{3} \right] \nonumber\\
= \frac{1}{\sqrt{3}}\left(|\bar{0}\bar{0}\rangle+|\bar{1}\bar{1}\rangle+|\bar{2}\bar{2}\rangle\right)_{13},
\end{multline}
where the subindex 13 stands for the qutrits affected by this operation. In qubit form
\begin{multline}
(\mathrm{C}_{3})_{_{13}}\left[\frac{1}{\sqrt{3}}\left(|00\rangle+|01\rangle+|10\rangle\right)_{1}\otimes|00\rangle_{3} \right] \nonumber\\
= \frac{1}{\sqrt{3}}\left(|00\rangle|00\rangle+|01\rangle|01\rangle+|10\rangle|10\rangle\right)_{13}.
\end{multline}
Then, the above operation consists solely of two CNOT gates between even and odd qubits. Similarly, the next C$_{3}$--adder acting on qutrit 4 can be implemented in the same way:
\begin{multline}
(\bar{\mathrm{C}}_{3})_{_{14}}\left[\frac{1}{\sqrt{3}}\left(|\bar{0}\bar{0}\rangle+|\bar{1}\bar{1}\rangle+|\bar{2}\bar{2}\rangle\right)_{13}\otimes|\bar{0}\rangle_{4}\right] \nonumber\\
= \frac{1}{\sqrt{3}}\left(|\bar{0}\bar{0}\bar{0}\rangle+|\bar{1}\bar{1}\bar{1}\rangle+|\bar{2}\bar{2}\bar{2}\rangle\right)_{134},
\end{multline}
which in the qubit form becomes
\begin{multline}
(\mathrm{C}_{3})_{_{14}}\left[\frac{1}{\sqrt{3}}\left(|00\rangle|00\rangle+|01\rangle|01\rangle+|10\rangle|10\rangle\right)_{13}\otimes|00\rangle_{4} \right] \nonumber\\
= \frac{1}{\sqrt{3}}\left(|00\rangle|00\rangle|00\rangle+|01\rangle|01\rangle|01\rangle+|10\rangle|10\rangle|10\rangle\right)_{134}.
\end{multline}
Again, the above state can be obtained from the previous one using two CNOT gates between even and odd qubits. This enormous simplification cannot be extended to the other C$_{3}$--adder gates, as all elements of the basis appear once we implement the $F_{3}$ gate on qutrit 2.
\end{appendix}
\end{document} |
\begin{document}
\title[Regularization for SBE]{Regularization by noise and \\ stochastic Burgers equations}
\author{M.~Gubinelli}
\address[M.~Gubinelli]{CEREMADE UMR 7534 -- Universit\'e Paris--Dauphine}
\email[M.~Gubinelli]{massimiliano.gubinelli@ceremade.dauphine.fr}
\author{M.~Jara}
\address[M.~Jara]{IMPA\\
Estrada Dona Castorina 110\\
CEP 22460-320\\
Rio de Janeiro\\
Brazil}
\email[M.~Jara]{mjara@impa.br}
\begin{abstract}
We study a generalized 1d periodic SPDE of Burgers type:
$$
\partial_t u =- A^\theta u + \partial_x u^2 + A^{\theta/2} \xi
$$
where $\theta > 1/2$, $-A$ is the 1d Laplacian, $\xi$ is a space-time white noise and the initial condition $u_0$ is taken to be (space) white noise. We introduce a notion of weak solution for this equation in the stationary setting. For these solutions we point out how the noise provides a regularizing effect allowing us to prove existence and suitable estimates when $\theta>1/2$. When $\theta>5/4$ we obtain pathwise uniqueness. We discuss the use of the same method to study different approximations of the same equation and for a model of stationary 2d stochastic Navier-Stokes evolution.
\end{abstract}
\keywords{Kardar--Parisi--Zhang equation, SPDEs, noise regularization}
\subjclass[2000]{00X00}
\maketitle
The stochastic Burgers equation (SBE) on the one dimensional torus $\mathbb{T}=(-\pi,\pi]$ is the SPDE
\begin{equation}
\label{eq:burgers}
\mathrm{d} u_t = \frac12 \partial_\xi^2 u_t(\xi) \mathrm{d} t + \frac12 \partial_\xi (u_t(\xi))^2 \mathrm{d} t + \partial_\xi \mathrm{d} W_t
\end{equation}
where $W_t$ is a cylindrical white noise on the Hilbert space $H={L^2_0(\TT)}$ of square integrable, mean zero real function on $\mathbb{T}$ and it has the form
$
W_t(\xi) = \sum_{k\in\mathbb{Z}_0} e_k(\xi) \beta^k_t
$
with $\mathbb{Z}_0 = \mathbb{Z}\backslash \{0\}$ and $e_k(\xi)=e^{i k\xi}/\sqrt{2\pi}$ and $\{\beta_t^k\}_{t\ge 0, k\in\mathbb{Z}_0}$ is a family of complex Brownian motions such that $(\beta^k_t)^*=\beta^{-k}_t$ and with covariance $\mathbb{E}[\beta_t^k \beta_t^q ]=\mathbb{I}_{q+k=0}$. Formally the solution $u$ of eq.~\eqref{eq:burgers} is the derivative of the solution of the Kardar--Parisi--Zhang equation
\begin{equation}
\label{eq:kpz}
\mathrm{d} h_t = \frac12 \partial_\xi^2 h_t(\xi) \mathrm{d} t + \frac12 (\partial_\xi h_t(\xi))^2 \mathrm{d} t + \mathrm{d} W_t
\end{equation}
which is believed to capture the macroscopic behavior of a large class of surface growth phenomena~\cite{KPZ}.
The main difficulty with eq.~\eqref{eq:burgers} is given by the rough nonlinearity which is incompatible with the distributional nature of the typical trajectories of the process. Note in fact that, at least formally, eq.~\eqref{eq:burgers} preserves the white noise on $H$ and that the square in the non-linearity is almost surely $+\infty$ on the white noise. Additive renormalizations in the form of Wick products are not enough to cure this singularity~\cite{DDT}.
In~\cite{BG} Bertini and Giacomin studying the scaling limits for the fluctuations of an interacting particles system show that a particular regularization of~\eqref{eq:burgers} converges in law to a limiting process $u^{\textrm{hc}}_t(\xi)=\partial_\xi \log Z_t(\xi)$ (which is referred to as the Hopf-Cole solution) where $Z$ is the solution of the stochastic heat equation with multiplicative space--time white noise
\begin{equation}
\label{eq:she}
\mathrm{d} Z_t = \frac12 \partial_\xi^2 Z_t(\xi) \mathrm{d} t + Z_t(\xi) \mathrm{d} W_t(\xi) .
\end{equation}
The Hopf--Cole solution is believed to be the correct physical solution for~\eqref{eq:burgers}; however, until recently a rigorous notion of solution to eq.~\eqref{eq:burgers} was lacking, so the issue of uniqueness remained open.
Jara and Gon\c{c}alves~\cite{JG} introduced a notion of \emph{energy solution} for eq.~\eqref{eq:burgers} and showed that the macroscopic current fluctuations of a large class of weakly non-reversible particle systems on $\mathbb{Z}$ obey the Burgers equation in this sense. Moreover their results show that also the Hopf-Cole solution is an energy solution of eq.~\eqref{eq:burgers}.
More recently Hairer~\cite{Hairer} obtained a complete existence and uniqueness result for KPZ. In this remarkable paper the theory of controlled rough paths is used to give meaning to the nonlinearity, and a careful analysis of the series expansion of the candidate solutions allows the author to give a consistent meaning to the equation and to obtain a uniqueness result. In particular Hairer's solution coincides with the Cole-Hopf ansatz.
In this paper we take a different approach to the problem. We want to point out the regularizing effect of the linear stochastic part of the equation on the non-linear part. This is linked to some similar remarks of Assing~\cite{assing1,assing2} and to the approach of Jara and Gon\c{c}alves~\cite{JG}. Our point of view is motivated also by similar analysis in the PDE and SPDE context where the noise or a dispersive term provides enough regularization to treat some non-linear term: there are examples involving the stochastic transport equation~\cite{FGP}, the periodic Korteweg-de~Vries equation~\cites{kdv,babin-kdv} and the fast rotating Navier-Stokes equation~\cite{babin-ns}. In particular in the paper~\cite{kdv} it is shown how, in the context of the periodic Korteweg-de~Vries equation, an appropriate notion of controlled solution can make sense of the non-linear term in a space of distributions. This point of view also has links with the approach via controlled paths to the theory of rough paths~\cite{controlling}.
With our approach we are not able to obtain uniqueness for the SBE above and we resort to study the more general equation (SBE$_\theta$):
\begin{equation}
\label{eq:burgers-theta}
\mathrm{d} u_t = - A^\theta u_t \mathrm{d} t + F(u_t) \mathrm{d} t + A^{\theta/2} \mathrm{d} W_t
\end{equation}
where $F(u_t)(\xi)=\partial_\xi (u_t(\xi))^2$, $-A$ is the Laplacian with periodic b.c., where $\theta\ge 0$ and where the initial condition is taken to be white noise. In the case $\theta=1$ we essentially recover the stationary case of the SBE above (modulo a mismatch in the noise term which does not affect its law).
For any $\theta \ge 0$ we introduce a class $\mathcal{R}_\theta$ of distributional processes ``controlled'' by the noise, in the sense that these processes have a \emph{small time} behaviour similar to that of the stationary Ornstein-Uhlenbeck process $X$ which solves the linear part of the dynamics:
\begin{equation}
\label{eq:ou-theta}
\mathrm{d} X_t = - A^\theta X_t \mathrm{d} t + A^{\theta/2} \mathrm{d} W_t,
\end{equation}
where $X_0$ is white noise.
When $\theta > 1/2$ we are able to show that the \emph{time integral} of the non-linear term appearing in SBE$_\theta$ is well defined, namely that for all $v\in \mathcal{R}_\theta$
\begin{equation}
\label{eq:drift-process}
A^v_t = \int_0 ^t F(v_s) \mathrm{d} s
\end{equation}
is a well defined process with continuous paths in a space of distributions on $\mathbb{T}$ of specific regularity. Note that this process is not necessarily of finite variation with respect to the time parameter even when tested with smooth test functions.
The existence of the drift process~\eqref{eq:drift-process} allows us to formulate the SBE$_\theta$ equation naturally in the space $\mathcal{R}_\theta$ of controlled processes and gives a notion of solution quite similar to that of energy solution introduced by Jara and Gon\c{c}alves~\cite{JG}. Existence of (probabilistically) weak solutions will be established for any $\theta > 1/2$, that is well below the KPZ regime. The precise notion of solution will be described below. We are also able to show easily pathwise uniqueness when $\theta > 5/4$ but the case $\theta=1$ seems still (way) out of range for this technique. In particular the question of pathwise uniqueness is tightly linked with that of existence of strong solutions and the key estimates which will allow us to handle the drift~\eqref{eq:drift-process} are not strong enough to give a control on the difference of two solutions (with the same noise) or on the sequence of Galerkin approximations.
Similar regularization phenomena for stochastic transport equations are studied in~\cite{FGP} and in~\cite{DF} for infinite dimensional SDEs. This is also linked to the fundamental paper of Kipnis and Varadhan~\cite{KV} on CLT for additive functionals and to the Lyons-Zheng representation for diffusions with singular drifts~\cites{MR1988703, MR2065168}.
\textbf{Plan.} In
Sec.~\ref{sec:controlled} we define the class of controlled paths and we recall some results of the stochastic calculus via regularization which are needed to handle the It\^o formula for the controlled processes.
Sec.~\ref{sec:ito-trick} is devoted to introduce our main tool which is a moment estimate of an additive functional of a stationary Dirichlet process in terms of the quadratic variation of suitable forward and backward martingales. In Sec.~\ref{sec:estimates} we use this estimate to provide uniform bounds for the drift of any stationary solution. These bounds are used in Sec.~\ref{sec:existence} to prove tightness of the approximations when $\theta > 1/2$ and to show existence of controlled solution of the stochastic Burgers equation via Galerkin approximations. Finally in Sec.~\ref{sec:uniq} we prove our pathwise uniqueness result in the case $\theta > 5/4$. In Sec.~\ref{sec:alternative} we discuss related results for the model introduced in~\cite{DDT}.
\textbf{Notations.} We write $X \lesssim_{a,b,\dots} Y$ if there exists a positive constant $C$ depending only on $a,b,\dots$ such that $X \le C Y$. We write $X \sim_{a,b,\dots} Y$ iff $X\lesssim_{a,b,\dots} Y \lesssim_{a,b,\dots} X$.
We let $\mathcal{S}$ be the space of smooth test functions on $\mathbb{T}$, $\mathcal{S}'$ the space of distributions and $\langle \cdot,\cdot\rangle$ the corresponding duality.
On the Hilbert space $H={L^2_0(\TT)}$ the family $\{e_k\}_{k\in\mathbb{Z}_0}$ is a complete orthonormal basis. On $H$ we consider the space of smooth cylinder functions $\mathcal{C}yl$ which depend only on finitely many coordinates on the basis $\{e_k\}_{k\in \mathbb{Z}_0}$ and for $\varphi \in\mathcal{C}yl$ we consider the gradient $D \varphi : H\to H$ defined as $D \varphi(x) = \sum_{k\in\mathbb{Z}_0} D_k \varphi(x) e_k$ where $D_k = \partial_{x_k}$ and $x_k = \langle e_k,x \rangle$ are the coordinates of $x$.
For any $\alpha\in \mathbb{R}$ define the space $\mathcal{F} L^{p,\alpha}$ of functions on the torus for which
$$
|x|_{\mathcal{F} L^{p,\alpha}} = \big[\sum_{k\in\mathbb{Z}_0} (|k|^\alpha |x_k|)^p\big]^{1/p}<+\infty
\, \text{
if $p<\infty$ and
}\,
|x|_{\mathcal{F} L^{\infty,\alpha}} = \sup_{k\in\mathbb{Z}_0} |k|^\alpha |x_k| <+\infty .
$$
We will use the notation $H^\alpha = \mathcal{F} L^{2,\alpha}$ for the usual Sobolev spaces of periodic functions on $\mathbb{T}$.
We let $A=-\partial_\xi^2$ and $B=\partial_\xi$ as unbounded operators acting on $H$ with domains respectively $H^2$ and $H^{1}$. Note that $\{e_k\}_{k\in\mathbb{Z}_0}$ is a basis of eigenvectors of $A$ for which we denote $\{\lambda_k = |k|^2 \}_{k\in\mathbb{Z}_0}$ the associated eigenvalues. The operator $A^\theta$ will then be defined on $H^{\theta}$ by $A^\theta e_k = |k|^{2\theta}e_k$ with domain $H^{2\theta}$.
The linear operator $\Pi_N: H \to H$ is the projection on the subspace generated by $\{e_k\}_{k\in\mathbb{Z}_0, |k|\le N}$.
Denote $\mathcal{C}_T V = C([0,T],V)$ the space of continuous functions from $[0,T]$ to the Banach space $V$ endowed with the supremum norm and with $\mathcal{C}^\gamma_T V = C^\gamma([0,T],V)$ the subspace of $\gamma$-H\"older continuous functions in $\mathcal{C}_T V$ with the $\gamma$-H\"older norm.
\section{Controlled processes}
\label{sec:controlled}
We introduce a space of stationary processes which ``looks like" an Ornstein-Uhlenbeck process. The invariant law at fixed time of these processes will be given by the canonical Gaussian cylindrical measure $\mu$ on $H$ which we consider as a Gaussian measure on $H^{\alpha}$ for any $\alpha<-1/2$. This measure is fully characterized by the equation
$$
\int e^{i \langle \psi,x \rangle}\mu(\mathrm{d} x) = e^{-\langle \psi,\psi\rangle/2}, \qquad \forall\psi\in H ;
$$
or alternatively by the integration by parts formula
$$
\int D_k \varphi(x) \mu(\mathrm{d} x) = \int x_{-k} \varphi(x) \mu(\mathrm{d} x),\qquad \forall k\in\mathbb{Z}_0, \varphi \in\mathcal{C}yl .
$$
\begin{definition}[Controlled process]
\label{def:controlled}
For any $\theta\ge 0$ let $\mathcal{R}_\theta$ be the space of stationary stochastic processes $(u_t)_{0 \leq t \leq T}$ with continuous paths in $\mathcal{S}'$ such that
\begin{itemize}
\item[i)] the law of $u_t$ is the white noise $\mu$ for all $t\in[0,T]$;
\item[ii)] there exists a process $\mathcal{A} \in C([0,T],\mathcal{S}')$ of zero quadratic variation such that $\mathcal{A}_0 = 0$ and satisfying the equation
\begin{equation}
\label{eq:controlled-decomposition}
u_t(\varphi) = u_0(\varphi) + \int_0^t u_s(-A^\theta \varphi) \mathrm{d} s+\mathcal{A}_t(\varphi) + M_t(\varphi)
\end{equation}
for any test function $\varphi \in \mathcal S$, where $M_t(\varphi)$ is a martingale with respect to the filtration generated by $u$ with quadratic variation $[M(\varphi)]_t = 2t\|A^{\theta/2} \varphi\|_{L^2_0(\mathbb{T})}^2$;
\item[iii)] the reversed processes $\hat u_t = u_{T-t}$, $\hat{\mathcal{A}}_t = -\mathcal{A}_{T-t}$ satisfy the same equation with respect to their own filtration (the backward filtration of $u$).
\end{itemize}
\end{definition}
For controlled processes we will prove that if $\theta>1/2$ the Burgers drift is well defined by approximating it and passing to the limit. Let $\rho:\mathbb{R}\to\mathbb{R}$ be a positive smooth test function with unit integral and $\rho^\varepsilon(\xi)=\rho(\xi/\varepsilon)/\varepsilon$ for all $\varepsilon>0$. For simplicity in the proofs we require that the function $\rho$ has a Fourier transform $\hat\rho$ supported in some ball and such that $\hat\rho = 1$ in a smaller ball. This is a technical condition which is easy to remove, but we refrain from doing so here in order not to obscure the main line of the arguments.
\begin{lemma}
\label{lemma:burgers-drift}
If $u\in\mathcal{R}_\theta$ and if $\theta >1/2$ then almost surely
$$
\lim_{\varepsilon\to 0} \int_0^t F(\rho^\varepsilon* u_s) \mathrm{d} s
$$
exists in the space $C([0,T],\mathcal{F} L^{\infty,\zeta})$ for some $\zeta<0$. We \emph{denote} with $\int_0^t F( u_s) \mathrm{d} s$ the resulting process with values in $C([0,T],\mathcal{F} L^{\infty,\zeta})$.
\end{lemma}
\begin{proof} We postpone the proof to Sect.~\ref{sec:estimates}.
\end{proof}
It will turn out that for this process we have a good control of its space and time regularity and also some exponential moment estimates. Then it is relatively natural to \emph{define} solutions of eq.~\eqref{eq:burgers-theta} by the following self-consistency condition.
\begin{definition}[Controlled solution] Let $\theta>1/2$, then a process $u\in\mathcal{R}_\theta$ is a \emph{controlled solution} of SBE$_\theta$ if almost surely
\begin{equation}
\label{eq:self-consistent}
\mathcal{A}_t(\varphi) = \langle \varphi, \int_0^t F(u_s) \mathrm{d} s \rangle
\end{equation}
for any test function $\varphi \in \mathcal S$ and any $t\in[0,T]$.
\end{definition}
Note that these controlled solutions are a generalization of the notion of probabilistically weak solutions of SBE$_\theta$. The key point is that the drift term is not given explicitly as a function of the solution itself but characterized by the
self-consistency relation~\eqref{eq:self-consistent}. In this sense controlled solutions are to be understood as a couple $(u,\mathcal{A})$ of processes satisfying compatibility relations.
An analogy which could be familiar to the reader is that with a diffusion on a bounded domain with reflected boundary where the solution is described by a couple of processes $(X,L)$ representing the position of the diffusing particle and its local time at the boundary~\cite{RY}.
Note also that there is no requirement on $\mathcal{A}$ to be adapted to $u$. Our analysis below cannot exclude the possibility that $\mathcal{A}$ contains some further randomness and that the solutions are strictly weak, that is not adapted to the filtration generated by the martingale term and the initial condition.
\section{The It\^o trick}
\label{sec:ito-trick}
In order to prove the regularization properties of controlled processes we will need some stochastic calculus, in particular an It\^o formula and some estimates for martingales. Let us recall here some basic elements. In this section $u$ will always be a controlled process in $\mathcal{R}_\theta$.
For any test function $\varphi\in\mathcal{S}$ the processes $(u_t(\varphi))_{t}$ and $(\hat u_t(\varphi))_{t}$ are Dirichlet processes: sums of a martingale and a zero quadratic variation process.
Note that we do not want to assume controlled processes to be semimartingales (even when tested with smooth functions). This is compatible with the regularity of our solutions and there is no clue that solutions of SBE$_\theta$ even with $\theta=1$ are distributional semimartingales. A suitable notion of stochastic calculus which is valid for a large class of processes and in particular for Dirichlet processes is the stochastic calculus via regularization developed by Russo and Vallois~\cite{RV}. In this approach the It\^o formula can be extended to Dirichlet processes. In particular if $(X^i)_{i=1,\dots,k}$ is an $\mathbb{R}^k$ valued Dirichlet process and $g$ is a $C^2(\mathbb{R}^k;\mathbb{R})$ function then
$$
g(X_t) = g(X_0) + \sum_{i=1}^k\int_0^t \partial_i g(X_s) \mathrm{d}^- X^i_s + \frac12 \sum_{i,j=1}^k \int_0^t \partial^2_{i,j} g(X_s) \mathrm{d}^- [X^i,X^j]_s
$$
where $\mathrm{d}^-$ denotes the forward integral and $[X,X]$ the quadratic covariation of the vector process $X$. Decomposing $X=M+N$ as the sum of a martingale $M$ and a zero quadratic variation process $N$ we have $[X,X]=[M,M]$ and
$$
g(X_t) = g(X_0) + \sum_{i=1}^k \int_0^t \partial_i g(X_s) \mathrm{d}^- M^i_s + \sum_{i=1}^k \int_0^t \partial_i g(X_s) \mathrm{d}^- N^i_s
$$
$$ + \sum_{i,j=1}^k\frac12 \int_0^t \partial^2_{i,j} g(X_s) \mathrm{d}^- [M^i,M^j]_s
$$
where now $\mathrm{d}^- M$ coincides with the usual It\^o integral and $[M,M]$ is the usual quadratic variation of the martingale $M$. The integral $\int_0^t \partial_i g(X_s) \mathrm{d}^- N^i_s$ is well-defined due to the fact that all the other terms in this formula are well defined. The case where the function $g$ depends explicitly on time can be handled by the above formula by considering time as an additional (0-th) component of the process $X$ and using the fact that $[X^i,X^0]=0$ for all $i=1,..,k$. In the computations which follow we will only need to apply the It\^o formula to smooth functions.
Let us denote by $L_0$ the generator of the Ornstein-Uhlenbeck process associated to the operator $A^\theta$:
\begin{equation}
\label{eq:generator-ou}
L_0 \varphi(x) = \sum_{k \in \mathbb Z_0} |k|^{2\theta} \big(- x_k D_k \varphi(x) + \tfrac{1}{2} D_{-k}D_k \varphi(x)\big).
\end{equation}
Consider now a smooth cylinder function $h:[0,T]\times \Pi_N H\to \mathbb{R}$. The It\^o formula for the finite quadratic variation process $(u^N_t = \Pi_N u_t)_t$ gives
$$
h(t,u^N_t)=h(0,u^N_0)+\int_0^t (\partial_s + L^N_0) h(s,u^N_s) \mathrm{d} s +\int_0^t D h(s,u^N_s) \mathrm{d}\Pi_N \mathcal{A}_s + M^+_t
$$
where
$$
L^N_0 h(s,x) = \sum_{k\in\mathbb{Z}_0 : |k|\le N} |k|^{2\theta} ( x_{k} D_k h(s,x) + D_k D_{-k} h(s,x))
$$
is the restriction of the operator $L_0$ to $\Pi_N H$ and where
the martingale part denoted $M^+$ has quadratic variation given by
$
[ M^+ ]_t = \int_0^t \mathcal{E}^\theta_N(h(s,\cdot))(u^N_s) \mathrm{d} s
$,
where
$$
\mathcal{E}_N^\theta(\varphi)(x) = \frac12 \sum_{k\in \mathbb{Z}_0: |k|\le N}|k|^{2\theta} |D_k \varphi(x)|^2 .
$$
Similarly the It\^o formula on the backward process reads
$$
h(T-t,u^N_{T-t})=h(T,u^N_T)+ \int_0^{t} (-\partial_s + L^N_0) h(T-s,u^N_{T-s}) \mathrm{d} s
$$
$$
- \int_0^{t} D h(T-s,u^N_{T-s}) \mathrm{d} \Pi_N \mathcal{A}_{T-s} + M^-_t
$$
with $
[ M^- ]_t = \int_0^t \mathcal{E}^\theta_N(h(T-s,\cdot))(u^N_{T-s}) \mathrm{d} s
$
so we have the key equality
\begin{equation}
\label{eq:key-representation}
\int_0^t 2 L_0^N h(s,u^N_{s})\mathrm{d} s= -M^+_t + M^-_{T-t}-M^-_T,
\end{equation}
which allows us to represent the time integral of $h$ as a sum of martingales and hence to obtain better control.
On this martingale representation result we can use the Burkholder--Davis--Gundy inequalities to prove the following bound.
\begin{lemma}[It\^o trick]
\label{lemma:ito-trick}
Let $h : [0,T]\times \Pi_N H \to \mathbb{R}$ be a cylinder function. Then
for any $p \geq 1$,
\begin{equation}
\label{eq:ito-trick}
\left\|\sup_{t\in[0,T]}\left|\int_0^t L_0 h(s,\Pi_N u_s) \mathrm{d} s\right|\right\|_{L^p(\mathbb{P}_\mu)} \lesssim_p
T^{1/2} \sup_{s\in[0,T]}\left\| \mathcal{E}^\theta(h(s,\cdot)) \right\|^{1/2}_{L^{p/2}(\mu)}
\end{equation}
where
$
\mathcal{E}^\theta(\varphi)(x) = \frac12 \sum_{k\in \mathbb{Z}_0}|k|^{2\theta} |D_k \varphi(x)|^2
$.
In the particular case $h(s,x)= e^{a(T- s)}\tilde h(x)$ for some $a\in\mathbb{R}$ we have the improved estimate
\begin{equation}
\label{eq:ito-trick-conv}
\begin{split}
\left\|\int_0^T e^{a(T-s)} L_0 \tilde h(\Pi_N u_s) \mathrm{d} s\right\|_{L^p(\mathbb{P}_\mu)}
\lesssim_p
\left(\frac{1-e^{2aT}}{2a}\right)^{1/2}
\left\| \mathcal{E}^\theta(\tilde h) \right\|^{1/2}_{L^{p/2}(\mu)} .
\end{split}
\end{equation}
\end{lemma}
\begin{proof}
$$
\left\|\sup_{t\in[0,T]}\left|\int_0^t 2 L_0^N h(s,u_s) \mathrm{d} s\right|\right\|_{L^p(\mathbb{P}_\mu)} \le
\left\|\sup_{t\in[0,T]}|M^+_t| \right\|_{L^p(\mathbb{P}_\mu)}+
2 \left\|\sup_{t\in[0,T]}|M^-_t| \right\|_{L^p(\mathbb{P}_\mu)}
$$
$$
\lesssim_p \left\| \langle M^+\rangle_T \right\|_{L^{p/2}(\mathbb{P}_\mu)}^{1/2}+\left\| \langle M^-\rangle_T \right\|_{L^{p/2}(\mathbb{P}_\mu)}^{1/2}
\lesssim_p \left\|\int_0^T \mathcal{E}^\theta(h(s,\cdot))(u_s) \mathrm{d} s \right\|_{L^{p/2}(\mathbb{P}_\mu)}^{1/2}
$$
$$
\lesssim_p \left(\int_0^T\left\| \mathcal{E}^\theta(h(s,\cdot))(u_s) \right\|_{L^{p/2}(\mathbb{P}_\mu)} \mathrm{d} s\right)^{1/2}
\lesssim_p T^{1/2} \sup_{s\in[0,T]} \left\| \mathcal{E}^\theta(h(s,\cdot)) \right\|_{L^{p/2}(\mu)}^{1/2} .
$$
For the convolution we bound as follows
\begin{equation*}
\begin{split}
\left\|\int_0^T e^{a(T-s)} 2 L_0^N \tilde h(u_s) \mathrm{d} s\right\|_{L^p(\mathbb{P}_\mu)}
& \lesssim_p \left(\int_0^T e^{2a(T-s)}\mathrm{d} s\right)^{1/2}
\left\| \mathcal{E}^\theta(\tilde h)(u_0) \right\|^{1/2}_{L^{p/2}(\mathbb{P}_\mu)}
\\ & \lesssim_p
\left(\frac{1-e^{2aT}}{2a}\right)^{1/2}
\left\| \mathcal{E}^\theta(\tilde h) \right\|^{1/2}_{L^{p/2}(\mu)}
\end{split}
\end{equation*}
\end{proof}
The bound~\eqref{eq:ito-trick} in the present form (with the use of the backward martingale to remove the drift part) has been inspired by~\cite{CLO}*{Lemma 4.4}.
\begin{lemma}[Exponential integrability]
Let $h : [0,T]\times \Pi_N H \to \mathbb{R}$ be a cylinder function. Then
\begin{equation}
\label{eq:exp-ito-trick}
\mathbb{E} \sup_{t\in[0,T]}e^{2 \int_0^t L_0^N h(s,\Pi_N u_s) \mathrm{d} s} \lesssim
\mathbb{E} e^{8 \int_0^T \mathcal{E}^\theta(h(s,u_s)) \mathrm{d} s }
\end{equation}
\end{lemma}
\begin{proof}
Let $M^\pm$ be, as above, the (Brownian) martingales in the representation of the integral $\int_0^t L_0^N h(s,\Pi_N u_s) \mathrm{d} s$. By the Cauchy--Schwarz inequality
$$
\mathbb{E} \sup_{t\in[0,T]}e^{2 \int_0^t L_0^N h(s,\Pi_N u_s) \mathrm{d} s} \le \left[\mathbb{E} \sup_{t\in[0,T]}e^{2 M^+_t}\right]^{1/2} \left[\mathbb{E} \sup_{t\in[0,T]}e^{2 (M^-_T-M^-_{T-t})}\right]^{1/2}.
$$
By Novikov's criterion
$
e^{4 M^+_t - 8 \langle M^+\rangle_t }
$ is a martingale for $t\in[0,T]$ if $\mathbb{E} e^{8 \langle M^+\rangle_T} < \infty$. In this case
$$
\mathbb{E} \sup_{t\in[0,T]}e^{2 M^+_t} \le \mathbb{E} \sup_{t\in[0,T]}(e^{2 M^+_t- 4 \langle M^+\rangle_t}\sup_{t\in[0,T]} e^{ 4 \langle M^+\rangle_t })
$$
$$
\le\left[\mathbb{E} \sup_{t\in[0,T]} e^{4 M^+_t- 8 \langle M^+\rangle_t}\right]^{1/2} \left[\mathbb{E} e^{8 \langle M^+\rangle_T }\right]^{1/2}
$$
and by Doob's inequality we get that the previous expression is bounded by
$$
\left[\mathbb{E} e^{4 M^+_T- 8 \langle M^+\rangle_T}\right]^{1/2} \left[\mathbb{E} e^{8 \langle M^+\rangle_T }\right]^{1/2}
\le \left[\mathbb{E} e^{8 \langle M^+\rangle_T }\right]^{1/2}.
$$
Reasoning similarly for $M^-$ we obtain that
$$
\mathbb{E} \sup_{t\in[0,T]}e^{2 \int_0^t L_0^N h(s,\Pi_N u_s) \mathrm{d} s} \le \mathbb{E} e^{8 \langle M^+\rangle_T } = \mathbb{E} e^{8 \int_0^T \mathcal{E}^\theta(h(s,u_s)) \mathrm{d} s }.
$$
\end{proof}
\section{Estimates on the Burgers drift}
\label{sec:estimates}
In this section we provide the key estimates on the Burgers drift via the quadratic variations of the forward and backward martingales in its decomposition.
Let $F(x)(\xi) = B (x(\xi))^2$ and $F_N(x) = F(\Pi_N x)$. Define
$$
H_N(x) = -\int_0^\infty F_N(e^{-A^\theta t}x)\mathrm{d} t
$$
and consider $L_0 H_N(x)$ as acting on each Fourier coordinate of $H_N(x)$. Remark that the second order part of $L_0$ does not appear in the computation of $L_0 F_N$ since
$$
D_k D_{-k} F(\Pi_N e^{-A^\theta t} x)=0
$$ for each $k\in\mathbb{Z}_0$. Indeed
$$
D_{-k} D_k F(\Pi_N e^{-A^\theta t} x) = B [D_{-k} D_k (\Pi_N e^{-A^\theta t} x)^2]=2 B D_{-k} [(\Pi_N e^{-A^\theta t} x) (\Pi_N e^{-A^\theta t} e_k)]
$$
$$
=2 [B(\Pi_N e^{-A^\theta t} e_{-k}) (\Pi_N e^{-A^\theta t} e_k)+(\Pi_N e^{-A^\theta t} e_{-k}) B(\Pi_N e^{-A^\theta t} e_k)] = 0
$$
Then it is easy to check that
$$
L_0 H_N(\Pi_N x) = \langle A^\theta x, D H_N(\Pi_N x)\rangle = -2 \int_0^\infty B [(e^{-A^\theta t}\Pi_N x)(A^\theta e^{-A^\theta t} \Pi_N x) ]\mathrm{d} t
$$
$$
= -\int_0^\infty \frac{\mathrm{d} }{\mathrm{d} t}B [(e^{-A^\theta t}\Pi_N x)^2 ]\mathrm{d} t = B (\Pi_N x)^2=F(\Pi_N x)
$$
since $\lim_{t\to \infty} B [(e^{-A^\theta t}\Pi_N x)^2 ] = 0$. Denote by $(x_k)_{k\in\mathbb{Z}_0}$ and $(H_N(x)_k)_{k\in\mathbb{Z}_0}$ the coordinates of $x=\sum_{k\in\mathbb{Z}_0} x_k e_k$ and $H_N(x)=\sum_{k\in\mathbb{Z}_0} H_N(x)_k e_k$ in the canonical basis $(e_k)_{k\in\mathbb{Z}_0}$. Then a direct computation gives an explicit formula for $H_N(x)$:
$$
(H_{N}(x))_k =
2 ik \sum_{k_1,k_2 : k=k_1+k_2} \frac{\mathbb{I}_{|k|,|k_1|,|k_2|\le N}}{|k_1|^{2\theta}+|k_2|^{2\theta}} x_{k_1} x_{k_2}.
$$
Let us denote with $(H_{N}(x))_k^{\pm}$ respectively the real and imaginary parts of this quantity: $(H_{N}(x))_k^{\pm}= ((H_{N}(x))_k\pm (H_{N}(x))_{-k})/(2 i^{\pm})$ where $i^+=1$ and $i^-=i$. Now
$$
(H_{N}(x))^\pm_k =
i^{\mp}k \sum_{k_1,k_2 : k=k_1+k_2} \frac{\mathbb{I}_{|k|,|k_1|,|k_2|\le N}}{|k_1|^{2\theta}+|k_2|^{2\theta}} (x_{k_1} x_{k_2}\mp x_{-k_1} x_{-k_2})
$$
and recall that
$
\mathcal{E}^\theta((H_N)^\pm_k)(x) = \sum_{q\in\mathbb{Z}_0} |q|^{2\theta} |D_q H^\pm_{N,k}(x)|^2
$.
\begin{lemma}
\label{lemma:energy-estimates}
For $\lambda >0$ small enough we have
\begin{equation}
\label{eq:first-energy-estimate}
\sup_{k\in\mathbb{Z}_0} \mathbb{E} \exp\left[\lambda |k|^{2\theta-3} \mathcal{E}^\theta((H_N)^\pm_k)(u_0)\right] \lesssim 1
\end{equation}
and
\begin{equation}
\label{eq:second-energy-estimate}
\sup_{1\le M \le N}\sup_{k\in\mathbb{Z}_0} \mathbb{E} \exp\left[\lambda |k|^{-2} M^{2\theta-1} \mathcal{E}^\theta((H_N-H_M)^\pm_k)(u_0)\right] \lesssim 1.
\end{equation}
\end{lemma}
\begin{proof}
We start by computing $\mathcal{E}^\theta((H_N)^\pm_k)$: noting that
$$
D_q (H_{N})^\pm_k(x)= i^\mp k \left[ \frac{\mathbb{I}_{|k|,|q|,|k-q|\le N}}{|q|^{2\theta}+|k-q|^{2\theta}} x_{k-q}\mp \frac{\mathbb{I}_{|k|,|q|,|k+q|\le N}}{|q|^{2\theta}+|k+q|^{2\theta}} x_{-k-q}\right]
$$
we have
\begin{equation*}
\begin{split}
\mathcal{E}^\theta((H_N)^\pm_k)(x) & = \sum_{q\in\mathbb{Z}_0} |k|^2 |q|^{2\theta}\left[
2 \frac{\mathbb{I}_{|k|,|q|,|k-q|\le N}}{(|q|^{2\theta}+|k-q|^{2\theta})^2} |x_{k-q}|^2
\right . \\
& \left .
\qquad \qquad \mp \frac{\mathbb{I}_{|k|,|q|,|k-q|\le N}}{|q|^{2\theta}+|k-q|^{2\theta}} \frac{\mathbb{I}_{|k|,|q|,|k+q|\le N}}{|q|^{2\theta}+|k+q|^{2\theta}} (x_{k-q} x_{k+q}+x_{-k+q} x_{-k-q})
\right]
\end{split}
\end{equation*}
which gives the bound
$$
\mathcal{E}^\theta((H_N)^\pm_k)(x)\lesssim |k|^2 \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}} \frac{|k_1|^{2\theta}\mathbb{I}_{|k|,|k_1|,|k_2|\le N}}{(|k_1|^{2\theta}+|k_2|^{2\theta})^2} |x_{k_2}|^2
$$
$$
\lesssim |k|^2 \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}} \frac{\mathbb{I}_{|k|,|k_1|,|k_2|\le N}}{|k_1|^{2\theta}+|k_2|^{2\theta}} |x_{k_2}|^2 = \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}}c(k,k_1,k_2) |x_{k_2}|^2 = h_N(x)
$$
where $c(k,k_1,k_2) = |k|^2/(|k_1|^{2\theta}+|k_2|^{2\theta})$.
Let $$
I_N(k) = \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}} c(k,k_1,k_2)
$$
and note that the sum in $I_N(k)$ can be bounded by the equivalent integral giving (uniformly in $N$)
$$
I_N(k) \lesssim |k|^{2} \int_{\mathbb{R}} \frac{\mathrm{d} q}{|q|^{2\theta}+|k-q|^{2\theta}}
= |k|^{3-2\theta} \int_{\mathbb{R}} \frac{\mathrm{d} q}{|q|^{2\theta}+|1-q|^{2\theta}} \lesssim |k|^{3-2\theta}
$$
since the last integral is finite for $\theta > 1/2$. Then
$$
\mathbb{E} e^{\lambda |k|^{2\theta-3}\mathcal{E}^\theta((H_N)^\pm_k)(u_0)} \le \mathbb{E} e^{\lambda C|k|^{2\theta-3} h_N(u_0)}
$$
$$
\le \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}} c(k,k_1,k_2) \mathbb{E} \frac{e^{\lambda C |k|^{2\theta-3} I_N(k) |(u_0)_{k_2}|^2}}{I_N(k)}
\le \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}} c(k,k_1,k_2) \mathbb{E} \frac{e^{\lambda C'|(u_0)_{k_2}|^2}}{I_N(k)}
$$
where we used the previous bound to say that $C|k|^{2\theta-3} I_N(k) \le C'$ uniformly in $k$. Recall that $(u_0)_k$ has a Gaussian distribution of mean zero and unit variance. Therefore for $\lambda$ small enough $\mathbb{E} e^{\lambda C'|(u_0)_{k_2}|^2}\lesssim 1$ uniformly in $k_2$ so that
$$
\mathbb{E} e^{\lambda |k|^{2\theta-3}\mathcal{E}^\theta((H_N)^\pm_k)(u_0)} \lesssim 1.
$$
This establishes the claimed exponential bound for $\mathcal{E}^\theta((H_N(x))_k^\pm)$.
Similarly we have
$$
\mathcal{E}^\theta((H_N-H_M)^\pm_k)(x) \lesssim \sum_{k_1,k_2 :k_1+k_2=k} (\mathbb{I}_{|k|,|k_1|,|k_2|\le N}-\mathbb{I}_{|k|,|k_1|,|k_2|\le M})^2 c(k,k_1,k_2) | x_{k_2}|^2 .
$$
Let
$$
I_{N,M}(k) =\sum_{k_1,k_2 :k_1+k_2=k} (\mathbb{I}_{|k|,|k_1|,|k_2|\le N}-\mathbb{I}_{|k|,|k_1|,|k_2|\le M})^2 c(k,k_1,k_2)
$$
and note that, for $N\ge M$,
$$
(\mathbb{I}_{|k|,|k_1|,|k_2|\le N}-\mathbb{I}_{|k|,|k_1|,|k_2|\le M}) \lesssim \mathbb{I}_{|k|,|k_1|,|k_2|\le N}(\mathbb{I}_{|k|> M}+\mathbb{I}_{|k_1|> M}+\mathbb{I}_{|k_2|> M}).
$$
Then, by estimating the sums with the corresponding integrals and after easy simplifications we remain with the following bound
$$
I_{N,M}(k)\lesssim |k|^{2} \mathbb{I}_{|k|> M} \int_{\mathbb{R}} \frac{\mathrm{d} q}{|q|^{2\theta}+|k-q|^{2\theta}}
+ |k|^{2} \int_{\mathbb{R}} \frac{\mathbb{I}_{|q|> M}\mathrm{d} q}{|q|^{2\theta}+|k-q|^{2\theta}}
$$
The first integral in the r.h.s.~is easily handled by
$$
|k|^{2} \mathbb{I}_{|k|> M} \int_{\mathbb{R}} \frac{\mathrm{d} q}{|q|^{2\theta}+|k-q|^{2\theta}} \lesssim |k|^{3-2\theta} \mathbb{I}_{|k|> M} \lesssim |k|^2 M^{1-2\theta}
$$
since $\theta > 1/2$. For the second we have the analogous bound
$$
|k|^{2} \int_{\mathbb{R}} \frac{\mathbb{I}_{|q|> M}\mathrm{d} q}{|q|^{2\theta}+|k-q|^{2\theta}} \lesssim
|k|^{2} \int_{\mathbb{R}} \frac{\mathbb{I}_{|q|> M}\mathrm{d} q}{|q|^{2\theta}} \lesssim |k|^2 M^{1-2\theta}
$$
which concludes the proof.
\end{proof}
Using Lemma~\ref{lemma:ito-trick} and the estimates contained in Lemma~\ref{lemma:energy-estimates} we are led to the next set of more refined estimates for the drift and his small scale contributions.
\begin{lemma}
\label{lemma:main-bounds}
Let
$
G^M_t = \int_0^t F_{M}(u_s) \mathrm{d} s
$.
For any $M\le N$ we have
\begin{equation}
\label{eq:basic-est-1}
\|\sup_{t\in[0,T]} \left|(G^M_t)_k\right| \|_{L^p(\mathbb{P}_\mu)}\lesssim_p |k| M T ,
\end{equation}
\begin{equation}
\label{eq:basic-est-2}
\|\sup_{t\in[0,T]} \left|(G^M_t)_k\right| \|_{L^p(\mathbb{P}_\mu)}\lesssim_p |k|^{3/2-\theta} T^{1/2} ,
\end{equation}
\begin{equation}
\label{eq:basic-est-3}
\|\sup_{t\in[0,T]}\left|(G^M_t)_k-(G^N_t)_k\right| \|_{L^p(\mathbb{P}_\mu)}\lesssim_p |k| T^{1/2} M^{1/2-\theta} ,
\end{equation}
\begin{equation}
\label{eq:basic-est-4}
\sup_{M\ge 0}\|\sup_{t\in[0,T]}\left|(G^M_t)_k\right| \|_{L^p(\mathbb{P}_\mu)}\lesssim_p |k| T^{2\theta/(1+2\theta)} .
\end{equation}
\end{lemma}
\begin{proof}
The Gaussian measure $\mu$ satisfies the hypercontractivity estimate (see for example~\cite{janson_gaussian_1997}): for any complex-valued finite order polynomial $P(x)\in\mathcal{C}yl$ we have
\begin{equation}
\label{eq:hypercontractivity}
\left\| P(x) \right\|_{L^p(\mu)}\lesssim_p \left\| P(x) \right\|_{L^2(\mu)}.
\end{equation}
Then we have $(F_M(x))_k = ik \sum_{k_1+k_2=k} \mathbb{I}_{|k|,|k_1|,|k_2|\le M} x_{k_1} x_{k_2}$ and for all $k\neq 0$
$$
\int |(F_M(x))_k|^2 \mu(\mathrm{d} x) = |k|^2 \sum_{k_1+k_2=k} \sum_{k'_1+k'_2=k} \mathbb{I}_{|k_1|,|k_2|,|k'_1|,|k'_2|\le M}\int x_{k_1} x_{k_2} x_{k'_1}^* x_{k'_2}^* \mu(\mathrm{d} x)
$$
$$
=4 |k|^2 M^2
$$
This allows us to obtain the bound~\eqref{eq:basic-est-1}. Indeed
$$
\|\sup_{t\in[0,T]}\left| (G^M_t)_k \right| \|_{L^p(\mathbb{P}_\mu)}
\lesssim
\int_0^T \left\| (F_{M}(u_s))_k \right\|_{L^p(\mathbb{P}_\mu)}\mathrm{d} s
$$
$$\lesssim
T \left\| (F_{M}(\cdot))_k \right\|_{L^p(\mu)}
\lesssim_p T \left\| (F_{M}(\cdot))_k \right\|_{L^2(\mu)} \lesssim_p
|k| M T.
$$
For the bound~\eqref{eq:basic-est-2} we use the fact that $L_0 H_N = F_N$ and Lemma~\ref{lemma:ito-trick} to get
\begin{equation*}
\|\sup_{t\in[0,T]} \left|(G^M_t)_k\right| \|_{L^p(\mathbb{P}_\mu)}\lesssim_p T^{1/2} \sup_{t\in[0,T]}
\| \mathcal{E}^\theta(H_N(\cdot)) \|_{L^{p/2}(\mu)}^{1/2}
\lesssim
|k|^{3/2-\theta} T^{1/2}
\end{equation*}
where we used the first energy estimate~\eqref{eq:first-energy-estimate}
of Lemma~\ref{lemma:energy-estimates} and the fact that
$
\|Q\|_{L^p(\mu)}^p \lesssim_p \int [e^{Q(x)^+}+e^{Q(x)^-}] \mu(\mathrm{d} x)
$
where again $Q^\pm$ are the real and imaginary parts of $Q$.
The bound~\eqref{eq:basic-est-3} is obtained in the same way using the second energy estimate~\eqref{eq:second-energy-estimate}.
Finally the last bound~\eqref{eq:basic-est-4} is obtained from the previous two by taking $0\le N\le M$, decomposing $F_M(x) = F_N(x)-F_{N,M}(x)$:
$$
\|\sup_{t\in[0,T]} \left|(G^M_t)_k\right| \|_{L^p(\mathbb{P}_\mu)} \le \|\sup_{t\in[0,T]} \left|(G^N_t)_k\right| \|_{L^p(\mathbb{P}_\mu)}+\|\sup_{t\in[0,T]} \left|(G^M_t)_k-(G^N_t)_k\right| \|_{L^p(\mathbb{P}_\mu)}
$$
$$
\lesssim_p |k| ( N T+ N^{1/2-\theta} T^{1/2})
$$
and performing the optimal choice $N \sim T^{-1/(1+2\theta)}$.
\end{proof}
Analogous estimates go through also for the functions obtained via convolution with the $e^{-A^\theta t}$ semi-group.
\begin{lemma}
\label{lemma:main-bounds-conv}
Let
$$
\tilde G^M_t = \int_0^t e^{-A^\theta (t-s)} F_{M}(u_s) \mathrm{d} s
$$
then for any $M\le N$ we have
\begin{equation}
\label{eq:basic-est-1-conv}
\|(\tilde G^M_t)_k\|_{L^p(\mathbb{P}_\mu)}\lesssim_p |k| M \left(\frac{1-e^{-2k^{2\theta} t/2}}{2k^{2\theta}}\right)
\end{equation}
\begin{equation}
\label{eq:basic-est-2-conv}
\|(\tilde G^M_t)_k\|_{L^p(\mathbb{P}_\mu)}\lesssim_p |k|^{3/2-\theta} \left(\frac{1-e^{-2k^{2\theta} t/2}}{2k^{2\theta}}\right)^{1/2}
\end{equation}
\begin{equation}
\label{eq:basic-est-3-conv}
\|(\tilde G^M_t)_k-(\tilde G^N_t)_k\|_{L^p(\mathbb{P}_\mu)}\lesssim_p |k| M^{1/2-\theta} \left(\frac{1-e^{-2k^{2\theta} t/2}}{2k^{2\theta}}\right)^{1/2}
\end{equation}
\end{lemma}
\begin{proof}
The proof follows the line of Lemma~\ref{lemma:main-bounds} using eq.~\eqref{eq:ito-trick-conv} instead of eq.~\eqref{eq:ito-trick}.
\end{proof}
\begin{corollary}
For all sufficiently small $\varepsilon > 0$
\begin{equation}
\label{eq:basic-est-4-conv}
\sup_{N\ge 0}\|(\tilde G^N_t)_k - (\tilde G^N_s)_k\|_{L^p(\mathbb{P}_\mu)} \lesssim_{p} |k|^{3/2-2\theta+2\varepsilon \theta} (t-s)^\varepsilon
\end{equation}
\end{corollary}
\begin{proof}
To control the time regularity of the drift convolution we consider $0\le s \le t$ and decompose
$$
\|(\tilde G^N_t)_k - (\tilde G^N_s)_k\|_{L^p(\mathbb{P}_\mu)}
$$
$$
\le \| \int_s^t (e^{-A^\theta(t-r)} F_{N}(u_r))_k \mathrm{d} r\|_{L^p(\mathbb{P}_\mu)} + (1-e^{-k^{2\theta}(t-s)})\|(\tilde G^N_s)_k\|_{L^p(\mathbb{P}_\mu)}
$$
$$
\lesssim |k|^{3/2-\theta} (t-s)^{1/2} +|k|^{3/2-2\theta}(1-e^{-k^{2\theta}(t-s)})
\lesssim |k|^{3/2-\theta} (t-s)^{1/2}
$$
Moreover a direct consequence of eq.~\eqref{eq:basic-est-2-conv} is
$$
\sup_{t\in[0,T]} \|(\tilde G^N_t)_k \|_{L^p(\mathbb{P}_\mu)}\lesssim_p |k|^{3/2-2\theta}.
$$
which gives us a uniform estimate in the form
$$
\|(\tilde G^N_t)_k - (\tilde G^N_s)_k\|_{L^p(\mathbb{P}_\mu)} \le \|(\tilde G^N_t)_k\|_{L^p(\mathbb{P}_\mu)}+\|(\tilde G^N_s)_k\|_{L^p(\mathbb{P}_\mu)} \lesssim_{p} |k|^{3/2-2\theta}
$$
By interpolation we get the claimed bound.
\end{proof}
\begin{remark}
All these $L^p$ estimates can be replaced with equivalent exponential estimates. For example it is not difficult to prove that for small $\lambda$ we have
$$
\sup_{t\in[0,T]} \sup_{k\in\mathbb{Z}_0} \mathbb{E} \exp\left(\lambda |k|^{2\theta-3/2} (\tilde G^N_t)^\pm_k \right) \lesssim 1
$$
where $(\cdot)^\pm$ denote, as before, the real and imaginary parts, respectively.
\end{remark}
At this point we are in position to prove Lemma~\ref{lemma:burgers-drift} on the existence of the Burgers' drift for controlled processes.
\begin{proof}[Proof of Lemma~\ref{lemma:burgers-drift}]
Let $\mathcal{B}^\varepsilon_t = \int_0^t F(\rho^\varepsilon* u_s) \mathrm{d} s$.
We start by noting that since $\hat \rho$ has a bounded support we have $\rho^\varepsilon * (\Pi_N u_s) = \rho^\varepsilon * u_s$ for all $N \ge C/\varepsilon$ for some constant $C$ and $\varepsilon$ small enough. Moreover all the computations we made for $F_N$ remain true for the functions $F_{\varepsilon,N}(x) = F(\rho^\varepsilon * \Pi_N x)$ so we have estimates analogous to those in Lemma~\ref{lemma:main-bounds} for
$G^{\varepsilon,M}_t = \int_0^t F(\rho^\varepsilon* \Pi_M u_s) \mathrm{d} s$. Taking $\varepsilon>\varepsilon'>0$ and $N\ge C/\varepsilon$, $M\ge C/\varepsilon'$ and $M\ge N$ we have
$$
\left\|\sup_{t\in[0,T]}\left|(\mathcal{B}^\varepsilon_t)_k-(\mathcal{B}^{\varepsilon'}_t)_k\right| \right\|_{L^p(\mathbb{P}_\mu)}
=
\left\|\sup_{t\in[0,T]}\left|(G^{\varepsilon,N}_t)_k-(G^{\varepsilon',M}_t)_k\right|\right \|_{L^p(\mathbb{P}_\mu)}
$$
$$
\lesssim_p |k| T^{1/2} M^{1/2-\theta} \lesssim_p |k| T^{1/2} (\varepsilon')^{\theta-1/2}
$$
uniformly in $\varepsilon,\varepsilon',N,M$. This easily implies that the sequence of processes $(\mathcal{B}^\varepsilon)_{\varepsilon}$ converges almost surely to a limit in $C(\mathbb{R}_+,\mathcal{F} L^{-1-\varepsilon,\infty})$ if $\theta>1/2$. By similar arguments it can be shown that the limit does not depend on the function $\rho$.
\end{proof}
\section{Existence of controlled solutions}
\label{sec:existence}
Fix $\alpha < 1/2$ and consider the SDE on $H^\alpha$ given by
\begin{equation}
\label{eq:burgers-reg}
\mathrm{d} u^N_t = - A^\theta u^N_t \mathrm{d} t + F_N(u^N_t)\mathrm{d} t + A^{\theta/2} \mathrm{d} W_t,
\end{equation}
where $F_N : H\to H$ is defined by $F_N(x) = \frac12 \Pi_N B (\Pi_N x)^2$. Global solutions of this equation starting from any $u_0^N\in H^\alpha$ can be constructed as follows. Let $(Z_t)_{t\ge 0}$ be the unique OU process on $H^\alpha$ which satisfies the SDE
\begin{equation}
\label{eq:ou}
\mathrm{d} Z_t = - A^\theta Z_t \mathrm{d} t + A^{\theta/2} \mathrm{d} W_t.
\end{equation}
with initial condition $Z_0 = u^N_0$. Let $(v^N_t)_{t\ge 0}$ be the unique solution taking values in the finite dimensional vector space $\Pi_N H$ of the following SDE
$$
\mathrm{d} v^N_t = - A^\theta v^N_t \mathrm{d} t + F_N(v^N_t)\mathrm{d} t + A^{\theta/2}\mathrm{d} \Pi_N W_t,
$$
with initial condition $v^N_0 = \Pi_N u^N_0$. Note that this SDE has global solutions despite the quadratic non-linearity. Indeed the vector field $F_N$ preserves the $H$ norm:
$$
\langle v^N_t,F_N(v^N_t)\rangle = \langle v^N_t,B (v^N_t)^2\rangle = \frac13 \int_\mathbb{T} \partial_\xi(v^N_t(\xi))^3\mathrm{d} \xi = 0
$$
and by It\^o formula we have
$$
\mathrm{d} \|v^N_t\|_H^2 = 2 \langle v^N_t,- A^\theta v^N_t \mathrm{d} t + F_N(v^N_t)\mathrm{d} t + A^{\theta/2}\mathrm{d} \Pi_N W_t \rangle + C_N \mathrm{d} t
$$
$$
= -2 \|A^{\theta/2} v^N_t\|^2_H \mathrm{d} t + 2 \langle v^N_t, A^{\theta/2}\mathrm{d} \Pi_N W_t \rangle + C_N \mathrm{d} t
$$
where $C_N = [A^{\theta/2}\Pi_N W]_t = \sum_{0<|k|\le N} |k|^{2\theta}$. From this equation we easily obtain that for any initial condition $v^N_0$ the process $(\|v^N_t\|_H)_{t\in[0,T]}$ is almost surely finite for any $T \ge 0$ which implies that the unique solution $(v^N_t)_{t \ge 0}$ can be extended to arbitrary intervals of time. Setting $u^N_t = v^N_t + (1-\Pi_N)Z_t$ we obtain a global solution of eq.~\eqref{eq:burgers-reg}. Moreover the diffusion $(u^N_t)_{t\ge 0}$ has generator
$$
L_N \varphi(x) = L_0\varphi(x)+ \sum_{k\in\mathbb{Z}_0, |k|\le N} (F_N(x))_k D_k \varphi(x)
$$
where $L_0$
is the generator of the Ornstein--Uhlenbeck defined in eq.~\eqref{eq:generator-ou} and which satisfies the integration by parts formula
$
\mu [\varphi L_0 \varphi] = \mu[ \mathcal{E}(\varphi)]
$ for $\varphi\in\mathcal{C}yl$.
This diffusion preserves the Gaussian measure $\mu$. Indeed if we take $u_0^N$ distributed according to the white noise $\mu$ we have that $((1-\Pi_N)Z_t)_{t \ge 0}$ is independent of $(v^N_t)_{t\ge 0}$. Moreover $Z_t$ has law $\mu$ for any $t\ge 0$ and an easy argument for the finite dimensional diffusion $(v^N_t)_{t\ge 0}$ shows that for any $t\ge0$ the random variable $v^N_t$ is distributed according to $\mu^N = (\Pi_N)_* \mu$: the push forward of the measure $\mu$ with respect to the projection $\Pi_N$.
We will use the fact that $u^N$ satisfies the mild equation~\cite{DZ}
\begin{equation}
\label{eq:burgers-reg-mild}
u^N_t = e^{-A^\theta t} u_0 + \int_0^t e^{-A^\theta (t-s)} F_N(u^N_s) \mathrm{d} s + A^{\theta/2} \int_0^t e^{-A^\theta (t-s)} \mathrm{d} W_s
\end{equation}
where the stochastic convolution in the r.h.s is given by
$$
A^{\theta/2} \int_0^t e^{-A^\theta (t-s)} \mathrm{d} W_s = \sum_{k\in\mathbb{Z}_0} |k|^{\theta} e_k \int_0^t e^{-|k|^{2\theta} (t-s)} \mathrm{d} \beta^k_s .
$$
\begin{lemma}
Let
$$
\mathcal{A}_t^{N}=\int_0^t F_{N}(u^{N}_s) \mathrm{d} s ,\qquad \tilde \mathcal{A}_t^{N}=\int_0^t e^{-A^\theta(t-s)} F_{N}(u^{N}_s) \mathrm{d} s .
$$
and set $\sigma=(3/2-2\theta)_+$.
The family of laws of the processes $\{(u^N,\mathcal{A}^N,\tilde \mathcal{A}^N,W)\}_N$ is tight in the space of continuous functions with values in $\mathcal{X}=\mathcal{F} L^{\infty,\sigma-\varepsilon}\times \mathcal{F} L^{\infty,3/2-\theta-\varepsilon}\times \mathcal{F} L^{\infty,3/2-2\theta-\varepsilon}\times \mathcal{F} L^{\infty,-\varepsilon}$ for all small $\varepsilon > 0$.
\end{lemma}
\begin{proof}
The estimate~\eqref{eq:basic-est-4-conv} in the previous section readily gives that for any small $\varepsilon >0$ and sufficiently large $p$
$$
\mathbb{E}_\mu\left[\sum_{k\in\mathbb{Z}_0} |k|^{-(3/2-2\theta+3\theta\varepsilon) p} \left(|(\tilde \mathcal{A}_t^{N}-\tilde \mathcal{A}_s^{N})_k| \right)^p\right]
\lesssim_{p,\varepsilon} \sum_{k\in\mathbb{Z}_0} |k|^{-\theta\varepsilon p} |t-s|^{p \varepsilon} \lesssim |t-s|^{p \varepsilon}
$$
This estimates show that the family of processes $\{ \tilde \mathcal{A}^{N} \}_{N}$ is tight in $C([0,T],\mathcal{F} L^{\infty,\alpha})$ for $\alpha=3/2-2\theta+3\theta\varepsilon$ and sufficiently small $\varepsilon>0$.
An analogous argument using the estimate~\eqref{eq:basic-est-2}
shows that the family of processes $\{ \mathcal{A}^{N} \}_{N}$ is tight in $C^\gamma([0,T],\mathcal{F} L^{\infty,\beta})$ for any $\gamma<1/2$ and $\beta < 3/2-\theta$.
It is not difficult to show that the stochastic convolution $\int_0^t e^{-A^\theta(t-s)} A^{\theta/2} \mathrm{d} W_s$ belongs to $C([0,T],\mathcal{F} L^{\infty,1-\theta-\varepsilon})$ for all small $\varepsilon>0$.
Taking into account the mild equation~\eqref{eq:burgers-reg-mild}
we find that the processes $\{(u^{N}_t)_{t\in[0,T]}\}_{N}$ are tight in $C([0,T],\mathcal{F} L^{\infty,\sigma-\varepsilon})$.
\end{proof}
We are now ready to prove our main theorem on existence of (probabilistically weak) controlled solutions to the generalized stochastic Burgers equation.
\begin{theorem}
There exists a probability space and a quadruple of processes $(u,\mathcal{A},\tilde\mathcal{A},W)$ with continuous trajectories in $\mathcal{X}$ such that $W$ is a cylindrical Brownian motion in $H$, $u$ is a controlled process and they satisfy
\begin{equation}
\label{eq:limit-1}
u_t = u_0 + \mathcal{A}_t - \int_0^t A^\theta u_s \mathrm{d} s + B W_t = e^{-A^\theta t} u_0 + \tilde \mathcal{A}_t + \int_0^t e^{-A^\theta(t-s)} B \mathrm{d} W_s
\end{equation}
where, as space distributions,
\begin{equation}
\label{eq:limit-4}
\mathcal{A}_t = \lim_{M \to \infty}\int_0^t F_{M}(u_s) \mathrm{d} s
\quad \text{ and } \quad
\tilde \mathcal{A}_t = \int_0^t e^{-A^\theta(t-s)} \mathrm{d} \mathcal{A}_s .
\end{equation}
this last integral being defined as a Young integral.
\end{theorem}
\begin{proof}
Let us first prove~\eqref{eq:limit-4}. By tightness of the laws of $\{(u^N,\mathcal{A}^N,\tilde\mathcal{A}^N , W)\}_N$ in $C(\mathbb{R};\mathcal{X})$ we can extract a subsequence which converges weakly (in the probabilistic sense) to a limit point in $C(\mathbb{R};\mathcal{X})$. By the Skorokhod embedding theorem, up to a change of the probability space, we can assume that this subsequence which we call $\{N_n\}_{n\ge 1}$ converges almost surely to a limit $u = \lim_n u^{N_n} \in C(\mathbb{R};\mathcal{X})$. Then
$$
\int_0^t F_{M}(u_s) \mathrm{d} s = \int_0^t (F_{M}(u_s) - F_{M}(u^{N_n}_s)) \mathrm{d} s
$$
$$\qquad
+ \int_0^t (F_{M}(u^{N_n}_s) - F_{N_n}(u^{N_n}_s)) \mathrm{d} s
+ \int_0^t F_{N_n}(u^{N_n}_s) \mathrm{d} s .
$$
But now, in $C(\mathbb{R}_+,\mathcal{F} L^{\infty,3/2-\theta-\varepsilon})$ we have the almost sure limit
$$
\lim_n \int_0^\cdot F_{N_n}(u^{N_n}_s) \mathrm{d} s =\lim_n \mathcal{A}^{N_n}_\cdot = \mathcal{A}_\cdot
$$
and, always almost surely in $C(\mathbb{R}_+,\mathcal{F} L^{\infty,3/2-\theta-\varepsilon})$, we have also
$$
\lim_n \int_0^\cdot (F_{M}(u_s) - F_{M}(u^{N_n}_s)) \mathrm{d} s = 0 ,
$$
since the functional $F_M$ depends only on a finite number of components of $u$ and $u^{N_n}$ and we have the convergence of $u^{N_n}$ to $u$ in $C(\mathbb{R};\mathcal{F} L^{\infty,\sigma-\varepsilon})$ and thus distributionally uniformly in time.
Moreover, for all $k\in\mathbb{Z}_0$,
$$
\lim_M \sup_{N_n : M<N_n} \left\|\sup_{t\in[0,T]} \left| \int_0^t (F_{M}(u^{N_n}_s) - F_{N_n}(u^{N_n}_s))_k \mathrm{d} s\right| \right\|_{L^p(\mathbb{P}_\mu)}= 0.
$$
By the a priori estimates, $\mathcal{A}^{N_n}$ converges to $\mathcal{A}$ in $C^\gamma(\mathcal{F} L^{\infty,3/2-\theta-\varepsilon})$ for all $\gamma <1/2$ and $\varepsilon > 0$ so that we can use Young integration to define $\int_0^t e^{-A^\theta(t-s)} \mathrm{d} \mathcal{A}^{N_n}_s$ as a space distribution and to obtain its distributional convergence (for example for each of its Fourier components) to $\int_0^t e^{-A^\theta(t-s)} \mathrm{d} \mathcal{A}_s$. At this point eq.~\eqref{eq:limit-1} is a simple consequence. The backward processes $\hat u^{N_n}_{t}=u^{N_n}_{T-t}$ and $\hat \mathcal{A}^{N_n}_t = -\mathcal{A}^{N_n}_{T-t}$ converge to $\hat u_{t}=u_{T-t}$ and $\hat \mathcal{A}_t = -\mathcal{A}_{T-t}$ respectively and moreover note that $\mathcal{A}$ as a distributional process has trajectories which are H\"older continuous for any exponent smaller than $2\theta/(1+2\theta)>1/2$ as a consequence of the estimate~\eqref{eq:basic-est-4} and this directly implies that $\mathcal{A}$ has zero quadratic variation. So $u$ is a controlled process in the sense of our definition.
\end{proof}
\section{Uniqueness for $\theta>5/4$}
\label{sec:uniq}
In this section we prove a simple pathwise uniqueness result for controlled solutions which is valid when $\theta > 5/4$. Note that to each controlled solution $u$ is naturally associated a cylindrical Brownian motion $W$ on $H$ given by the martingale part of the controlled decomposition~\eqref{eq:controlled-decomposition}. Pathwise uniqueness is then understood in the following sense.
\begin{definition}
SBE$_\theta$ has pathwise uniqueness if given two controlled processes $u,\tilde u\in\mathcal{R}_\theta$ on the same probability space which generate the same Brownian motion $W$ and such that $\tilde u_0 = u_0$ almost surely then there exists a negligible set $\mathcal{N}$ such that for all $\varphi\in\mathcal{S}$ and $t\ge 0$ $\{u_t(\varphi) \neq \tilde u_t(\varphi)\} \subseteq \mathcal{N}$.
\end{definition}
\begin{theorem}
\label{th:uniqueness}
The generalized stochastic Burgers equation has pathwise uniqueness when $\theta > 5/4$.
\end{theorem}
\begin{proof}
Let $u$ be a controlled solution to the equation and let $u^N$ be the Galerkin
approximations defined above with respect to the cylindrical Brownian motion $W$ obtained from the martingale part of the decomposition of $u$ as a controlled process. We will prove that $u^N \to u$ almost surely in $C(\mathbb{R}_+;\mathcal{F} L^{2\theta-3/2-2\varepsilon,\infty})$ for any small $\varepsilon >0$. Since Galerkin
approximations have unique strong solutions we have $\tilde u^N = u^N$ almost surely and in the limit $\tilde u = u$ in $C(\mathbb{R}_+;\mathcal{F} L^{2\theta-3/2-2\varepsilon,\infty})$ almost surely. This will imply the claim by taking as negligible set in the definition of pathwise uniqueness the set $\mathcal{N}=\{\sup_{t\ge 0}\|u_t-\tilde u_t\|_{\mathcal{F} L^{2\theta-3/2-2\varepsilon,\infty}}>0\}$.
Let us proceed to prove that $u^N \to u$.
By bilinearity,
$$
F_N \left( u \right) - F_N \left( u^N \right) = F_N ( \Pi_N u_s+u^N_s,\Delta^N_s)
$$
and the difference $\Delta^N = \Pi_N ( u - u^N )$
satisfies the equation
$$
\Delta^N_t = \Pi_N \int_0^t e^{- A^{\theta} ( t - s )}
F_N ( u_s+u^N_s,\Delta^N_s) \mathrm{d} s +\varphi^N_t
$$
where
$$
\varphi^N_t = \int_0^t e^{- A^{\theta} \left( t - s \right)} \left( F_{} \left( u \right)
- F_N \left( u^{} \right) \right) \mathrm{d} s .
$$
Note that
\[ \| \sup_{t \in [ 0, T ]} | ( \varphi^N_t )_k | \|_{L^p (
\mathbb{P}_{\mu} )} \lesssim_p \max(| k |^{1 - 2 \theta} N^{1
/ 2 - \theta},| k |^{3/2 - 2 \theta}) \]
which by interpolation gives
\[ \| \sup_{t \in [ 0, T ]} | ( \varphi^N_t )_k | \|_{L^p (
\mathbb{P}_{\mu} )}\lesssim_p | k |^{3/2 - 2 \theta+\varepsilon} N^{-\varepsilon} \]
for any small $\varepsilon >0$. Now let
$$
\Phi_N = \sup_{k\in\mathbb{Z}_0} \sup_{t \in [ 0, T ]} |k|^{2\theta-3/2-2\varepsilon} | ( \varphi^N_t )_k |
$$
then
$$
\mathbb{E} \sum_{N>1} N \Phi_N^p \le\sum_{N>1} N \sum_{k\in\mathbb{Z}_0} \sup_{t \in [ 0, T ]} |k|^{p(2\theta-3/2-2\varepsilon)}\mathbb{E} | ( \varphi^N_t )_k |^p
$$
$$
\lesssim_p \sum_{N>1} N^{1-\varepsilon p} \sum_{k\in\mathbb{Z}_0} |k|^{-p\varepsilon}<+\infty
$$
for $p$ large enough, which implies that almost surely
$
\Phi_N \lesssim_{p,\omega} N^{-1/p}
$.
For the other term we have
$$
\sup_{t\in[0,T]}\left|\left( \int_0^t e^{- A^{\theta} \left( t - s \right)} F_N \left(
\Pi_N u + u^N, \Delta_N \right) \mathrm{d} s \right)_k\right|
\lesssim A_N |k|^{3/2-2\theta+2\varepsilon} Q_T
$$
where
$
A_N = \sup_{t \in \left[ 0, T \right]} \sup_k \left| k \right|^{2 \theta -
3/2 - 2\varepsilon} \left| \left( \Delta^N_t \right)_k \right| $
and
$$
Q_T = \sup_{t\in[0,T]} |k|^{2\theta-1/2-2\varepsilon} \int_0^t e^{- |k|^{2\theta} \left( t - s \right)} \sum_{q\in \mathbb{Z}_0} |(
\Pi_N u_s + u^N_s)_q| |k-q|^{3/2-2\theta+2\varepsilon} \mathrm{d} s
$$
This gives
\[
A_N \leqslant C Q_T A_N + \Phi_N.
\]
Since $3/2-2 \theta <-1 $ (that is $\theta > 5/4$), we have the estimate:
$$
Q_T \lesssim \sup_{t\in[0,T]} |k|^{2\theta-1/2-2\varepsilon} \left[\int_0^t e^{- p' |k|^{2\theta} \left( t - s \right)} \mathrm{d} s \right]^{1/p'} \left[ \int_0^T \sum_{q\in \mathbb{Z}_0}\frac{ |(
\Pi_N u_s + u^N_s)_q|^{p}}{ |k-q|^{-3/2+2\theta-2\varepsilon}} \mathrm{d} s \right]^{1/p}
$$
valid for some $p> 1$ (with $1/p'+1/p=1$). Then
$$ Q_T \lesssim |k|^{2\theta-1/2-2\varepsilon-2\theta/p'} \left[ \int_0^T \sum_{q\in \mathbb{Z}_0}\frac{ |(
\Pi_N u_s + u^N_s)_q|^{p}}{ |k-q|^{-3/2+2\theta-2\varepsilon}} \mathrm{d} s \right]^{1/p}
$$
and taking $p$ large enough such that $2\theta-1/2-2\varepsilon-2\theta/p'\le 0$ we obtain
$$
Q_T \lesssim_p \left[ \int_0^T \sum_{q\in \mathbb{Z}_0}\frac{ |(
\Pi_N u_s + u^N_s)_q|^{p}}{ |k-q|^{-3/2+2\theta-2\varepsilon}} \mathrm{d} s \right]^{1/p}
$$
By the stationarity of the processes $u$ and $u^N$ and the fact that their marginal laws are the white noise we have
$$
\mathbb{E}[ Q_T^p] \lesssim_p \int_0^T \sum_{q\in \mathbb{Z}_0}\frac{\mathbb{E} |(
\Pi_N u_s + u^N_s)_q|^{p}}{ |k-q|^{-3/2+2\theta-2\varepsilon}} \mathrm{d} s = T \sum_{q\in \mathbb{Z}_0}\frac{1}{ |k-q|^{-3/2+2\theta-2\varepsilon}} \lesssim_p T
$$
Then by a simple Borel-Cantelli argument, almost surely $Q_{1/n} \lesssim_{p,\omega} n^{-1+1/p}$. Putting together the estimates for $\Phi_N$ and that for $Q_{1/n}$ we see that there exists a (random) $T$ such that $C Q_T\le 1/2$ almost surely and that for this $T$:
$
A_N \leqslant 2 \Phi_N
$,
which given the estimate on $\Phi_N$ implies that $A_N \to 0$ as $N\to\infty$ almost surely and that the solution of the equation is unique and is the
(almost-sure) limit of the Galerkin approximations.
\end{proof}
\section{Alternative equations}
\label{sec:alternative}
The technique of the present paper extends straightforwardly to some other modifications of the stochastic Burgers equation.
\subsection{Regularization of the convective term}
Consider for example the equation
\begin{equation}
\label{eq:burgers-daprato}
\mathrm{d} u_t = - A u_t \mathrm{d} t + A^{-\sigma}F(A^{-\sigma} u_t) \mathrm{d} t + B \mathrm{d} W_t
\end{equation}
which is the equation considered by Da~Prato, Debussche and Tubaro in~\cite{DDT}. Letting $F_\sigma(x) = A^{-\sigma}F(A^{-\sigma} x)$, denoting by $H_\sigma$ the corresponding solution of the Poisson equation and following the same strategy as above we obtain the same bounds
$$
\mathcal{E}((H_{\sigma,N})^\pm_k)(x)\lesssim \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}}c_\sigma(k,k_1,k_2) |x_{k_2}|^2
$$
where $c_\sigma(k,k_1,k_2) = |k|^{2-4\sigma}/[|k_1|^{4\sigma}|k_2|^{4\sigma}(|k_1|^{2}+|k_2|^{2})]$.
This quantity can then be bounded in terms of the sum
$$
I_{\sigma,N}(k) = \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}}c_\sigma(k,k_1,k_2) \lesssim |k|^{1-12\sigma}
$$
From which we can reobtain similar bounds to those exploited above.
For example
$$
\left\|\int_0^t (e^{-A(t-s)}F_{\sigma,M}(u_s))_k \mathrm{d} s \right\|_{L^p(\mathbb{P}_\mu)}\lesssim_p |k|^{-1/2-6\sigma}
$$
And in particular we have existence of weak controlled solutions when $8\sigma+2>1$, that is $\sigma>-1/8$ and pathwise uniqueness when $-1/2-6\sigma<-1$ that is $\sigma> 1/12$. Which is an improvement over the result in~\cite{DDT} which has uniqueness for $\sigma>1/8$.
\subsection{The Sasamoto--Spohn discrete model}
Another application of the above techniques is to the analysis of the discrete approximation to the stochastic Burgers equation proposed by Sasamoto and Spohn in~\cite{Spohn}. Their model is the following:
\begin{equation}
\label{eq:sasamoto-spohn}
\begin{split}
\mathrm{d} u_j & = (2N+1) (u_j^2+u_j u_{j+1}-u_{j-1}u_j-u^2_{j-1})\mathrm{d} t
\\
& \qquad + (2N+1)^2(u_{j+1}-2 u_j+u_{j-1})\mathrm{d} t + (2N+1)^{3/2} (\mathrm{d} B_j - \mathrm{d} B_{j-1})
\end{split}
\end{equation}
for $j=1,\dots,2N+1$ with periodic boundary conditions $u_0=u_{2N+1}$ and where the processes $(B_j)_{j=1,\dots,2N+1}$ are a family of independent standard Brownian motions with $B_0=B_{2N+1}$. This model has to be thought of as the discretization of the dynamics of the periodic velocity field $u(x)$ with $x\in(-\pi,\pi]$ sampled on a grid of mesh size $1/(2N+1)$, that is $u_j = u(\xi^N_j)$ with $\xi^N_j = -\pi+2\pi(j/(2N+1))$. This also fixes the scaling factors for the different contributions to the dynamics if we want that, at least formally, this equation goes to a limit described by a SBE. Passing to Fourier variables $\hat u(k) = (2N+1)^{-1}\sum_{j=0}^{2N-1} e^{i \xi^N_j k} u_j$ for $k\in \mathbb{Z}^N$ with $\mathbb{Z}^N = \mathbb{Z}\cap [-N,N]$ and imposing that $\hat u(0)=0$, that is, considering the evolution only with zero mean velocity we get the system of ODEs:
$$
\mathrm{d} \hat u_t(k) = F^\flat_N(\hat u_t)_k \mathrm{d} t
- |g_N(k)|^2 \hat u_t(k) \mathrm{d} t + (2N+1)^{1/2} g_N(k) \mathrm{d} \hat B_t(k)
$$
for $k\in \mathbb{Z}_0^N=\mathbb{Z}_0\cap [-N,N]$, where $g_N(k)=(2N+1)(1-e^{i k/(2N+1)})$,
$$
F^\flat_N(u_t)_k = \sum_{\substack{k_1,k_2\in\mathbb{Z}^N_0\\ k_1+k_2=k}} \hat u_t(k_1)\hat u_t(k_2)[ g_N(k)-g_N(k)^*+g_N(k_1)-g_N(k_2)^*]
$$
and $(\hat B_\cdot(k))_{k\in\mathbb{Z}_0^N}$ is a family of centred complex Brownian motions such that $\hat B(k)^* = \hat B(-k)$ and with covariance $\mathbb{E} \hat B_t(k) \hat B_t(-l) = \mathbb{I}_{k=l} t (2N+1)^{-1}$.
If we then let $\beta(k) = (2N+1)^{1/2} \hat B(k)$ we obtain a family of complex BM of covariance
$\mathbb{E} \beta_t(k) \beta_t(-l) = t \mathbb{I}_{k=l} $. The generator $L^\flat_N$ of this stochastic dynamics is given by
$$
L^{\flat}_N \varphi( x) = \sum_{k\in\mathbb{Z}^N_0} F^\flat_N(x)_k D_k \varphi( x)+L^{g_N,OU}_N \varphi (x)
$$
with $$
L^{g_N,OU}_N \varphi( x) = \sum_{k\in\mathbb{Z}^N_0} |g_N(k)|^2 (x_k D_{k}+ D_{-k} D_k) \varphi( x)
$$
the generator of the OU process corresponding to the linear part associated with the multiplier $g_N$. It is easy to check that the complete dynamics preserves the (discrete) white noise measure, indeed
$$
\sum_{k\in\mathbb{Z}_0^N} x_{-k} F^\flat_N(x)_k = \sum_{\substack{k,k_1,k_2\in\mathbb{Z}_0^N\\k+k_1+k_2=0}} x_{k} x_{k_1} x_{k_2}[ g_N(k)^*-g_N(k)+g_N(k_1)-g_N(k_2)^*] =0
$$
since the symmetrization of the r.h.s. with respect to the permutations of the variables $k,k_1,k_2$ yields zero. Then defining suitable controlled process with respect to the linear part of this equation we can prove our apriori estimates on additive functionals which are now controlled by the quantity
$$
\mathcal{E}^{g_N}((H_{g_N,N})^\pm_k)(x)\lesssim \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}}c_{g_N}(k,k_1,k_2) |x_{k_2}|^2
$$
with $c_{g_N}(k,k_1,k_2) = |g_N(k)|^2/[(|g_N(k_1)|^{2}+|g_N(k_2)|^{2})]$. Moreover noting that
$$
|g_N(k)|^2 = 2 (2N+1)^2(1-\cos(2\pi k/(2N+1))) \sim |k|^2
$$
uniformly in $N$, it is possible to estimate this energy in the same way we did before in the case $\theta=1$ and obtain that the family of stationary solutions of equation~\eqref{eq:sasamoto-spohn} is tight in $C([0,T],\mathcal{F} L^{\infty,-\varepsilon})$ for all $\varepsilon >0$. Moreover using the fact that $g_N(k) \to ik$ as $N\to \infty$ uniformly for bounded $k$ and that
$$
\pi_M F^\flat_N(\pi_M x)_k = \sum_{\substack{k_1,k_2\in\mathbb{Z}^N_0\\ k_1+k_2=k}} \mathbb{I}_{|k|,|k_1|,|k_2|\le M} x_{k_1} x_{k_2}[ g_N(k)-g_N(k)^*+g_N(k_1)-g_N(k_2)^*]
$$
$$
\to 3 i k \sum_{\substack{k_1,k_2\in\mathbb{Z}_0\\ k_1+k_2=k}} \mathbb{I}_{|k|,|k_1|,|k_2|\le M} x_{k_1} x_{k_2} = 3 F_M(x)_k
$$
it is easy to check that any accumulation point is a controlled solution of the stochastic Burgers equations~\eqref{eq:burgers-theta}.
\section{2d stochastic Navier-Stokes equation}
\label{sec:ns}
We consider the problem of stationary solutions to the 2d stochastic Navier-Stokes equation considered in~\cite{albeverio-cruzeiro} (see also~\cite{albeverio-ferrario}). We would like to deal with invariant measures obtained by formally taking the kinetic energy of the fluid and considering the associated Gibbs measure. However this measure is quite singular and we need a bit of hyperviscosity in the equation to make our estimates work.
\subsection{The setting}
Fix $\sigma>0$ and consider the following stochastic differential equation
\begin{equation}
\label{eq:ns}
\mathrm{d} (u_{t})_k = - |k|^{2+2\sigma} (u_{t})_k \mathrm{d} t + B_k(u_t) \mathrm{d} t + |k|^{\sigma} \mathrm{d} \beta^k_t
\end{equation}
where $(\beta^k)_{k\in \ZZ^2\backslash\{0\}}$ is a family of complex BMs for which $(\beta^k)^* = \beta^{-k}$ and $\mathbb{E}[\beta^k \beta^q] = \mathbb{I}_{q+k=0}$, $u$ is a stochastic process with continuous trajectories in the space of distributions on the two dimensional torus $\mathbb{T}^2$,
$$
B_k(x) = \sum_{k_1+k_2=k} b(k,k_1,k_2) x_{k_1} x_{k_2}
$$
where $x: \ZZ^2\backslash\{0\} \to \mathbb{C}$ is such that $x_{-k} = x_k^*$ and
$$
b(k,k_1,k_2) = \frac{(k^\bot \cdot k_1)(k \cdot k_2)}{k^2}
$$
with $(\xi,\eta)^\bot = (\eta,-\xi) \in \mathbb{R}^2$. Apart from the two-dimensional setting and the different covariance structure of the linear part, this problem has the same structure as the one dimensional stochastic Burgers equation we considered before.
Note that to make sense of it (and in order to construct controlled solutions)
we can consider the Galerkin approximations constructed as follows. Fix $N$ and solve the finite dimensional problem
\begin{equation}
\label{eq:ns-N}
\mathrm{d} (u^N_{t})_k = - |k|^{2+2\sigma} (u^N_{t})_k \mathrm{d} t + B^N_k(u^N_t) \mathrm{d} t + |k|^{\sigma} \mathrm{d} \beta^k_t
\end{equation}
for $k \in \mathbb{Z}^2_N = \{k \in \mathbb{Z}^2 : |k| \le N\}$, where
\begin{equation}
\label{eq:drift-N}
B^N_k(x) =\mathbb{I}_{|k|\le N} \sum_{\substack{k_1+k_2=k\\|k_1|\le N, |k_2|\le N}} b(k,k_1,k_2) x_{k_1} x_{k_2}
\end{equation}
The generator of the process $u^N$ is given by
$
L^N\varphi(x) = L^0\varphi(x) + \sum_{k\in \ZZ^2\backslash\{0\}} B^N_k(x) D_k \varphi(x)
$
where
$$
L^0 \varphi(x) = \frac12 \sum_{k\in \ZZ^2\backslash\{0\}} |k|^{2\sigma}( D_{-k}D_k\varphi(x) -|k|^2 x_k D_k \varphi(x))
$$
is the generator of a suitable OU flow. Note moreover that the kinetic energy of $u$ given by
$
E(x) = \sum_k |k|^{2} |x_k|^2
$
is invariant under the flow generated by $B^N$. Moreover
$
D_{k} B^N_k(x) = 0
$
since $x_k$ does not enter in the expression of $B^N_k(x)$, so the vectorfields $B^N$ leave also the measure
$
\prod_{k \in \ZZ^2_N\backslash\{0\}} dx_k
$
invariant. Then the (complex) Gaussian measure
$$
\gamma(dx) = \prod_{k \in \ZZ^2\backslash\{0\}} Z_k e^{-|k|^2 |x_k|^2} dx_k
$$
is invariant under the flow generated by $B^N$. (This measure should be understood restricted to the set $\{x \in \mathbb{C}^{\ZZ^2\backslash\{0\}} : x_{-k} = \overline{x_k} \}$).
The measure $\gamma$ is also invariant for the $u^N$ diffusion since it is invariant for $B^N$ and for the OU process generated by $L^0$.
Introduce the standard Sobolev norms
$
\|x\|_\sigma^2 = \sum_{k \in \ZZ^2\backslash\{0\}} |k|^{2\sigma} |x_k|^2
$
and denote with $H^\sigma$ the space of elements $x$ with $\|x \|_{\sigma}<\infty$.
The measure $\gamma$ is the Gaussian measure associated to $H^1$ and is supported on any $H^\sigma$ with $\sigma<0$
$$
\int \|x\|_\sigma^2 \gamma(dx) = \sum_{k \in \ZZ^2\backslash\{0\}} |k|^{2\sigma-2} < \infty
$$
so $(\gamma, H^1, \cap_{\varepsilon < 0}H^\varepsilon)$ is an abstract Wiener space in the sense of Gross. Note that the vectorfield $B_k(x)$ is not defined on the support of $\gamma$. To make sense of controlled solutions to this equation we need to control
$$
\mathcal{E}((H_{N})^\pm_k)(x)\lesssim \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}}c_{\text{ns}}(k,k_1,k_2) |x_{k_2}|^2
$$
with $c_{\text{ns}}(k,k_1,k_2) = |k_1|^{2\sigma} |k_1|^2|k_2|^2/(|k_1|^{2+2\sigma}+|k_2|^{2+2\sigma})^2$ and note that the stationary expectation of this term can be estimated by
$$
I_N(k) = \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}}c_{\text{ns}}(k,k_1,k_2) |k_2|^{-2} \lesssim \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}}\frac{|k_1|^{2+2\sigma}}{ (|k_1|^{2+2\sigma}+|k_2|^{2+2\sigma})^2}\lesssim
$$
$$
\lesssim \sum_{\substack{k_1,k_2 :k_1+k_2=k\\|k|,|k_1|,|k_2|\le N}}\frac{1}{ |k_1|^{2+2\sigma}+|k_2|^{2+2\sigma}}\lesssim |k|^{-2\sigma}
$$
for any $\sigma > 0$. This estimate allows us to apply our machinery and obtain stationary controlled solutions to this equation.
\begin{bibdiv}
\begin{biblist}
\bib{albeverio-cruzeiro}{article}{
title = {Global flows with invariant {(Gibbs)} measures for Euler and {Navier-Stokes} two dimensional fluids},
volume = {129},
issn = {0010-3616},
url = {http://www.springerlink.com/content/u1406006h7x32575/abstract/},
doi = {10.1007/BF02097100},
number = {3},
journal = {Communications in Mathematical Physics},
author = {Albeverio, S.},
author = {Cruzeiro, {A.-B.}},
year = {1990},
keywords = {Physics and Astronomy},
pages = {431--444},
}
\bib{albeverio-ferrario}{incollection}{
series = {Lecture Notes in Mathematics},
title = {Some Methods of Infinite Dimensional Analysis in Hydrodynamics: An Introduction},
volume = {1942},
isbn = {978-3-540-78492-0},
shorttitle = {Some Methods of Infinite Dimensional Analysis in Hydrodynamics},
url = {http://www.springerlink.com/content/2n14280t40q7v34q/abstract/},
booktitle = {{SPDE} in Hydrodynamic: Recent Progress and Prospects},
publisher = {Springer Berlin / Heidelberg},
author = {Albeverio, S.},
author = {Ferrario, B.},
year = {2008},
keywords = {Mathematics and Statistics},
pages = {1--50},
}
\bib{assing1}{article} {
title = {A Pregenerator for Burgers Equation Forced by Conservative Noise},
volume = {225},
issn = {0010-3616},
url = {http://www.springerlink.com/content/t48f9yxafdddjnwx/abstract/},
doi = {10.1007/s002200100606},
number = {3},
journal = {Communications in Mathematical Physics},
author = {Assing, S.},
year = {2002},
keywords = {Physics and Astronomy},
pages = {611--632},
}
\bib{assing2}{article} {
title = {A rigorous equation for the {Cole-Hopf} solution of the conservative {KPZ} dynamics},
url = {http://arxiv.org/abs/1109.2886},
journal = {{arXiv:1109.2886}},
author = {Assing, S.},
month = {sep},
year = {2011},
}
\bib{babin-kdv}{article}{
title = {On the regularization mechanism for the periodic Korteweg-de Vries equation},
volume = {64},
issn = {0010-3640},
url = {http://dx.doi.org/10.1002/cpa.20356},
doi = {10.1002/cpa.20356},
number = {5},
journal = {Communications on Pure and Applied Mathematics},
author = {Babin, A. V.},
author = {Ilyin, A. A.},
author = {Titi, E. S.},
year = {2011},
pages = {591--648}
}
\bib{babin-ns}{article}{
title = {Regularity and integrability of {\$3\$D} Euler and {Navier-Stokes} equations for rotating fluids},
volume = {15},
issn = {0921-7134},
number = {2},
journal = {Asymptotic Analysis},
author = {Babin, A.},
author = {Mahalov, A.},
author = {Nicolaenko, B.},
year = {1997},
pages = {103--150}
}
\bib{BG}{article}{
author={Bertini, L.},
author={Giacomin, G.},
title={Stochastic Burgers and KPZ equations from particle systems},
journal={Comm. Math. Phys.},
volume={183},
date={1997},
number={3},
pages={571--607},
issn={0010-3616},
review={\MR{1462228 (99e:60212)}},
doi={10.1007/s002200050044},
}
\bib{CLO}{article}{
author={Chang, C.-C.},
author={Landim, C.},
author={Olla, S.},
title={Equilibrium fluctuations of asymmetric simple exclusion processes
in dimension $d\geq 3$},
journal={Probab. Theory Related Fields},
volume={119},
date={2001},
number={3},
pages={381--409},
issn={0178-8051},
review={\MR{1821140 (2002e:60157)}},
doi={10.1007/PL00008764},
}
\bib{DDT}{article}{
author={Da Prato, G.},
author={Debussche, A.},
author={Tubaro, L.},
title={A modified Kardar-Parisi-Zhang model},
journal={Electron. Comm. Probab.},
volume={12},
date={2007},
pages={442--453 (electronic)},
issn={1083-589X},
review={\MR{2365646 (2008k:60147)}},
}
\bib{DF}{article}{
author={Da Prato, G.},
author={Flandoli, F.},
title={Pathwise uniqueness for a class of SDE in Hilbert spaces and
applications},
journal={J. Funct. Anal.},
volume={259},
date={2010},
number={1},
pages={243--267},
issn={0022-1236},
review={\MR{2610386}},
doi={10.1016/j.jfa.2009.11.019},
}
\bib{DZ}{book}{
author={Da Prato, G.},
author={Zabczyk, J.},
title={Stochastic equations in infinite dimensions},
series={Encyclopedia of Mathematics and its Applications},
volume={44},
publisher={Cambridge University Press},
place={Cambridge},
date={1992},
pages={xviii+454},
isbn={0-521-38529-6},
review={\MR{1207136 (95g:60073)}},
doi={10.1017/CBO9780511666223},
}
\bib{FGP}{article}{
author={Flandoli, F.},
author={Gubinelli, M.},
author={Priola, E.},
title={Well-posedness of the transport equation by stochastic
perturbation},
journal={Invent. Math.},
volume={180},
date={2010},
number={1},
pages={1--53},
issn={0020-9910},
review={\MR{2593276}},
doi={10.1007/s00222-009-0224-4},
}
\bib{MR1988703}{article}{
author={Flandoli, F.},
author={Russo, F.},
author={Wolf, J.},
title={Some SDEs with distributional drift. I. General calculus},
journal={Osaka J. Math.},
volume={40},
date={2003},
number={2},
pages={493--542},
issn={0030-6126},
review={\MR{1988703 (2004e:60110)}},
}
\bib{MR2065168}{article}{
author={Flandoli, F.},
author={Russo, F.},
author={Wolf, J.},
title={Some SDEs with distributional drift. II. Lyons-Zheng structure,
It\^o's formula and semimartingale characterization},
journal={Random Oper. Stochastic Equations},
volume={12},
date={2004},
number={2},
pages={145--184},
issn={0926-6364},
review={\MR{2065168 (2006a:60105)}},
doi={10.1163/156939704323074700},
}
\bib{JG}{article}{
author = {{Gon\c{c}alves}, P.},
author= { {Jara}, M.},
title = {Universality of KPZ equation},
journal = {ArXiv e-prints},
archivePrefix = {arXiv},
eprint = {arXiv:1003.4478},
primaryClass = {math.PR},
year = {2010}
}
\bib{controlling}{article}{
title = {Controlling rough paths},
volume = {216},
issn = {0022-1236},
url = {http://dx.doi.org/10.1016/j.jfa.2004.01.002},
doi = {10.1016/j.jfa.2004.01.002},
number = {1},
journal = {Journal of Functional Analysis},
author = {Gubinelli, M.},
year = {2004},
pages = {86--140}
}
\bib{kdv}{article}{
title = {Rough solutions for the periodic Korteweg--de Vries equation},
volume = {11},
issn = {1534-0392},
number = {2},
journal = {Communications on Pure and Applied Analysis},
author = {Gubinelli, M.},
year = {2012},
pages = {709--733}
}
\bib{Hairer}{article}{
title = {Solving the {KPZ} equation},
url = {http://fr.arxiv.org/pdf/1109.6811v2},
journal = {{ArXiv} e-prints},
author = {Hairer, M.},
month = {sep},
year = {2011}
}
\bib{janson_gaussian_1997}{book}{
title = {Gaussian Hilbert Spaces},
publisher = {Cambridge University Press},
author = {Janson, S.},
month = {jun},
year = {1997},
}
\bib{KPZ}{article}{
title = {Dynamic Scaling of Growing Interfaces},
author = {Kardar, M.},
author = {Parisi, G.},
author = {Zhang, Y.-C. },
journal = {Phys. Rev. Lett.},
volume = {56},
number = {9},
pages = {889--892},
numpages = {3},
year = {1986},
month = {Mar},
doi = {10.1103/PhysRevLett.56.889},
publisher = {American Physical Society}
}
\bib{KV}{article}{
author={Kipnis, C.},
author={Varadhan, S. R. S.},
title={Central limit theorem for additive functionals of reversible
Markov processes and applications to simple exclusions},
journal={Comm. Math. Phys.},
volume={104},
date={1986},
number={1},
pages={1--19},
issn={0010-3616},
review={\MR{834478 (87i:60038)}},
}
\bib{RY}{book}{
edition = {3rd},
title = {Continuous Martingales and Brownian Motion},
isbn = {3540643257},
publisher = {Springer},
author = {Revuz, Daniel},
author = {Yor, Marc},
month = {dec},
year = {2004},
}
\bib{MR2353387}{article}{
author={Russo, F.},
author={Trutnau, G.},
title={Some parabolic PDEs whose drift is an irregular random noise in
space},
journal={Ann. Probab.},
volume={35},
date={2007},
number={6},
pages={2213--2262},
issn={0091-1798},
review={\MR{2353387 (2008j:60153)}},
doi={10.1214/009117906000001178},
}
\bib{RV}{incollection}{
series = {Lecture Notes in Mathematics},
title = {Elements of Stochastic Calculus via Regularization},
volume = {1899},
isbn = {978-3-540-71188-9},
url = {http://www.springerlink.com/content/63u35k7n446q1t64/abstract/},
booktitle = {S\'eminaire de Probabilit\'es {XL}},
publisher = {Springer Berlin / Heidelberg},
author = {Russo, F.},
author = {Vallois, P.},
editor = {{Donati-Martin}, C.},
editor = {\'Emery, Michel},
editor = {Rouault, A.},
editor = {Stricker, C.},
year = {2007},
keywords = {Mathematics and Statistics},
pages = {147--185}
}
\bib{Spohn}{article}{
title = {Superdiffusivity of the {1D} Lattice {Kardar-Parisi-Zhang} Equation},
volume = {137},
issn = {0022-4715},
url = {http://www.springerlink.com/content/54h7q2682842g701/abstract/},
doi = {10.1007/s10955-009-9831-0},
number = {5},
journal = {Journal of Statistical Physics},
author = {Sasamoto, T.},
author = {Spohn, H.},
year = {2009},
keywords = {Physics and Astronomy},
pages = {917--935},
}
\end{biblist}
\end{bibdiv}
\end{document}
\begin{document}
\title{Quantum State Transfer via Noisy Photonic and Phononic Waveguides}
\author{B. Vermersch}
\thanks{These two authors contributed equally.}
\affiliation{Institute for Theoretical Physics, University of Innsbruck, A-6020, Innsbruck, Austria}
\affiliation{Institute for Quantum Optics and Quantum Information of the Austrian Academy of Sciences, A-6020 Innsbruck, Austria}
\author{P.-O. Guimond}
\thanks{These two authors contributed equally.}
\affiliation{Institute for Theoretical Physics, University of Innsbruck, A-6020, Innsbruck, Austria}
\affiliation{Institute for Quantum Optics and Quantum Information of the Austrian Academy of Sciences, A-6020 Innsbruck, Austria}
\author{H. Pichler}
\affiliation{ITAMP, Harvard-Smithsonian Center for Astrophysics, Cambridge, Massachusetts 02138, USA}
\affiliation{Physics Department, Harvard University, Cambridge, Massachusetts 02138, USA}
\author{P. Zoller}
\affiliation{Institute for Theoretical Physics, University of Innsbruck, A-6020, Innsbruck,
Austria}
\affiliation{Institute for Quantum Optics and Quantum Information of the Austrian Academy
of Sciences, A-6020 Innsbruck, Austria}
\begin{abstract} We describe a quantum state transfer protocol, where a quantum state of photons stored in a first cavity can be faithfully transferred to a second distant cavity via an infinite 1D waveguide, while being immune to arbitrary noise (e.g. thermal noise) injected into the waveguide.
We extend the model and protocol to a cavity QED setup, where atomic ensembles, or single atoms representing quantum memory, are coupled to a cavity mode. We present a detailed study of sensitivity to imperfections, and apply a quantum error correction protocol to account for random losses (or additions) of photons in the waveguide. Our numerical analysis is enabled by matrix product state techniques to simulate the complete quantum circuit, which we generalize to include thermal input fields. Our discussion applies both to photonic and phononic quantum networks.
\end{abstract}
\date{\today}
\maketitle
\textit{Introduction.---}
The ability to transfer quantum states between
distant nodes of a quantum network via a quantum channel is a basic task in quantum information processing~\cite{Kimble2008,Northup2014,Hammerer2010,Reiserer2015}. An outstanding challenge is to achieve quantum state transfer~\cite{Cirac1997,nikolopoulos2013quantum} (QST) with high fidelity despite the presence of noise and decoherence in the quantum channel. In a quantum optical
setup the quantum channels are realized as 1D waveguides, where quantum information is carried by `flying
qubits' implemented either by photons in the optical~\cite{Ritter2012,Tiecke2014,Goban2015} or microwave regime~\cite{Eichler2012,VanLoo2013,Wenner2014,Grezes2014},
or phonons~\cite{Eisert2004,Hatanaka2014}. Imperfections in
the quantum channel thus include photon or phonon loss, and, in particular
for microwave photons and phonons, a (thermal) noise background~\cite{Habraken2012}. In
this Letter we propose a QST protocol and a corresponding quantum
optical setup which allow for state transfer with high fidelity,
undeterred by these imperfections. A key feature is that our protocol
and setup are \emph{a priori} immune to quantum or classical noise
\emph{injected} into the 1D waveguide, while imperfections such as
random generation and loss of photons or phonons during transmission
can be naturally corrected with an appropriate quantum error correction (QEC)
scheme~\cite{Michael2016}.
\begin{figure}
\includegraphics[width=\columnwidth]{Fig1.pdf}
\caption{{\it Quantum state transfer (QST) via a noisy waveguide.}\label{fig:setup}
(a)~QST where qubits are coupled {\em directly} with chiral coupling to a waveguide representing the quantum channel.
(b)~QST in a cavity QED setup, where atoms representing qubits are coupled to the waveguide with a cavity as mediator.
(c)~Fidelity $\cal F$ for QST of a qubit as a function of photon occupation $n_\mathrm{th}$ representing a thermal noise injected into the waveguide for setups (a) and (b). For the protocol described in the text, setup (b) is robust to injected noise. (d) `Write' of a quantum state from cavity~$1$ to a temporal mode in the (noisy) waveguide, and `Read' back to cavity~$2$ as a linear multimode encoder and decoder with encoding [decoding] functions $\kappa_1(t)$ [$\kappa_2(t)$] (see text).}
\end{figure}
The generic setup for QST in a quantum optical
network is illustrated in Fig.~\ref{fig:setup} as transmission of a qubit state from a first to a second distant two-level atom via an infinite 1D bosonic open waveguide.
The scheme of Fig.~\ref{fig:setup}(a) assumes a \emph{chiral
coupling} of the two-level atoms to the waveguide~\cite{Lodahl2017,Bliokh2015}, as demonstrated in recent experiments with atoms~\cite{Mitsch2014} and quantum dots~\cite{Sollner2015}. The atomic
qubit is transferred in a decay process with a time-varying coupling to a \emph{right-moving} photonic (or phononic) wavepacket propagating in the waveguide, {\it i.e.}~$\big(c_g\ket{g}_{1}+c_e\ket{e}_{1}\big)\ket{0}_{p}\rightarrow\ket{g}_{1}\big(c_g\ket{0}_{p}+c_e\ket{1}_{p}\big)$ where $\ket{}_1$ and $\ket{}_p$ denote the atomic and channel states. The transfer of the qubit state is then completed by reabsorbing
the photon (or phonon) in the second atom via the inverse operation, essentially mimicking the time-reversed process of the initial decay. Such transfer protocols have
been discussed in the theoretical literature~\cite{Cirac1997,Stannigel2010,nikolopoulos2013quantum,Ramos2016,Yao2013,Dlaska2017}, and demonstrated in recent experiments~\cite{Ritter2012}. A central assumption underlying these studies is, however, that the waveguide is initially prepared in the vacuum state, {\it i.e.}~at zero temperature, and --~as shown in Fig.~\ref{fig:setup}(c)~-- the fidelity for QST (formally defined in App.~\ref{sec:avfid}) will degrade significantly in the presence of noise, e.g. thermal~\cite{Habraken2012}. Below we show that a simple variant of the setup with a cavity as mediator makes the QST protocol immune against arbitrary injected noise~\cite{Cirac1997,Clark2003} [{\it cf.}~Fig.~\ref{fig:setup}(b)]. Robust QST also provides the basis for distribution of entanglement in a quantum network.
\textit{Photonic quantum network model.---} We consider the setup illustrated in Fig.~\ref{fig:setup}(b), where each `node' consists of a two-level atom as qubit coupled to
a cavity mode. We assume that the cavity QED setup is designed with
a chiral light-matter interface with coupling to right-moving modes of the waveguide \footnote{The use of cavities also allows to efficiently decouple the atoms from unwanted emission into non-guided modes~\cite{Sayrin2015}}\nocite{Sayrin2015}.
In the language of quantum optics the setup of Fig.~\ref{fig:setup}(b) is a cascaded quantum system~\cite{gardiner2015}, where the
first node is unidirectionally coupled to the second one. The dynamics is described by a quantum stochastic Schr\"odinger equation
(QSSE)~\cite{gardiner2015} for the
composite system of nodes and waveguide as $i\frac{d}{dt}\ket{\Psi(t)}=H(t)\ket{\Psi(t)}$ ($\hbar=1$). The Hamiltonian is \mbox{$H(t)=\sum_{j=1,2}H_{n_{j}}(t)+V(t)$}
with \mbox{$H_{n_{j}}(t)=i g_j(t)\left(a_{j}^{\dagger}\sigma_{j}^{-}-\mathrm{H.c.}\!\right)$}
the Jaynes-Cummings Hamiltonian for node $j=1,2$ in the rotating wave approximation (RWA). Here $a_{j}$ are annihilation
operators for the cavity modes and $\sigma_j$'s are Pauli operators for
the two-level atoms with levels $\ket{g}_{j},\ket{e}_{j}$. We assume that the cavities are tuned to resonance with the two-level
atoms ($\omega_{c}=\omega_{eg}$), and the Hamiltonian is written in
the rotating frame. The coupling of the first and second cavity (located at
$x_{1}<x_2$) to the right-moving modes of the channel is described by the interaction
Hamiltonian
\begin{eqnarray}
\nonumber V(t)\! &=&i\sum_{j=1,2}\sqrt{\tfrac{\kappa_{j}(t)}{2\pi}}\int_{{\cal B}}d\omega b_{\mathrm{R}}^{\dagger}(\omega)e^{i(\omega-\omega_{c})t-i\omega x_{j}/c}a_{j} -\text{H.c.}\\
&\equiv i&\big(\sqrt{\kappa_{1}(t)}b_{\mathrm{R}}^{\dagger}(t)a_{1}\!+\!\sqrt{\kappa_{2}(t)}b_{\mathrm{R}}^{\dagger}(t\!-\!\tau)e^{i\phi}a_{2}-\text{H.c.}\big) \label{eq:defHint}
\end{eqnarray} in the RWA.
Here $b_{R}(\omega)$ denotes the annihilation operators of the continuum
of right-moving modes with frequency $\omega$ within a bandwidth $\mathcal B$ around the atomic transition frequency, $c$ is the velocity of light, and $\kappa_{1,2}(t)$
is a decay rate to the waveguide. In the second line
of Eq.~\eqref{eq:defHint} we have rewritten this interaction in terms of quantum
noise operators $b_{\mathrm{R}}(t)$ satisfying white noise commutation relations
$[b_{\mathrm{R}}(t),b_{\mathrm{R}}^\dagger(s)]=\delta(t-s)$. The parameter $\tau=d/c$,
with $d=x_{2}-x_{1}>0$, denotes the time delay of the propagation between
the two nodes, and $\phi=-\omega_c\tau$ is the propagation phase.
For a cascaded quantum system with purely unidirectional
couplings, $\tau$ and $\phi$ can always be absorbed in a redefinition
of the time and phase of the second node. Noise injected into the waveguide
is specified by the hierarchy of normally ordered correlation functions
of $b_{\mathrm{R}}(t)$. In particular the Fourier transform of the correlation function $\langle b_{\mathrm{R}}^{\dagger}(t)b_{\mathrm{R}}(s)\rangle$ provides
the spectrum of the incident noise $S(\omega)$, which for white (thermal) noise corresponds
to $\langle b_{\mathrm{R}}^{\dagger}(t)b_{\mathrm{R}}(s)\rangle=n_\text{th}\delta(t-s)$ with
occupation number $n_\text{th}$ and flat spectrum $S(\omega)=n_\text{th}$.
\textit{Quantum state transfer protocol.---}
To illustrate immunity to injected noise in QST we consider first a minimal model of a pair of cavities coupled to the waveguide. The quantum Langevin equations (QLEs) for the annihilation operators of the two cavity modes $a_{1,2}(t)$ in the Heisenberg picture read [{\it cf.}~App.~\ref{sec:model}]
\begin{eqnarray}
\label{eqda1}\frac{d{a}_{1}}{dt}&=& -\dfrac{1}{2}\kappa_{1}(t){a}_{1}(t)-\sqrt{\kappa_{1}(t)}b_{\mathrm{R}}(t),\\
\frac{d{a}_{2}}{dt}&=& -\frac{1}{2}\kappa_{2}(t){a}_{2}(t)-\sqrt{\kappa_{2}(t)}\big[b_{\mathrm{R}}(t)+\sqrt{\kappa_{1}(t)}{a}_{1}(t)\big].\nonumber
\end{eqnarray}
These equations describe the driving of the first cavity by an input noise field $b_{\mathrm{R}}(t)$ \footnote{$b_{\mathrm{R}}(t)$ is always written in the interaction picture and acts as a source term for the nodes in \eqref{eqda1}.},
while the second cavity is driven by both $b_{\mathrm{R}}(t)$
and the first cavity. We can always find
a family of coupling functions $\kappa_{1,2}(t)$, satisfying
the time-reversal condition $\kappa_{2}(t)=\kappa_{1}(-t)$ [see inset Fig.~\ref{fig:setup}(a)], which achieves a mapping
\begin{eqnarray}
\label{eq:mapping}a_{1}(t_{i})\rightarrow-a_{2}(t_{f}),
\end{eqnarray}
{\it i.e.}~the operator of the first cavity mode at initial time $t_{i}$ is mapped
to the second cavity mode at final time $t_{f}$, with no admixture from $b_{\mathrm{R}}(t)$ [{\it cf.}~App.~\ref{sec:mapping}].
In other words, an arbitrary photon superposition state prepared initially in the first cavity can be faithfully transferred to the second distant cavity without being contaminated by incident
noise. This result holds without any assumption on the noise statistics. It is intrinsically related to the linearity of the
above QLEs, which allows
the effect of noise acting equally on both cavities to drop
out by quantum interference.
The setup can thus be combined with other elements of linear optics, such as beamsplitters [{\it cf.}~App.~\ref{sec:mapping}].
Robustness of QST to injected noise generalizes immediately to more complex systems representing effective `coupled harmonic oscillators'. We can then add atomic ensembles of $N$ two-level atoms represented by atomic hyperfine states~\cite{Julsgaard2004,Reimann2015,Hammerer2010} to the first and second cavities ($j=1,2$).
Spin-excitations in atomic ensembles~\cite{Colombe2007,Brennecke2007}, generated by the collective spin operator $S_j^+=\frac{1}{\sqrt{N}}\sum_{i=1}^N\sigma_{i,j}^+$ with $i$ the sum over atomic spin-operators of node $j$, are again harmonic for low densities. Moreover, they can be coupled in a Raman process to the cavity mode, $H_{n_j}= \tilde g_j (t) (S_j^+ a_j +\mathrm{H.c.})$, as familiar from the read and write of photonic quantum states to atomic ensembles as quantum memory
\footnote{Adiabatic elimination of the cavity provides QLEs for $S^+_j$ analogous to (\ref{eqda1}).}.
This provides a way of getting an effective time-dependent coupling to the waveguide in a setup with constant cavity decay.
Our protocol thus generalizes to transfer of quantum states stored as long-lived spin excitation in a first atomic ensemble to a second remote ensemble [{\it cf.}~App.~\ref{sec:atomicensembles}].
Returning to the setup of Fig.~\ref{fig:setup}(b) with a single atom as qubit coupled to a cavity mode, we achieve -- in contrast to the setup of Fig.~\ref{fig:setup}(a) -- QST immune to injected noise in a three step process. (i) We first map the atomic qubit state $c_g\ket{g}_{1}+c_e\ket{e}_{1}$ to the cavity mode $c_g\ket{0}_{1}+c_e\ket{1}_{1}$ with the cavity decoupled from the waveguide
\footnote{The cavity modes can be prepared initially in a vacuum state via a dissipative optical pumping process with atoms, analogous to Ref.~\cite{Habraken2012}}. (ii) With atomic qubits decoupled from cavities we transfer the photon superposition state to the second cavity as above~\footnote{If there is an imperfection in step (i), the resulting mixed state is transferred without additional error to the second cavity.}. (iii)~We perform the time-inverse of step (i) in the second node. This QST protocol generalizes to several atoms as a quantum register representing an entangled state of qubits, which can either be transferred sequentially, or mapped collectively to a multiphoton superposition state in the cavity, to be transferred to the second node~\footnote{This is achieved, for example, with quantum logic operations available with trapped ions stored in a cavity~\cite{Casabone2015}.}\nocite{Casabone2015}.
As depicted in Fig.~\ref{fig:setup}(d), we can understand our QST protocol in the chiral cavity setup [Fig.~\ref{fig:setup}(b)], consisting of a {\em write operation} of the qubit in the first cavity to the waveguide as a quantum data bus, followed by a {\em read} into the second cavity. This `write' and `read' are both linear operations on the set of operators consisting of cavity and waveguide modes, or as an encoder and decoder into temporal modes specified by $\kappa_{1,2}(t)$, and physically implemented by the chiral cavity-waveguide interface.
\textit{Numerical techniques.---} We now study the sensitivity of the above protocol to errors. Imperfections may arise from inexact external control parameters including timing and deviations from perfect chirality. Moreover, loss or addition of photons can occur during propagation. We describe below a
QEC scheme which corrects for such single photon errors.
\begin{figure}
\includegraphics[width=\columnwidth]{Fig2.pdf}
\caption{{\it Role of imperfections.} (a) Effect on the fidelity of a finite transfer time $T=t_f-t_i$, and (b) of an imperfect timing of $\kappa_2(t)$.
(c) Fidelity as a function of $\phi$ for different $\beta$ factors and $\kappa_{\max}\tau\approx0$ (see text). Solid lines: $n_\text{th}=0$. Dashed lines: $n_\text{th}=0.25$. The fidelity is maximal when $\phi$ is a multiple of $\pi$.
(d) By increasing $\kappa_{\max}\tau$ for $\phi=0$ the fidelity decreases.
(e) QEC with the setup subject to waveguide and cavity losses. (f) QEC with the setup coupled to a reservoir with photon occupation $n_\text{th}'=1$. Black: no error correction. Red: correction against single photon losses. Blue: correction against single photon losses or additions. Solid lines: $n_\text{th}=0$, $\kappa'=0$. Full circles: $n_\text{th}=0.5$, $\kappa'=0$. Empty circles: $n_\text{th}=0$, $\kappa_f=0$. \label{fig:error}}
\end{figure}
A study of imperfections in QST will necessarily be numerical in nature, as it requires solution of the QSSE with injected noise accounting for nonlinearities in atom-light coupling.
Beyond Eq.~\eqref{eq:defHint}, the Hamiltonian must include coupling to both right- and left-propagating modes in the waveguide, and should account for possible couplings of waveguide and cavities to additional reservoirs representing decoherence [{\it cf.}~App.~\ref{sec:model}]. We have developed and employed three techniques to simulate the complete dynamics of the quantum circuits as depicted in Figs.~\ref{fig:setup}(a) and \ref{fig:setup}(b). First, we use matrix product states (MPS) techniques to integrate the QSSE discretized in time steps, as developed in Ref.~\cite{Pichler2016}, which we generalize to include injected quantum noise. Our method allows a general input field to be simulated using purification techniques, by entangling time-bins of the photonic field with ancilla copies in the initial state (for related techniques developed in condensed matter physics see Ref.~\cite{Schollwock2011}).
This method also allows the study of non-Markovian effects ({\it i.e.}~for finite retardation $\tau>0$) in the case of imperfect chiral couplings, and is well suited to represent various kinds of noise.
Second, we solve the master equation describing the nodes, which allows for efficient simulations valid in the Markovian limit.
Finally, to simulate the QST in non-chiral setups as described at the end of this Letter, we solve the dynamics of the nodes and of a discrete set of waveguide modes, following Ref.~\cite{Ramos2016}. For a detailed description of the complete model and numerical methods we refer to Apps.~\ref{sec:model} and \ref{sec:MPS}, and present below our main results assuming thermal injected noise $n_{\rm th}$.
{\it Sensitivity to coupling functions $\kappa_{1,2}(t)$.---}
In Figs.~\ref{fig:error}(a) and \ref{fig:error}(b) we study the sensitivity of QST to the functions $\kappa_{1,2}(t)$ for the minimal model of nodes represented by cavities. Figure~\ref{fig:error}(a) shows the effect of the protocol duration $T=t_f-t_i$ which in the ideal case is required to fulfill \mbox{$T\gg 1/\kappa_{\max}$}, with $\kappa_{\max}$ the maximum value of $\kappa_{1,2}(t)$. For finite durations, the effect on the fidelity scales linearly with the noise intensity but quickly vanishes for $\kappa_{\max}T \gtrsim 10$, above which $\mathcal F \geq 0.99$. In all other figures of this work we use $\kappa_{\max}T=20$.
In Fig.~\ref{fig:error}(b), we show the effect of an imperfection $\delta_\tau$ in the timing of the coupling functions, namely $\kappa_2(t)=\kappa_1(\delta_\tau-t)$. The digression from unity is quadratic in $\delta_\tau$ but linear in noise intensity. This result illustrates that only the proper decoding function allows one to unravel the quantum state emitted by the first cavity on top of the injected noise.
Note that in addition to errors in the coupling functions, the fidelity is also sensitive to the frequency matching of the cavities~\cite{Korotkov2011}, which we discuss in App.~\ref{sec:mismatch}.
{\it Imperfect chirality.---} For an optical fiber with chirally coupled resonators~\cite{Sayrin2015}, the nodes emit only a fraction $\beta<1$ of their excitations in the right direction.
The dynamics then also depends on the propagation phase $\phi$~\cite{Lodahl2017} and on the time delay $\tau$. As illustrated in Fig.~\ref{fig:error}(c), the effect of imperfect chirality in the Markovian regime ($\kappa_{\max}\tau\approx0$) crucially depends on $\phi$, as a consequence of interferences between the photon emissions of the two cavities in the left direction. In particular, for $\phi=0$, they interfere destructively, leading to a higher fidelity. This interference decreases for finite values of $\kappa_{\max}\tau$, as shown in Fig.~\ref{fig:error}(d).
{\it Quantum error correction.---}
In contrast to `injected' noise, loss and injection of photons occurring during propagation between the two cavities represent decoherence mechanisms, which affect the fidelity of the protocol~\cite{Northup2014}.
Such errors can be corrected in the framework of QEC. Instead of encoding the qubits in the Fock states $\ket{0}$ and $\ket{1}$, we use multiphoton states, with the requirement that the loss or addition of a photon projects them onto a new subspace where the error can be detected and corrected.
A possibility is to use a basis of cat states, {\it i.e.}~superposition of coherent states~\cite{Haroche2006,Ourjoumtsev2006}, where a photon loss only induces a change of parity of the photon number~\cite{Ofek2016}. While we present the efficiency of QST with cat states in App.~\ref{sec:qec}, we use here a basis of orthogonal photonic states for the qubit encoding~\cite{Michael2016}.
We first consider a protocol protecting against single photon losses. Here, the state of the first qubit is mapped to the first cavity as \mbox{$c_g \ket{g}_1 + c_e \ket{e}_1 \to c_g \ket{+}_1 + c_e \ket{-}_1$}, where the cavity logical basis \mbox{$\ket{\pm}_j=(\ket{0}_j\pm\sqrt{2}\ket{2}_j+\ket{4}_j )/2$} has even photon parity. This unitary transformation can be realized with optimal control pulses driving the qubit and the cavity while using the dispersive shift between the qubit and the cavity mode as nonlinear element~\cite{Ofek2016}.
Waveguide losses, with rates $\kappa_f$, can be modeled with a beamsplitter with transmission probability $\exp(-\kappa_f \tau)$, whereas the rate of cavity losses is denoted $\kappa'$. The single photon loss probability is then $\mathcal P = 1-\exp(-\kappa_f \tau -\kappa' T)$. The density matrix $\rho_f$ of the second cavity at the end of the protocol reads
\begin{equation}
\rho_f = \ket{\Psi_0} \bra{\Psi_0} + \mathcal P \ket{\Psi_{-1}} \bra{\Psi_{-1}} + \mathcal{O}(\mathcal{P}^2),
\end{equation}
where the unnormalized states $\ket{\Psi_0}$ and $\ket{\Psi_{-1}}$, written explicitly in App.~\ref{sec:qec}, have even or odd parity, respectively, and satisfy $\ket{\Psi_{-1}}= a_2 \ket{\Psi_0}$.
The state $\ket{\Psi_0}$ corresponds to the case where no stochastic photon loss occurred, whereas the state $\ket{\Psi_{-1}}$ is obtained if one photon was lost in the process.
The last step of the protocol consists in measuring the photon number parity in the second cavity, and -- conditional on the outcome -- applying unitary operations transferring the photon state to qubit $2$.
As shown in Fig.~\ref{fig:error}(e), this encoding improves significantly the fidelity for small losses $\mathcal P \ll 1$, up to a threshold value $\mathcal P\approx 0.29$. Note that both protocols are insensitive to injected noise.
We consider now a situation where the waveguide is coupled to a finite temperature reservoir with $n'_\text{th}=1$ thermal occupation number which stochastically adds and absorbs photons.
Here the qubit state is encoded as $c_g\ket{+}_1+c_e\ket{-}_1$, where $\ket{\pm}_1=(\ket{0}_1\pm\sqrt{2}\ket{3}_1+\ket{6}_1)/2$ have photon number $0$ modulo $3$. The state $\rho_f$ after the transfer is a mixture of $\ket{\Psi_k}\bra{\Psi_k}$ with $k=-1,0,+1$ corresponding to the cases of a single photon loss, of no photon loss or addition, and of a single photon addition. These states satisfy $\ket{\Psi_{-1}}=a_2\ket{\Psi_0}$ and $\ket{\Psi_{+1}}=a_2^\dagger\ket{\Psi_0}$ and are distinguishable by measurement of the photon number modulo $3$.
In the limit of small error probabilities, one retrieves the original qubit state by applying a unitary operation conditioned on the measurement outcome. In Fig.~\ref{fig:error}(f) we show that this protocol corrects the errors for $\mathcal P\ll 1$ independently of injected noise intensity. This approach extends to arbitrary number of photon losses and additions, although at the cost of a lower range of achievable $\mathcal P$~\cite{Michael2016}.
{\it Closed systems.---}
\begin{figure}
\includegraphics[width=\columnwidth]{Fig3.pdf}
\caption{{\it QST in non-cascaded systems.}
(a) QST in a closed system with two cavities coupled to a finite waveguide. (b) Fidelity as a function of the cavity nonlinearities $\chi$ and for different initial occupation of the waveguide. The fidelity approaches unity in the linear limit $\chi\to0$.
\label{fig:chirality}}
\end{figure}
Our results can also be observed in closed systems [{\it cf.}~Fig.~\ref{fig:chirality}(a)], where two cavities are coupled, for instance, via a finite optical fiber or a microwave transmission line \cite{Blais2004}. Note that in circuit QED setups, time-dependent couplings $\kappa_j(t)$ can be realized via tunable couplers~\cite{Korotkov2011,Wenner2014,Srinivasan2014}.
This system is not chiral, as the dynamics of the first cavity can be perturbed by reflections from the second one.
We numerically demonstrate robustness against noise, which is here represented as initial occupation of the waveguide. In addition, we consider the effect of Kerr nonlinearities, {\it i.e.}~we add terms $-\chi a_j^\dagger a_j^\dagger a_j a_j$ [{\it cf.}~App.~\ref{sec:closed}], which are relevant for circuit QED setups~\cite{Ofek2016}, to the Hamiltonian.
The results are presented in Fig.~\ref{fig:chirality}(b) with each (discrete) waveguide mode initially in a coherent state $\ket{\alpha}$.
QST becomes robust against noise in the transition from the cavity as an effective two-level system ($\chi\to\infty$) to a perfect harmonic oscillator ($\chi\to0$).
{\it Conclusion.---}
Robustness to arbitrary injected noise in transferring a quantum state between two cavities relies on the linearity of the write and read into temporal modes~[{\it cf.}~Fig.~\ref{fig:setup}(d)], with quantum noise canceled by quantum interference. While here we have focused on QST between two distant cavity modes, our approach generalizes to a setup involving many nonlocal bosonic resonator modes, which can be realized with various physical platforms, and as hybrid systems.
{\it Note added---} A related setup and protocol have been proposed in an independent work by Z. L. Xiang {\it et~al.}~\cite{Xiang2016}.
\begin{acknowledgments}
BV and POG contributed equally to this work. HP provided the matrix product state code to integrate the QSSE, which was extended by POG to noisy input. We thank C.~Muschik, M.~Leib, K.~M\o lmer, B.~Vogell and G.~Kirchmair for discussions. Simulations of the master equation were performed with the QuTiP toolbox~\cite{Johansson20131234}. Work at Innsbruck is supported by the EU projects UQUAM and RYSQ, the SFB FOQUS of the Austrian Science Fund (FWF Project No. F4016-N23) and the Army Research Laboratory Center for Distributed Quantum Information via the project Scalable Ion-Trap Quantum Network (SciNet).
\end{acknowledgments}
\appendix
\section{Calculation of average fidelities for quantum state transfer}
\label{sec:avfid}
Throughout this work the fidelity of quantum state transfer (QST) $\cal F$ is defined as the overlap between the final state of the second node at the end of the protocol and the state obtained in an ideal transfer, averaged over all initial qubit states for the first node. Formally, one can access this value by making use of a Choi-Jamiolkowski isomorphism between quantum processes and quantum states~\cite{Nielsen2002}, where the first node is prepared initially in an entangled state with an ancilla copy of itself which is otherwise completely decoupled from the dynamics.
For the situation depicted in Fig.~1(b) of the main text, we focus on the transfer between the cavities via noisy waveguide.
The initial state of the first node thus reads
\begin{equation}
\ket{\psi(t_i)}_{1,a} = \big(\ket{0}_1\ket{0}_a+\ket{1}_1\ket{1}_a\big)/\sqrt{2},\label{eq:inistate}
\end{equation}
where the index $1$ denotes the cavity and $a$ the ancilla, while the atom is decoupled.
The average fidelity is then obtained numerically by simulating the QST from cavity $1$ to cavity $2$, including imperfections. We denote the density matrix of second cavity and ancilla at the end of the protocol $\rho_{2,a}$ and its ideal value $\rho_\text{ideal}=\ket{\psi_\text{ideal}}_{2,a}\bra{\psi_\text{ideal}}$ with $\ket{\psi_\text{ideal}}_{2,a}=\ket{\psi(t_i)}_{2,a},$ up to propagation phase factors for the $\ket{1}_2\ket{1}_a$ component.
The fidelity then reads
\begin{equation}
\mathcal{F} = \text{Tr}\left(\sqrt{\sqrt{\rho_{2,a}} \rho_\text{ideal} \sqrt{\rho_{2,a}}} \right)^2.
\end{equation}
For nodes realized with atomic ensembles of $N$ atoms as two-level systems, we apply a similar procedure, where the Fock states $\ket{0}_1$ and $\ket{1}_1$ in Eq.~\eqref{eq:inistate} are replaced with the collective ground state of the ensemble $\ket{G}_1$ and the excited state $\ket{E}_1=\frac{1}{\sqrt{N}}\sum_{i=1}^N \sigma_{i,1}^+ \ket{G}_1$, with $\sigma_{i,j}^+$ the creation operator for excitation of atom $i$ in the node $j$. In the quantum error correction (QEC) protocols, these states are replaced with the multiphoton states $\ket{\pm}_1$ as defined in the main text.
\section{Dynamics of cavities chirally coupled to a waveguide}\label{sec:model}
Here we provide details on the model of two cavities coupled to a waveguide with time-dependent decay rates, including imperfections such as deviation from unidirectionality, propagation losses and cavity decays. We present the corresponding form of the quantum stochastic Schr\"odinger equation (QSSE), the quantum Langevin equations (QLEs) and the master equations supporting the analytical and numerical study of the QST dynamics.
\subsection{Model}
Our model consists of cavities as harmonic oscillators coupled on resonance to atomic two-level systems as qubits, and with chiral coupling to a waveguide. The dynamics is described by the Hamiltonian $H(t)=\sum_{j=1,2} H_{n_j}(t)+V_\beta(t)$, with the node Hamiltonians $H_{n_j}(t)$ as given in the main text, and couplings to the waveguide:
\begin{eqnarray}
\nonumber V_\beta(t) &=&i\sum_{j=1,2}\sqrt{\tfrac{\beta\kappa_{j}(t)}{2\pi}}\int_{{\cal B}}d\omega b_{{R}}^{\dagger}(\omega)e^{i(\omega-\omega_{c})t-i\omega x_{j}/c}a_{j} \\
&+& i \sum_{j=1,2}\sqrt{\tfrac{(1-\beta)\kappa_{j}(t)}{2\pi}} \int_{{\cal B}}d\omega b_{{L}}^{\dagger}(\omega)e^{i(\omega-\omega_{c})t+i\omega x_{j}/c}a_{j} \nonumber \\
&+&\text{H.c}.\label{eq:V}
\end{eqnarray}
The Hamiltonian $H(t)$ is written in a frame where node operators $a_j$ and $\sigma_j^-$ rotate with the cavity frequency $\omega_c$, and in an interaction picture with the waveguide bare Hamiltonian $H_B=\int_\mathcal{B} d\omega\, \omega[b^\dagger_L(\omega)b_L(\omega)+b^\dagger_R(\omega)b_R(\omega)]$. Here $b_L(\omega)$ and $b_R(\omega)$ are left- and right-moving waveguide modes satisfying $[b_i(\omega),b^\dagger_{i'}(\omega')]=\delta_{i,i'}\delta(\omega-\omega')$, and we assumed a linear dispersion relation around $\omega_c$.
Moreover, in writing Eq.~\eqref{eq:V}, we have assumed under the Born-Markov approximation that the decay rates $\kappa_j(t)$ can be considered constant over the bandwidth $\mathcal{B}$.
Here, the parameter $0\leq\beta\leq1$ defines the chirality of the coupling with rates $\beta \kappa_j(t)$, respectively $(1-\beta)\kappa_j(t)$, to the right- and left-moving modes. For the perfectly chiral case ($\beta=1$), $V_\beta(t)$ corresponds to the interaction Hamiltonian $V(t)$ as written in Eq.~(1) of the main text.
The interaction Hamiltonian can be rewritten as
\begin{eqnarray}
V_\beta(t)&=& i\sum_{j=1,2}\sqrt{\beta\kappa_{j}(t)} b_{{R}}^{\dagger}\big(t-(j-1)\tau\big) e^{i(j-1)\phi} a_{j} \nonumber \\
&+& i\sum_{j=1,2} \sqrt{(1-\beta)\kappa_{j}(t)} b_{{L}}^{\dagger}\big(t+(j-1)\tau\big) e^{-i(j-1)\phi} a_{j} \nonumber \\
&+&\text{H.c},
\end{eqnarray}
where we have set $x_1=0$, $x_2=d$. Here $\phi=-\omega_c \tau$ is the propagation phase and $\tau=d/c$ is the time delay. The quantum noise operators
\begin{equation}
b_{R,L}(t) = \frac{1}{\sqrt{2\pi}} \int_{{\cal B}}d\omega\, b_{{R,L}}(\omega)e^{-i(\omega-\omega_{c})t}, \label{eq:b}
\end{equation}
satisfy $[b_{i}(t),b^\dagger_{j}(s)] = \delta_{i,j}\delta(t-s)$ and represent the incoming or `injected' light field interacting with the cavities. Finally, we include in our description the effect of cavity and propagation losses by adding to the interaction Hamiltonian $V_\beta$ coupling terms to additional channels~\cite{gardiner2004quantum}.
\subsection{Quantum stochastic Schr\"odinger equation}
The quantum stochastic Schr\"odinger equation (QSSE) describes the stochastic evolution of the system as
\begin{equation}
i\frac{d\ket{\Psi(t)}}{dt}=H(t) \ket{\Psi(t)},
\end{equation}
where $\ket{\Psi(t)}$ represents the wavefunction of the system of nodes and quantum channels, which can be interpreted within the framework of Ito or Stratonovich calculus~\cite{gardiner2015}.
As detailed in App.~\ref{sec:MPS}, the numerical simulation of QSSE can be performed using a matrix product state (MPS) ansatz for the wavefunction $\ket{\Psi(t)}$, where the state of the waveguide (e.g. vacuum, thermal or coherent) can be efficiently represented.
It is particularly well-suited in the non-cascaded case $\beta<1$ and for long delay times ($\kappa_j \tau \gg 1$) where non-Markovian effects arise~\cite{Pichler2015,Ramos2016}.
\subsection{Quantum Langevin Equations}
\label{sec:langevin}
The quantum Langevin equations (QLEs), describing the dynamics of an arbitrary operator $a$ acting on the nodes in the Heisenberg picture, is the starting point of our analytical study of the QST dynamics. In the following, we present their explicit form for the various situations addressed in the main text.
\subsubsection{Ideal setup}
In the ideal setup the system has no (additional) losses and the coupling between cavities and waveguide is perfectly chiral with the two nodes decoupled from left-moving modes. In the Heisenberg picture the dynamics of an arbitrary operator $a$ acting on the nodes can be obtained by formal integration of the Heisenberg equation for right-moving modes $b_R(\omega)$ around the resonant frequency $\omega_c$ (see for instance Ref.~\cite{gardiner2015}), which leads to
\begin{align}
\dot a =
&-\sum_{j=1,2} [a, a_j^\dagger] \left(\frac{\kappa_j(t)}{2}a_j+\sqrt{\kappa_j(t)} b_{R} (t) + g_j(t) \sigma_j^- \right) \nonumber\\
&-\sum_{j=1,2}[a,\sigma_j^+] g_j(t) a_j- [a, a_2^\dagger] \sqrt{\kappa_2(t)\kappa_1(t)} a_1 +\widetilde{\text{H.c.}},
\label{eq:langevin}
\end{align}
where we redefined the time of the second cavity to eliminate the time delay and absorbed the propagation phase in the definition of the cavity operators $a_j$.
Here, the notation $\widetilde{\text{H.c.}}$ refers to taking the complex conjugate of the total expression, except for operator $a$. Note that the operator $b_R(t)$ is expressed as in Eq.~\eqref{eq:b}, {\it i.e.}~in the interaction picture, and represents here a source term driving the cavities.
For $a=a_1$ and $a=a_2$, we obtain Eqs.~(2) of the main text when atoms and cavities are decoupled [$g_j(t)=0$].
\subsubsection{Imperfect chirality}
We now consider the case where the chiral coupling between nodes and waveguide is not perfect ({\it i.e.}~unidirectional), that is the nodes also couple to left-moving modes $b_{{L}}(t)$.
In order to obtain a system of coupled local differential equations, we neglect the time delay $\tau$ assuming the Born-Markov approximation $\kappa_{\max}\tau \ll 1$, with $\kappa_{\max}$ the maximum value of $\kappa_1(t)$. The QLEs then read~\cite{Pichler2015}
\begin{eqnarray}
\dot a &=& -\sum_{j=1,2} [a, a_j^\dagger] \left(\frac{\beta\kappa_j(t)}{2}a_j+\sqrt{\beta\kappa_j(t)} b_{R} (t) \right) \nonumber \\
&-& [a, a_2^\dagger] \left(\beta\sqrt{\kappa_2(t) \kappa_1(t)} a_1\right) , \nonumber \\
&-& \sum_{j=1,2} [a, a_j^\dagger] \left(\frac{(1-\beta)\kappa_j(t)}{2}a_j \right. \label{eq:langevinlchiral}\\
&&\hspace{1cm}\left. +\sqrt{(1-\beta)\kappa_j(t)} e^{2i\phi\delta_{j,2}} b_{L} (t) \right) \nonumber \\
&-& [a, a_1^\dagger] \left(e^{-2i\phi}(1-\beta)\sqrt{\kappa_1(t) \kappa_2(t)} a_2\right) +\widetilde{\text{H.c.}} \nonumber,
\end{eqnarray}
where we redefined $b_L(t)\to e^{-i\phi} b_L(t-\tau)$ and assumed $g_j(t)=0$.
Note the importance of the propagation phase $\phi$ in the above equation, which governs the interference between the emission of the two cavities.
\subsubsection{Additional losses}
In the case of cavity and waveguide losses, the QLEs can be written as~\cite{gardiner2004quantum}
\begin{eqnarray}
\dot a &=& - [a, a_1^\dagger] \left(\frac{\kappa_1(t)}{2}a_1+\sqrt{\kappa_1(t)} b_{R} (t) \right) \nonumber \\
&-& [a, a_2^\dagger] \left(\frac{\kappa_2(t)}{2}a_2+\sqrt{\kappa_2(t)} \cos(\theta) b_{R} (t) \right) \nonumber \\
&-& [a, a_2^\dagger] \left(\cos(\theta)\sqrt{\kappa_2(t) \kappa_1(t)} a_1\right) , \nonumber \\
&-& [a, a_2^\dagger] \left( \sqrt{\kappa_2(t)}\sin(\theta) b_{U}(t) \right) \nonumber \\
&-&\sum_{j=1,2} [a, a_j^\dagger] \left(\frac{\kappa'}{2}a_j+\sqrt{\kappa'} b_j'(t) \right)+\widetilde{\text{H.c.}} \label{qe:lamgevinlosses},
\end{eqnarray}
with $g_j(t)=0$, and where we have assumed perfect chirality ($\beta=1$). Here $\kappa'$ is the coupling of each cavity to unwanted (non-guided) modes with input fields $b_{1,2}'(t)$, and the waveguide losses with rates $\kappa_f$ are modelled by a beamsplitter mixing to an additional waveguide with $\cos(\theta)=\exp(-\kappa_f\tau/2)$ and input field $b_{U}(t)$.
\subsection{Master equation}
The master equation allows one to perform numerical simulations of the evolution of the node reduced density matrix $\rho(t)=\text{Tr}_\text{B}\big(\ket{\Psi(t)}\bra{\Psi(t)}\big)$, where $\text{Tr}_\text{B}$ denotes the trace over propagating modes. The mapping from the QLEs to the master equation can be realized using different techniques~\cite{gardiner2004quantum,vogel2006,gardiner2015}. To do so, one writes the QLE in the following form
\begin{eqnarray}
\dot a &=&- \sum_\alpha [a,c_\alpha^\dagger]d_\alpha - \sum_k [a,e_k^\dagger] b_k(t) + \widetilde{\text{H.c.}} ,\label{eq:langevincanonical}
\end{eqnarray}
where the $\alpha$ index refers to interactions involving node operators and the $k$ index to input fields $b_k(t)$.
For instance, in the case of Eq.~\eqref{eq:langevin}, we have $c_1=a_1$, $d_1=(\kappa_1/2) a_1$, $c_2=a_2$, $d_2=(\kappa_2/2) a_2$, $c_3=a_2$, $d_3=\sqrt{\kappa_1\kappa_2}a_1$, \mbox{$e_1=\sqrt{\kappa_1}a_1+\sqrt{\kappa_2}a_2$}, $b_1=b_{R}(t)$.
In the case where each channel is in a thermal state with $n_{th}(k)$ average photons, we obtain the master equation~\cite{gardiner2004quantum,vogel2006,gardiner2015}
\begin{equation}
\frac{d}{dt} \rho = \sum_\alpha \mathcal{L}_\alpha(\rho) +\sum_k \mathcal{L}_k(\rho),
\end{equation} where
\begin{eqnarray}
\mathcal{L}_\alpha(\rho) =&& [d_\alpha \rho, c_\alpha^\dagger]+[c_\alpha,\rho d_\alpha^\dagger], \\
\mathcal{L}_k(\rho) =&& n_{th}(k) \left( \mathcal{D}[e_k](\rho)+\mathcal{D}[e_k^\dagger](\rho) \right),
\end{eqnarray}
with $\mathcal{D}[e](\rho)=e \rho e^\dagger-\tfrac12(e^\dagger e \rho+\rho e^\dagger e)$.
\section{Operator mapping $a_{1}(t_{i})\rightarrow-a_{2}(t_{f})$ }\label{sec:mapping}
Here we show that the QST protocol between two nodes as linear harmonic oscillators performs the operator mapping of Eq.~(3) of the main text, assuming the ideal scenario where couplings to the environment are negligible ($\kappa'=\kappa_f=0$) and couplings to the waveguide are perfectly chiral ($\beta=1$). The system is described by Eqs.~(2) of the main text.
\subsection{Integration of the quantum Langevin equations}
In the input-output formalism, the output field of the first node in the waveguide is given by $b_{R}(t)+v(t)$, with the contribution from the first node $v(t)=\sqrt{\kappa_1(t)}a_1(t)$.
This variable represents the emission of the first node, containing the information about its initial quantum state.
Conversely, the output field of the two cavities is given by $b_{R}(t)+w(t)$, where the contribution of the two nodes is $w(t)=v(t)+\sqrt{\kappa_2(t)}a_2(t)$.
Using the Langevin equation~\eqref{eq:langevin}, these two variables evolve according to
\begin{eqnarray}\label{eq: motionv}
\frac{dv}{dt}&=&f_1 v-\kappa_1 b_{R}(t) \label{eq:motionv} \\
\frac{dw}{dt}&=&f_2 w +(f_1-\kappa_2-f_2)v-(\kappa_1+\kappa_2) b_{R}(t) \label{eq:motionw}
\end{eqnarray}
where $f_j=(\dot \kappa_j/\kappa_j-\kappa_j)/2$.
We require that these functions satisfy the condition \begin{equation} \label{eq:req1}f_1(t)=f_2(t)+\kappa_2(t),\end{equation} so that the equations for $v$ and $w$ decouple.
In the standard QST description, {\it i.e.}~without injected noise, this condition implies that the output field $w$ vanishes, or, in other words, that the second cavity absorbs all the emission of the first one.
The general solution of Eqs.~\eqref{eq: motionv},\eqref{eq:motionw} reads
\begin{eqnarray}
\label{eqsolv}
v(t) &= &v(t_i)e^{\int_{t_i}^tdt' f_1(t')}\\ &&-\int_{t_i}^{t}dt'\, e^{{\int_{t'}^{t}dt'' f_1(t'')}} \kappa_1(t') b_{R}(t'),\nonumber \\
w(t) &=& w(t_i)\, e^{\int_{t_i}^tdt' f_2(t')} \\ &&-\int_{t_i}^{t}dt'\, e^{{\int_{t'}^{t}dt'' f_2(t'')}} \big(\kappa_1(t')+\kappa_2(t')\big) b_{R}(t') \nonumber.
\end{eqnarray}
The second node operator $a_2(t)=(w(t)-v(t))/\sqrt{\kappa_2(t)}$ can then be written in the form
\begin{equation}\begin{aligned} \label{eqSM:a2t}a_2(t_f)=& G_1(t_f,t_i) a_1(t_i) + G_2(t_f,t_i)a_2(t_i)\\&+\int_{t_i}^{t_f}dt'\, G(t,t') b_{R}(t'),\end{aligned}\end{equation}
where the node propagators are expressed as
\begin{eqnarray}
\label{G12}
G_1(t,t') &= & \sqrt{\frac{\kappa_1(t')}{\kappa_2(t)}} e^{\int_{t'}^tdt'' f_1(t'')}\left(e^{-\int_{t'}^tdt'' \kappa_2(t'')}-1\right) \nonumber \\
G_2(t,t') &= & e^{-\frac12\int_{t'}^tdt'' \kappa_2(t'')},
\end{eqnarray}
and the noise propagator as
\begin{eqnarray}
\label{eq:smdefG}
G(t,t') &= & -\sqrt{\kappa_1(t')} G_1(t,t') - \sqrt{\kappa_2(t')} G_2(t,t') \label{eq:G}.
\end{eqnarray}
In the limit of transfer times $T=t_f-t_i$ large compared to the typical emission rates $\sim 1/\kappa_{\max}$, the node propagators satisfy (see for example Ref.~\cite{Stannigel2011})
\begin{eqnarray}
G_1(t_f,t_i)&=&-1\\G_2(t_f,t_i)&=&0,
\end{eqnarray}
with effects of finite pulse duration $T$ depending on the specific shape of the functions $\kappa_{1}(t),\kappa_2(t)$.
\subsection{Vanishing noise contribution}
We now show that the noise contribution at the end of the QST vanishes due to opposite contributions from $G_1(t_f,t')$ and $G_2(t_f,t')$ in Eq.~\eqref{eq:smdefG}, leading to
the operator mapping $a_2(t_f) = -a_1(t_i)$.
We first note that the state of the injected noise field can always be decomposed as a distribution of classical states by expressing the injected noise field with a P-representation~\cite{gardiner2004quantum}. If we consider any particular component $\ket{\{\beta(\omega)\}}$ with $b_R(t)\ket{\{\beta(\omega)\}}=\beta(t)\ket{\{\beta(\omega)\}}$, we can bound the noise contribution using the Cauchy-Schwarz inequality:
\begin{eqnarray}
\left| \int_{t_i}^{t_f}dt'\, G(t_f,t') b_{R}(t')\right|^2 \le B(t_f,t_i) \int_{t_i}^{t_f}dt'\, |G(t_f,t') |^2, \nonumber
\end{eqnarray}
where $B(t_f,t_i)=\int_{t_i}^{t_f} |\beta(t')|^2 dt'$ is the integrated noise intensity.
Below we provide two examples of coupling functions $\kappa_1(t),\kappa_2(t)$ satisfying Eq.~\eqref{eq:req1}, and we show that the integral of $|G(t_f,t') |^2$ in the last equation vanishes.
(i) We first consider the functions
\begin{eqnarray} \kappa_1(t)&=& \kappa_{\max} \frac{e^{\kappa_{\max}t}}{2-e^{\kappa_{\max}t}} \Theta(-t)+\kappa_{\max}\Theta(t),\label{eq:kappaexp}\\ \kappa_2(t)&=&\kappa_1(-t)\end{eqnarray} where we set for convenience $t_f=-t_i=T/2$ and $\kappa_{\max}$ is the maximum decay rate. These are the coupling functions used in our numerical simulations. The integral reads
\begin{equation}
\int_{t_i}^{t_f}dt'\, |G(t_f,t') |^2 = \frac{2 \left(e^{\kappa_{\max}T/2}-1\right)}{\left(1-2 e^{\kappa_{\max}T/2}\right)^2},
\end{equation}
which vanishes like $e^{-\kappa_{\max}T/2}$ in the limit $\kappa_{\max}T\gg1$. This implies that, provided the integrated noise intensity does not grow exponentially with $T$ (typically the growth is linear), the noise contribution vanishes. Unless stated otherwise, we use $\kappa_{\max}T=20$.
(ii) This cancellation of noise is not restricted to coupling functions satisfying the time-reverse property $\kappa_2(-t)=\kappa_1(t)$. For example if one considers
\begin{eqnarray}
\kappa_1(t) &=& \kappa_{\max} \\
\kappa_2(t) &=& \kappa_{\max} \frac{e^{-\kappa_{\max}t}}{1-e^{-\kappa_{\max}t}},
\end{eqnarray}
where we set $t_i=0$ and $t_f=T$, then the integral reads
\begin{equation}
\int_{t_i}^{t_f}dt'\, |G(t_f,t') |^2 = e^{-\kappa_{\max}T}.
\end{equation}
Note however that in practice one needs to introduce a cut-off for $\kappa_2(t)$ to avoid the divergence at time $t=0$.
\subsection{Extension: beamsplitter operation}\label{sec:bsop}
\begin{figure}
\includegraphics[width=\columnwidth]{FigSM1.pdf}
\caption{{\it Operator mapping between four cavities.} We consider four cavities in the beamsplitter configuration and subject to injected noise. \label{fig:beamsplitter}}
\end{figure}
The operator mapping between two nodes can be extended to the beamsplitter mixing between four nodes, as represented in Fig.~\ref{fig:beamsplitter} where two pairs of cascaded nodes as harmonic oscillators undergo the QST protocol along waveguides mixed by a beamsplitter.
If we assume no imperfections in the couplings, the QLEs read
\begin{eqnarray}
\dot a_1 =&& -\frac{\kappa_1(t)}{2}a_1(t) - \sqrt{\kappa_1(t)}b_{R}(t) \\
\dot a_2 =&& -\frac{\kappa_2(t)}{2}a_2(t) -\sqrt{\kappa_2(t)}\Big(\sqrt{\kappa_1(t)}a_1(t)\cos(\theta)\\&&\nonumber + b_{R}(t)\cos(\theta) - \sqrt{\kappa_1(t)}a_3(t)\sin(\theta) - b_{U}(t)\sin(\theta)\Big),\\
\dot a_3 =&& -\frac{\kappa_3(t)}{2}a_3(t) - \sqrt{\kappa_3(t)}b_{U}(t) \\ \label{eqdam4}
\dot a_4=&& -\frac{\kappa_4(t)}{2}a_4(t) -\sqrt{\kappa_4(t)}\Big(\sqrt{\kappa_3(t)}a_3(t)\cos(\theta)\\&&\nonumber + b_{U}(t)\cos(\theta) + \sqrt{\kappa_3(t)}a_1(t)\sin(\theta) + b_{R}(t)\sin(\theta)\Big).
\label{eq:langevinbeamsplitter}
\end{eqnarray}
Here nodes $3$ and $4$ are coupled only to the modes propagating upwards in the vertical waveguide, and $b_{U}(t)$ denotes the corresponding injected noise. In particular, if the coupling functions satisfy $\kappa_3(t)=\kappa_1(t)$ and $\kappa_4(t) = \kappa_2(t)$, the equations can be decoupled using the linear combinations
\begin{eqnarray}\tilde a_2(t) &=& \cos(\theta)a_2(t)+\sin(\theta) a_4(t),\\\tilde a_4(t) &=& \cos(\theta)a_4(t)-\sin(\theta) a_2(t), \end{eqnarray}
whose evolution maps directly to Eq.~(2) of the main text with the set ($a_1,a_2$) replaced by the sets ($a_1,\tilde a_2$) or ($a_3,\tilde a_4$). The operator mapping can thus be applied to these sets and we obtain the mixed QST
\begin{equation} \begin{pmatrix} a_2(t_f) \\ a_4(t_f) \end{pmatrix} = - \begin{pmatrix} \cos(\theta) & -\sin(\theta) \\ \sin(\theta) & \cos(\theta) \end{pmatrix} \begin{pmatrix} a_1(t_i)\\ a_3(t_i) \end{pmatrix}. \label{eq:beamsplitter}\end{equation}
This result shows that one can realize beamsplitter mappings between four distant cavities, where cavities $1$ and $3$ imprint their quantum states onto noisy temporal modes of the two quantum channels. These modes interfere at the beamsplitter, similarly to single photon wave-packets, before being reabsorbed perfectly by cavities $2$ and $4$.
\section{Effective tunable cavity couplings $\kappa_{1,2}(t)$ via atomic ensembles}\label{sec:atomicensembles}
\subsection{Model}
Here we provide details on the realization of QST between two nodes ($j=1,2$) using atomic ensembles as harmonic oscillators.
As depicted in Fig.~\ref{fig:ensembles}(a), we consider two nodes consisting of an atomic qubit, a cavity, and an ensemble of two-level atoms $i=1,..,N$ with ground states $\ket{g}_{j,i}$ and excited states $\ket{e}_{j,i}$.
The Hamiltonian governing the dynamics of the system can be written in the form of $H(t)$ (see main text) where the node Hamiltonian is now given by
\begin{equation}
H_{n_j} = g_j \left( a_j^\dagger \sigma_j^- + \text{H.c} \right)+\tilde g_j(t) \sqrt{N} \left( a_j^\dagger S_j^- + \text{H.c} \right),
\end{equation}
and, having in mind a typical quantum optical setup, the coupling rates between cavities and waveguide $\kappa_j$ are considered to be constant over time.
The node Hamiltonian $H_{n_j}$ can be realized via a laser-assisted Raman transition~\cite{Hammerer2010} with the assumption that additional Stark-shift terms can be cancelled using for instance another laser coupling.
Here $S_j^{\pm}=(S_j^x\pm i S_j^y)/\sqrt{N}$ are the collective spin operators associated to the total angular momentum $S_j^l=\frac12\sum_{i=1}^N \sigma_{i,j}^l$, with spin $S=N/2$ and where $\{\sigma_{i,j}^l,l=x,y,z\}$ denotes the set of Pauli matrices of the $i$-th atom in node $j$. In the following we will use the ground states $\ket{G}_j=\otimes_{i}\ket{g}_{j,i}$ and first excited states $\ket{E}_j=S_j^+\ket{G}_j$ of the atomic ensemble to mediate the QST between the two nodes, with effective time-dependent coupling to the waveguide governed by the functions $\tilde{g}_j(t)$.
\begin{figure}
\includegraphics[width=\columnwidth]{FigSM2}
\caption{{\it QST via atomic ensembles.} (a) Setup including atomic ensembles as effective harmonic oscillators in the nodes. (b) Fidelity of QST as a function of $n_\text{th}$ and for different atom numbers. (c) Error $1-\mathcal{F}$ as a function of the scaling parameter $x=(N+1)^2/n_{th}$. The dashed line represents $2.5/x$.}\label{fig:ensembles}
\end{figure}
The protocol, which becomes robust against noise in the limit of large atomic ensembles, is very similar to the one described in the main text in Fig.~1(b).
The first step (i) now consists in mapping the qubit state $c_g\ket{g}_1+c_e\ket{e}_1$ of the atom to a collective state of the atomic ensemble $c_g\ket{G}_1+c_e\ket{E}_1$.
This can be done for instance by detuning the atom and atomic ensemble with respect to the cavity in the Raman transitions, hence realizing an effective coupling between qubit and atomic ensemble with the cavity adiabatically eliminated.
The second step (ii) consists in realizing the operator mapping $S_1^-(t_i)\to -S_2^-(t_f)$ in analogy to the operator mapping between cavities, with the atomic ensemble now resonantly coupled to the cavity. Finally, (iii) the atomic ensemble state is mapped to the qubit state of the second node in the reverse process of step (i).
\subsection{Adiabatic elimination of the cavities}
We now describe in detail step (ii) where the atomic ensemble realizes the QST in the presence of injected noise. During this operation, the qubits are excluded from the dynamics ($g_j=0$). The Langevin equations associated with $H(t)$ can be written in the form (see for instance Ref.~\cite{Habraken2012} for the single atom case)
\begin{eqnarray}
\dot S^-_j &=& -i \tilde g_j(t) \sqrt{N} [S_j^-,S_j^+] a_j \nonumber \\
\dot a_j &=& -i \tilde g_j(t) \sqrt{N} S_j^- -\frac{\kappa_j}{2} a_j - \sqrt{\kappa_j(t)} \left(b_{R}(t)+\delta_{j,2} a_1(t) \right), \nonumber
\end{eqnarray}
with $j=1,2$. We assume here the bad-cavity regime where the decay of the cavity $\kappa_j$ to the waveguide is large compared to the coupling $\tilde g_j$. The cavity mode can then be adiabatically eliminated, leading to
\begin{eqnarray}
\dot S_1^- &=& -[S_1^-,S_1^+] \left[\frac{\tilde\kappa_1(t)}{2} S_1^-+ \sqrt{\tilde \kappa_1(t)} b_{R}(t) \right]\nonumber \\
\dot S_2^- &=& -[S_2^-,S_2^+] \left[\frac{\tilde\kappa_2(t)}{2} S_2^- \right. \nonumber \\
&&\hspace{1.6cm}+\left. \sqrt{\tilde \kappa_2(t)} \left( b_{R}(t) + \sqrt{\tilde \kappa_1(t)} S_1^- \right) \right], \nonumber \\ \label{eq:langevincollec}
\end{eqnarray}
with the time-dependent coupling $\tilde \kappa_j(t)=4\tilde g_j(t)^2N/\kappa_j$. We have absorbed a phase $i$ ($-i$) in the definition of $S_1$ ($S_2$, respectively).
In the case of low excitations $n\ll N$, the spin operators behave as linear operators~\cite{Hammerer2010} where \begin{equation} [S_j^-,S_j^+]\ket{n}_j=-\frac{2}{N}S_j^z\ket{n}_j=\left(1-\frac{2n}{N}\right)\ket{n}_j\approx \ket{n}_j,\end{equation} with $\ket{n}_j\propto (S_j^+)^n\ket{G}_j$. Eqs.~\eqref{eq:langevincollec} thus map to Eqs.~(2) of the main text with the identification $S_j^-=a_j$, and the ensemble behaves as a harmonic oscillator.
\subsection{Nonlinear effects}
In order to assess the importance of nonlinear effects for finite number of atoms, we simulate the QST dynamics by mapping the QLEs~\eqref{eq:langevincollec} to a master equation, which is then integrated numerically.
In Fig.~\ref{fig:ensembles}(b) we show how the QST fidelity approaches unity in the transition from the nonlinear ($N\approx 1$) to the linear regime ($N\gg 1$). In Fig.~\ref{fig:ensembles}(c), we represent the same data in a logarithmic scale and with a rescaled axis $x=(N+1)^2/n_{th}$. This shows that the error scales as a power law $1-\mathcal{F}\propto 1/x$ in the limit of large $x$.
\section{Matrix product state approach to solve the QSSE with injected noise }\label{sec:MPS}
Our numerical approach based on matrix product state (MPS) builds on the techniques developed in Ref.~\cite{Pichler2016}. The state of the system of nodes and photons is described as a MPS~\cite{Schollwock2011} and evolved according to the QSSE. This allows one to (i) simulate the non-Markovian dynamics of the system for large retardation times $\tau$ when the system is not purely cascaded, (ii) inject arbitrary noise, and (iii) have access to the entangled state of the electromagnetic field inside the waveguide.
We first provide the procedure for the scenario, where the nodes consist of two purely cascaded cavities, {\it i.e.}~we assume perfect chirality. In the interaction picture the system evolves according to the QSSE \begin{equation}\label{eq:smqsse} i\frac{d}{dt}\ket{\Psi(t)}=V(t) \ket{\Psi(t)}, \end{equation} with $V(t)$ defined in Eq.~(1) of the main text.
We now discretize time $t_{k+1} = t_k + \Delta t$ as time-bins with time-step $\Delta t$, with initial time $t_1=t_i$, final time $t_f=t_M$ and $T=t_f-t_i= M \Delta t$. We require $\Delta t$ to be smaller than the timescale of the system dynamics, {\it i.e.}~$\kappa_{\max} \Delta t \ll 1$. For each time-bin we define the quantum noise increment \begin{equation} \Delta B_k=\int_{t_k}^{t_{k+1}} dt\,b_R(t), \end{equation} with $b_R(t)$ the Fourier transform operators of $b_R(\omega)$, which obey bosonic commutation relations $[\Delta B_k, \Delta B_{k'}^\dagger]=\Delta t\,\delta_{k,k'}$. The time-bins thus describe bosons with creation operators $\Delta B_k^\dagger/\sqrt{\Delta t}$, and the operator $\Delta B_k^\dagger \Delta B_k/\Delta t$ is interpreted as the total number of photons with label $t\in [t_k,t_{k+1}]$.
The time evolution can then be viewed as a stroboscopic mapping $\ket{\Psi(t_{p+1})}=U_p \ket{\Psi(t_p)},$ where the unitary operator is given to lowest order in $\Delta t$ by \begin{equation} U_p =\exp\left( \sqrt{\kappa_1(t_p)} \Delta B_p^\dagger a_1 + \sqrt{\kappa_2(t_{p})} \Delta B_{p-l}^\dagger a_2 -\text{H.c.}\right)\label{eq:defUk} \end{equation} where $\tau=l\Delta t$ and the propagation phase has been absorbed in the definition of $a_2$.
The (entangled) state of the system at each time-step is written as \begin{equation} \label{eq:psitp}\ket{\Psi(t_p)}=\sum_{i_{n1},i_{n2},\{i_k\}} \psi_{i_{n1},i_{n2},\{i_k\}}(t_p)\ket{i_{n1},i_{n2},\{i_k\}}, \end{equation} where $i_{n1},i_{n2}=0,1,...$ label the Fock states of nodes $1$ and $2$, and $\{i_k\}=\{i_1,i_2,...,i_M\}$ label the Fock states of time-bins $k=1,2,...,M$.
If the injected noise is in a thermal state, the state is no longer pure, and we have the correlations $\langle b(\omega)^\dagger b(\omega') \rangle =n_\text{th} \delta(\omega-\omega')$ with flat spectrum $S(\omega)=n_\text{th}$, or in the time representation $\langle b(t)^\dagger b(t') \rangle = n_\text{th} \delta(t-t')$. This translates for the noise increments into \begin{equation} \langle \Delta B^\dagger_k \Delta B_{k'}\rangle = n_\text{th} \delta_{k,k'} \Delta t, \end{equation} where $n_\text{th}$ is now interpreted as the average occupation number of each time-bin. This is generated by having each time-bin $k$ initially in the mixed state \begin{equation} \rho_k = (1-e^{-\beta \omega_c})e^{-\beta \omega_c {\Delta B^\dagger_k \Delta B_k}/{\Delta t}}, \end{equation} where $e^{\beta\omega_c}=1+1/n_\text{th}$.
In order to perform the simulations using mixed instead of pure states, one usually either averages the evolved states obtained with a set of stochastic trajectories, or simulates the evolution of the density matrices \cite{Bonnes2014}. Here we employ an alternative method, where we purify the state by extending its definition to auxiliary virtual time-bins. The time-bin $k$ now consists of a pair of one `real' and one `auxiliary' parts, which are initially in the pure state \begin{equation}\label{eqqsm:psik} \ket{\psi_k}=\sqrt{1-e^{-\beta \omega_c}}\sum_{n=0}^\infty e^{-\beta \omega_c n/2} \ket{n}_\text{real}\ket{n}_\text{aux}. \end{equation} The original density matrix $\rho_k$ is obtained by tracing out the auxiliary part, which is done at the end of the evolution when computing observables. We thus want to solve the evolution of a state of the form of Eq.~\eqref{eq:psitp}, where now the photon indices $i_k$ are multi-indices \mbox{$i_k=(i_k^r,i_k^a),$} where $i_k^r$ and $i_k^a$ are indices on the Fock spaces of the real and auxiliary parts of time-bin $k$.
The MPS ansatz consists in writing each coefficient of $\psi_{i_{n1},i_{n2},\{i_k\}}(t_p)$ as the trace of a product of $M+2$ tensors, such as \begin{equation}\label{eq:defmps} \psi_{i_{n1},i_{n2},\{i_k\}}=\text{Tr}\left(A[n_1]^{i_{n1}} A[n_2]^{i_{n2}} A[1]^{i_1} ...\, A[M]^{i_M} \right). \end{equation}
where each object $A[\cdot]$ is a tensor. As long as the entanglement in the system is low enough, the matrices (bond) dimensions, which are changed dynamically during the evolution, can be truncated by a value $D_{\max}$, which restricts the spanned Hilbert space.
The order of the matrices [{\it cf.}~Eq.~\eqref{eq:defmps}] is arbitrary, that is, one can always find an MPS decomposition with matrices in a given order. For each time $t_p$ we choose the following ordering. The time-bin matrices are ordered in order of increasing indices. The matrix for node $1$ is located on the left of $A[p]^{i_{p}}$, and for node $2$ on the left of $A[p-l]^{i_{p-l}}$. The unitary evolution for time-step $p$ [{\it cf.}~Eq.~\eqref{eq:defUk}] thus requires updating only these four matrices. The initial thermal state of the matrices for the time-bins $A[k]^{i_k}$ can be decomposed as a product of matrices for real and auxiliary parts as \begin{equation} A[k]^{i_k}=B[k]^{i_k^r}\Lambda[k]C[k]^{i_k^a}, \end{equation} where \begin{equation}(B[k]^{i_k^r})_{i,j}=\delta_{i,0}\delta_{j,i_k^r},\ \ \ (C[k]^{i_k^a})_{i,j}=\delta_{j,0}\delta_{i,i_k^a},\end{equation} and \begin{equation}(\Lambda[k])_{i,j}=\delta_{i,j}\sqrt{1-e^{-\beta\omega_c}}e^{-\beta \omega_c i/2}\end{equation} contains the Schmidt values of the singular value decomposition of Eq.~\eqref{eqqsm:psik}. The algorithm then consists in updating the tensors $B[k],A[n_1],A[n_2]$ by successively applying operators $U_p$.
This model can immediately be extended to more elaborate nodes, e.g. adding atoms coupled to the cavities, by adding other contributions to the Hamiltonian in the QSSE~\eqref{eq:smqsse} and adapting accordingly the Hilbert spaces of the node and the definition of $U_p$ in Eq.~\eqref{eq:defUk}. In practice we obtain the fidelity of QST as described in Sec.~\ref{sec:avfid}, by adding an additional node, decoupled from the dynamics, which is initially in a maximally entangled state with node $1$.
If the nodes couple also to photons propagating in the left direction, one needs to distinguish between the left- and right-moving photons. This evolution is generated by a unitary operator which now reads \cite{Pichler2016}
\begin{equation} \begin{aligned}U_p =\exp\Big(& \sqrt{\beta\kappa_1(t_p)} \Delta B_{\text{R},p}^\dagger a_1 + \sqrt{\beta\kappa_2(t_p)} \Delta B_{\text{R},p-l}^\dagger a_2 \\
+&\sqrt{(1-\beta)\kappa_1(t_p)} \Delta B_{\text{L},p-l}^\dagger a_1 \\+& \sqrt{(1-\beta)\kappa_2(t_p)} \Delta B_{\text{L},p}^\dagger a_2 e^{-2i\phi} -\text{H.c.}\Big),\label{eq:defUk_notcascade} \end{aligned} \end{equation}
where we define noise increments for right- and left-moving photons as $\Delta B_{\text{R/L},k}=\int_{t_k}^{t_{k+1}}dt\,b_\text{R/L}(t)$. Now the definition of the system extends to the time-bins for left-moving photons. A detailed description of the ordering of the matrices in the MPS decomposition, as well as the treatment of the long-range interactions induced by the time-delay can be found in the Supplemental Material of Ref.~\cite{Pichler2016}. The same purification technique can be applied for both types of photons in order to represent thermal noise.
\section{Frequency mismatch}\label{sec:mismatch}
We study here the effect of a mismatch in the frequencies of the cavity modes, assuming that they are detuned with a frequency $2\Delta$. In the numerical simulations this is described by an additional Hamiltonian term $\Delta(a^\dagger_1 a_1 - a^\dagger_2 a_2)$. In the frame rotating with this new term, we replace $\sqrt{\kappa_2(t)}\to\sqrt{\kappa_2(t)}e^{-2i\Delta t}$ in the QLEs (Eq.~(2) of the main text). The mismatch thus acts as an error on the coupling functions, hence one would now need to add a phase in the coupling between cavities and waveguide in order to emit and absorb the photon in the correct temporal mode.
In Fig.~\ref{fig:mismatch}(a) we see that the effect is similar to the one of timing errors represented in Fig.~2(b) of the main text. Here we need $\Delta/\kappa_\text{max}\lesssim 0.025$ in order to ensure a fidelity of $\mathcal F\geq0.99$. For comparison we show the same plot in the case where the qubits are coupled directly to the waveguide, with $\Delta$ now the detuning between the qubits.
\begin{figure}
\includegraphics[width=\columnwidth]{FigSM4.pdf}
\caption{{\it Effect of frequency mismatch.} Average fidelity of the state transfer as a function of the detuning $\Delta$ for different values of $n_{th}$, (a) with and (b) without cavities [see respectively Fig.~1(b) and Fig.~1(a) of the main text].}\label{fig:mismatch}
\end{figure}
\section{Quantum error correction}\label{sec:qec}
Here we provide details on the quantum error correction scheme.
\subsection{Operator mapping with waveguide losses}\label{sec:oplosses}
We first consider the case of waveguide losses, with rates $\kappa_f$.
These losses can be modelled as a beamsplitter operation, as presented in Sec.~\ref{sec:bsop}, with the angle defined as $\cos(\theta)=\exp(-\kappa_f\tau/2)$. Here the operators $a_3$ and $ a_4$ represent fictitious cavities. Using the beamsplitter mapping of Eq.~\eqref{eq:beamsplitter}, an initial state of the first cavity $\ket{\Psi}_i = \sum_{n=0}^\infty f_n (a_1^\dagger)^n\ket{0}$ is mapped via the QST protocol to the state
\begin{equation} \label{eq:psifdef}\ket{\Psi_f} = \sum_{n=0}^\infty f_n (-a_2^\dagger \cos(\theta) -a_4^\dagger \sin(\theta))^n \ket{0}, \end{equation}
where the term associated to $a_4$ represents the photons lost in the process.
By tracing over the state of the fictitious cavity $a_4$, we obtain the density matrix
\begin{equation}
\rho_f = \rho_0 + \rho_{-1}+ \rho'_f
\end{equation}
of the second cavity with $\rho_0 = \ket{\Psi_0}\bra{\Psi_0}$, \mbox{$\rho_{-1} = \mathcal{P}\ket{\Psi_{-1}}\bra{\Psi_{-1}}$} and $\rho'_f$, corresponding respectively to the cases of no photon loss, one loss and more than one loss. Here, $\ket{\Psi_0} = \sum_n f_n (-\cos(\theta) a_2^\dagger)^n \ket{0}$, $\ket{\Psi_{-1}} =a_2 \ket{\Psi}_0$ and \mbox{$\mathcal{P}=\sin^2(\theta)=1-\exp(-\kappa_f\tau)$} is the single-photon loss probability.
Remarkably, the result is still independent of the injected noise.
\subsection{Application to quantum error correction}
We now provide additional details on the simulation of the QST protocol including QEC.
In the case of a code protecting against single photon losses [{\it cf.}~Fig.~2(e) of the main text], the state of the first cavity is written in the orthogonal basis $\ket{\pm}_1=(\ket{0}_1\pm \sqrt{2}\ket{2}_1+\ket{4}_1)/2$.
After the transfer, the error is detected by realizing a projective measurement of the photon parity number in the second cavity.
We then apply a unitary $U_{0}$ or $U_{-1}$ depending on the measurement outcome $p=0,-1$ [for even and odd respectively], which maps the state of the cavity to the qubit as
\begin{eqnarray}
\Big(c_g\! \ket{-^{(p)}}_2\!+\! c_e \ket{+^{(p)}}_2\Big) &\otimes& \ket{g}_2 \nonumber \\ &\downarrow& \\ \ket{-^{(p)}}_2 &\otimes& \Big(c_g\! \ket{g}_2\! + \!c_e \ket{e}_2\Big) \nonumber. \label{eq:unitary}
\end{eqnarray}
Here $\ket{\pm^{(0)}}=\ket{0}\pm\sqrt{2}\cos^2(\theta)\ket{2}+\cos^4(\theta)\ket{4}$ and $\ket{\pm^{(-1)}}=a_2\ket{\pm^{(0)}}$, up to normalization factors.
The factors $\cos^n\theta$ arise from the deterministic decay of the Fock state components.
Note that for $\theta\neq0$, the states $\ket{\pm^{(p)}}$ are not exactly orthogonal, so that the unitary operation realizes the process of Eq.~\eqref{eq:unitary} only approximately.
As shown in Fig.~2(e) of the main text, the QEC approach applies to cavity losses.
Finally in the case of external photon additions [{\it cf.}~Fig.~2(f) of the main text], we can adapt the above procedure using instead the basis \mbox{$\ket{\pm}_1=(\ket{0}_1\pm\sqrt{2}\ket{3}_1+\ket{6}_1)/2$} to encode the state of the first cavity and measuring the photon number modulo~$3$.
For the measurement outcome $p=0$ (modulo 3), the basis states of the second cavity are \mbox{$\ket{\pm^{({0})}}_2=(\ket{0}_2\mp\sqrt{2}\cos^3(\theta)\ket{3}_2+\cos^6(\theta)\ket{6}_2)$} up to normalization factors.
For $p=-1$, the basis states are $\ket{\pm^{({-1})}}_2=a_2\ket{\pm^{({0})}}_2$, corresponding to a single photon loss.
Finally, for \mbox{$p=1$}, we obtain $\ket{\pm^{({1})}}_2=a_2^\dagger \ket{\pm^{({0})}}_2$ meaning a photon is added.
\subsection{Efficiency with coherent cat states}
Compared to the binomial codes given above, `cat codes' are based on non-orthogonal states for $\ket{\pm}_1$, where the first cavity state is encoded as a superposition of coherent states, or cat states: \begin{equation*}\ket{\pm}_1=(\ket{\alpha_\pm}_1+\ket{-\alpha_\pm}_1)/\mathcal{N}_\pm,\end{equation*} with $\alpha_+= \alpha$, $\alpha_-=i\alpha$ and $\mathcal{N}_\pm$ is a normalization factor. These states can however be realized in current experimental setups by projective measurement of the photon number parity~\cite{Haroche2006,Ofek2016}.
In Fig.~\ref{fig:cat}, we compare the fidelity of the cat states encoding with the binomial code for waveguide losses. The fidelity of the cat code is always lower than the one of the binomial code.
However, we note that, compared with the absence of correction (black solid line) and for the optimal value of $\alpha$ (here $\sqrt{2}$ as blue dashed line), this code is efficient for loss probabilities up to $\mathcal{P}\approx 0.25$.
\begin{figure}
\includegraphics[width=0.8\columnwidth]{FigSM3.pdf}
\caption{{\it Quantum error correction with cat states in the QST protocol.} (a) Fidelity as a function of the loss probability $\mathcal{P}$ considering exclusively waveguide losses ($\kappa'=0$). The non-corrected code is shown in black, the binomial code in red and the cat code in blue with the solid, dashed, dotted lines corresponding respectively to $\alpha=1,\sqrt{2},2$. \label{fig:cat}}
\end{figure}
\section{Quantum state transfer in closed systems}\label{sec:closed}
We describe the model corresponding to the setup depicted in Fig.~3(a) of the main text, where two cavities are coupled to the extremities of a waveguide of length $L$ with discretized modes $b_n$. Thus our setup consists of a multimode cavity sandwiched between two single mode cavities.
The Hamiltonian is given by~\cite{Pellizzari1997}
\begin{eqnarray}
H_\mathrm{closed} &=& \sum_{j=1,2}\left(g_j(t) a_j\sum_n (-1)^{(j-1)n} b^\dagger_n+\textrm{H.c.}\right) \nonumber \\
&+& \sum_n n \delta b^\dagger_n b_n, \label{eq:Hfiber}
\end{eqnarray}
with the mode spacing $\delta = \pi c/L$ and where the waveguide mode $n=0$ is resonant with the two cavity modes $ a_{1,2}$. In the Heisenberg picture, the formal solution for $b_n$ reads
\begin{eqnarray}
b_n &&= b_n(t_i)e^{-i \delta n (t-t_i)} \nonumber \\
&&- i \int_{t_i}^t dt' e^{-i \delta n (t-t')} \left( g_1(t')a_1(t')+(-1)^n g_2(t') a_2(t') \right). \nonumber
\end{eqnarray}
Based on this solution, the equation of motion of $a_j$ reads
\begin{eqnarray}
\dot a_j &&= -i g_j(t) \sum_n b_n(t_i) e^{-i \delta n (t-t_i)} \nonumber \\
&& -\frac{2 \pi g_j(t)}{\delta}\sum_{k=0}^\infty c_k g_j(t-2k\tau )a_j(t-2k\tau) \nonumber \\
&& -\frac{2 \pi g_j(t)}{\delta}\sum_{k=0}^\infty g_{2-j}(t-(2k+1)\tau) a_{2-j}(t-(2k+1)\tau), \nonumber \\ \label{eq:langevinclosed}
\end{eqnarray}
where $c_0=1/2$, $c_{k>0}=1$, and the time delay is \mbox{$\tau=\pi/\delta=L/c$}. We used the Fourier series of the Dirac comb function \mbox{$\sum_n e^{2\pi i n t/T}=T \sum_{k=0}^\infty \delta(t-kT)$}.
In order to obtain the function $g_j(t)$ associated with the QST pulses $\kappa_j(t)$, we first consider a situation where the second cavity and the waveguide are initially in their vacuum state $\big(a_2\ket{\Psi(t_i)}=b_n\ket{\Psi(t_i)}=0\big)$. For times $t_i<t<t_i+2\tau$, Eq.~\eqref{eq:langevinclosed} is then equivalent to Eq.~(2) of the main text with
\begin{equation}
\kappa_j(t)=2\pi g_j^2(t)/\delta. \label{eq:kappaclosed}
\end{equation}
We can now test the robustness of the QST protocol against noise, which we simulate in this closed system by considering each mode of the waveguide to be initially in a coherent state.
With respect to the definitions of App.~\ref{sec:avfid}, the initial state reads
\begin{equation}
\ket{\Psi(t_i)}=\frac{(\ket{0}_1\ket{0}_a+ \ket{1}_1\ket{1}_a) \otimes_n \ket{\alpha}_n \otimes \ket{0}_2}{\sqrt{2}},
\end{equation}
where $\ket{\alpha}_n$ is a coherent state for the waveguide mode $n$. Finally, in order to study the effect of cavity non-linearities, we include in $H_\mathrm{closed}$ a Kerr nonlinearity described by the term $-\chi \sum_{j=1,2} a_j^\dagger a_j^\dagger a_j a_j$. We then solve the Hamiltonian dynamics, with $g_j(t)$ varying according to Eq.~\eqref{eq:kappaclosed} and the time-symmetric functions $\kappa_j(t)$. The simulations of this system are in general numerically demanding. However, considering the single mode limit $\delta\gtrsim\kappa_{\max}$, only a few modes of the waveguide are excited. In our case, we chose $\kappa_{\max}=0.3\,\delta$, which allows us to restrict the simulation to only three modes $n=-1,0,1$.
\input{noise.bbl}
\end{document} |
\begin{document}
\title[Continuum families of non-displaceable Lagrangian tori in $(\mathbb{C}P^1)^{2m}$]{Continuum families of non-displaceable Lagrangian tori in $(\mathbb{C}P^1)^{2m}$}
\author{Renato Vianna}
\footnote{The author was supported by the Herschel Smith postdoctoral fellowship from
the University of Cambridge.}
\begin{abstract}
We construct a family of Lagrangian tori $\Theta^n_s \subset (\mathbb{C}P^1)^n$, $s \in
(0,1)$, where $\Theta^n_{1/2} = \Theta^n$ is the monotone twist Lagrangian
torus described in \cite{ChSch10}. We show that for $n = 2m$ and $s \ge 1/2$
these tori are non-displaceable. Then by considering $\Theta^{k_1}_{s_1} \times
\cdots \times \Theta^{k_l}_{s_l} \times (S^1_{\operatorname{eq}})^{n - \sum_i k_i} \subset
(\mathbb{C}P^1)^n$, with $s_i \in [1/2,1)$ and $k_i \in 2\mathbb{Z}_{>0}$, $\sum_i k_i \le n$ we
get several $l$-dimensional families of non-displaceable Lagrangian tori. We
also show that there exists partial symplectic quasi-states
$\zeta^{\mathfrak{b}_s}_{\textbf{e}_s}$ and linearly independent homogeneous Calabi
quasimorphisms $\mu^{\mathfrak{b}_s}_{\textbf{e}_s}$ \cite{FO311b} for which $\Theta^{2m}_s$ are
$\zeta^{\mathfrak{b}_s}_{\textbf{e}_s}$-superheavy and $\mu^{\mathfrak{b}_s}_{\textbf{e}_s}$-superheavy. We also
prove a similar result for $(\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2}, \omega_\epsilon)$, where $\{\omega_\epsilon
: 0 < \epsilon < 1\}$ is a family of symplectic forms on $\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2}$, for which $\omega_{1/2}$
is monotone.
\end{abstract}
\maketitle
\section{Introduction}
In \cite{FO312}, Fukaya-Oh-Ohta-Ono construct a one-dimensional family of
non-displaceable Lagrangian tori in $(\mathbb{C}P^1)^2$. They arise as fibres of a
(informally called) semi-toric moment map \cite[Section~3]{Wu15}, where the
fibres over the interior of the semi-toric moment polytope are Lagrangian tori,
but over a special vertex of the polytope lies a Lagrangian $S^2$ (the
anti-diagonal) where the semi-toric moment map is not differentiable.
The weighted barycentre of the semi-toric polytope was proven by
Oakley-Usher \cite{OU13} to be the Chekanov torus \cite{ChSch10} in $(\mathbb{C}P^1)^2$.
The other regular fibres are Hamiltonian isotopic to so called Chekanov type tori
described in \cite[Example 3.3.1]{Au07}. In fact, the semi-toric Lagrangian
fibration described in \cite{FO312} can be seen as a limit of almost toric
fibrations, in which `most of the fibres' are Chekanov type tori, see
\cite[Section~6.4]{Vi16a} and \cite[Remark~3.1]{ToVi15}.
The definition of Chekanov type tori can be easily extended to higher
dimensions, see Definition \ref{def: Chekanov type tori}. In particular, we can
get analogues of the non-displaceable tori \cite{FO312}. We can show that these
tori are non-displaceable in $(\mathbb{C}P^1)^{2m}$.
\subsection{Results}
\begin{thm}\label{thm: main}
For a positive even integer $n = 2m$, there is a continuum of non-displaceable Lagrangian tori
$\Theta^{2m}_s \subset (\mathbb{C}P^1)^{2m}$, $s \in [1/2, 1)$, for which $\Theta^{2m}_{1/2} =
\Theta^{2m}$ is the monotone twist Lagrangian torus described in \cite{ChSch10}. More
precisely, for any Hamiltonian $\Psi \in \mathrm{Ham} ((\mathbb{C}P^1)^{2m})$, we have that
$| \Theta^{2m}_s \cap \Psi (\Theta^{2m}_s) | \ge 2^{2m}$.
\end{thm}
The case $n = 2$ was proven in \cite{FO312}. The case $n = 1$ is clearly
false, since only the monotone circle is non-displaceable.
\begin{qu}
For $n \ge 3$ odd and $s \in [1/2, 1)$, are the tori $\Theta^n_s$ from Definition \ref{def:
Ts} (non)-displaceable?
\end{qu}
An immediate consequence of the proof of Theorem \ref{thm: main} is
\begin{cor} \label{cor: ProductTori}
For $s_i \in [1/2,1)$, and positive even integers $k_i$, $i = 1, \dots, l$, and $n \ge \sum_i k_i$,
the Lagrangian tori
\[ \Theta^{k_1}_{s_1} \times \cdots \times \Theta^{k_l}_{s_l} \times
(S^1_{\operatorname{eq}})^{n - \sum_i k_i} \subset (\mathbb{C}P^1)^n \]
are non-displaceable.
\end{cor}
Just by looking to the symplectic area spectrum of Maslov index 2 relative
homology classes we can conclude:
\begin{prp} \label{prp:toridistinct1}
The torus $\Theta^n_s$ is not symplectomorphic to $\Theta^{k_1}_{s_1} \times \cdots \times \Theta^{k_l}_{s_l} \times
(S^1_{\operatorname{eq}})^{n - \sum_i k_i}$, if $n > \sum_i k_i$.
\end{prp}
Consider the counts of holomorphic (for the standard complex structure in
$(\mathbb{C}P^1)^n$) Maslov index 2 disks with boundary in $\Theta^n_s$, respectively
$\Theta^{k_1}_{s_1} \times \cdots \times \Theta^{k_l}_{s_l}$ ($n = \sum_i k_i$),
passing through a fixed point. Among these, look at the count of disks that have
minimal area. For $s, s_i \in (1/2,1)$, this area is $a = 1-s$, respectively $1 -
s_i$ for some $i \in \{1,\dots, l\}$. It follows from Proposition \ref{prp:
Poten} that these counts of disks of smaller area are different if $l > 1$.
Moreover, we show in Proposition \ref{prp:HighMaslovArea} that higher Maslov
index holomorphic disks with boundary on $\Theta^n_s$ must have symplectic area bigger
than $a$.
Hence, one expect that in a generic family $J_t$ of almost complex structures,
where $J_0$ is the standard complex structure and $J_1$ is another regular
almost complex structure, $J_t$-holomorphic disks of positive Maslov index and
area smaller than $a$ can only appear in a ``birth-death'' phenomenon. This
should imply that the count of Maslov index 2 disks of symplectic area $a$ with
boundary in $\Theta^n_s$ is an invariant under generic choice of almost complex
structure, and hence under symplectomorphisms (in particular Hamiltonian
isotopies) acting on $\Theta^n_s$. This would allow us to prove:
\begin{cnj} \label{cnj:toridistinct}
The torus $\Theta^n_s$ is not symplectomorphic to $\Theta^{k_1}_{s_1} \times \cdots \times
\Theta^{k_l}_{s_l}$, $n = \sum_i k_i$ -- unless $l=1$ and $s_1 = s$.
\end{cnj}
A rigorous statement proving the invariance of the count of the Maslov index 2 disks of minimal
area in the above scenario and hence Conjecture \ref{cnj:toridistinct} is
expected to appear in the forthcoming work of the author together with
Egor Shelukhin and Dmitry Tonkonog.
Therefore we see that -- up to a formal proof of Conjecture
\ref{cnj:toridistinct} -- the tori obtained here differ from products of copies of the tori
obtained in \cite{FO312} and copies of the equator in $\mathbb{C}P^1$.
The idea of the proof of Theorem \ref{thm: main} is that we are
able to find bulk deformations $\mathfrak{b}_s$ for which the bulk deformed Floer
Homology of $\Theta^{2m}_s$ (decorated with some weak bounding cochain $\sigma$) is
non-zero. The invariance property of the bulk deformed Floer Cohomology under
the action of Hamiltonian diffeomorphisms \cite[Theorem~2.5]{FO311a},
allow us to conclude that the above Lagrangian tori are non-displaceable.
Based on the work of Fukaya-Oh-Ohta-Ono \cite{FO311b}, regarding spectral
invariants with bulk deformations, quasimorphisms and Lagrangian Floer theory,
we are able to strengthen our result and find families of homogeneous Calabi
quasimorphisms $\mu^{\mathfrak{b}_s}_{\textbf{e}_s}$ and partial symplectic quasi-states
$\zeta^{\mathfrak{b}_s}_{\textbf{e}_s}$, for which $\Theta^{2m}_s$ is $\mu^{\mathfrak{b}_s}_{\textbf{e}_s}$-superheavy and
$\zeta^{\mathfrak{b}_s}_{\textbf{e}_s}$-superheavy.
For the definition of homogeneous Calabi quasimorphisms, partial symplectic quasi-states and
the notion of superheaviness we refer the reader to \cite{EP03, EP06,
FO311b}.
Following closely the notation of \cite[Lemma~23.3, Theorem~23.4]{FO311b}
we summarise the above discussion as:
\begin{thm} \label{thm: heavy}
For $s \in [1/2,1)$, there exists a bulk-deformation $\mathfrak{b}_s \in
H^2((\mathbb{C}P^1)^{2m},\Lambda_{+})$, and a weak bounding cochain $b_s \in H^1(\Theta^{2m}_s,
\Lambda_0)$ for which
\[ HF(\Theta^{2m}_s, (\mathfrak{b}_s, b_s); \Lambda_{0,nov}) \cong H^*(\Theta^{2m}_s; \Lambda_{0,nov}).\]
Moreover, there are idempotents $\textbf{e}_s$ in the bulk-deformed quantum-cohomology
QH_{\mathfrak{b}_s}^*((\mathbb{C}P^1)^{2m}; \Lambda_{0,nov})$, so that $\Theta^{2m}_s$ is $\mu^{\mathfrak{b}_s}_{\textbf{e}_s}$-superheavy and
$\zeta^{\mathfrak{b}_s}_{\textbf{e}_s}$-superheavy. Here $\mu^{\mathfrak{b}_s}_{\textbf{e}_s}$,
$\zeta^{\mathfrak{b}_s}_{\textbf{e}_s}$ are respectively the homogeneous Calabi quasimorphism and partial
symplectic quasi-states coming from the bulk-deformed spectral invariant
associated with $\textbf{e}_s$ \cite[Section~14]{FO311b}.
\end{thm}
Here $\Lambda$, $\Lambda_0$, $\Lambda_{nov}$, $\Lambda_{0,nov}$ and $\Lambda_{+}$ are the Novikov rings:
\[\Lambda = \left\{ \sum_{i \ge 0} a_iT^{\lambda_i} | \, \, a_i \in \mathbb{C}, \lambda_i \in
\mathbb{R}, \lambda_i \le \lambda_{i +1}, \lim_{i \to \infty} \lambda_i = \infty \right\}, \]
\[\Lambda_0 = \left\{ \sum_{i \ge 0} a_iT^{\lambda_i} | \, \, a_i \in \mathbb{C}, \lambda_i \in
\mathbb{R}_{\ge 0}, \lambda_i \le \lambda_{i +1}, \lim_{i \to \infty} \lambda_i = \infty \right\}, \]
\[\Lambda_{nov} = \left\{ \sum_{i \ge 0} a_i q^{n_i} T^{\lambda_i} | \, \, n_i \in \mathbb{Z}, \, \, a_i \in \mathbb{C}, \lambda_i \in
\mathbb{R}, \lambda_i \le \lambda_{i +1}, \lim_{i \to \infty} \lambda_i = \infty \right\}, \]
\[\Lambda_{0,nov} = \left\{ \sum_{i \ge 0} a_i q^{n_i} T^{\lambda_i} | \, \, n_i \in \mathbb{Z}, \, \, a_i \in \mathbb{C}, \lambda_i \in
\mathbb{R}_{\ge 0}, \lambda_i \le \lambda_{i +1}, \lim_{i \to \infty} \lambda_i = \infty \right\}, \]
\[\Lambda_+ = \left\{ \sum_{i \ge 0} a_iT^{\lambda_i} | \, \, a_i \in \mathbb{C}, \lambda_i \in
\mathbb{R}_{> 0}, \lambda_i \le \lambda_{i +1}, \lim_{i \to \infty} \lambda_i = \infty \right\}. \]
The formal parameter $T$ is used to keep track of area of pseudo-holomorphic disks,
while the formal parameter $q \in \Lambda_{0,nov}$ is used to keep track of the Maslov index.
The following Corollary follows immediately from \cite[Corollary~1.10]{FO311b},
see \cite[Section~19]{FO311b} for a proof.
\begin{cor} \label{cor: linear indep. quasimorph}
The uncountable set $\{ \mu^{\mathfrak{b}_s}_{\textbf{e}_s} \}$ of homogeneous Calabi
quasimorphisms is linearly independent \cite[Definition~1.9]{FO311b}.
\end{cor}
To prove linear independence of the above homogeneous Calabi
quasimorphisms we use that the tori are disjoint, for different values of $s$.
One could ask:
\begin{qu}
Are the tori $\Theta^n_s$ Hamiltonian displaceable from $\Theta^{k_1}_{s_1} \times \cdots \times \Theta^{k_l}_{s_l} \times
(S^1_{\operatorname{eq}})^{n - \sum_i k_i}$, for $s,s_i \in (1/2,1)$?
\end{qu}
We note that by construction, these tori intersect for $s, s_i \ge 1/2$.
See \cite{ToVi15}, for non-displaceability in the case $n = 2$, between $\Theta^n_s$ (i.e. tori from
\cite{FO312}) $s \ge 3/2$ and the Clifford torus $S^1_{\operatorname{eq}} \times
S^1_{\operatorname{eq}}$.
\begin{qu}
Are the quasimorphisms arising from (particular choice of
bulk-deformation and weak-bounding cochain for) the tori in
Corollary \ref{cor: ProductTori} linearly independent for different
partitions $(k_1, \dots, k_l, n - \sum_i k_i)$ of $n$?
\end{qu}
We finish our results by pointing out that the family given in \cite{FO312}
remains non-displaceable after we perform two blowups (of the same size) on the
rank zero corners of the singular fibration described in \cite{FO312}, see Figure
\ref{fig: Bl3}. This follows from applying the same ideas as Fukaya-Oh-Ohta-Ono
did for the $\mathbb{C}P^1 \times \mathbb{C}P^1$ case.
\begin{thm} \label{thm: Bl3}
There exists a continuous family of non-displaceable Lagrangian tori $L_s^\epsilon$ in $(\mathbb{C}P^2\# \overline{\mathbb{C}P^2}II,
\omega_\epsilon) = (\mathbb{C}P^1 \times \mathbb{C}P^1BlII, \omega_\epsilon)$, where $s \in [1/2,1)$ and $\{ \omega_\epsilon | 0 < \epsilon <
1\}$ is a family of symplectic forms for which $(\mathbb{C}P^2\# \overline{\mathbb{C}P^2}II,
\omega_{1/2})$ is monotone, containing a monotone Lagrangian $L_{1/2}^{1/2}$.
\end{thm}
\begin{rmk} \label{rmk: Blk}
It is shown in \cite[Section~5]{FO311a} and \cite[Section~22]{FO311b} that there is a family
of non-displaceable Lagrangian tori in $\mathbb{C}P^2\# k\overline{\mathbb{C}P^2}$, $k \ge 2$, endowed with some
\emph{non-monotone} symplectic form.
\end{rmk}
Theorem \ref{thm: Bl3} follows, in the same spirit as \cite[Theorem~1.11]{FO311b} and
Theorem \ref{thm: heavy}, from:
\begin{thm} \label{thm: Bl3heavy}
Let $(\mathbb{C}P^2\# \overline{\mathbb{C}P^2}II, \omega_\epsilon)$ and $L_s^\epsilon$ be as in Theorem \ref{thm: Bl3}.
For $s \in [1/2,1)$, there exists a bulk-deformation $\mathfrak{b}_s^\epsilon \in
H^2(\mathbb{C}P^2\# \overline{\mathbb{C}P^2}II,\Lambda_{+})$, and a weak bounding cochain $b_s^\epsilon \in H^1(L_s^\epsilon,
\Lambda_0)$ for which
\[ HF(\mathbb{C}P^2\# \overline{\mathbb{C}P^2}II, (\mathfrak{b}_s^\epsilon, b_s^\epsilon); \Lambda_{0,nov}) \cong H^*(L_s^\epsilon; \Lambda_{0,nov}).\]
There are idempotents $\mathbf{e}_s^\epsilon$ in the bulk-deformed quantum-cohomology $QH(\mathbb{C}P^2\# \overline{\mathbb{C}P^2}II;
\Lambda)$, so that $L_s^\epsilon$ is $\mu^{\mathfrak{b}_s^\epsilon}_{\mathbf{e}_s^\epsilon}$-superheavy and
$\zeta^{\mathfrak{b}_s^\epsilon}_{\mathbf{e}_s^\epsilon}$-superheavy, where $\mu^{\mathfrak{b}_s^\epsilon}_{\mathbf{e}_s^\epsilon}$,
$\zeta^{\mathfrak{b}_s^\epsilon}_{\mathbf{e}_s^\epsilon}$ are the homogeneous Calabi quasimorphism
and partial symplectic quasi-state coming from the bulk-deformed spectral
invariant associated with $\mathbf{e}_s^\epsilon$ \cite[Section~14]{FO311b}. Moreover, the
uncountable set $\{ \mu^{\mathfrak{b}_s^\epsilon}_{\mathbf{e}_s^\epsilon} \}$ of homogeneous Calabi quasimorphisms
is linearly independent.
\end{thm}
The rest of the paper is organised as follows:
In Section \ref{sec: FloerHom}, we make a quick introduction of bulk deformed
potential and Floer cohomology for a Lagrangian $L$ satisfying Assumption \ref{ass:
ass}. We refer the reader to \cite{FO311a,FO311b,FO312} for a complete account.
We then prove Lemma \ref{lem: H^1} and Corollary \ref{cor: PotFloerHom}, to show
that, for a Lagrangian torus $T$, critical points of the potential give rise to
(bulk deformed) Floer cohomology isomorphic to the usual cohomology of $T$.
We believe that Lemma \ref{lem: H^1} is known to experts in the field, but we are
not aware of it being written down.
In Section \ref{sec: Regularity}, we define the notion of a pair $(X,L)$
consisting of a K\"{a}hler manifold $X$ and a Lagrangian submanifold $L$
being \emph{$K$-pseudohomogeneous}, for some Lie group $K$ acting
holomorphically and Hamiltonianly on $X$, leaving $L$ invariant.
We show that if $(X,L)$ is \emph{$K$-pseudohomogeneous}, any Maslov index
$2$ holomorphic disk with boundary on $L$ such that its boundary is
transverse to the $K$-orbits, is regular. We use that to show regularity
for the Maslov index $2$ disks with boundary in $\Theta^n_s$.
In Section \ref{sec: LagTori}, we define the Lagrangian tori $\Theta^n_s$, establish
their potential function, essentially computed in \cite{Au07,Au09}, and prove
that they satisfy Assumption \ref{ass: ass}, for some regular almost complex structure $J$
with the same potential function as the standard complex structure. We also prove
Proposition \ref{prp:toridistinct1} and show that holomorphic disks of Maslov index
bigger than $2$ have area bigger than $a = 1 - s$, which we use to argue why
Conjecture \ref{cnj:toridistinct} should hold.
In Section \ref{sec: Proof}, we compute the critical points of the potential
bulk deformed by some cocycle in $C^2((\mathbb{C}P^1)^n, \Lambda_+)$. We show that for $n=2m$,
there are a bulk deformation $\mathfrak{b}_s$ and a weak bounding cochain $b_s$ which is
a critical point of the potential $\mathfrak{PO}_{\mathfrak{b}_s}^{\Theta^{2m}_s}$. It then follows from
Corollary \ref{cor: PotFloerHom} that the bulk deformed Floer cohomology
$HF(\Theta^{2m}_s, (b_s,\mathfrak{b}_s);\Lambda)$ is isomorphic to the cohomology of the torus.
Non-displaceability then follows from \cite[Theorem~G]{FO3Book} which is also
stated as \cite[Theorem~2.5]{FO311a}.
In Section \ref{sec: Quasimorph}, we finish the proof of Theorem \ref{thm:
heavy}.
Finally in Section \ref{sec: Bl3}, we describe $(\mathbb{C}P^2\# \overline{\mathbb{C}P^2}II, \omega_\epsilon) =
(\mathbb{C}P^1 \times \mathbb{C}P^1BlII,\omega_\epsilon)$ as two blowups of capacity $\epsilon$ on two corners
of the moment polytope of $\mathbb{C}P^1 \times \mathbb{C}P^1$. The Lagrangian tori $L^\epsilon_s$ on the
blowup come from $\Theta^2_s \subset \mathbb{C}P^1 \times \mathbb{C}P^1$. We compute the potential for
$L^\epsilon_s$ and show the existence of critical points for some bulk
deformation. This allows us to prove Theorems \ref{thm: Bl3} and \ref{thm:
Bl3heavy}. These tori are equivalent to the fibres of the singular
fibration given by blowing up the corners of the ``semi-toric polytope''
described in \cite{FO312}, see Figure \ref{fig: Bl3}.
\section{Floer homology and the potential function} \label{sec: FloerHom}
Let $X$ be a symplectic manifold and $J$ a regular and compatible almost complex
structure. Let $L$ be a Lagrangian submanifold of $X$ (with a chosen spin
structure). We consider a unital canonical $A_\infty$ algebra structure
$\{\mathrm{m}_k\}$ on the classical cohomology $H(L;\Lambda_{0,nov})$ \cite[Section~6]{FO312},
\cite[Corollary~5.4.6, Theorem~A]{FO3Book}. The potential function
is defined from the space of weak bounding cochains $\hat{\mathcal{M}}(L)$ of $L$ to
$\Lambda_0$. We refer the reader to \cite{FO311a, FO311b, FO312, FO3Book} for
the definition.
Suppose we are given a compatible almost complex
structure $J_0$ for which $(X,L,J_0)$ satisfies:
\begin{ass} \label{ass: ass}
Let $\beta \in \pi_2(X,L)$. Assume that:
\begin{enumerate}[label=(\subscript{A}{\arabic*})]
\item If $\beta$ is represented by a non-constant $J_0$-holomorphic disk, then
$\mu_L(\beta) \ge 2$, \label{ass: A1}
\item Maslov index 2 $J_0$-holomorphic disks are regular. \label{ass: A2}
\end{enumerate}
\end{ass}
\noindent where $\mu_L$ is the Maslov index.
Throughout the paper we say an almost complex structure $J$ is regular
if it satisfies assumption $(A_2)$.
An almost complex structure satisfying Assumption \ref{ass: ass}, automatically
satisfies \cite[Condition~6.1]{FO312}, hence by
\cite[Theorem~A.1,~Theorem~A.2]{FO312} there is an embedding of
$H^1(L,\Lambda_0)$ into $\hat{\mathcal{M}}(L)$ and restricted to
$H^1(L,\Lambda_0)$ the potential function $\mathfrak{PO}^L$ is so that
\begin{equation} \label{eq: Pot=m0}
\mathrm{m}_0^b(1) = \mathfrak{PO}^L(b)q[L],
\end{equation}
where
\begin{equation} \label{eq: m0}
\mathrm{m}_0^b(1) = \sum_{k=0}^\infty \mathrm{m}_k(b,\dots,b) = \underset{\mu_L(\beta) = 2}{\sum_{\beta \in \pi_2(X, L),}}
q^{\mu_L(\beta)/2}T^{\int_\beta \omega} \exp(b \cap \partial \beta) \mathrm{ev}_{0*}([\mathscr{M}_1(\beta)]).
\end{equation}
Here $[\mathscr{M}_1(\beta)]$ is the (virtual) fundamental class of the moduli space of
$J$-holomorphic disks in the class $\beta$ with 1 marked point and $\mathrm{ev}_0:
\mathscr{M}_1(\beta) \to L$ is the evaluation map.
Using a notation closer to \cite{Au07,Au09} we define for $\beta \in
\pi_2(X,L)$:
\begin{equation} \label{def: coord z}
z_\beta(L,b) = T^{\int_\beta \omega} \exp(b \cap \partial \beta).
\end{equation}
Letting $\eta_\beta$ be the degree of $\mathrm{ev}_0: \mathscr{M}_1(\beta) \to L$, we can
write:
\begin{equation} \label{eq: Pot}
\mathfrak{PO}^L(b) = \underset{\mu_L(\beta) = 2}{\sum_{\beta \in \pi_2(X, L),}}
\eta_\beta z_\beta(L,b).
\end{equation}
We want to consider the Floer cohomology of $L$ bulk-deformed by a class
$\mathfrak{b} = T^\rho[\mathfrak{s}] \in H^2(X, \Lambda_+)$ \cite{FO311a}. The potential function will
depend on the cocycle $\mathfrak{b} \in C^2(X, L; \mathbb{Z})$, even though the Floer cohomology
doesn't. Since we use a cocycle in degree $2$ (Poincar\'e dual to a cycle of
codimension 2) the degree of the bulked deformed $A_\infty$ maps $\mathrm{m}_k^\mathfrak{b}$
\cite[(2.6)]{FO311a} is unaffected by the bulk and the bulk deformed potential is
given by:
\begin{equation} \label{eq: PotBulked}
\mathfrak{PO}^{L}_{\mathfrak{b}}(b) = \underset{\mu_L(\beta) = 2}{\sum_{\beta \in \pi_2(X, L),}}
\eta_\beta \exp[(\mathfrak{s} \cap \beta) T^\rho] z_\beta(L,b),
\end{equation}
where $b \in H^1(L,\Lambda_0)$ is a weak bounding cochain for the curved $A_\infty$
algebra $(H(L,\Lambda_{0,nov}), \{\mathrm{m}_k^\mathfrak{b}\})$, with
\begin{equation} \label{eq: m0bulked}
\mathrm{m}_0^{b,\mathfrak{b}}(1) = \sum_{k=0}^\infty \mathrm{m}_k^\mathfrak{b}(b,\dots,b) = \mathfrak{PO}^{L}_{\mathfrak{b}}(b)q[L].
\end{equation}
The fact that $b \in H^1(L,\Lambda_0)$ is a weak bounding cochain for
$(H(L,\Lambda_0), \{\mathrm{m}_k^\mathfrak{b}\})$ implies that we can define a (not curved)
$A_\infty$ algebra $(H(L,\Lambda_{0,nov}), \{\mathrm{m}_k^{b,\mathfrak{b}}\})$, where
\begin{equation} \label{eq: mkbulked}
\mathrm{m}_k^{b,\mathfrak{b}}(x_1, \dots, x_k) = \sum_{j=0}^\infty
\mathrm{m}_j^\mathfrak{b}(b,\dots,b,x_1,b,\dots,b,x_2,b,\dots,b,x_k,b,\dots,b).
\end{equation}
In particular,
\begin{gather}
(\mathrm{m}_1^{b,\mathfrak{b}})^2 = 0; \label{eq: m_1 square} \\
\mathrm{m}_1^{b,\mathfrak{b}}(\mathrm{m}_2^{b,\mathfrak{b}}(x,y)) = \pm \mathrm{m}_2^{b,\mathfrak{b}}(\mathrm{m}_1^{b,\mathfrak{b}}(x),y) \pm
\mathrm{m}_2^{b,\mathfrak{b}}(x,\mathrm{m}_1^{b,\mathfrak{b}}(y)). \label{eq: Leibniz}
\end{gather}
\begin{dfn} \label{dfn: bFH}
We define the bulk deformed Floer cohomology:
\begin{equation}
HF(L,(b,\mathfrak{b}); \Lambda_{0,nov}) = \frac{\ker(\mathrm{m}_1^{b,\mathfrak{b}})}{\mathrm{im} (\mathrm{m}_1^{b,\mathfrak{b}})}.
\end{equation}
\end{dfn}
\begin{rmk} Strengthening Assumption \ref{ass: ass} to assume regularity of
holomorphic disks with Maslov index smaller than $n-1$, one should be able to
define the Floer cohomology using the Pearl version \cite{BC09B}, and
analogously define its bulk-deformed version, which should be isomorphic to the
one in Definition \ref{dfn: bFH}. In that framework, the proof of the Leibniz rule
\eqref{eq: Leibniz} follows the same ideas as \cite[Theorem~4]{Bu10}. \end{rmk}
By the work of Fukaya-Oh-Ohta-Ono, we have:
\begin{thm}[Theorem G \cite{FO3Book}, Theorem 2.5 \cite{FO311a}] \label{thm: FOOOnonDisp}
If $\psi: X \to X$ is a Hamiltonian diffeomorphism, then the order of $\psi(L)\cap L$ is
not smaller than the rank of $HF(L,(b,\mathfrak{b}); \Lambda_{0,nov})\otimes_{\Lambda_{0,nov}} \Lambda_{nov}$.
\end{thm}
We would like to point out that the product $\mathrm{m}_2^{b,\mathfrak{b}}$ can be thought of as a
deformation of the cup product in the sense that for $x,y \in H(L,\Lambda_0)$
of pure degrees $|x|$ and $|y|$,
\begin{equation} \label{eq: cup*}
\mathrm{m}_2^{b,\mathfrak{b}}(x,y) = x \cup y + \text{other terms},
\end{equation}
where $x \cup y$ comes from counting constant disks and the other terms are a sum
of elements of degree smaller than $|x| + |y|$ in $H(L,\Lambda_{0,nov})$, since
it comes from evaluating moduli spaces $\mathscr{M}_{k,l+1}(\beta)$ to a cycle of
dimension $|x| + |y| - \mu_L(\beta)$ and $(X,L,J)$ satisfies Assumption \ref{ass: A1}.
The following Lemma is well established for the monotone case in \cite{Bu10}, and
in the general case in \cite{FO312}.
\begin{lem}[Theorem 2.3 of \cite{FO312}] \label{lem: H^1}
Take $(X,L)$ satisfying Assumption \ref{ass: ass}. Also assume that
$H(L,\Lambda_0)$ is generated by $H^1(L,\Lambda_0)$ as an algebra with respect to the
classical cup product. If $\mathrm{m}_1^{b,\mathfrak{b}}|_{H^1(L,\Lambda_{0,nov})} = 0$ then
$\mathrm{m}_1^{b,\mathfrak{b}} \equiv 0$.
\end{lem}
\begin{proof}
First we point out that $\mathrm{m}_1^{b,\mathfrak{b}}|_{H^0(L,\Lambda_{0,nov})} = 0$.
Since $H(L,\Lambda_0)$ is generated by $H^1(L,\Lambda_0)$ with respect to the
cup product, we only need to show by induction on the degree that for $x$ and
$y$ of pure degree $|x| \ge 1$, $|y|\ge 1$,
$\mathrm{m}_1^{b,\mathfrak{b}}(x \cup y) = 0$, if $\mathrm{m}_1^{b,\mathfrak{b}}(z) = 0$ for all $z$ such that
$|z| < |x| + |y|$. Using \eqref{eq: cup*},
\begin{equation*}
\mathrm{m}_1^{b,\mathfrak{b}}(x \cup y) = \mathrm{m}_1^{b,\mathfrak{b}}(\mathrm{m}_2^{b,\mathfrak{b}}(x,y)) - \mathrm{m}_1^{b,\mathfrak{b}}(\text{other terms})
= 0
\end{equation*}
by the induction hypothesis and using the Leibniz rule \eqref{eq: Leibniz}.
\end{proof}
\begin{rmk} \label{rmk: Buh}
Lemma \ref{lem: H^1} strengthens the results of \cite[Theorem~6.4.35]{FO3Book}
and \cite{Bu10}, showing that the minimal Maslov number $M_L$ of any Lagrangian
torus $L$ (or any orientable Lagrangian such that the cohomology ring is
generated by $H^1$) in $\mathbb{C}^n$ is 2, provided $L$ satisfies Assumption
\ref{ass: ass} for some $J$. That is because the Lagrangian is orientable and
$HF(L, (b,\mathfrak{b}); \Lambda) \equiv 0$ (from Theorem \ref{thm: FOOOnonDisp}, since
$L$ is displaceable), so there must be a Maslov index $2$ disk. The inequality
$2 \le M_L \le n +1$ was proven in \cite[Theorem~6.1.17]{FO3Book}, for any
spin Lagrangian $L\subset \mathbb{C}^n$ satisfying Assumption \ref{ass: ass}, via the
use of a spectral sequence.
\end{rmk}
\begin{dfn} \label{dfn: crit Point} Take $(X,L)$ satisfying the assumptions of
Lemma \ref{lem: H^1}. Assume that $\pi_1(L) \cong H_1(L, \mathbb{Z})$ and $\pi_2(X, L)
\cong \pi_2(X) \oplus H_1(L, \mathbb{Z})$. So, we are able to write the potential
function \eqref{eq: PotBulked} in terms of $z_i = z_{\beta_i}$, for some $\beta_1, \dots,
\beta_n \in \pi_2(X, L)$, where $\partial \beta_1, \dots, \partial \beta_n$ is a basis
of $H_1(L, \mathbb{Z})$. We say that $b$ is a \emph{critical point} of $\mathfrak{PO}^{L}_{\mathfrak{b}}(b)$
if:
\[ z_i \frac{\partial
\mathfrak{PO}^{L}_{\mathfrak{b}}(b)}{\partial z_i} = 0, \qquad i = 1, \dots, n. \]
\end{dfn}
\begin{cor}[Theorem 2.3 of \cite{FO312}] \label{cor: PotFloerHom}
Take $(X,L)$ satisfying the assumptions of Lemma \ref{lem: H^1} and Definition
\ref{dfn: crit Point}. If $b$ is a critical point of $\mathfrak{PO}^{L}_{\mathfrak{b}}(b)$
\eqref{eq: PotBulked} for $\mathfrak{b} = T^{\rho}[\mathfrak{s}] \in H^2(X, \Lambda_+)$, then
$HF(L,(b,\mathfrak{b});\Lambda) \cong H(L; \Lambda)$.
\end{cor}
\begin{proof}
Take a basis $x_1, \dots, x_n$ of $H_1(L, \mathbb{Z})$. Let $\beta_1, \dots,
\beta_n \in \pi_2(X, L) \cong \pi_2(X) \oplus H_1(L, \mathbb{Z})$ be so that $\partial
\beta_i = x_i \in H_1(L, \mathbb{Z})$ and write the potential $\mathfrak{PO}^{L}_{\mathfrak{b}}(b)$
\eqref{eq: PotBulked} in terms of $z_i = z_{\beta_i}$.
Since $\mathfrak{s}$ is of degree $2$, we have that $\mathrm{m}_1^{b,\mathfrak{b}}(\sigma)$ for $\sigma
\in H^1(L,\Lambda)$ only counts contributions of Maslov index 2 disks. A
Maslov index 2 $J$-holomorphic disk in the class $\beta = \gamma + k_1 \beta_1
+ \cdots + k_n \beta_n$, $\gamma \in \pi_2(X)$, contributes to $\mathrm{m}_1^{b,\mathfrak{b}}(\sigma)$
as
$$ \sum _i k_i(\sigma \cap x_i) \eta_\beta \exp[(\mathfrak{s} \cap \beta)T^\rho]
T^{\int_\gamma \omega} z_1^{k_1}\cdots z_n^{k_n}. $$
Summing all contributions of Maslov index 2 $J$-holomorphic disks we have:
$$ \mathrm{m}_1^{b,\mathfrak{b}}(\sigma) = \sigma \cap \sum_i x_i \left(z_i \frac{\partial
\mathfrak{PO}^{L}_{\mathfrak{b}}(b)}{\partial z_i} \right).$$
Therefore, if $b$ is a critical point of $\mathfrak{PO}_{\mathfrak{b}}^L(b)$, we have that
$\mathrm{m}_1^{b,\mathfrak{b}}|_{H^1(L,\Lambda)} = 0$ and by Lemma \ref{lem: H^1},
$\mathrm{m}_1^{b,\mathfrak{b}} \equiv 0$, so $HF(L,(b,\mathfrak{b});\Lambda_{0,nov}) \cong H(L; \Lambda_{0,nov})$.
\end{proof}
\section{Regularity Lemma} \label{sec: Regularity}
We now move to the K\"{a}hler setting and we discuss a Lemma that we will use to
prove regularity for Maslov index 2 disks with boundary on $\Theta^n_s$ with respect to
the standard complex structure in $(\mathbb{C}P^1)^n$. The following definition is inspired in
\cite[Definition~1.1.1]{EL15b}.
\begin{dfn} \label{dfn: almHomogeneuos}
Let $L$ be an $n$ dimensional Lagrangian in a K\"{a}hler manifold $X$. Assume
that $K$ is a Lie group of dimension $n-1$ acting Hamiltonianly and
holomorphically on $X$ preserving $L$. Assume that the action restricted to $L$
is free. Then we say that $(X,L)$ is \emph{$K$-pseudohomogeneous}.
\end{dfn}
We get then the following Lemma:
\begin{lem} \label{lem: almHom}
Let $(X,L)$ be $K$-pseudohomogeneous, for some Lie group $K$.
If $u$ is a Maslov index 2 holomorphic disk such that $\partial
u$ is transverse to the $K$-orbits, then $u$ is regular.
\end{lem}
The proof of the above Lemma relies on the Lemmas below, very similar to
\cite[Lemmas~5.19, \ 5.20]{Vi13}.
\begin{lem} \label{lem: Vi's}
Let $u: \mathbb{D} \to X$ be a Maslov index 2 disk in a K\"ahler manifold $X$ of
complex dimension $n$ with boundary on a Lagrangian $L$. Assume that
$u_{|\partial \mathbb{D}}$ is an immersion. Call $W = du(r\sfrac{\partial}{\partial \theta})$ a
holomorphic vector field along $u$ vanishing at $0$ and tangent to the boundary.
Assume also that there exists $V_1,\dots, V_{n-1}$ holomorphic vector fields in
$u^* TX$ such that $W \wedge V_1 \wedge \cdots \wedge V_{n-1} \ne 0$ along the
boundary of $u$. Then $u$ is an immersion and no linear combination of the
$V_i$'s is tangent to $u(\mathbb{D})$.
\end{lem}
\begin{proof} Up to reparametrization, we may assume $du(0) \ne 0$.
The result follows from the fact that the zeros of $\det^2 (W \wedge V_1 \wedge
\cdots \wedge V_{n-1})$ computes the Maslov index, which is assumed to be
$2$. So $W \wedge V_1 \wedge \cdots \wedge V_{n-1}$ can only vanish once
(with order 1). Since $W$ already vanishes at $0$, we cannot have either
$du(x) = 0$ or a linear combination of the $V_i$'s being a complex multiple
of $W$.
\end{proof}
\begin{lem} \label{lem: reg}
Let $u_{\theta_1, \dots, \theta_{n-1}}$ be an $n-1$ dimensional family of
Maslov index 2 holomorphic disks in a K\"{a}hler manifold $X$ of complex
dimension $n$, $\theta_i \in (-\epsilon, \epsilon)$. If $u:= u_{0, \dots,
0}$ and $V_i := \frac{\partial u}{\partial \theta_i}$ satisfy the hypothesis of
Lemma \ref{lem: Vi's}, then $u$ is regular.
\end{lem}
\begin{proof}
It follows similar arguments as in \cite[Lemma~5.19]{Vi13}. Using Lemma
\ref{lem: Vi's}, we are able to split $u^* TX = T\mathbb{D} \oplus \mathfrak{L}_1 \oplus \cdots
\oplus \mathfrak{L}_{n-1}$, as holomorphic vector bundles where $\mathfrak{L}_i$ is the trivial line
bundle generated by $V_i$. Also, $u_{|\partial \mathbb{D}}^* TL = T\partial \mathbb{D} \oplus \mathrm{Re} (\mathfrak{L}_1)
\oplus \cdots \oplus \mathrm{Re}(\mathfrak{L}_{n-1})$. As in \cite[proof of Lemma~5.19]{Vi13}, we see
that the kernel of the linearised $\bar{\partial}$ operator is isomorphic to
$$ T_{\mathrm{Id}} \mathrm{Aut} (\mathbb{D}) \bigoplus_{i = 1}^{n-1} \mathrm{hol}((\mathbb{D}, \partial \mathbb{D}), (\mathbb{C},\mathbb{R})). $$
Hence the kernel has dimension $n + 2 = n + \mu_{\Theta^n_s}(u) = \operatorname{index}$.
\begin{proof}[Proof of Lemma \ref{lem: almHom}]
Since the $K$ action is holomorphic and $\partial u$ is transverse to the
$K$-orbits, we can build $u_{\theta_1, \dots, \theta_{n-1}}$ from a neighbourhood
of $\mathrm{Id} \in K$, satisfying all the hypotheses of Lemma \ref{lem: reg}.
\end{proof}
\section{The Lagrangian tori $\Theta^n_s$} \label{sec: LagTori}
In this section we give an explicit description of the tori $\Theta^n_s$ and of their
potential function, which encodes the number of Maslov index 2 disks that $\Theta^n_s$
bounds. For a definition of the potential, we refer the reader to
\cite[Section~4]{FO310},\cite{FO3Book}. See also the definition of the
superpotential in \cite[Section 2.2]{Au09}.
The tori $\Theta^n_s$ appear as fibres of a singular Lagrangian fibration analogous to
the one described in~\cite[Example 3.3.1]{Au09}.
\subsection{Definition of $\Theta^n_s$} \label{subsec: DfnTs}
Consider $(\mathbb{C}P^1)^n$ with the standard symplectic form, for which the symplectic
area of each $\mathbb{C}P^1$ factor is $1$. For $1 \le i \le n$, let $[x_i : y_i]$ denote the
$i$-th coordinate of $(\mathbb{C}P^1)^n$. Consider the function $f = \prod_i
\frac{x_i}{y_i}$, defined from the complement of $V = \bigcup_{i,j} \{x_i = 0\}
\cap \{y_j = 0\}$ to $\mathbb{C}P^1$, whose fibres are preserved by the $T^{n-1}$ action given by
\begin{gather} (\theta_1, \dots, \theta_{n-1})\cdot ([x_1:y_1], \dots,
[x_{n-1}:y_{n-1}], [x_n:y_n]) \nonumber \\ = ([e^{i\theta_1}x_1:y_1], \dots,
[e^{i\theta_{n-1}}x_{n-1}:y_{n-1}], [e^{-i\sum_j \theta_j}x_n:y_n]), \label{eq: action}
\end{gather}
and $\mathrm{m}: (\mathbb{C}P^1)^n \to \mathbb{R}^{n-1}$ its moment map.
\begin{dfn} \label{def: Chekanov type tori} Let $\gamma$ be an embedded circle
on $\mathbb{C}^{\star}$, not enclosing $0 \in \mathbb{C}$, and $\lambda \in \mathbb{R}^{n-1}$. Define
the $\Theta^n$-type Lagrangian torus:
\[\Theta^n_{\gamma, \lambda} = \{ x \in (\mathbb{C}P^1)^n \setminus V ; f(x) \in
\gamma, \mathrm{m}(x) = \lambda\}. \]
\end{dfn}
Noting that $\mathrm{m}^{-1}(0) = \{|x_i/y_i| = |x_n/y_n|, \forall i = 1, \dots, n-1
\}$, one can see, by using the maximum principle, that $\Theta^n_{\gamma, 0}$
bounds only one $(n-1)$-family of holomorphic disks that project injectively to
the interior of $\gamma$. Call $\beta_\gamma \in \pi_2((\mathbb{C}P^1)^n,
\Theta^n_{\gamma, 0})$ the class represented by each of the above disks. We note
that there are $n$ disjoint holomorphic disks in the class $\beta_\gamma$ inside
the line $\Delta = \{[x_i:y_i] = [x_n:y_n], \forall i = 1, \dots, n-1\}$. Since
$\int_{\Delta} \omega = n$, we see that $\int_{\beta_\gamma} \omega \in (0, 1)$.
Foliate $\mathbb{C} \setminus \mathbb{R}_{\le 0}$ by curves $\gamma_s$, $s \in [0,1)$, so that
$\gamma_0$ is a point, say $1 \in \mathbb{C}$, and for $s \in (0,1)$, $\gamma_s$ is an
embedded circle so that $\int_{\beta_{\gamma_s}} \omega = s$.
\begin{dfn} \label{def: Ts}
Define the Lagrangian torus $\Theta^n_s$ to be $\Theta^n_{\gamma_s, 0}$.
\end{dfn}
The Hamiltonian isotopy class of $\Theta^n_s$ does not depend on the curve $\gamma_s$
inside $\mathbb{C} \setminus \mathbb{R}_{\le 0}$, but only on $s = \int_{\beta_{\gamma_s}}
\omega$.
Consider the divisor $D = f^{-1}(1) \cup \bigcup_i \{y_i =0\}$ and the holomorphic
$n$-form $\Omega = (\prod_i x_i - 1)^{-1} dx_1\wedge \cdots \wedge dx_n$
defined on $(\mathbb{C}P^1)^n \setminus D$, in the coordinate charts $y_i = 1$.
\begin{prp}[Auroux] \label{prp: SpecialLag}
The tori $\Theta^n_s$ are special Lagrangians \cite[Definition~2.1]{Au07} with respect
to $\Omega$.
\end{prp}
\begin{proof}
See \cite[Example~3.3.1]{Au09} and \cite[Proposition~5.2]{Au07}.
\end{proof}
Also, we clearly have:
\begin{prp}
We have that $((\mathbb{C}P^1)^n,\Theta^n_s)$ is $T^{n-1}$-pseudohomogeneous, for the action
\eqref{eq: action}.
\end{prp}
\subsection{The Potential of $\Theta^n_s$} \label{subsec: PotTs}
We come back to our Lagrangian tori $\Theta^n_s$. We would like to describe the
potential $\mathfrak{PO}^L$ in coordinates of the form \eqref{def: coord z} with
respect to a nice basis for $\pi_2((\mathbb{C}P^1)^n, \Theta^n_s)$. Fix a point $a_s \in \gamma_s$.
Consider the $S^1$ action given by the $i$-th coordinate of the $T^{n-1}$ action
described in \eqref{eq: action}. Take the orbit lying in $\Theta^n_s \cap f^{-1}(a_s)$ and
consider its parallel transport over the segment $[0, a_s]$, formed by orbits of
the considered $S^1$ action that collapse to a point over $0$, giving rise to a
Lagrangian disk. Define $\alpha_i \in \pi_2((\mathbb{C}P^1)^n, \Theta^n_s)$ to be the class of the
above disk. Also, from now on we write $\beta = \beta_{\gamma_s}$ and $H_i =
p_i^*[\mathbb{C}P^1] \in \pi_2((\mathbb{C}P^1)^n)$ the pullback of the class of the line by the $i$-th
projection. Note that $\beta, \alpha_1, \dots, \alpha_{n-1}, H_1, \dots, H_n$
are generators of $\pi_2((\mathbb{C}P^1)^n, \Theta^n_s)$. We assume that our monotone symplectic form
is so that $\int_{H_i} \omega = 1$.
Set $u = z_\beta$ and $w_i = z_{\alpha_i}$, $i \in \{1, \dots, n-1\}$. Note that
$z_{H_i}(L,b) = T^{\int_{H_i} \omega} \exp(b \cap \partial H_i) = T$.
\begin{prp}[\cite{Au07, Au09}]\label{prp: Poten}
The potential function encoding the count of Maslov index 2 holomorphic
disks with boundary on the Lagrangian tori $\Theta^n_{s}$ (for some spin
structure) is given by
\begin{equation} \label{eq: PotPN}
\mathfrak{PO}^{\Theta^n_s} = u + \frac{T}{u}(1 + w_1 + \cdots + w_{n-1})\left(1 + \frac{1}{w_1} + \cdots +
\frac{1}{w_{n-1}}\right).
\end{equation}
\end{prp}
\begin{proof}[Idea of proof]
First we consider positivity of intersection of a holomorphic disk with the
complex submanifolds $\{x_i = 0\}$, $\{y_i = 0\}$, $\{\prod_i x_i = \prod_i
y_i\}$, for all $i \in \{1, \dots, n\}$, to conclude that Maslov index 2 classes
admitting holomorphic representatives must be of the form $\beta$, $H_i - \beta
- \alpha_i + \alpha_j$, where $i,j = 1, \dots, n$ and $\alpha_n = 0$.
Computations of the holomorphic disks and their algebraic count can be done
explicitly. We omit them here since they follow a straightforward procedure as
in~\cite[Proposition 5.12]{Au07}, see the final remark after Proposition 3.3 in
\cite{Au09}. See also \cite[Section~5]{Vi13} for similar computations.
We can choose a spin structure so that every disk counts positively, i.e.,
$\mathrm{ev}_0: \mathscr{M}_1 \to \Theta^n_s$ is orientation preserving, e.g. by choosing a
trivialisation of $T\Theta^n_s$ using the boundary of $\{\alpha_1,\cdots,\alpha_{n-1},
\beta\}$, as spin structure. See \cite[Section~5.5]{Vi13} and
\cite[Section~8]{Cho04}, for a complete discussion in a similar scenario.
\end{proof}
\begin{rmk}
The potential of $\Theta^n_s$ can be obtained from the known potential
for the Clifford torus, $\underset{n}{\times} S^1_{\operatorname{eq}}$. It is given by
\[ \mathfrak{PO}^{\operatorname{Clif}} = z_1 + \cdots + z_n + \frac{T}{z_1} + \cdots + \frac{T}{z_n} .\]
We obtain the potential for $\Theta^n_s$ via the wall-crossing transformation $u =
z_n(1 + w_1 + \cdots + w_{n-1})$, $w_i = z_i/z_n$. See~\cite[Example
3.3.1]{Au09}. \end{rmk}
\begin{prp} \label{prp: Ass}
The tori $\Theta^n_s$ satisfy Assumption \ref{ass: ass}, with respect to the
standard complex structure of $(\mathbb{C}P^1)^n$.
\end{prp}
\begin{proof}
To prove Assumption \ref{ass: A1} we use a similar argument as in \cite[Example~3.3.1]{Au09}.
First we use that $\Theta^n_s$ are special Lagrangians, and hence, by
\cite[Lemma~3.1]{Au07}, the Maslov index is twice the intersection with the
divisor $D$. This shows that $\mu_{\Theta^n_s}(\beta) \ge 0$, $\forall \beta \in
\pi_2((\mathbb{C}P^1)^n,\Theta^n_s)$ represented by a holomorphic disk $u$. Now, if $u$ is a Maslov
index $0$ holomorphic disk, then $f \circ u$ is well defined and lies in $\mathbb{C}
\setminus \{1\}$, hence it is a constant in $\gamma_s$. Since the regular fibres
of $f$ are diffeomorphic to $(\mathbb{C}^*)^{n-1}$, we have that $u$ itself is constant.
The proof of Assumption \ref{ass: A2} follows from $((\mathbb{C}P^1)^n,\Theta^n_s)$ being $T^{n-1}$-pseudohomogeneous
together with Lemma \ref{lem: almHom}. We just need to check that the
$T^{n-1}$-orbit in $\Theta^n_s$ is generated by $\partial \alpha_i$, and is therefore transverse
to the boundary of the Maslov index 2 disks with boundary in $\Theta^n_s$, whose relative
homotopy classes are $\beta$ and $H_i - \beta - \alpha_i + \alpha_j$, $i,j = 1, \dots,n$
and $\alpha_n = 0$.
\end{proof}
\subsection{Regarding Proposition \ref{prp:toridistinct1}, and Conjecture \ref{cnj:toridistinct}}
We start noting that Maslov index 2 classes in $H_2((\mathbb{C}P^1)^n ,\Theta^n_s;\mathbb{Z})$ are of the
form
\begin{equation}
\beta + k_1 (H_1 -2\beta) + \cdots + k_n(H_n - 2\beta) + l_1\alpha_1 + \cdots +
l_{n-1}\alpha_{n-1},
\end{equation}
where $\beta$ is the Maslov index 2 class and $\alpha_i$ the Maslov index $0$ classes
described in Section \ref{subsec: PotTs}, viewed in $H_2((\mathbb{C}P^1)^n ,\Theta^n_s;\mathbb{Z})$ via
$\pi_2((\mathbb{C}P^1)^n, \Theta^n_s) \hookrightarrow H_2((\mathbb{C}P^1)^n ,\Theta^n_s;\mathbb{Z})$. Recalling that $\int_{H_i}
\omega = 1$ and $\int_{\alpha_i} \omega = 0$, we see that the area of Maslov index
2 disks belongs to $\{s + (1 - 2s)\mathbb{Z}\} \subset \mathbb{R}$.
\begin{proof}[Proof of Proposition \ref{prp:toridistinct1}]
We note that each torus
$$\Theta^{k_1}_{s_1} \times \cdots \times \Theta^{k_l}_{s_l} \times
(S^1_{\operatorname{eq}})^{n - \sum_i k_i}$$
bounds a disk of Maslov index 2 and symplectic area $1/2$,
if $n > \sum_i k_i$, coming from a Maslov index 2 disk in the last $\mathbb{C}P^1$
factor, with boundary in its equator $S^1_{\operatorname{eq}}$. We see that $1/2$ is in
$\{s + (1 - 2s)\mathbb{Z}\}$ if and only if $s=1/2$. This rules out the possibility of
$\Theta^{k_1}_{s_1} \times \cdots \times \Theta^{k_l}_{s_l} \times
(S^1_{\operatorname{eq}})^{n - \sum_i k_i}$ being symplectomorphic to $\Theta^n_s$ for $s \ne 1/2$.
For $s = 1/2$ the torus $\Theta^n_s$ is monotone, hence the Maslov index 2
$J$-holomorphic disks become an invariant of its symplectomorphism class --
this was first pointed out in \cite{ElPo93}, see also \cite[Theorem~6.4]{Vi13}.
This invariant allows us to distinguish between (the symplectomorphism classes of)
$\Theta^n_s$ and $\Theta^{k_1}_{s_1} \times \cdots \times \Theta^{k_l}_{s_l} \times
(S^1_{\operatorname{eq}})^{n - \sum_i k_i}$. For instance, one could look for pairs
$(\sigma_1, \sigma_2)$ of (relative homotopy classes represented by) Maslov
index 2 holomorphic disks with $\partial \sigma _1 = -\partial \sigma_2$. For the
torus $\Theta^n_s$, we must have $\partial \sigma_i = \pm \partial \beta$, i.e., only one possibility
for $\partial \sigma_i$ modulo sign, see Proposition
\ref{prp: Poten}. But for each torus $\Theta^{k_1}_{s_1} \times \cdots \times
\Theta^{k_l}_{s_l} \times (S^1_{\operatorname{eq}})^{n - \sum_i k_i}$ we have more than
one possibility for $\partial \sigma_i$, modulo sign.
\end{proof}
\begin{rmk} Note that, by Proposition \ref{prp: Poten}, the total number of
Maslov index 2 holomorphic disks with boundary in $\Theta^n_s$ is $1 + n^2$, while for
the tori $\Theta^{k_1}_{s_1} \times \cdots \times
\Theta^{k_l}_{s_l} \times (S^1_{\operatorname{eq}})^{n - \sum_i k_i}$ it is
$\sum_{i=1}^{l} (1 + k_i^2) + 2(n - \sum_{i=1}^{l}k_i) = 2n + \sum_{i=1}^{l} (k_i - 1)^2$.
Hence they are equal if and only if $(n-1)^2 = \sum_{i=1}^{l} (k_i - 1)^2$.
\end{rmk}
\begin{rmk} The above argument also proves the monotone version ($s = 1/2$) of
Conjecture \ref{cnj:toridistinct}.
\end{rmk}
We proceed now to show that holomorphic disks with boundary in $\Theta^n_s$ with
Maslov index bigger than $2$ have area bigger than $a = 1 - s$ -- the minimal area
of Maslov index 2 holomorphic disks for $s > 1/2$.
\begin{prp}\label{prp:HighMaslovArea}
For $k>0$ and $s \in [1/2,1)$, the area of a holomorphic Maslov index $2k$ disk with boundary on $\Theta^n_s$ is at
least $1 - s$, with respect to the standard complex structure in $(\mathbb{C}P^1)^n$. The
minimum only occurs if $k=1$.
\end{prp}
\begin{proof}
Maslov index $2k$ disks are in relative classes of the form
\begin{equation}\label{eq:M2k}
k\beta + k_1 (H_1 -2\beta) + \cdots + k_n(H_n - 2\beta) + l_1\alpha_1 + \cdots +
l_{n-1}\alpha_{n-1}.
\end{equation}
If they are represented by holomorphic disks, their intersection with the
divisors $\{y_i = 0\}$ and $\{\prod_{i=1}^n x_i = \prod_{i=1}^n y_i
\} = \overline{\{f^{-1}(1)\}}$ is non-negative -- recall from Definitions \ref{def: Chekanov type tori}, \ref{def: Ts}
that $1$ is in the interior of $\gamma \subset
\mathbb{C}^*$. Noting that $$\beta \cdot \{y_i = 0\} = 0, \ \ \alpha_j \cdot \{y_i = 0\} = 0, \ \
H_j \cdot \{y_i = 0\} = \delta_{ij},$$ and $$\beta \cdot \overline{\{f^{-1}(1)\}} = 1, \ \
\alpha_j \cdot \overline{\{f^{-1}(1)\}} = 0, \ \
H_j \cdot \overline{\{f^{-1}(1)\}} = 1,$$
$i,j = 1, \dots, n$, we get that
$$ k_i \ge 0 \ \forall i = 1, \dots, n \ \ \text{and} \ \ k - \sum_{i=1}^n k_i \ge 0. $$
The result follows from taking the symplectic area of \eqref{eq:M2k}, which is
$$ ks + \sum_{i=1}^n k_i (1 - 2s) = s\Big(k - \sum_{i=1}^n k_i\Big) + (1 - s)\sum_{i=1}^n k_i.
$$
Since $s \ge 1 - s$ and $k - \sum_{i=1}^n k_i \ge 0$, this is at least $(1-s)k \ge 1 - s$,
with equality only if $k = 1$.
\end{proof}
As pointed out before the above Proposition allows us to informally argue why
Conjecture \ref{cnj:toridistinct} should hold. Indeed, for $s > 1/2$, the number
of Maslov index $2$ holomorphic disks with boundary in $\Theta^n_s$ and with minimal
area $a = 1-s$ is $n^2$, by Proposition \ref{prp: Poten}. Hence the number of
Maslov index $2$ disks with boundary in $\Theta^{k_1}_{s_1} \times \cdots \times
\Theta^{k_l}_{s_l}$ and with minimal area is at most $\sum_{i=1}^l k_i^2 <
(\sum_{i=1}^l k_i)^2 = n^2$, for $l>1$.
\section{Proof of Theorem \ref{thm: main} - Bulk deformations} \label{sec: Proof}
In this section we use bulk deformations to prove that the tori $\Theta^n_s$ are
non-displaceable for $n$ even and $s \in [1/2, 1)$, as done in \cite{FO312}
for the case $n = 2$. In \cite{FO312}, Fukaya-Oh-Ohta-Ono used the cocycle
Poincar\'{e} dual to the anti-diagonal in $\mathbb{C}P^1 \times \mathbb{C}P^1$ to bulk-deform
Floer-homology. In this section we will bulk-deform Floer-homology by an element
of the form $T^{\rho}[h] \in H^*((\mathbb{C}P^1)^n, \Lambda_+)$, where $[h] \in H^2((\mathbb{C}P^1)^n, \mathbb{Z})$.
For $1 \le i \le n$, let $h_i$ be the cocycle Poincar\'e dual to
$\{ y_i = 0\} \subset (\mathbb{C}P^1)^n$.
\begin{prp} \label{prp: Bulk def Poten}
The potential for the Lagrangian tori $\Theta^n_{s}$, bulk deformed by the
cocycle $\mathfrak{b} = T^{\rho}[(k_1 + k_n)h_1 + \cdots + (k_{n-1} + k_n) h_{n-1} + k_n h_n ] \in
C^2((\mathbb{C}P^1)^n, \Lambda_+)$ is given by
\[ \mathfrak{PO}^{\Theta^n_s}_{\mathfrak{b}}(b) = u + \frac{T}{u}\left(1 + w_1 + \cdots + w_{n-1}\right)\left(1 +
\frac{e^{k_1T^{ \rho}}}{w_1} + \cdots + \frac{e^{k_{n-1}T^{
\rho}}}{w_{n-1}}\right)e^{k_{n}T^{ \rho}} \]
\end{prp}
\begin{proof}
The relative classes $\beta$, $\alpha_j$ have no intersection with
$\{ y_k = 0\}$ viewed as a cycle in $(\mathbb{C}P^1)^n \setminus \Theta^n_s$. Therefore
the disk in the class $H_i - \beta - \alpha_i + \alpha_j$ intersects
$\{ y_k = 0\}$ if and only if $k=i$, and with multiplicity 1. Hence,
the coefficient of the monomial $\sfrac{Tw_j}{uw_i}$ is bulk-deformed
by $\mathfrak{b}$ to $e^{(k_i + k_n)T^{\rho}}$.
\end{proof}
\begin{lem} \label{lem: Critical Pts}
The potential for the Lagrangian tori $\Theta^n_{s}$, bulk deformed by the
cocycle $\mathfrak{b} = T^{\rho}[(k_1 + k_n)h_1 + \cdots + (k_{n-1} + k_n) h_{n-1} + k_n h_n ] \in
C^2((\mathbb{C}P^1)^n, \Lambda_+)$ has its critical points given by:
\[ w_i = \epsilon_i e^{\frac{k_i}{2}T^\rho}, \: \: u = \epsilon_n e^{\frac{k_n}{2}T^\rho} T^{\frac{1}{2}} (1 + \sum_{i \ge
1}^{n-1} \epsilon_i e^{\frac{k_i}{2}T^\rho}), \]
where $\epsilon_i = \pm 1$.
\end{lem}
\begin{proof}
For easier notation, let $b_i = e^{k_iT^{\rho}}$. Taking the differential of the
bulk deformed potential $\mathfrak{PO}^{\Theta^n_s}_{\mathfrak{b}}(b) $ with respect to $w_i$ and equating
to $0$, we get, after multiplying by $w_i$, equations
\begin{gather} \label{eq: i} (i): w_i + \sum_{j \neq i} \frac{b_jw_i}{w_j} -
b_i(\frac{1}{w_i} + \sum_{j \neq i} \frac{w_j}{w_i}) = 0. \end{gather}
Summing all the equations $(1), \dots, (n-1)$, we end up with
\begin{equation*}
\sum_{i=1}^{n-1} w_i - \sum_{i=1}^{n-1} \frac{b_i}{w_i} = 0.
\end{equation*}
Let \[L = \sum_{i=1}^{n-1} w_i = \sum_{i=1}^{n-1} \frac{b_i}{w_i}.\]
We have that
\begin{gather*} w_iL - b_i = \sum_{j \neq i} \frac{b_jw_i}{w_j},\\
\frac{L}{w_i} - 1 = \sum_{j \neq i} \frac{w_j}{w_i}.
\end{gather*}
Substituting the above into equations $(i)$ (see \eqref{eq: i}), we get that
\begin{equation} \label{eq: 1 + L}
\left(w_i - \frac{b_i}{w_i}\right)\left(1 + L \right) = 0.
\end{equation}
So if $u, w_1, \dots, w_{n-1}$ are critical points of the
bulk deformed potential $\mathfrak{PO}^{\Theta^n_s}_{\mathfrak{b}}(b)$, besides equation \eqref{eq: 1 + L}, we
must have
\begin{equation}\label{eq: del_u}
\partial_u \mathfrak{PO}^{\Theta^n_s}_{\mathfrak{b}} = 1 - \frac{b_nT}{u^2}(1 + L)^2 = 0.
\end{equation}
Hence $L \neq -1$, and therefore
\[ w_i = \sqrt{b_i} = \epsilon_i e^{\frac{k_i}{2}T^\rho}, \: \: u = \sqrt{b_n}T^{\frac{1}{2}}(1 + L) =
\epsilon_n e^{\frac{k_n}{2}T^\rho}T^{\frac{1}{2}} (1 + \sum_{i \ge
1}^{n-1} \epsilon_i e^{\frac{k_i}{2}T^\rho}). \]
\end{proof}
We call the \emph{valuation} of an element in $\Lambda_+$ the smallest exponent
with non-zero coefficient. Looking at the expression of the critical points of
the previous Lemma, one can see that:
\begin{lem} \label{lem: val=1/2}
Looking at the critical points given in Lemma \ref{lem: Critical Pts} we have that
the valuation of $u$ is not $1/2$ if and only if $n = 2m$ and $m-1$ of the $\epsilon_i$'s are
equal to $1$ while the other $m$ $\epsilon_i$'s are equal to $-1$, where $i = 1, \dots, 2m-1$.
In that case, the valuation of $u$ is $1/2 + \rho$, provided $\sum_{i=1}^{2m-1} \epsilon_i k_i \ne 0$.
\end{lem}
Now we recall that
$$u = z_\beta = T^s \exp(b \cap \partial \beta)$$
for the class $\beta$ defined in the beginning of Section \ref{subsec: PotTs}.
By Lemma \ref{lem: val=1/2}, we have:
\begin{cor} \label{cor: CritcPotTsm} Take $s> 1/2$ and consider the cocycle
$\mathfrak{b}_s = T^{s - 1/2}[(k_1 + k_{2m})h_1 + \cdots + (k_{2m-1} + k_{2m}) h_{2m-1} +
k_{2m} h_{2m} ] \in C^2((\mathbb{C}P^1)^{2m}, \Lambda_+)$. Assume that not all $k_i$'s are $0$,
for $i= 1, \dots, 2m-1$, i.e., $[\mathfrak{b}_s]$ is not a multiple of the monotone
symplectic form. Then there exists a critical point $b_s$ of
$\mathfrak{PO}^{\Theta^{2m}_s}_{\mathfrak{b}_s}$.
\end{cor}
Recalling that $\Theta^{2m}_s$ satisfies Assumption \ref{ass: ass} (Proposition \ref{prp: Ass}), for some almost complex structure $J$, and noting that
$\Theta^{2m}_s$ is a contractible Lagrangian torus of $(\mathbb{C}P^1)^{2m}$, we have that $((\mathbb{C}P^1)^{2m},\Theta^{2m}_s)$
satisfies all the hypotheses of Corollary \ref{cor: PotFloerHom}. Therefore, from
Corollaries \ref{cor: PotFloerHom} and \ref{cor: CritcPotTsm}, we deduce:
\begin{thm} \label{thm: FloerHomTsm}
For $s \ge 1/2$ there exists a bulk $[\mathfrak{b}_s] \in H^2((\mathbb{C}P^1)^{2m}, \Lambda_+)$ and a weak
bounding cochain $b_s \in H^1(\Theta^{2m}_s,\Lambda_0)$ such that $HF(\Theta^{2m}_s,
(b_s,\mathfrak{b}_s);\Lambda_{0,nov}) \cong H(\Theta^{2m}_s,\Lambda_{0,nov})$.
\end{thm}
This proves the first part of Theorem \ref{thm: heavy}.
Theorem \ref{thm: main} follows from Theorem \ref{thm: FOOOnonDisp} and
Theorem \ref{thm: FloerHomTsm}. $\qed$
Corollary \ref{cor: ProductTori} follows from the same arguments as above using
that
$$ \mathfrak{PO}^{\Theta^{k_1}_{s_1} \times \cdots \times \Theta^{k_l}_{s_l} \times
(S^1_{\operatorname{eq}})^{n - \sum_i k_i}}_{ \mathfrak{b}} = \mathfrak{PO}^{\Theta^{k_1}_{s_1}}_{\mathfrak{b}} + \cdots + \mathfrak{PO}^{\Theta^{k_l}_{s_l}}_{ \mathfrak{b}}
+ \mathfrak{PO}^{(S^1_{\operatorname{eq}})^{n - \sum_i k_i}}_{\mathfrak{b}} \qed$$
\section{Quasi-morphisms and quasi-states} \label{sec: Quasimorph}
In this section we prove the last part of Theorem \ref{thm: heavy}. It follows
arguments similar to \cite[Theorem~23.4]{FO311b}.
\begin{lem} \label{lem: QuantCohom}
For any $\mathfrak{b} = T^{\rho}[l_1 h_1 + \cdots + l_{n-1} h_{n-1} + l_n h_n ] \in
C^2((\mathbb{C}P^1)^n, \Lambda_+)$, the bulk deformed Quantum cohomology \cite[Section~5]{FO311b} is semi-simple.
\end{lem}
\begin{proof} By \cite[Theorem~1.1.1]{FO316} (see also
\cite[Theorem~6.1]{FO310}, for the Fano case) we have an isomorphism between the
bulk deformed Quantum cohomology of a toric symplectic manifold and the Jacobian
Ring of the bulk deformed toric potential. If the bulk deformed toric potential
has only non-degenerate critical points, we can split the Quantum cohomology
ring into orthogonal algebra summands according to the factors corresponding to
the critical points under the isomorphism given in \cite[Theorem~1.1.1]{FO316}.
Naming now $z_i = z_{\beta_i}$ \eqref{def: coord z}, for $\beta_i$ the class of
the Maslov index 2 holomorphic disk intersecting $\{x_i = 0\}$, we have that the
bulk deformed potential of a toric fiber is:
\begin{equation} \label{eq: toricBulkPot}
\mathfrak{PO}_\mathfrak{b} = z_1 + \cdots + z_n + \frac{Te^{l_1T^\rho}}{z_1} + \cdots +
\frac{Te^{l_nT^\rho}}{z_n},
\end{equation}
whose critical points are given by $(z_1, \dots, z_n) = (\epsilon_1 T^{1/2}
e^{l_1T^\rho/2}, \dots , \epsilon_n T^{1/2} e^{l_nT^\rho/2})$. Hence, there
are $2^n$ idempotents of $QH_\mathfrak{b}((\mathbb{C}P^1)^n;\Lambda_{0,nov})$, $\textbf{e}_1^\mathfrak{b}$, \dots,
$\textbf{e}_{2^n}^\mathfrak{b}$ for which
$$QH_\mathfrak{b}((\mathbb{C}P^1)^n;\Lambda_{0,nov}) = \bigoplus_{i=1}^{2^n} \Lambda_{0,nov} \textbf{e}_i^\mathfrak{b} .$$
\end{proof}
In \cite[Section~17, (17.18)]{FO311b}, given $X$ a symplectic manifold
and $L$ a relatively spin Lagrangian submanifold, Fukaya-Oh-Ohta-Ono construct a homomorphism:
\begin{equation} \label{eq: iqmHom}
i^*_{\mathrm{qm},(b,\mathfrak{b})}: QH_\mathfrak{b}(X;\Lambda_{0,nov}) \to HF(L, (b,\mathfrak{b}); \Lambda_{0,nov}),
\end{equation}
which is proven to be a ring homomorphism in \cite{AFO3Prep1}, see
\cite[Remark~17.16]{FO311b} and \cite[Section~4.7]{FO310c}.
Applying Lemma \ref{lem: QuantCohom} for $(\mathbb{C}P^1)^{2m}$ and $\mathfrak{b}_s$ given in Theorem
\ref{thm: FloerHomTsm}, using that $i^*_{\mathrm{qm},(b_s,\mathfrak{b}_s)}$ is unital and $HF(\Theta^{2m}_s,(b_s,\mathfrak{b}_s); \Lambda_{0,nov})
\ne 0$, we have:
\begin{prp} \label{prp: idempotent}
There exists an idempotent $\textbf{e}_s \in QH_{\mathfrak{b}_s}((\mathbb{C}P^1)^{2m};\Lambda_{0,nov})$ for which $i^*_{\mathrm{qm},(b_s,\mathfrak{b}_s)}(\textbf{e}_s) \ne 0$
in $HF(\Theta^{2m}_s,(b_s,\mathfrak{b}_s); \Lambda_{0,nov})$.
\end{prp}
Theorem \ref{thm: heavy} follows then from Proposition \ref{prp: idempotent} and
Theorem 18.8 of \cite{FO311b}. \qed
\section{Tori in $\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2}$} \label{sec: Bl3}
In this section we prove Theorem \ref{thm: Bl3heavy}. We will describe a model
for $(\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2}, \omega_\epsilon) = (\mathbb{C}P^1 \times \mathbb{C}P^1 \# 2\overline{\mathbb{C}P^2}, \omega_\epsilon)$ which is
equivalent to performing two blowups of capacities $\epsilon$ centred at the
rank 0 elliptic singularities (corners) of the singular fibration of $\mathbb{C}P^1 \times \mathbb{C}P^1$
described in \cite{FO312}, see Figure \ref{fig: Bl3}.
\begin{figure}[h!]
\centering
\includegraphics[scale=0.4]{Bl3Fig.pdf}
\caption{Singular fibrations of $\mathbb{C}P^1 \times \mathbb{C}P^1$ and $\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2}$.}
\label{fig: Bl3}
\end{figure}
Consider $\mathbb{C}P^1 \times \mathbb{C}P^1$ with coordinates $([x_1:y_1], [x_2:y_2])$ as in Section
\ref{subsec: DfnTs}. Consider also the tori $\Theta^2_s$, the function $f =
x_1x_2/y_1y_2$, the relative class $\beta$ and $\alpha := \alpha_1$ and the
divisor $D = f^{-1}(1)\cup \{y_1= 0\} \cup \{y_2 = 0\}$, as defined in Section
\ref{subsec: PotTs}.
From Proposition \ref{prp: SpecialLag} and \cite[Lemma~3.1]{Au07},
we have that $2[D] \in H_2(\mathbb{C}P^1 \times \mathbb{C}P^1 \setminus \Theta^2_s)$ is Poincar\'e dual to the
Maslov class $\mu_{\Theta^2_s} \in H^2(\mathbb{C}P^1 \times \mathbb{C}P^1,\Theta^2_s)$. In particular the
Maslov index 2 holomorphic disks, computed in Proposition \ref{prp: Poten} for $n=2$,
do not intersect $\overline{f^{-1}(1)} \cap \{y_1= 0\} = ([1:0],[0:1]) = p_1$ and
$\overline{f^{-1}(1)} \cap \{y_2= 0\} = ([0:1],[1:0]) = p_2$.
Let $B_i(\epsilon)$ be the ball of capacity \cite[Section~12]{MDSaBook_SympTop}
$\epsilon$ (radius $\sqrt{\epsilon / \pi}$) centered at $p_i$, in the coordinate
plane $x_i = 1$, $y_j = 1$, $i,j = 1 ,2$, $i \ne j$. Denote $S_i(\epsilon) =
\partial B_i(\epsilon)$. Let $(\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2}, \omega_\epsilon)$ be the result of blowing up
\cite[Section~7]{MDSaBook_SympTop} $\mathbb{C}P^1 \times \mathbb{C}P^1$ with respect to $B_1(\epsilon)$ and
$B_2(\epsilon)$, so that the exceptional curves $E_i$ (coming from collapsing
the Hopf fibration in $S_i(\epsilon)$) have symplectic area $\omega_\epsilon
(E_i) = \epsilon$, $i = 1,2$. Let $j_\epsilon$ be the induced complex structure
and $L_s^\epsilon$ correspond to $\Theta_s^2$ after the blowup. Note that
$\epsilon$ can take any value in $(0,1)$, so that $B_1(\epsilon) \cap
B_2(\epsilon) = \emptyset$.
Note also that $f = x_1x_2/y_1y_2$ is constant along the fibers of the Hopf
fibration of both $S_1(\epsilon)$ and $S_2(\epsilon)$. In particular it gives
rise to a $(j_\epsilon,j)$-holomorphic function $\tilde{f}: \mathbb{C}P^2\# 3\overline{\mathbb{C}P^2} \to \mathbb{C}P^1$.
For computing the potential for $L_s^\epsilon$ it is convenient that the disks
of Proposition \ref{prp: Poten} remain essentially the same. This can be
obtained by stretching the complex structure $j_\epsilon$. So take $\delta$ small
enough so that $B_1(\delta) \cup B_2(\delta)$ does not intersect any Maslov
index 2 holomorphic disk. Consider a diffeomorphism $\varphi: (\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2},
\omega_\epsilon) \to (\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2}, \omega_\delta)$ coming from a finite neck stretch
\cite{EliGiHo10,CompSFT03} along $S_i(\epsilon + \delta') \subset (\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2},
\omega_\epsilon)$, see also \cite[Section~3]{Vi14},
which sends $L_s^\epsilon$ to $L_s^\delta$. The diffeomorphism $\varphi$ is
equivalent to considering an inflation along the exceptional curves $E_i$, $i =
1,2$. Set $J_\delta = \varphi^*j_\delta$, an $\omega_\epsilon$ compatible
almost complex structure.
\begin{lem} \label{lem: potBl3}
We have that $(\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2},L_s^\epsilon,J_\delta)$ satisfies Assumption \ref{ass: ass}.
The potential function for $L_s^\epsilon$ with respect to $J_\delta$ is given by:
\begin{equation}
\mathfrak{PO}^{L_s^\epsilon} = u + \frac{T}{u}(1 + w)(1 + \frac{1}{w}) + T^{1 -
\epsilon}(w + \frac{1}{w}).
\end{equation}
\end{lem}
\begin{proof}
It is enough to compute the $j_\delta$-holomorphic disks with boundary in
$L_s^\delta$. The $j_\delta$-holomorphic disks that don't intersect the
exceptional divisors $E_1$, $E_2$ correspond to the holomorphic disks in
$\mathbb{C}P^1 \times \mathbb{C}P^1$ with boundary in $\Theta^2_s$, which gives the terms
$$ u + \frac{T}{u}(1 + w)(1 + \frac{1}{w})$$ of $\mathfrak{PO}^{L_s^\delta}$, and are regular.
Let $\tilde{D}$ be the proper transform of the divisor $D \subset \mathbb{C}P^1 \times \mathbb{C}P^1$. It can be
checked that $2(\tilde{D} + E_1 + E_2)$ is Poincar\'e dual to the Maslov
class $\mu_{L_s^\delta}$. This implies Assumption \ref{ass: A1}, as in the
proof of Proposition \ref{prp: Ass}. Moreover, Maslov index 2 disks intersect
$\tilde{D} + E_1 + E_2$ once, which means that if a $j_\delta$-holomorphic disk
$u$ intersects either $E_1$ or $E_2$, by positivity of intersection, it does
not intersect $\tilde{D}$ and hence $\tilde{f}\circ u: \mathbb{D} \to \mathbb{C}^*$ must be
constant. There are two Maslov index 2 disks in the fiber $ \tilde{f}^{-1}(c)$,
for $c \in \gamma_s$. Looking at the intersections with $E_i$, and the proper
transform of $\{x_i = 0\}$ and $\{y_i = 0 \}$, we can see that the relative classes
of these disks are $H_1 - E_1 + \alpha$ and $H_2 - E_2 - \alpha$ (for some
orientation of $\alpha$). Since $\omega_\epsilon (H_i - E_i \pm \alpha) = 1 -
\epsilon$, we get the remaining term $$T^{1 - \epsilon}(w + \frac{1}{w}).$$
To show regularity of the above disks, one notes that the pre-image under
$\tilde{f}$ of a small neighbourhood $\mathcal{N}_s$ of $\gamma_s$ contains the
whole family of the above disks and is actually
toric. Moreover, $(\tilde{f}^{-1}(\mathcal{N}_s), L_s^\delta)$ is $T^2$-homogeneous
\cite{EL15b}, or if you will, $S^1$-pseudohomogeneous (Definition \ref{dfn:
almHomogeneuos}) for a $j_\delta$-holomorphic $S^1$-action transverse to $\partial
\alpha$, which shows Assumption \ref{ass: A2}.
The choice of spin structure is given by trivialising $TL_s^\epsilon$ according
to $\{\alpha, \beta\}$ and is so that the evaluation map is orientation
preserving, as in the proof of Proposition \ref{prp: Poten}. See also
\cite[Section~5.5]{Vi13} and \cite[Section~8]{Cho04}.
\end{proof}
\begin{rmk} The above potential can also be computed by a technique similar to
the one developed in \cite{FO312} and also by some gluing procedure similar to
the one developed in Section 5.2 of the ArXiv.1002.1660v1 version of
\cite{FO312} and in \cite{Wu15}.
\end{rmk}
\begin{rmk} For each $\delta' > 0$, the family $\{L_s^\epsilon: s \in [1/2, 1 -
\delta'] \}$ can be seen as fibres of an almost toric fibration (ATF) of
$\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2}$, represented by an almost toric base diagram (ATBD) analogous to the
one in Figure 9 $(A_3)$ of \cite{Vi16a}. In fact, the singular fibration
described by the second diagram in Figure \ref{fig: Bl3} can be thought as a
limit of ATFs described by sliding nodes of the ATBD in Figure 9 $(A_3)$ of \cite{Vi16a}.
Moreover, the potential $\mathfrak{PO}^{L_s^\epsilon}$ can be obtained
from the toric potential $$ \mathfrak{PO}^{\mathrm{toric}} = u_1 + u_2 + \frac{T}{u_1} +
\frac{T}{u_2} + \frac{T^{1 - \epsilon}u_1}{u_2} + \frac{T^{1 -
\epsilon}u_2}{u_1},$$ via wall-crossing transformation $u = u_1(1 + w)$, $w =
u_2/u_1$, giving another example where actual computations meet wall-crossing predictions
\cite{Au07,Au09,Vi13}.
\end{rmk}
Let $\mathfrak{s} \in C^2(\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2})$ be the cocycle Poincar\'e dual to $\{y_1 = 0\} \cup E_1$,
so $[\mathfrak{s}] = H_1 - E_2 + E_1$. Analogous to Proposition \ref{prp: Bulk def Poten}, we
have:
\begin{prp} \label{prp: bulkdef potBl3}
The potential for $L_s^\epsilon$, bulk deformed by the cocycle $\mathfrak{b} = T^{\rho}\mathfrak{s}
\in C^2(\mathbb{C}P^2\# 3\overline{\mathbb{C}P^2}, \Lambda_+)$ is given by:
\begin{equation} \label{eq: PotBulked Bl3}
\mathfrak{PO}^{L_s^\epsilon}_{\mathfrak{b}} = u + \frac{T}{u}(1 + w)(e^{T^\rho} + \frac{1}{w}) + T^{1 -
\epsilon}(e^{T^\rho}w + \frac{1}{w}).
\end{equation}
\end{prp}
\end{prp}
We can then compute the critical points of $\mathfrak{PO}^{L_s^\epsilon}_{\mathfrak{b}}$ and obtain:
\begin{lem} \label{lem: crit PotBl3}
We have that $w = - e^{\frac{-T^\rho}{2}}$ and $u = \pm T^{\frac{1}{2}}(1
- e^{\frac{-T^\rho}{2}})^{\frac{1}{2}}(e^{T^\rho -
e^{\frac{T^\rho}{2}}})^{\frac{1}{2}}$ are critical points of
$\mathfrak{PO}^{L_s^\epsilon}_{\mathfrak{b}}$. The valuations of $w$ and $u$ are respectively
$0$ and $1/2 + \rho$.
\end{lem}
Since we have that $\int_{\textbf{e}ta}\omega_\epsilon = s$ and $\int_{\alpha} \omega_\epsilon
= 0$:
\begin{lem} \label{lem: crit2 PotBl3} For $s > 1/2$ and $\mathfrak{b}_s^\epsilon = T^{s -
1/2}[\mathfrak{s}]$, there exists a weak bounding cochain $b_s^\epsilon \in
H^1(L_s^\epsilon, \Lambda_0)$ which is a critical point of
$\mathfrak{PO}^{L_s^\epsilon}_{\mathfrak{b}_s^\epsilon}$.
\end{lem}
Following similar arguments as in Sections \ref{sec: Proof} and \ref{sec: Quasimorph}, we
are able to prove Theorem \ref{thm: Bl3heavy} and consequently Theorem \ref{thm:
Bl3}. \qed
\end{document} |
\begin{document}
\title{Classification of quantum channels of information transfer}
\abstract{ A classification of states of quantum channels of information transfer is built on the basis of irreducible representations of the symmetry group of the qubit state space and properties of the density matrix spectrum. It is shown that pure disentangled states form a two-dimensional surface, and that the reason for state disentanglement is the degeneration of non-zero density matrix eigenvalues.
}
\section{ Group of symmetry of Quantum Information}
The group of symmetry in quantum information theory \(\mathcal{Q}\) is the group of unitary transformations of the qubit state space \(U(2)\) (see \cite{Vaccaro,Man}).
In reality what is important is the Lie algebra of quantum information \(\mathcal{AQ}=u(2)\), generated by the operators \(J_3\), \(J_+\), \(J_-\):
\begin{equation}
\left[J_-,J_+\right]=2J_3;\ \left[J_3,J_\pm\right]=\pm J_\pm.
\end{equation}
The report deals with physically substantial consequences of existence of group of symmetry of quantum information.\\
In problems of quantum information one applies term qubit to arbitrary two-level quantum system. Qubit pure state is characterized by state vector -- normalized vector $\cat{\psi}$:
\begin{equation}
\cat{\psi}=\cos{\frac{\theta}{2}}\cat{0}+e^{i\phi}\sin{\frac{\theta}{2}}\cat{1}.
\end{equation}
Set of state vectors forms a sphere -- Poincare sphere in quantum optics, or Bloch sphere in laser physics. Group of unitary transformations \(U(2)\) is the group of symmetry of qubit sphere.
\begin{figure}
\caption{Space of qubit state vectors: the basis states \(\cat{0}\), \(\cat{1}\).}
\label{fig:1}
\end{figure}
\section{Information transfer channels}
\paragraph{Information} is obtained through comparison of results of measurements to expected results \cite{MP,Berry}. In the case of given probability distribution $p_k$ for each of $k$ variants of expected results of measurement, value of information obtained together with expected result $m$, is \(I_m\), and average value of information obtained in each sequential measurement is determined by Shannon entropy $S_S$:
\begin{equation}
I_m\stackrel{def}{=}-\log p_m;\ S_S\stackrel{def}{=}-\sum{p_m\log p_m}.
\end{equation}
\paragraph{Information transfer channel} is an arbitrary
device used for information processing (even in the case it just stores it during given time). Channel is prepared by means of source in given state, after that the state of the channel is measured by detector. Taken together, source + channel + detector are called channel as well. Sequence of states can be random or ordered, it forms mixed state.
\begin{figure}
\caption{Information transfer channel and its components: Source, Media and Detector.}
\label{fig:2}
\end{figure}
Ordered sequence of pure states is used for deliberate information transfer.
\paragraph{Quantum channel} of information transfer differs in that the processes of channel state preparation, state distribution and its measurement can be explained with use of quantum physics laws only -- laws of classical physics are not enough for at least one of those three processes.
\subsection{Parallel channels}
The set of all the expected results of measurements is finite; that is why devices with a finite number of states suffice for information transfer.
In classical physics any state from a finite set can be given a bitwise representation. One can for instance number the points of the state space and write down those numbers in binary representation
\(\left\{k=\left[b_0,\ldots,b_p\right]\right\}\).
In this way a decomposition of an arbitrary classical information transfer channel into a set of bits takes place. Each of the bits can be transferred by a separate sub-channel (parallel channel), or one after another by a sequential channel.
\subsubsection{Decomposition problem}
A quantum channel, in contrast to a classical one, has a state space of finite dimensionality instead of a finite set of states; that is why the problem of decomposition becomes much more complicated \cite{Lew}.
Let us consider a quantum channel with state space being the product of two subspaces with dimensionalities $N_a$ and $N_b$. The set of state vectors of such a channel is a hyper-sphere with complex dimensionality $N_aN_b-1$. Division of the channel into two subchannels with dimensionalities $N_a$ and $N_b$ gives as sets of pure states of the subchannels hyperspheres with dimensionalities $N_a-1$ and $N_b-1$. The direct product of those sets has dimensionality $N_a+N_b-2$, smaller than $N_aN_b-1$ -- not every pure state of the quantum channel can be represented by pure states of the subchannels. The states that have no such representation are called entangled states; for those, total correlation (or anticorrelation) between results of measurements of the subchannels is specific.
\subsection{Density matrix}
Set of channel states formed by source forms basis \(B=\left\{\cat{k};\ k=1\ldots N\right\}\) of state space -- finite-dimensional Hilbert space.
Sequence of states generated by source can be characterized by one vector, in such case it forms pure state, or by set of different vectors, in such case it is a mix.
Universal method for description of states is given by density matrix. For pure state density matrix is projector to one-dimensional subspace of vectors collinear to state vector
\begin{equation}
\hat{\rho}\left(\psi\right)=\cat{\psi}\otimes\bra{\psi},
\end{equation}
for mixed state density matrix is weighted mix of such projectors
\begin{equation}\label{rhodef}
\hat{\rho}=\sum_{k=1}^N{p_k\roa{k}}; \ \forall k:\; 0\leq p_k\leq1;\ \sum_{k=1}^N{p_k}=1.
\end{equation}
Eigenvalues of density matrix are determined by probabilities or relative frequencies of production of respective eigenstates, and eigenvectors are the pure states produced by channel source.
Irrespective of whether the sequence of states is random or ordered, the density matrix remains the same; sorting in the process of state production by the source affects only time correlations between separate events of information transfer. Methods of investigation of information transfer channel states are equally effective in cases of spontaneous production of states and in problems of information transfer, as long as those do not deal with time correlations.
\subsection{Multi-state quantum channels of information transfer}
State space of classical channel of information transfer is finite and it can always be represented as direct product of state spaces of two-state subchannels -- for classical information transfer channel there exists possibility of decomposition of channel to set of bits. Consequences of this fact are present in all the discrete mathematics -- mathematical apparatus of classical information theory.
As to quantum channels, main consequence of existence of entangled states is in unsolvability of problem on decomposition of quantum channel of information transfer to subchannels with smaller dimensionality.
Density matrix as Hermitian matrix with unit spur has $N^2-1$ independent real parameters. For multi-state quantum channel of information transfer with dimensionality of state space $N_aN_b$, that is product of dimensionalities of state spaces of subchannels $N_a$ and $N_b\leq N_a$, number of free parameters $\left(N_a^2-1\right)\left(N_b^2-1\right)=N^2+1-N_a^2-N_b^2$ is smaller than needed one. Thus properties of channel state are not limited by properties of states of subchannels -- additional parameters of channel state are needed (for instance, correlation coefficients).
\subsubsection{Induced and entangled bases}
Hereinafter we consider paired quantum channel of information transfer in which it is possible to register simultaneously states of two subchannels (source of states prepares one common channel state).
Induced basis of common state space $\left\{\catt{k}\in \mathcal{H}\right\}$ is represented by direct products of bases $\left\{\cat{m}\in \mathcal{H}_A\right\}$ of state space of particle of sort $A$ and $\left\{\catr{n}\in \mathcal{H}_B\right\}$ of sort $B$ \begin{equation}
\catt{k=m\cdot n}=\cat{m}\otimes\catr{n}\ \forall m,n.
\end{equation}
\begin{figure}
\caption{Paired channel and its components: channel has common source and two different detectors.}
\label{fig:3}
\end{figure}
Common basis is result of arbitrary unitary transformation of induced basis. In the case of such transformation not being product of unitary transformations of subspaces at least part of vectors of common basis is some linear combination of direct products and corresponds to entangled states.
\begin{equation}\label{arbasis}
\catt{k}=\sum_{\forall n}{c^{\left\{k\right\}}_{n}\cat{\mu_k\left(n\right)}\otimes\catr{n}}
\end{equation}
Such entangled basis is useful for instance in the case of its vectors being eigenvectors of density matrix. It is characterized with set of coefficients \(c^{\left\{k\right\}}_{n}\) and with sequence of numbers \(\mu_k\left(n\right)\).
\subsubsection{Density matrix of paired channel}
Own basis of density matrix \uref{rhodef} is usually formed by vectors of state space of paired channel that are some non-trivial combinations \uref{arbasis} of vectors of induced basis, and representation of density matrix in induced basis has non-diagonal components
\begin{equation}\label{red_rho}
\hat{\rho}=
\sum_{\forall n,n'}{\Big[\sum_{\forall k}{p_{k}c^{\left\{k\right\}}_{n} c^{*\left\{k\right\}}_{n'}}\Big]
\left(\cat{\mu_k\left(n\right)}\otimes\bra{\mu_k\left(n'\right)}\right)\otimes
\left(\catr{n}\otimes\brar{n'}\right)}.
\end{equation}
\subsubsection{Density matrices of subchannels}
Density matrix of subchannel is obtained through averaging density matrix of paired channel by states of second subchannel
\[
\hat{\rho}^{\left\{a\right\}}=\sum_{\forall n}{\brar{n}\hat{\rho}\catr{n}}=\sum_{\forall m,m'}{\sum_{\forall n}{\rho_{m,m';n,n}}\cat{m}\bra{m'}}\]
\[
\hat{\rho}^{\left\{b\right\}}=\sum_{\forall m}{\bra{m}\hat{\rho}\cat{m}}
=\sum_{\forall n,n'}{\sum_{\forall m}{\rho_{m,m;n,n'}}\catr{n}\brar{n'}}
\]
With account of explicit form of density matrix in induced basis \uref{red_rho} we have
\begin{equation}\label{rho_ab}
\hat{\rho}^{\left\{a\right\}}=
\sum_{\forall m}{p_{m}^{\left\{a\right\}}\roa{m}};\
\hat{\rho}^{\left\{b\right\}}=
\sum_{\forall n}{p_{n}^{\left\{b\right\}}\eroa{n}}
\end{equation}
\begin{equation}\label{p_ab}
p_{m}^{\left\{a\right\}}=
\sum_{\forall k \forall n}{p_{k}\left|c^{\left\{k\right\}}_{n}\right|^2\delta_{m,\mu_k\left(n\right)}};\
p_{n}^{\left\{b\right\}}=
\sum_{\forall k }{p_{k}\left|c^{\left\{k\right\}}_{n}\right|^2}.
\end{equation}
\subsection{Channel types}
Depending on properties of own basis density matrix of paired channel in specific cases can be a product of density matrices of subchannels, or a mix of such products -- both those cases can take place for classical paired information transfer channel as well.
Quantum properties of paired channel, particularly ones without classical analog, take place in all the cases when channel state can not be reduced to some classical variant, like entangled states of paraqubit can not be reduced to classical analog. Non-classical states, similarly to case of paraqubit, are given name of entangled states, that's why all the states of paired quantum channel of information transfer belong to one of the following three types:
\begin{enumerate}
\item Independent subchannels: $\hat{\rho}=\hat{\rho}_A\otimes\hat{\rho}_B$;
\item Mix of independent subchannels: $\hat{\rho}= \sum_{s}{p_s\hat{\rho}^{\left(s\right)}_A \otimes\hat{\rho}^{\left(s\right)}_B}$;
\item Entangled states of subchannels: all the others.
\end{enumerate}
Negative definition of entanglement leads to need in search of characteristics and criteria of entanglement. Further in this report results of symmetry analysis of entanglement of states of paired quantum channel of information transfer are given.
\section{Representation of Lie algebra of quantum information in terms of channel state space}
Set of bases of \(N\)-dimensional state space of quantum channel is the orbit of group \(U(N)\) of unitary transformations of the space. Each unitary transformation of basis \(\cat{l}=\sum{U(l,k)\cat{k}}\) leaves density matrix the same and changes coefficients, \[ \rho_{l,l'}=\sum{U(l,k)\rho_{k,k'}U^+(l',k')}.\]
This expression gives representation of group of symmetry of quantum information in state space of information transfer quantum channel with arbitrary finite dimensionality.\\
\subsection{Ladder operators}
For each set of basis vectors, thus for each quantum information transfer channel there exists (by construction, see \cite{cvu1}) system of operators
\begin{equation}\label{ladder_def}
\hat{J}_+\stackrel{Def}{=} \sum^{N}_{k=1}{\sqrt{\left(N-k\right)k}\cat{k+1}\otimes\bra{k}};\
\hat{J}_-\stackrel{Def}{=} \sum^{N}_{k=1}{\sqrt{\left(N+1-k\right)\left(k-1\right)}\cat{k-1}\otimes\bra{k}};
\end{equation}
\[
\hat{J}_3\stackrel{Def}{=} \big(\hat{J}_-\hat{J}_+-\hat{J}_+\hat{J}_-\big)/2
=\sum^{N}_{k=1}{\left(k-(1+N)/2\right)\cat{k}\otimes\bra{k}}.
\]
The action of the operator $\hat{J}_{+}$ on an arbitrary basis vector $\cat{k}$ yields the next vector $\cat{k+1}$, or zero in the case of the vector with the largest number. The operator \(\hat{J}_3\)
has basis vectors as eigenvectors. Re-denotation $\cat{k}\rightarrow\cat{m=k-(1+N)/2}: m=-N/2\ldots N/2$ completes analogy between arbitrary basis and basis of irreducible representation of group \(U(2)\). The group \(U(2)\) can be represented by orthogonal transformations of ladder operators as well.
\subsection{Representation of density matrix}
Ladder operators realize irreducible representation of Lie algebra of group of invariance of quantum information. In the case if from physical considerations it is needed to change at least sequence of basis vectors one has to change simultaneously ladder operators of basis -- ladder operators are associated to basis of state space. From the other hand, ladder operators are generators of algebra of operators in the meaning that arbitrary operator has representation by double series
\begin{equation}
\hat{A}=\sum{A_{k,l}\hat{J}_{+}^k\hat{J}_{-}^l}.
\end{equation}
Eigenvectors of density matrix form basis to which system of ladder operators corresponds \uref{ladder_def}. In this basis it is possible to represent by means of some interpolating function
\begin{equation}
p\left(x\right):\ p\left(k-\frac{1+N}{2}\right)=p_k,
\end{equation}
density matrix in form invariant with respect to unitary transformations of state space:
\begin{equation}
\rho=p\left(\vec{n}\cdot\vec{J}\right).
\end{equation}
Here denotation \(\vec{n}\cdot\vec{J}=n_3J_3+n_-J_++n_+J_-\) is used.
\subsection{Representation of pure states of paired channel}
Ladder operators of channel are expanded to operators of subchannels, similarly to division of operator of total moment $\hat{J}_a=\hat{L}_a+\hat{S}_a$ to spin $\hat{S}_a$, and orbital $\hat{L}_a$ ones.
Result is in
classification of pure states of paired channel
by irreducible representations.
Let us denote $l=\left(N_B-1\right)/2$ and $s=\left(N_A-1\right)/2 \le l$. Basis in state space of channel
\begin{equation}
\left\{\catt{j,m_j}; m_j=-j,\ldots j;j=l-s,l-s+1,\ldots l+s\right\},
\end{equation}
is formed by eigenvectors of operators $\hat{J}_3$ and $\hat{J}^2= \hat{J}^2_3+\hat{J}_3+\hat{J}_+\hat{J}_-$:
\begin{equation}
\hat{J}_3\catt{j,m_j}=m_j\catt{j,m_j};\ \hat{J}^2\catt{j,m_j}=j(j+1)\catt{j,m_j}.
\end{equation}
Expansion of eigenvectors by induced basis
\begin{equation}\label{clebsh}
\catt{j,m_l+m_s}=\sum{
C_{j,m_l;m_s}\cat{l,m_l}\otimes\catr{s,m_s},
}
\end{equation}
is given by Clebsch-Gordan coefficients
\[C_{j,m_l;m_s}=\sum_k{\frac{(-1)^k}{k!(l-m_l-k)!(s+m_s-k)!(l+s-j-k)!(j-l+m_s+k)!(j-s-m_l+k)!}}.
\]
In this basis only two limiting states
\begin{equation}\label{dist_state}
\catt{l+s,\pm\left(l+s\right)}=\cat{l,\pm l}\otimes\catr{s,\pm s},
\end{equation}
are products of one-particle ones, all the others correspond to entangled states.
\subsection{Representation of mixed states of paired channel}
Mixed states of paired quantum channel of information transfer are weighted mix of states produced by source, that's why density matrices of those are diagonal in basis of source states
\[
\hat{\rho}=
\sum_{j=l-s}^{l+s}{\sum_{m=-j}^{j}{p_{j,m}\proa{j,m} }};\
\sum_{j=l-s}^{l+s}{\sum_{m=-j}^{j}{p_{j,m} }}=1.
\]
In induced basis density matrix of paired channel has non-zero non-diagonal components
\begin{equation}\label{full}
\hat{\rho}=\sum_{j=l-s}^{l+s}{\sum_{m=-j}^{j}{p_{j,m}\sum_{k,n=-s}^{ s}{C_{j,m-k;k}C_{j,m-n;n}
\cat{m-k}\bra{m-n}\otimes\ecat{k}\ebra{n}}}
}.
\end{equation}
Density matrices of subchannels according to \uref{rho_ab} are diagonal:
\begin{equation}
\hat{\rho}^{\left\{a\right\}}=
\sum_{\forall m}{p_{m}^{\left\{a\right\}}\roa{m}};\
\hat{\rho}^{\left\{b\right\}}=
\sum_{\forall n}{p_{n}^{\left\{b\right\}}\eroa{n}}.
\end{equation}
Probabilities of states of subchannels \uref{p_ab} are
\begin{equation}
p_k^{\left\{a\right\}}=
\sum_{j=l-s}^{l+s}{\sum_{n=-s}^{s}{p_{j,k+n}C_{j,k;n}^2}
};\
p_n^{\left\{b\right\}}=\sum_{j=l-s}^{l+s}{\sum_{k=-l}^{ l}{p_{j,n+k}C_{j,k;n}^2}}.
\end{equation}
Probabilities of simultaneous registration of one subchannel in state $k$ and the other one in state $n$ are
\begin{equation}
P_{k,n}=\sum_{j=l-s}^{l+s}{p_{j,n+k}C_{j,k;n}^2 }.
\end{equation}
Full correlation between states of subchannels takes place in specific cases, like pure states, only. From the other hand, loss of correlation between states of subchannels is possible in specific cases of reduction of coefficients at non-diagonal elements of common density matrix in induced basis \uref{full} only.
Main conclusion is in exceptionality of not entangled states.
\section{Disentanglement of states}
Reasons for absence of entanglement in typical states of classical channels of information transfer are:
\begin{itemize}
\item
Disentanglement of states can come to existence due to degeneration of eigenvalues of density matrix.
\item
Totally degenerated density matrix is proportional to unit matrix that is product of unit matrices of subsystems, that's why common density matrix is product of density matrices of subsystems, thus it corresponds to independent subchannels of information transfer.
\item
Degeneration is specific to each pure state in which density matrix is $N-1$-times degenerated ($\rho_k^{deg}=0$) as well, though among pure states only two correspond to independent subchannels of information transfer.
\end{itemize}
Thus for some pairs of eigenvalues only result of degeneration of density matrix is in disentanglement of state.
\subsection{Disentanglement of paraqubit}
The simplest example of a two-particle quantum channel of information transfer -- the paraqubit -- provides a rather clear illustration of the effect of degeneracy of the density matrix on the disentanglement of states.
\subsubsection{Basis of irreducible representations of paraqubit}
Two irreducible representations are singlet $j=1/2-1/2=0$:
\[
\catt{s}=\frac{1}{\sqrt{2}}\cat{\frac{1}{2}}\otimes\ecat{-\frac{1}{2}}
-\frac{1}{\sqrt{2}}\cat{-\frac{1}{2}}\otimes\ecat{\frac{1}{2}};
\]
and triplet $j=1/2+1/2=1$:
\[
\begin{array}{ll}
\catt{d}&=\cat{-\frac{1}{2}}\otimes\ecat{-\frac{1}{2}};\\
\catt{0}&=\frac{1}{\sqrt{2}}\cat{\frac{1}{2}}\otimes\ecat{-\frac{1}{2}}
+\frac{1}{\sqrt{2}}\cat{-\frac{1}{2}}\otimes\ecat{\frac{1}{2}};\\
\catt{u}&=\cat{\frac{1}{2}}\otimes\ecat{\frac{1}{2}}
\end{array}
\]
ones.
Arbitrary linear combination of vectors of triplet state
\(a\catt{d}+b \catt{0}+c\catt{u}=\cat{\psi}\otimes\ecat{\phi}\)
is product of two vectors under condition
\(a=k^2c;\ b=\sqrt{2}kc;\ c=\frac{1}{1+\abs{k}^2}
\).
Set of disentangled triplet states is topologically equivalent to sphere.
Fully disentangled basis is constructed by both subspaces of irreducible representations.
General state of paraqubit is
\begin{equation}\label{full_pq}
\hat{\rho}=p_s \hat{\rho}_s + p_{d}\proa{d}+ p_{u}\proa{u}+ p_{0}\proa{0}
\end{equation}
\begin{itemize}
\item $p_s$ -- part of singlet state;
\item $p_{0}$ -- part of entangled triplet state;
\item $p_{d}$, $p_{u}$ -- parts of states of independent particles.
\end{itemize}
In induced basis
\begin{equation}\label{full_pqs}
\begin{array}{l}
\hat{\rho}= p_{d}\roa{0}\otimes\eroa{0}+p_{u}\roa{1}\otimes\eroa{1}\\
+ \frac{p_{0}+p_s}{2}\big(\roa{0}\otimes\eroa{1}+\roa{1}\otimes\eroa{0}\big)\\
+ \frac{p_{0}-p_s}{2}\big(
\cat{0}\bra{1}\otimes\ecat{1}\ebra{0}+
\cat{1}\bra{0}\otimes\ecat{0}\ebra{1}
\big)
\end{array},
\end{equation}
expression for density matrix has three types of terms. In the first line terms corresponding to classical state doubling are gathered. The second line consists of terms responsible for classical mix. Only in the third line two terms responsible for entanglement are gathered. Those terms disappear at coincidence of singlet and entangled triplet states $p_s=p_{0}$.
Coincidence of other pairs of eigenvalues does not generate disentanglement of states.
\section{Conclusions}
\begin{itemize}
\item Symmetry of state space of quantum channels of information transfer is determined by group \(U(2)\) of unitary transformations of qubit state spaces.
\item Quasi-classical, i.e.\ non-entangled, states of a quantum information transfer channel of arbitrary dimensionality form a sphere isomorphic to the Poincar\'e sphere of a pure qubit state.
\item Entangled states have as state vectors basis vectors of irreducible representations of group of symmetry of state space of information transfer quantum channels. Inputs of states of subchannels to each entangled state are determined by Clebsch-Gordan coefficients.
\item Mixed states of quantum channel of information transfer are entangled. Those are disentangled in channels with degenerated density matrix.
\end{itemize}
\end{document} |
\begin{document}
\title{A flow-based ascending auction to compute buyer-optimal Walrasian prices}
\begin{abstract}
We consider a market where a set of objects is sold to a set of buyers, each equipped with a valuation function for the objects.
The goal of the auctioneer is to determine reasonable prices together with a stable allocation.
One definition of ``reasonable'' and ``stable'' is a Walrasian equilibrium, which is a tuple consisting of a price vector together with an allocation satisfying the following desirable properties: \begin{enumerate*}[label=(\roman*)]\item the allocation is market-clearing in the sense that as much as possible is sold, and \item the allocation is stable in the sense that every buyer ends up with an optimal set with respect to the given prices.\end{enumerate*}
Moreover, ``buyer-optimal'' means that the prices are smallest possible among all Walrasian prices.
In this paper, we present a combinatorial network flow algorithm to compute buyer-optimal Walrasian prices in a multi-unit matching market with additive valuation functions and buyer demands.
The algorithm can be seen as a generalization of the classical housing market auction and mimics the very natural procedure of an ascending auction.
We use our structural insights to prove monotonicity of the buyer-optimal Walrasian prices with respect to changes in supply or demand.
\end{abstract}
\section{Introduction}\label{sec:intro}
We consider a market where $m$ different indivisible types of \emph{objects} $\Omega = \{i_1, \ldots, i_m\}$ are sold to $n$ \emph{buyers} $N = \{j_1, \ldots, j_n\}$.
Every object $i \in \Omega$ has a \emph{supply} of $b_i \in \mathbb{Z}_+$ identical copies of that object.
All buyers $j \in N$ have a \emph{demand} $d_j \in \mathbb{Z}_+$, which is the maximum number of items they want to purchase.
The goal of the auctioneer is to find a per-unit \emph{price} $p(i)$ for each object $i \in \Omega$ together with an \emph{allocation} $\boldsymbol{x} \in \mathbb{Z}_+^{\Omega \times N}$ of items to buyers such that the prices $\boldsymbol{p}$ and the allocation $\boldsymbol{x}$ satisfy certain desirable properties.
Certainly, the allocation $\boldsymbol{x}$ should be \emph{feasible} in the sense that at most $b_i$ units of each object $i \in \Omega$ are sold, and each buyer $j \in N$ gets awarded at most $d_j \in \mathbb{Z}_+$ items.
We consider \emph{additive valuations with demand} $v_j \colon \mathbb{Z}_+^{\Omega} \to \mathbb{R}_+$, based on a value $v_{ij} \in \mathbb{Z}_+$ that a buyer $j \in N$ has per unit of object $i \in \Omega$, and demand $d_j \in \mathbb{Z}_+$,
\[
v_j(\boldsymbol{y}) \coloneqq \max_{\boldsymbol{y'} \leq \boldsymbol{y}}\set{\sum_{i \in \Omega} v_{ij} y'_{i} \,\middle\vert\, \sum_{i \in \Omega} y'_i \leq d_j} \qquad \text{for all } j \in N, \boldsymbol{y} \in \mathbb{Z}_+^{\Omega} .
\]
The \emph{utility} of one unit of object $i \in \Omega$ to buyer $j \in N$ is then given by $v_{ij} - p(i)$.
A standard and natural equilibrium concept for markets are \emph{Walrasian equilibria}, named after L\'eon Walras \cite{walras1874}.
To properly define those equilibria we first need some more notation.
We call a feasible allocation \emph{stable}, when each buyer $j \in N$ gets a best possible allocation $\boldsymbol{x}_{\bullet j}$ given prices $\boldsymbol{p}$, in other words, every buyer is assigned to one of her \emph{preferred bundles}, i.e. a vector from the set
\begin{equation}\label{eq:demand-set}
D_j(\boldsymbol{p}) = \arg\max_{\boldsymbol{x}_{\bullet j}} \left\{\sum_{i \in \Omega} (v_{ij} - p(i))x_{ij} \,\middle\vert\, \sum_{i \in \Omega} x_{ij} \leq d_j, 0 \leq x_{ij} \leq b_i, i \in \Omega\right\}.
\end{equation}
If a price vector $\boldsymbol{p}$ admits a stable allocation, then $\boldsymbol{p}$ is called \emph{competitive}.
Note that competitive prices always exist as $p(i) = 1 + \max_{j \in N} v_{ij}$ for all $i \in \Omega$ is competitive with stable allocation $\boldsymbol{x} = \boldsymbol{0}$.
However, these prices are not interesting.
We want to find prices that are not only competitive but also \emph{market-clearing}, i.e., as much as possible should be sold, which is $\min\{\sum b_i, \sum d_j\}$ in our model.
Prices that are competitive and market-clearing are called \emph{Walrasian} and a pair $(\boldsymbol{p}, \boldsymbol{x})$ of Walrasian prices and supporting stable allocation is called \emph{Walrasian equilibrium}.
Walrasian prices where the prices are as low as possible, i.e., where $\sum p(i)$ is minimum among all Walrasian prices, are called \emph{buyer-optimal} Walrasian prices or \emph{minimum Walrasian prices}.
The definitions above assume that the minimum selling price for an item is $0$.
If there are positive minimum selling prices, the notion of market-clearing changes.
An allocation is market-clearing if everything with a price above the minimum selling price is sold.
We can easily adapt our results for this more general case, but for simplicity we assume minimum selling prices to be $0$ in the following.
Recall that in our model the buyers' valuations $v_j\colon \mathbb{Z}_+^\Omega \to \mathbb{R}$ are additive with demand.
This is an important special case of \emph{gross substitutes} \cite{kelso1982job}, which form the biggest class of valuation functions that are in some sense well-behaved, which will be covered in Subsection~\ref{sec:intro:related-work}.
A fundamental property is that Walrasian prices are guaranteed to exist if all buyers have gross substitute valuations (see \cite{ausubel2006efficient} for multi-supply and \cite{kelso1982job} for unit-supply settings) and that the (unique) buyer-optimal Walrasian price vector can be found using an ascending auction (\cite{ausubel2006efficient,gul2000english}).
Additive valuations with demand are a natural class of valuation functions.
It will turn out that they allow for a simple flow-based algorithm to execute the auction.
Moreover, additive valuations with demand generalize the special case of single-unit matching markets.
\paragraph{Single-unit matching markets.}
The model we consider generalizes the classical matching market model, in the literature also commonly referred to as housing market (see e.g.~\cite{easley2010}).
In those markets buyers have only unit-demand, i.e., $d_j = 1$ for all $j \in N$, or more formally, $v_j(\boldsymbol{x}_{\bullet j}) = \max\{v_j(\boldsymbol{\chi}_i) \mid i \in \Omega \text{ and } x_{ij} > 0\}$, and also each object is only available in one copy, i.e., $b_i = 1$ for all $i \in \Omega$\footnote{We use $\boldsymbol{\chi}_X \in \{0,1\}^\Omega$ to denote the characteristic vector of $X$, i.e., the zero-one vector in $\{0,1\}^\Omega$ with a $1$ precisely in the entries corresponding to objects in $X$.}.
For this setting, a seminal paper by Demange et al. \cite{demange1986} describes an ascending auction which starts at the minimum possible selling price, i.e. $p(i) = 0$ for all $i \in \Omega$, and then iteratively raises the prices on a set of items that are overdemanded (which is a set of neighbors of a Hall set, i.e. a subset of buyers that are jointly interested in a set of items without enough supply for the demand in the subset) until the prices are market-clearing.
If one only raises prices on an inclusion-wise minimal overdemanded set, they show that this process yields component-wise minimum (in fact unique) competitive prices, and that these prices are market-clearing, and hence, Walrasian.
It is also well known that one can determine a socially optimal allocation together with buyer-optimal market-clearing prices with an adaption of the Hungarian algorithm, a primal-dual algorithm (see~\cite{shapley1971assignment}).
For a good overview on these topics, we refer to the book \cite{easley2010} or the short but self-contained paper by Kern et al. \cite{kern2016}.
\paragraph{The copy method.}
A na\"ive approach to reduce our more general multi-unit auction to a single-unit auction is via the following \emph{copy method}:
we replace the $b_i$ copies of object $i \in \Omega$ by $b_i$ individual objects with unit-supply, and replace the $d_j$ items demanded by buyer $j$ by $d_j$ unit-demand buyers, with the same valuations.
Certainly, an ascending auction of the single-unit instance will return Walrasian prices, but these prices are in general not buyer-optimal:
\begin{example}\label{ex:1}
Consider one buyer $N = \{1\}$ with a demand of $d_1 = 2$, and two different items $\Omega = \{\alpha, \beta\}$ with a supply of $b_\alpha = b_\beta = 1$ which are valued differently by the sole buyer, say $\boldsymbol{v}_{\bullet 1} = (5, 1)$.
If we copy the buyer, both copies will prefer object $\alpha$ until the price increased to $4$.
Now both copies of the sole buyer are indifferent between the objects and thus, $\boldsymbol{p} = (4, 0)$ and $\boldsymbol{x} = ((1, 0), (0, 1))$ is a Walrasian equilibrium.
However, considering the original situation, since the buyer is alone $\boldsymbol{p} = (0, 0)$ and $\boldsymbol{x} = (1, 1)$ is the buyer-optimal Walrasian equilibrium.
Thus, the prices computed by the {copy method} are not buyer-optimal.
\end{example}
As we just saw in the example above, an algorithm to compute buyer-optimal prices in single-unit matching markets does not trivially yield an algorithm for buyer-optimal prices when buyers have additive valuations with demands.
The good news is that these valuation functions still allow for a nice flow-based algorithm to compute overdemanded sets.
This generalizes the method to use a Hall set which works in the single-unit case and is more intuitive than the general submodular function minimization which is used for strong gross substitute valuations.
\subsection{Our contribution}
In this paper, we provide a flow-based ascending auction for multi-unit markets where all buyers have additive valuations with demand.
This auction enables us to compute buyer-optimal Walrasian prices and a stable allocation.
It generalizes the matching-based ascending auction introduced by \cite{demange1986}.
Although there already exist ascending auctions that compute the minimum Walrasian prices by Ausubel \cite{ausubel2006efficient} and Murota et al.\ \cite{murota2013computing}, these algorithms strongly use submodular function minimization to compute the set of objects on which prices should be increased in the ascending auction.
In contrast, we can replace this with a simple and fast algorithm based on max flow computations.
This enables us to prove all our results independent from other literature on auctions with more general valuation functions (gross substitutes).
Moreover, it allows us to show that minimum Walrasian prices react to changes in supply and demand in a natural way, i.e. they can only increase when supply decreases or demand increases.
This is a very natural monotonicity property, but it was not addressed in the literature yet.
More concretely, in Section~\ref{sec:flow}, we present an ascending auction which iteratively raises prices on the objects in the left-most (i.e., inclusion-wise minimal) min $s$-$t$ cut in an associated auxiliary flow network.
In Section~\ref{sec:walrasian}, we prove that the auction indeed terminates with minimum Walrasian prices and a stable allocation.
Section~\ref{sec:monotonicity} shows how one can obtain price monotonicity results purely from structural insights of the flow-based auction.
Section~\ref{sec:comparison} compares our work to previous work.
Finally, in Section~\ref{sec:adapted_step_length} we discuss some extensions of the auction to make the computation of Walrasian prices more efficient and we analyze the runtime.
\subsection{Known results and related work}\label{sec:intro:related-work}
In this subsection, we give a short summary on what is known about ascending auctions (single- and multi-unit), in particular with valuation functions that are less restrictive.
We also refer the reader to the excellent survey paper by Paes Leme \cite{leme2017gross}.
The basic setup remains the same as above, i.e. an auctioneer wants to determine prices $\boldsymbol{p} \in \mathbb{Z}_+^\Omega$ and an allocation $\boldsymbol{x} \in \mathbb{Z}_+^{\Omega \times N}$ satisfying $\sum_{j \in N} x_{ij} \leq b_i$ for all $i \in \Omega$.
However, each buyer $j \in N$ might not have a constant $v_{ij}$ for each object $i \in \Omega$ but instead a valuation function $v_j(\boldsymbol{x}_{\bullet j})$.
The net utility buyer $j$ gets given a price vector $\boldsymbol{p}$ and allocation $\boldsymbol{x}$ is then $v_j(\boldsymbol{x}_{\bullet j}) - \boldsymbol{p}^T \boldsymbol{x}_{\bullet j}$ and hence, a buyer's preferred bundle under prices $\boldsymbol{p}$ set is given by
\[
D_j(\boldsymbol{p}) \coloneqq \arg\max\{v_j(\boldsymbol{x}_{\bullet j}) - \boldsymbol{p}^T \boldsymbol{x}_{\bullet j} \mid \boldsymbol{0} \leq \boldsymbol{x}_{\bullet j} \leq b, \boldsymbol{x}_{\bullet j} \in \mathbb{Z}^\Omega\}.
\]
In the sequel, it is also convenient to define a buyer's \emph{indirect utility function}, the maximum utility a buyer can get under a given price vector:
\[
V_j(\boldsymbol{p}) \coloneqq \max\{v_j(\boldsymbol{x}_{\bullet j}) - \boldsymbol{p}^T \boldsymbol{x}_{\bullet j} \mid \boldsymbol{0} \leq \boldsymbol{x}_{\bullet j} \leq b, \boldsymbol{x}_{\bullet j} \in \mathbb{Z}^\Omega\}.
\]
A valuation function $v_j\colon \mathbb{Z}_+^\Omega \to \mathbb{Z}$ is \emph{gross substitute} if for all price vectors $\boldsymbol{p}, \boldsymbol{q} \in \mathbb{R}^\Omega$ with $\boldsymbol{p} \leq \boldsymbol{q}$ it holds that for all $\boldsymbol{x}_{\bullet j} \in D_j(\boldsymbol{p})$ there exists $\boldsymbol{y}_{\bullet j} \in D_j(\boldsymbol{q})$ such that $x_{ij} \leq y_{ij}$ for all $i \in \Omega$ with $p(i) = q(i)$.
If additionally $\sum_{i \in \Omega} x_{ij} \geq \sum_{i \in \Omega} y_{ij}$ holds for these allocations $\boldsymbol{x}, \boldsymbol{y}$, then $v_j$ is \emph{strong gross substitute}.
Intuitively, this condition expresses that if one increases the price on one object, the demand on other objects (whose prices did not increase) does not diminish.
What makes this class of functions (introduced by Kelso and Crawford \cite{kelso1982job}) particularly interesting is that Walrasian prices are guaranteed to exist if all buyers have gross substitute valuation functions (see also \cite{ausubel2006efficient}).
This is not true for more general valuation functions, which makes gross substitutes essentially the widest class of valuation functions that can be handled.\footnote{However, we should point out that there are other classes that are in some sense orthogonal to gross substitutes that have different interesting applications. In particular, \emph{complementarities} form one such interesting class.}
Gul and Stacchetti \cite{gul1999walrasian} proved some equivalent characterizations of gross substitutes (such as the \emph{single improvement} or \emph{no complementarities} condition) which are insightful on their own, but which also make gross substitute more convenient to consider from an algorithmic point of view.
Fujishige and Yang \cite{fujishige2003note} also showed that there is a very fundamental connection of gross substitutes to discrete convex analysis, namely, a valuation function is gross substitute if and only if it is $\mathrm{M}^\natural$-concave.
A few more characterizations of gross substitutes can be found in the more recent work of Ben-Zwi \cite{ben2017walrasian}, which also provides more fundamental insights to ascending auctions and \emph{overdemanded sets}.
However, the aforementioned publications only handle the single-unit case, i.e. each object is only available in one copy.
Murota et al. \cite{murota2013computing} showed that these results all transfer when going to the multi-unit setting if the valuation functions are \emph{strong} gross substitutes (going to the multi-unit setting in mathematical terms is to go from the Boolean lattice to the integer lattice as the domain of the valuation functions).
The connection to discrete convex analysis is helpful to define an ascending auction that can find Walrasian prices.
For the single-unit case, Gul and Stacchetti \cite{gul2000english} laid out the framework for an ascending auction which naturally increases prices on subsets of objects that are overdemanded.
However, they only showed that overdemanded sets must exist if the current price vector is not Walrasian using matroid theory (and giving a generalized Hall condition), they did not show how to efficiently find those sets.
Ausubel \cite{ausubel2006efficient} and Murota et al. \cite{murota2013computing,murota2016time} also considered auctions for the multi-unit setting.
Their auctions follow essentially the same idea, i.e. increasing prices on overdemanded sets but also allow different start prices and price reduction steps (on underdemanded sets, which do not occur if the starting prices are low enough and the price increments step is implemented correctly).
The key contribution of their work is the algorithm to find overdemanded sets efficiently.
All of those auctions rely on a potential function (the \emph{Lyapunov} function)
\[
L(\boldsymbol{p}) = \sum_{j \in N} V_j(\boldsymbol{p}) + \boldsymbol{p}^T \boldsymbol{b}.
\]
The main features of the Lyapunov function are that it is minimized exactly at Walrasian prices, and that it is $\mathrm{L}^\natural$-convex (and in particular, submodular) if all valuation functions are $\mathrm{M}^\natural$-concave (or equivalently, strong gross substitute).
Hence, a Walrasian price vector can be found efficiently by using submodular function minimization but also a steepest descent direction of $L$ (which corresponds to a maximum\footnote{Here \emph{maximum} does not refer to the size of the set but to the \emph{overdemandedness}, i.e. the difference between demand and supply of that set.} overdemanded set) can be computed in strongly polynomial time \cite{murota2013computing}.
A buyer-optimal Walrasian price vector can be found if one selects an (inclusion-wise) minimal minimizer of $X \mapsto L(\boldsymbol{p} + \boldsymbol{\chi}_X)$.
Moreover, they also showed that the ascending auction needs $\|\boldsymbol{p} - \boldsymbol{p}_0\|$ iterations to terminate, where $\boldsymbol{p}_0$ is the initial price vector and $\boldsymbol{p} \geq \boldsymbol{p}_0$ a Walrasian price vector.
\section{A flow-based ascending auction}\label{sec:flow}
We first sketch our flow-based ascending auction, called the \ref{alg:price-raising}.
The auction starts with the all-zero vector $p_0(i) = 0$ for all $i \in \Omega$ (or with any initial price vector $\boldsymbol{p}_0$ known to be a lower bound on the minimum Walrasian price vector $\boldsymbol{p}^*$).
In each iteration, given the current price vector $\boldsymbol{p} \in \mathbb{R}_+^{\Omega}$, the algorithm computes an integral $s$-$t$-flow $f$ of maximum value in an auxiliary flow network $G(\boldsymbol{p})$ (described below).
If the value $\val(f)$ of flow $f$ equals the sum of capacities on the $s$-leaving arcs in $G(\boldsymbol{p})$, denoted as $D_{\boldsymbol{p}}$, the algorithm stops.
Otherwise ($\val(f) < D_{\boldsymbol{p}}$), the prices on all objects in the left-most\boldsymbolootnote{Left-most min cut denotes the (unique) inclusion-wise minimal set $S$, with $s \in S$ that minimizes the sum of capacities on the outgoing arcs, $\capa(S)$. This set can be found via BFS from $s$ in the residual network which corresponds to a max flow.} min cut are raised by one unit, and the algorithm iterates with the updated price vector. \comment{ and the corresponding auxiliary flow network $G(p')$.}
Then Theorem~\ref{thm:algo_computes_minimal_competitive_prices} shows that the final price vector $\boldsymbol{p}^*$ returned by the algorithm is the minimum (and thus buyer-optimal) competitive price vector.
For computing a corresponding stable and market-clearing allocation~$\boldsymbol{x}^*$ such that $(\boldsymbol{p}^*, \boldsymbol{x}^*)$ is a buyer-optimal Walrasian equilibrium, we modify $G(\boldsymbol{p}^*)$ slightly to network $H(\boldsymbol{p}^*)$, which allows us to find a stable allocation where $\min\{\sum_{i \in \Omega} b_i, \sum_{j \in N} d_j\}$ items are sold.
Furthermore, we show that each object $i \in \Omega$ with $p^*(i) > 0$ is completely sold.
\subsection{Structure of preferred bundles}
For a fixed price vector $\boldsymbol{p}$ and a fixed buyer $j$, any minimal preferred bundle of buyer $j$ can be computed with the following greedy approach.
Let $\prec_j$ be a total ordering of the items by non-decreasing payoffs, i.e., satisfying
\begin{equation}\label{lin-extension}
v_{ij} - p(i) \geq v_{kj} - p(k) \quad \text{whenever} \quad i \prec_j k.
\end{equation}
For ease of notation, assume that $\prec_j = (1, \ldots, m)$.
Note that the ordering $\prec_j$ might not be unique due to ties in the payoffs.
However, each ordering $\prec_j$ satisfying \eqref{lin-extension} uniquely defines a minimum preferred bundle of buyer $j$ which can be constructed as follows:
\begin{enumerate}
\item Initially, no items are selected, the residual demand $d_j^{res}$ is equal to $d_j$, and $i\coloneqq 1$.
\item While $d_j^{res} > 0$ and $v_{ij} > p(i)$
\begin{enumerate}
\item set $x_{ij} = \min\{b_i, d_j^{res}\}$, and
\item iterate with residual demand $d_j^{res} \coloneqq d_j^{res} - x_{ij}$, and $i \coloneqq i+1$.
\end{enumerate}
\item Return $k_j \coloneqq i-1$
\end{enumerate}
We observe that the greedy algorithm selects exactly $b_i$ copies of all items in $\{1, \ldots, k_j-1\}$ and $d_j - \sum_{\ell=1}^{k_j-1} b_\ell$ copies from item $k_j$.
Since any minimal preferred bundle can be constructed with the greedy algorithm based on an ordering satisfying \eqref{lin-extension}, we observe that the set of \emph{all} minimal preferred bundles obeys the following structure.
Consider the two item sets $\Omega'_j$ and $\Omega_j''$ consisting of all items of larger or equal payoff than the payoff of item $k_j$, which was selected last by the greedy algorithm, i.e.,
\begin{align*}
\Omega_j'(\boldsymbol{p}) &\coloneqq \{i \in \Omega \mid v_{ij}-p(i)>v_{k_jj}-p(k_j)\}, \text{ and}\\
\Omega_j''(\boldsymbol{p}) &\coloneqq \{i \in \Omega \mid v_{ij}-p(i)=v_{k_jj}-p(k_j)\}.
\end{align*}
Note that $\Omega_j'(\boldsymbol{p})$ can be empty.
This happens when all elements in the preferred bundle have the same payoff.
If $\sum_{i \in \Omega_j'(\boldsymbol{p}) \cup \Omega_j''(\boldsymbol{p})} b_i < d_j$, a minimal preferred bundle contains less than $d_j$ items.
In such a case, and if there are objects $i \in \Omega$ of payoff $v_{ij} - p(i) = 0$, we assume what is called ``free disposal'' in the literature.
Namely, that buyer $j$ is indifferent between choosing a preferred bundle as described, or filling the demand up with items in
\[
\Omega_j'''(\boldsymbol{p}) \coloneqq \{i \in \Omega \mid v_{ij} - p(i) = 0\}.
\]
Thus, in every preferred bundle, buyer $j$ buys exactly $d_{j'}(\boldsymbol{p}) \coloneqq \sum_{i \in \Omega_j' (\boldsymbol{p})} b_i$ items from objects in $\Omega_j'(\boldsymbol{p})$ and $d_{j''}(\boldsymbol{p}) \coloneqq \min \{\sum_{i \in \Omega_j''(\boldsymbol{p}) } b_i,\ d_j - d_{j'}(\boldsymbol{p})\}$ items from objects in $\Omega_j''(\boldsymbol{p})$.
In addition, there might be up to $d_{j'''}(\boldsymbol{p}) \coloneqq \min \{\sum_{i \in \Omega_j'''(\boldsymbol{p})} b_i,\ d_j - d_{j'}(\boldsymbol{p}) - d_{j''}(\boldsymbol{p})\}$ items of objects with zero payoff in a preferred bundle.
To shorten notation, we omit the price when $\boldsymbol{p}$ is clear from the context, e.g., we write $\Omega_j'$ instead of $\Omega_j'(\boldsymbol{p})$, or $d_{j'}$ instead of $d_{j'}(\boldsymbol{p})$.
\subsection{Construction of auxiliary flow networks}\label{section:aux-network}
In each iteration of our flow-based ascending auction with current price $\boldsymbol{p}$, buyer $j$ reveals to the auctioneer the following information:
the two sets $\Omega_j'$ and $\Omega_j''$, together with the amounts $d_{j'}$ and $d_{j''}$ they want to buy from these sets, respectively, and the set $\Omega_j''' = \Omega_j'''(\boldsymbol{p})$ of items of payoff $0$ with the amount $d_{j'''}$.
Given this information, our algorithm constructs a network $G(\boldsymbol{p})$ and uses a max-flow computation to compute the set of objects on which the price has to be increased.
In the following we describe the construction of this network.
An example with two buyers and three objects is given in \Cref{fig:examplenetwork}.
The vertex set of $G(\boldsymbol{p})$ consists of a source $s$, a sink $t$, one vertex for each object $i \in \Omega$, and two vertices $j'$ and $j''$ for each buyer $j\in N$.
We denote the collection of the $j'$ vertices as $N'$ and the one of $j''$ as $N''$.
Vertices $j'$ and $j''$ correspond to the sets $\Omega_j'(\boldsymbol{p})$ and $\Omega_j''(\boldsymbol{p})$, respectively.
The arcs (with positive capacity) are defined as follows:
\begin{align*}
(s, j') & \quad\text{with capacity $d_{j'} \comment{\coloneqq \sum_{i \in \Omega_j'} b_i}$ for all $j \in N$,} \\
(s, j'') & \quad\text{with capacity $d_{j''} \comment{\coloneqq \min \{\sum_{i \in \Omega_j''} b_i,\ d_j - \sum_{i \in \Omega_j'} b_i\}}$ for each $j \in N$,} \\
(j', i) & \quad\text{with capacity $c_{j'i} \coloneqq b_i$ for all $j \in N$, $i \in \Omega_j'$} \\
(j'', i) & \quad\text{with capacity $c_{j''i} \coloneqq \min\{b_i, d_{j''}\}$ for all $j \in N$, $i \in \Omega_j''$, and} \\
(i, t) & \quad\text{with capacity $b_i$ for all $i \in \Omega$.}
\end{align*}
\begin{figure}
\caption{The network $G(\boldsymbol{p})$.}
\label{fig:examplenetwork}
\end{figure}
We denote the total capacity on the $s$-leaving arcs by $\capa(s)$, and observe that
$\capa(s) = \sum_{j \in N} (d_{j'} + d_{j''})$.
That is, $\capa(s)$ is equal to the sum of sizes of the buyers' minimal preferred bundles.
The following Lemma states that prices $\boldsymbol{p}$ are competitive if and only if the value of a max flow in $G(\boldsymbol{p})$ is equal to $\capa(s)$.
\begin{lemma}\label{lem:competitive_flow}
The prices $\boldsymbol{p}$ are competitive if and only if there is a flow $f$ in $G(\boldsymbol{p})$ of value
$\val(f) = \capa(s)$.
Moreover, given a competitive price vector $\boldsymbol{p}$, an associated stable allocation $\boldsymbol{x}$ can be computed via a single max flow computation in $G(\boldsymbol{p})$.
\end{lemma}
\begin{proof}
There is a clear one-to-one correspondence between an integral $s$-$t$-flow and a feasible assignment of objects.
Since all capacities in $G(\boldsymbol{p})$ are integral, there exists a max flow with integral values.
By assigning $f_{j'i} + f_{j''i}$ items of object $i$ to buyer $j$ we obtain a feasible assignment.
Note that at least one of the summands is zero since at most one of the arcs has positive capacity ($\Omega_j' \cap \Omega_j'' = \emptyset$).
This assignment is competitive if and only if the demand of every buyer at prices $\boldsymbol{p}$ is satisfied.
This is equivalent to the requirement that the flow satisfies all $s$-leaving arcs, i.e., $\val(f) = \sum_{j\in N} (d_{j'} + d_{j''}) = \capa(s)$.
\end{proof}
However, in this allocation $\boldsymbol{x}$, no item with payoff zero is included.
Since we aim for an allocation where as much as possible is sold, we have to allocate the other items as well.
This is easy for those items which have price zero, since then every buyer with left-over demand is willing to buy them.
We show in \Cref{theorem:market-clearing} that there are enough buyers who are willing to buy the items with a positive price as well.
We extend $G(\boldsymbol{p}^*)$ to a flow network $H(\boldsymbol{p}^*)$ such that the assignment of buyers to objects of payoff zero is possible.
To do so, we first balance the supply and the demand.
If $\sum_{i \in \Omega} b_i < \sum_{j \in N} d_j$, we add a dummy object $i_0$ with supply $b_{i_0} = \sum_{j \in N} d_j - \sum_{i \in \Omega} b_i$ and valuations $v_{i_0j} = 0$ for all $j \in N$.
If $\sum_{i \in \Omega} b_i > \sum_{j \in N} d_j$, we add a dummy buyer $j_0$ with demand $d_{j_0} = \sum_{i \in \Omega} b_i - \sum_{j \in N} d_j$ and valuations $v_{ij_0} = 0$ for all $i \in \Omega$.
Now we can assume that $\sum_{i \in \Omega} b_i = \sum_{j \in N} d_j$.
Note that the \ref{alg:price-raising} computes the same prices with the dummy object or buyer as without.
To construct $H(\boldsymbol{p}^*)$ from $G(\boldsymbol{p}^*)$, additionally we add a vertex $j'''$ for each buyer $j \in N$ and an arc $(s,j''')$ with capacity $d_{j'''}$.
Furthermore, we add for each $j \in N$ the arc $(j''',i)$ with capacity $b_i$ for $i \in \Omega_j'''$.
\begin{proposition}\label{proposition:allocation}
A max flow in network $H(\boldsymbol{p})$ and its corresponding allocation satisfy:
\begin{enumerate}[topsep=0.5ex, itemsep=0pt]
\item A feasible flow in $G(\boldsymbol{p})$ is a feasible flow in $H(\boldsymbol{p})$.
\item If for buyer $j \in N$ the flow on the arcs $(s, j')$ and $(s, j'')$ is saturated, $j$ is assigned to one of her preferred bundles at prices $\boldsymbol{p}$.
\item If $\boldsymbol{p}$ is a Walrasian price vector, the allocation induced by a max flow is stable, i.e., every buyer obtains a preferred bundle.
\end{enumerate}
\end{proposition}
Note that part 3 of the proposition is due to the fact that there exists an allocation where everything is sold (including dummy items), so the max flow saturates all $s$-leaving arcs, since demand and supply are balanced.
Hence in the corresponding allocation every buyer gets a preferred bundle.
\subsection{Computation of the buyer-optimal Walrasian equilibrium}
Here we formally describe the \ref{alg:price-raising}.
Each of its iterations can be done in polynomial time since the network can be constructed in polynomial time and only one max flow computation is needed.
The intuition of the \ref{alg:price-raising} is to increase the price of a set of objects whenever the demand on this set exceeds the supply.
It is natural to increase the prices of the objects of an overdemanded subset until the buyers that were interested in these objects get interested in other objects as well.
This is exactly what happens in the following algorithm.
\begin{algorithm}
\SetAlgoRefName{Price-Raising Algorithm}
\KwIn{Supplies $b_i$, demands $d_j$, valuations $v_{ij}$ for all $i \in \Omega$ and all $j \in N$, initial prices $\boldsymbol{p}_0 \coloneqq \boldsymbol{0}$}
\KwOut{Buyer-optimal Walrasian prices $\boldsymbol{p}^*$}
Initialize: $\ell \coloneqq 0$\\
Construct the auxiliary network $G_0 \coloneqq G(\boldsymbol{p}_0)$ and find an integral max flow $f^0$ in it.\\
\While{$\val(f^\ell) < \capa_{G_\ell}(s)$}{
Determine the left-most min cut $C_\ell$\\
$\boldsymbol{p}_{\ell+1} \coloneqq \boldsymbol{p}_{\ell} + \boldsymbol{\chi}_{C_{\ell} \cap \Omega}$\\
Construct the network $G_{\ell+1} \coloneqq G(\boldsymbol{p}_{\ell +1 })$\\
Find an integral max flow in $G_{\ell+1}$.\\
$\ell \coloneqq \ell +1$
}
\Return{$\boldsymbol{p}^* \coloneqq \boldsymbol{p}_{\ell}$}
\caption{}
\label{alg:price-raising}
\end{algorithm}
In \Cref{section:prices} we show that the prices computed by the \ref{alg:price-raising} are the component-wise minimum competitive prices.
Given component-wise minimum competitive prices $\boldsymbol{p}^*$, the \ref{alg:allocation} constructs the auxiliary network $H(\boldsymbol{p}^*)$ and its max flow, leading to allocation $\boldsymbol{x}^*$.
\comment{The \ref{alg:allocation} only needs to construct the network $H(\boldsymbol{p}^*)$ and to compute a max flow.}
Hence $\boldsymbol{x}^*$ can be found in polynomial time. \Cref{section:allocation} shows that the value of the maximum flow is $\max\{\sum_{i \in \Omega} b_i, \sum_{j \in N} d_j\}$, since we include a dummy buyer resp.\ a dummy object.
With \Cref{proposition:allocation} this implies that each buyer is assigned to one of her preferred bundles.
\begin{algorithm}
\SetAlgoRefName{Allocation Algorithm}
\KwIn{Supplies $b_i$, demands $d_j$, $\Omega_j'(\boldsymbol{p}^*)$, $\Omega_j''(\boldsymbol{p}^*)$, $\Omega_j'''(\boldsymbol{p}^*)$ for all buyers $j \in N$, prices $\boldsymbol{p}^*$}
\KwOut{Stable allocation $\boldsymbol{x}^*$ where as much as possible is sold.}
Construct the auxiliary network $H(\boldsymbol{p}^*)$.\\
Find an integral max flow $f$ in $H(\boldsymbol{p}^*)$.\\
Determine the allocation $\boldsymbol{x}^*$ given by the flow on the arcs $(j,i)$.\\
\Return{$\boldsymbol{x}^*$}
\caption{}
\label{alg:allocation}
\end{algorithm}
If the supply does not exceed the demand, everything is sold.
Otherwise, only the items which are allocated to the dummy buyer $j_0$ are not sold.
Since $v_{ij_0}=0$ for all $i \in \Omega$, the price of an object allocated to $j_0$ has to be zero.
Thus, all objects with positive price are completely sold.
\begin{theorem}\label{thm:algo_computes_minimal_market-clearing_prices}
The prices $\boldsymbol{p}^*$ computed by the \ref{alg:price-raising} are the component-wise minimum competitive prices.
Moreover, under the stable allocation $\boldsymbol{x}^*$ returned by the \ref{alg:allocation} as much as possible is sold, and every item with positive price is sold.
Thus, the prices $\boldsymbol{p}^*$ coincide with the buyer-optimal Walrasian price vector.
\end{theorem}
We prove Theorem~\ref{thm:algo_computes_minimal_market-clearing_prices} in Section~\ref{sec:walrasian}.
Note that the fact that the flow-based ascending auction returns the buyer-optimal Walrasian prices can also be shown directly by combining results of the literature on strong substitute valuation functions \cite{ausubel2006efficient} with some observation (see \Cref{sec:comparison}).
Ausubel \cite{ausubel2006efficient} showed that the buyer-optimal prices are computed by an ascending auction, where prices are always raised on the inclusion-wise minimal set corresponding to the steepest descent direction of the Lyapunov function.
We observe that these sets correspond to minimal maximum overdemanded sets which are exactly the sets we compute with a left-most min cut computation.
However, the proof given in the subsequent sections has two advantages.
First, we show that the minimum Walrasian prices $\boldsymbol{p}^*$ coincide with the minimum competitive prices, which will turn out to be crucial when proving our monotonicity results in Section~\ref{sec:monotonicity}.
Second, our proof is independent from the literature on strong substitute and $\mathrm{L}^\natural$-convex functions and uses only network flow arguments.
As shown in \cite{murota2013computing}, the number of iterations of the \ref{alg:price-raising} is at most $\left\Vert \boldsymbol{p}^\ast - \boldsymbol{p}_0\right\Vert_\infty$, where $\boldsymbol{p}^\ast$ is the minimum Walrasian price vector.
Since $p^*(i) \leq \max_{j \in N} v_{ij}$ this is pseudo-polynomial.
Note, however, that we may as well start with any alternative start vector $\boldsymbol{p}_0$, as long as $\boldsymbol{p}_0$ is known to be component-wise smaller or equal to the unique minimum competitive price vector.
In \Cref{sec:adapted_step_length} we present a variation of the price raising algorithm where the price is raised as far as possible for the same inclusion-wise minimal maximum overdemanded set.
\subsection{Auctioneer with memory -- Warm start with flow updates}\label{sec:flow_updates}
A very natural idea is to not compute the flow completely from scratch in every iteration but instead to update the flow, when updating the price vector and to start the Max-Flow Algorithm (we use Edmonds-Karp \cite{edmonds1972theoretical}) directly on the updated flow.
We will describe this procedure and analyze the resulting running time.
Interestingly, in one iteration this may not speed up the algorithm, but when combining this procedure with the adapted step length algorithm described in \Cref{sec:adapted_step_length}, then the running time of the complete auction improves.
To update the flow from one iteration to the next, assume we are given a maximum flow $f^\ell$ in the network induced by price vector $\boldsymbol{p}_\ell$.
Moreover, let $I$ be the left-most min cut in the network $G(\boldsymbol{p}_\ell)$ and assume $I \neq \{s\}$ (because in this case the computed prices are already competitive and the auction terminates).
Then the \ref{alg:flow_update} shows how to update the flow to obtain a new feasible flow in $G(\boldsymbol{p}_{\ell+1})$.
The idea is to keep as much flow as possible from the previous assignment.
We use that items might change their set, i.e. go from $\Omega_{j}'(\boldsymbol{p}_\ell)$ to $\Omega_{j}''(\boldsymbol{p}_{\ell +1})$ or vice versa, i.e., from $\Omega_{j}''(\boldsymbol{p}_\ell)$ to $\Omega_{j}'(\boldsymbol{p}_{\ell +1})$ but that for a fixed buyer this is possible in at most one direction.
A third possibility is that a buyer might lose interest in an item, but in this case this item is not in $\Omega_{j}'(\boldsymbol{p}_{\ell+1}) \cup \Omega_{j}''(\boldsymbol{p}_{\ell+1})$ and thus no flow is assigned.
\begin{algorithm}
\SetAlgoRefName{Flow update Algorithm}
\KwIn{Network $G(\boldsymbol{p}_\ell)$ with max flow $f^{\ell}$, $\boldsymbol{p}_{\ell+1} \coloneqq \boldsymbol{p}_\ell + \boldsymbol{\chi}_I$, new network $G(\boldsymbol{p}_{\ell+1})$}
\KwOut{A feasible flow $f$ in $G(\boldsymbol{p}_{\ell+1})$ with $D_{\boldsymbol{p}_\ell} -\val(f^\ell) \geq D_{\boldsymbol{p}_{\ell+1}} - \val(f)$}
$f \coloneqq 0$ for all arcs in $G(\boldsymbol{p}_{\ell+1})$\\
\For{$(j,i)\in N \times \Omega$ with $f^\ell_{j'i}>0$ or $f^\ell_{j''i}>0$}{
\uIf{$(j',i)$ is an arc in $G(\boldsymbol{p}_{\ell+1})$, i.e., $i \in \Omega_j'(\boldsymbol{p}_{\ell+1})$}{
Add $f^\ell_{j'i} + f^\ell_{j''i}$ flow units to $f$ on the path $s$-$j'$-$i$-$t$
}
\ElseIf{$(j'',i)$ is an arc in $G(\boldsymbol{p}_{\ell+1})$, i.e., $i \in \Omega_j''(\boldsymbol{p}_{\ell+1})$}{
Add $f^\ell_{j'i} + f^\ell_{j''i}$ flow units to $f$ on the path $s$-$j''$-$i$-$t$
}
}
\Return{$f$}
\caption{}
\label{alg:flow_update}
\end{algorithm}
This flow update is well defined, since feasibility is shown in the following Lemma.
\begin{lemma}\label{lemma:flow_update_feasible}
The flow $f$ computed in \ref{alg:flow_update} is a feasible flow in $G(\boldsymbol{p}_{\ell+1})$.
\end{lemma}
\begin{proof}
Note that flow conservation is fulfilled in every node by construction.
It remains to show that the capacities in $G(\boldsymbol{p}_{\ell+1})$ are obeyed.
\begin{itemize}
\item For $(s,j')$ this is given:
we assign $\sum_{i \in \Omega_j'(\boldsymbol{p}_{\ell+1})} (f^{\ell}_{j'i}+f^\ell_{j''i})$ units of flow and the capacity is given by $\sum_{i \in \Omega_j'(\boldsymbol{p}_{\ell+1})}b_i$.
Note that $f^{\ell}_{j'i}+f^\ell_{j''i} \leq b_i$ since the only $i$ leaving arc has capacity $b_i$.
\item For $(s,j'')$ we consider how $d_{j''}(\boldsymbol{p}_\ell)$ changes if we increase the prices on objects in $I$.
\begin{itemize}
\item If there are objects in $\Omega_j''(\boldsymbol{p}_{\ell}) \cap \Omega_j'(\boldsymbol{p}_{\ell+1})$, let $M \coloneqq \Omega_j''(\boldsymbol{p}_{\ell}) \cap \Omega_j'(\boldsymbol{p}_{\ell+1})$ be the objects which move from $\Omega_j''$ to $\Omega_j'$ and $S \coloneqq \Omega_j''(\boldsymbol{p}_{\ell}) \cap \Omega_j''(\boldsymbol{p}_{\ell+1})$ be the items that stay in $\Omega_j''$.
We know that $\Omega_j''(\boldsymbol{p}_{\ell}) \cap I \subseteq \Omega_j''(\boldsymbol{p}_{\ell+1})$.
Moreover, the objects in $\Omega_j'(\boldsymbol{p}_{\ell})$ stay in $\Omega_j'(\boldsymbol{p}_{\ell+1})$, i.e. $\Omega_j'(\boldsymbol{p}_{\ell}) \subseteq \Omega_j'(\boldsymbol{p}_{\ell+1})$.
Hence, the demand $d_{j'}$ increases and the demand $d_{j''}$ decreases by the number of items in $M$, i.e., $\sum_{i \in M} b_i$.
Recall that left-most min cut $C$ is defined by the vertices reachable from $s$ in the residual network corresponding to $f^\ell$ and recall that $I = C \cap \Omega$.
By definition $M \cap I = \emptyset$ and $S \subseteq I$.
If $j''$ is in the left-most min cut, the arcs in the cut which are leaving this node are fully used in any maximum flow.
Thus, $f^\ell_{j''i} = c_{j''i}=b_i$ for $i \in M$.
That $c_{j''i} = \min \{ b_i, d_{j''} \} = b_i$ follows since otherwise with this item the complete demand could be satisfied.
But this is a contradiction since buyer $j$ wants to buy items with less payoff at prices $\boldsymbol{p}_{\ell + 1}$, in other words $i \in \Omega_j'(\boldsymbol{p}_{\ell+1})$ and not $i \in \Omega_j''(\boldsymbol{p}_{\ell+1})$.
Since this flow is shifted to $j'$ after the price update, the flow is reduced by the number of items in $M$ as well, thus the capacities are still obeyed.
If $j''$ is not in the left-most min cut, we cannot reach $j''$ from items in $S \subseteq I$.
Hence, the flow on the edges $(j'', i)$ with $i \in S$ is zero (otherwise the backwards arc exists in the residual network and $j''$ is reachable).
Hence, in the \ref{alg:flow_update} we do not assign any flow to an edge through the vertex $j''$.
Thus, the capacity of $(s,j'')$ is still not exceeded.
\item Consider the case where $\Omega_j''(\boldsymbol{p}_{\ell}) \cap \Omega_j'(\boldsymbol{p}_{\ell+1}) = \emptyset$.
If $\Omega_j''$ does not lose any objects by the price update, we know that
\[
d_{j''}(\boldsymbol{p}_{\ell+1}) = d_{j''}(\boldsymbol{p}_{\ell}) + \hspace{2ex} \sum_{\mathclap{\hspace{7ex}\Omega_j'(\boldsymbol{p}_{\ell}) \cap \Omega_j''(\boldsymbol{p}_{\ell+1})}}\hspace{1ex} b_i \hspace{6ex} .
\]
Hence, the capacity is not exceeded on $(s,j'')$.
It remains to show that the capacity is not exceeded if $\Omega_j''$ loses some objects (it might get new ones from $\Omega_j'$ as well).
The only situation which can cause problems is if the demand of objects in $\Omega_j''$ decreases.
The demand $d_{j''}$ only decreases if there are objects moving from $\Omega_j''$ to $\Omega_j'$ (which does not happen by assumption) or if there are not enough objects with a positive payoff.
The latter case implies that all items available in $\Omega_j''(\boldsymbol{p}_{\ell+1})$ are demanded and thus, the capacity constraint on $(s,j'')$ is fulfilled.
\end{itemize}
\item For $(i,t)$ the capacity does not change, so the capacity constraints are fulfilled.
\item For $(j',i)$ and $(j'',i)$ it follows directly by the definition of the capacity and since the capacity on the $s$-leaving and $t$-entering arcs is not exceeded.\qedhere
\end{itemize}
\end{proof}
We can use the structure of the left-most min cut to show that if the flow decreases in an update step, then the demand decreases by at least the same amount.
This will help us to show that the prices returned by the auction are market-clearing (see \Cref{section:allocation}).
Moreover, this enables us to give an upper bound on the overall running time of the ascending auction with adapted step length (see \Cref{sec:adapted_step_length}).
\begin{lemma}\label{lemma:flow_demand}
Given a max flow in $G(\boldsymbol{p}_\ell)$ with corresponding left-most min cut $C$.
Let $I = C \cap \Omega$ be the overdemanded set and $\boldsymbol{p}_{\ell+1}$ be the price vector after the price update.
Then, the demand $d_{j'} + d_{j''}$ of buyer $j$ will decrease at least by
\begin{equation}
\sum_{\mathclap{\substack{i \in \Omega_j''(\boldsymbol{p}_{\ell}): \\ i \notin \Omega_j'(\boldsymbol{p}_{\ell+1}) \cup \Omega_j''(\boldsymbol{p}_{\ell+1})}}} \hspace{1ex} f^\ell_{j''i}.
\label{removed_flow}
\end{equation}
\end{lemma}
\begin{proof}
If \eqref{removed_flow} is zero, the statement is directly fulfilled.
Thus, from now on, we consider the cases when flow is removed.
This can only occur if some objects get a payoff of zero after the price update, i.e., if
\[
S \coloneqq \Omega_j''(\boldsymbol{p}_{\ell}) \setminus (\Omega_j'(\boldsymbol{p}_{\ell+1}) \cup \Omega_j''(\boldsymbol{p}_{\ell+1})) \neq \emptyset.
\]
Note that $S$ describes the set of objects which move from $\Omega_j''$ to $\Omega_j'''$.
Since $\Omega_j'''$ just contains objects with utility zero and $\Omega_j''$ only those with a positive payoff, all objects in $S$ are contained in $I$.
By definition, the demand $d_{j'}+d_{j''}$ will decrease by
\begin{align*}
\max\Big\{0, d_{j''}(\boldsymbol{p}_{\ell}) - \hspace{2ex} \sum_{\hspace{3ex} \mathclap{i \in \Omega_j''(\boldsymbol{p}_{\ell}) \setminus I}} b_i \hspace{3ex}\Big\}.
\end{align*}
If $j''$ is contained in the left-most min cut, all arcs from $j''$ to $\Omega_j''(\boldsymbol{p}_{\ell}) \setminus I$ are fully satisfied (since they are not reachable from $j''$).
Hence, there can be at most $d_{j''}(\boldsymbol{p}_{\ell}) - \sum_{i \in \Omega_j''(\boldsymbol{p}_{\ell}) \setminus I} b_i \geq 0$ units of flow going through $j''$ to vertices in $S$.
Thus, for buyer $j$ we reduce the demand at least by the removed flow units traveling through a vertex of $j$.
If $j''$ is not contained in the left-most min cut, there is no flow on the arcs from $j''$ to $I$.
Thus, no flow through buyer $j$ is removed and we are done in this case as well.
\end{proof}
\begin{corollary}\label{coro:demand-flow_value_non_decreasing}
The flow $f$ computed in the \ref{alg:flow_update} satisfies
\[
D_{\boldsymbol{p}_\ell}- \val(f^\ell) \geq D_{\boldsymbol{p}_{\ell+1}} - \val(f).
\]
\end{corollary}
\section{The ascending auction returns buyer-optimal Walrasian prices}\label{sec:walrasian}
We will show that the \ref{alg:price-raising} returns buyer-optimal Walrasian prices by showing separately that the prices are the component-wise minimum competitive ones and that the prices are market clearing.
\subsection{The ascending auction returns component-wise minimum competitive prices}\label{section:prices}
We now analyze the \ref{alg:price-raising} by considering the behavior of the buyers at given prices $\boldsymbol{p}$ using the structure of $G(\boldsymbol{p})$.
As usual, for a given digraph $(V, A)$, we use $\Gamma^-(v)$ to refer to all nodes that are the starting node of an incoming arc into $v$, i.e., $\Gamma^-(v) \coloneqq \{u \in V \mid (u, v) \in A\}$, analogously for $\Gamma^+(v)$.
We extend this definition also to sets, so that $\Gamma^-(I) \coloneqq \{u \in V \mid \text{there exists } v \in I \text{ with } (u, v) \in A\}$.
For a given set of items $I \subseteq \Omega$, we denote the demands of all buyers which cannot be fulfilled by items which are not in $I$ by
\begin{equation*}
d_{\boldsymbol{p}}(I) \coloneqq \sum_{{j^\bullet} \in \Gamma^-(I)} \max\Big\{0, d_{{j^\bullet}} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet}) \setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} \Big\}.
\end{equation*}
Note that $d_{\boldsymbol{p}}(I)$ is a natural lower bound on the number of copies from $I$ that are needed in every stable allocation.
We call an item set $I$ \emph{overdemanded} if $\sum_{i \in I} b_i < d_{\boldsymbol{p}}(I)$.
The following condition \eqref{Hall-condition} can be viewed as a generalized Hall condition.
\begin{lemma}
Prices $\boldsymbol{p}$ are competitive if and only if
\begin{equation}\label{Hall-condition}
\sum_{i \in I} b_i \geq d_{\boldsymbol{p}}(I) \quad \text{for all } I \subseteq \Omega.
\end{equation}
Moreover, if prices $\boldsymbol{p}$ are not competitive, and $C$ is the left-most min cut in $G(\boldsymbol{p})$, then $C \cap \Omega$ is an overdemanded set.
\end{lemma}
\begin{proof}
Recall from Lemma~\ref{lem:competitive_flow} that prices $\boldsymbol{p}$ are competitive if and only if there exists a flow $f$ in $G(\boldsymbol{p})$ of value $\val(f) = \capa(\{s\}) = \sum_{j \in N} (d_{j'} + d_{j''})$.
Thus, by the Max Flow Min Cut Theorem, prices $\boldsymbol{p}$ are competitive if and only if $\{s\}$ is a min cut in $G(\boldsymbol{p})$.
First, we show that the generalized Hall condition \eqref{Hall-condition} is necessary, i.e.\ that \eqref{Hall-condition} holds if $\boldsymbol{p}$ is competitive.
Consider a flow $f$ in $G(\boldsymbol{p})$ of value $\val(f) = \sum_{j \in N} (d_{j'} + d_{j''})$ and some arbitrary set $I \subseteq \Omega$.
According to the flow conservation, for each vertex ${j^\bullet}$ corresponding to a buyer $j \in N$ (i.e., either $j'$ or $j''$) we have
\begin{align*}
\sum_{\mathclap{i \in \Gamma^+({j^\bullet})\cap I}} \hspace{1ex} f_{{j^\bullet} i} = \overbrace{f_{s{j^\bullet}}}^{= d_{j'} \text{ resp. } d_{j''}} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet}) \setminus I}} \hspace{1ex} \overbrace{f_{{j^\bullet} i}}^{\leq c_{{j^\bullet} i}} \hspace{1ex}
\geq \max\Big\{0,\ d_{{j^\bullet}} - \hspace{1ex} \sum_{\mathclap{ \hspace{3ex}i \in \Gamma^+({j^\bullet}) \setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\Big\}.
\end{align*}
If we sum over all ${j^\bullet} \in \Gamma^-(I)$ we get
\begin{align*}
d_{\boldsymbol{p}}(I)=\sum_{\mathclap{{j^\bullet} \in \Gamma^-(I)}} \hspace{1ex} \max\Big\{0,\ d_{{j^\bullet}} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} \Big\}
\leq \sum_{{j^\bullet} \in \Gamma^-(I)} \hspace{4ex} \sum_{\mathclap{i \in \Gamma^+({j^\bullet})\cap I }} f_{{j^\bullet} i}
= \sum_{\mathclap{ i \in I}} \sum_{{j^\bullet} \in \Gamma^-(I)} f_{{j^\bullet} i} \hspace{1ex}
= \sum_{i \in I} f_{it}
\leq \sum_{i \in I} b_i.
\end{align*}
Thus, $I$ is not an overdemanded set.
Since we chose $I$ arbitrary, the generalized Hall condition \eqref{Hall-condition} is fulfilled.
Now, we show that condition \eqref{Hall-condition} implies that prices $\boldsymbol{p}$ are competitive.
To see this, suppose that $\boldsymbol{p}$ is not competitive, and let $C$ be the left-most min cut of $G(\boldsymbol{p})$.
Since $\boldsymbol{p}$ is not competitive, we know that
\begin{equation}
\capa(C) < \capa(\{s\}) = \sum_{{j^\bullet} \in N' \cup N''} d_{{j^\bullet}}.\label{overdemanded_2}
\end{equation}
Define $I \coloneqq C \cap \Omega$ and $T \coloneqq C \cap (N' \cup N'')$.
The capacity of $C$ is given by
\begin{equation}
\capa(C) = \sum_{i \in I} b_i + \sum_{{j^\bullet} \in T} \hspace{2ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet}) \setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} + \hspace{1ex} \sum_{\mathclap{\hspace{3ex} {j^\bullet} \in (N' \cup N'') \setminus T}} \hspace{1ex} d_{{j^\bullet}} \hspace{2ex}. \label{overdemanded_1}
\end{equation}
By combining and rearranging \eqref{overdemanded_1} and \eqref{overdemanded_2} we get the following chain of inequalities:
\begin{equation*}
\sum_{i \in I} b_i < \sum_{{j^\bullet} \in T} d_{{j^\bullet}} - \sum_{{j^\bullet} \in T} \hspace{2ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} = \sum_{{j^\bullet} \in T} \Big( d_{{j^\bullet}} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} \Big) \leq \sum_{{j^\bullet} \in T} \max\Big\{0, d_{{j^\bullet}} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet}) \setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\Big\} = d_{\boldsymbol{p}}(I).
\end{equation*}
Hence, $I = C \cap \Omega$ is an overdemanded set and \eqref{Hall-condition} does not hold in this case.
\end{proof}
\begin{theorem}\label{thm:algo_computes_minimal_competitive_prices}
The prices $\boldsymbol{p}^*$ returned by our \ref{alg:price-raising} are the (unique) component-wise minimum competitive prices, i.e., if $\boldsymbol{q}$ is a competitive price vector then $p^*(i) \leq q(i)$ for all $i \in \Omega$.
\end{theorem}
\begin{proof}
Assume towards a contradiction that for a competitive price vector $\boldsymbol{q}$, there is an object $i \in \Omega$ such that $p^*(i) > q(i)$.
Let $\boldsymbol{p}_\tau$ denote the price vector in iteration $\tau$.
Then, for the start vector $\boldsymbol{p}_0$ we have $\boldsymbol{p}_0 \leq \boldsymbol{q}$.
Let $t$ be the last iteration where $\boldsymbol{p}_t \leq \boldsymbol{q}$, i.e., there is an object $i \in \Omega$ such that $p_{t+1}(i) > q(i)$.
Let $I$ be the overdemanded set chosen by the algorithm in iteration $t$ and split it into two parts $I^=$ and $I^<$ as follows:
\begin{align*}
I &= \{i \in \Omega \mid p_t(i) < p_{t+1}(i)\},
& I^= &= \{i \in I \mid p_t(i) = q(i)\},
& I^< &= \{i \in I \mid p_t(i) < q(i)\}.
\end{align*}
We will derive a contradiction by showing that $I^<$ induces a min cut $\tilde{C}$ which is a strict subset of $C$, since $I^=$ is non-empty by choice of $t$.
To do so, we start with analyzing the behavior of buyer $j \in N$ at prices $\boldsymbol{q}$ by comparing it with the behavior at prices $\boldsymbol{p}_t$.
For this purpose, we fix the network properties, i.e., we talk about everything w.r.t prices $\boldsymbol{p}_t$ if not stated otherwise.
In the following lemma, we will show that
\[
\widetilde{C} = \{s\} \cup \Big\{{j^\bullet} \in N' \cup N'' \mid d_{j^\bullet} > \sum_{\substack{i \in \Gamma^+({j^\bullet}) \setminus I^<}} c_{{j^\bullet} i}\Big\} \cup I^<
\]
is also a min cut with $\tilde{C} \subsetneq C$ (see Lemma~\ref{lem:smaller_mincut} below).
This, however, is a contradiction to $C$ being a left-most min cut, implying that the assumption that there is an object $i$ with price $p^*(i) > q(i)$ cannot be true.
Therefore, the \ref{alg:price-raising} finds the component-wise minimum competitive price vector.
\end{proof}
The following lemma is needed in the proof of \Cref{thm:algo_computes_minimal_competitive_prices} and uses the notation which is described there.
\begin{lemma}\label{lem:smaller_mincut}
The cut $\widetilde{C} = \{s\} \cup \Big\{{j^\bullet} \in N' \cap N'' \mid d_{j^\bullet} > \sum_{\substack{i \in \Gamma^+({j^\bullet}) \setminus I^<}} c_{{j^\bullet} i}\Big\} \cup I^<$ is a min cut and $\widetilde{C} \subsetneq C$.
\end{lemma}
\begin{proof}
Consider the buyers who need to buy items of $I$ such that $I$ becomes overdemanded.
Let $T = C \cap (N' \cup N'')$, let $T_1 \subseteq T$ be the subset of buyers demanding some objects in $I^=$, and let $T_2$ be those who do not, i.e.,
\begin{align*}
T &= \Big\{{j^\bullet} \in N' \cup N'' \mid d_{j^\bullet} > \hspace{1ex} \sum_{\mathclap{\hspace{3ex }i \in \Gamma^+({j^\bullet}) \setminus I}} \hspace{1ex} c_{{j^\bullet} i} \Big\},
&T_1 &= \Big\{ {j^\bullet} \in T \mid c_{{j^\bullet} i} > 0 \text{ for an } i \in I^= \Big\},
&T_2 &= T \setminus T_1.
\end{align*}
To see that this fits to the definition of $T$, we only need to check which vertices in $N' \cup N''$ are contained in the cut $C$.
Recall that the capacity $\capa(C)$ of cut $C$ is defined as the sum of capacities on all outgoing arcs of $C$.
If a vertex ${j^\bullet}$ is in the cut, $\capa(C)$ includes the sum over all capacities of arcs $({j^\bullet}, i)$ where $i \notin I$.
If vertex ${j^\bullet}$ is not in the cut, $\capa(C)$ includes the capacity of the arc $(s, {j^\bullet})$ which is $d_{j^\bullet}$.
Because $C$ is a left-most min cut, $C$ contains ${j^\bullet}$ if and only if
\[
d_{j^\bullet} > \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet}) \setminus I}} \hspace{1ex} c_{{j^\bullet} i}.
\]
\begin{figure}
\caption{Proof sketch. Illustration of a minimum $s$-$t$-cut $C$ and the induced sets $I$ (light blue) and $T$ (light green). The set $I^=$ is red, $I^<$ is orange, $T_1$ is yellow and $T_2$ is petrol.}
\label{fig:proof}
\end{figure}
\begin{claim}
It holds that
\begin{equation}\label{I_1 not overdemanded}
\sum_{i \in I^=} b_i \geq \sum_{{j^\bullet} \in T_1} \min\Big\{d_{j^\bullet} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus I}} \hspace{1ex} c_{{j^\bullet} i}\hspace{1ex}, \hspace{2ex} \sum_{\mathclap{\hspace{3ex}i \in \Gamma^+({j^\bullet})\cap I^=}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\Big\}.
\end{equation}
\end{claim}
\begin{subproof}
First, consider the demand that $j'$ has on objects in $I^=$ at price $\boldsymbol{p}_t$, i.e.,
\[
\sum_{i \in \Gamma^+(j') \cap I^=} c_{j'i}.
\]
Comparing $\boldsymbol{q}$ and $\boldsymbol{p}_t$, remember that the price of any item only increases, i.e., $p_t(i) \leq q(i)$, while the prices in $I^=$ remain the same.
That is why the buyer $j$ likes to buy at prices $\boldsymbol{q}$ at least the same amount of items from objects in $I^=$ as at prices $\boldsymbol{p}_t$.
Next, we consider the demand $j''$ has on objects in $I$ at prices $\boldsymbol{p}_t$.
Recall that $j''$ gets the same utility from every item in $\Omega_{j''}$.
Comparing $\boldsymbol{p}$ and $\boldsymbol{q}$ we have that the prices on $I^=$ remain constant while the prices on $I^<$ strictly increase.
Thus, $j$ likes to fill up the preferred bundle with items in $I^=$ and maybe with items outside of $I$ before buying the items in $I^<$.
Note however, that for prices $\boldsymbol{q}$ it is not clear to which copy of buyer $j$ this demand is assigned.
Since furthermore, we have that buyer $j$ cannot buy more objects than available in $I^=$ we get the following lower bound on the demand reassigned from a buyer $j''$ for prices $\boldsymbol{p}_t$ to buyer $j'$ or $j''$ for prices $\boldsymbol{q}$:
\begin{equation*}
\min\Big\{d_{j''} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+(j'')\setminus I}} \hspace{1ex} c_{j''i}\hspace{1ex} , \hspace{3ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+(j'')\cap I^=}} \hspace{1ex} c_{j''i}\Big\}.
\end{equation*}
Finally, if we sum up the demands of all $j' \in N'$ and $j'' \in N''$ at prices $q$ we obtain the following lower bound of the total demand of all buyers $j \in T_1$
\begin{align*}
&\sum_{j' \in T_1 \cap N'} \sum_{\substack{i \in \Gamma^+(j')\\ i \in I^=}} c_{j'i} +
\sum_{j'' \in T_1 \cap N''} \min\Big\{d_{j''} - \sum_{\substack{i \in \Gamma^+(j'')\\ i \notin I}} c_{j''i}, \sum_{\substack{i \in \Gamma^+(j'')\\ i \in I^=}} c_{j''i}\Big\}\\
\geq &\sum_{{j^\bullet} \in T_1} \min\Big\{d_{j^\bullet} - \sum_{\substack{i \in \Gamma^+({j^\bullet})\\ i \notin I}} c_{{j^\bullet} i}, \sum_{\substack{i \in \Gamma^+({j^\bullet})\\ i \in I^=}} c_{{j^\bullet} i}\Big\}.
\end{align*}
The set $I^=$ is not overdemanded at price $\boldsymbol{q}$.
Thus, the demand of items in $I^=$ at price $\boldsymbol{q}$ is smaller or equal than the total supply of all objects in $I^=$.
Using the bound of the total demand of all buyers in $T_1$, we obtain the inequality of the claim.
\end{subproof}
Next, we show that $\widetilde{C}$ is a min cut by comparing the capacities of $\widetilde{C}$ and of the left-most min cut $C$.
\begin{align*}
&\ \capa(C) - \capa(\widetilde{C})\\
=&\ \Bigg(\sum_{i \in I} b_i + \sum_{{j^\bullet} \in N' \cup N''} \hspace{-1ex} \min\Big\{d_{j^\bullet}, \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus I}}\hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\Big\}\Bigg) - \Bigg(\sum_{i \in I^<} b_i + \sum_{{j^\bullet} \in N' \cup N''} \hspace{-1ex} \min\Big\{d_{j^\bullet}, \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus I^<}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} \Big\}\Bigg)\\
=&\ \sum_{i \in I^=} b_i + \sum_{{j^\bullet} \in N' \cup N''} \underbrace{\Big(\min\Big\{d_{j^\bullet}, \underbrace{\hspace{2ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}}_{\text{\small{$\eqqcolon \alpha$}}}\Big\} - \min\Big\{d_{j^\bullet}, \underbrace{\hspace{2ex} \sum_{\mathclap{\hspace{3ex}i \in \Gamma^+({j^\bullet})\setminus I^<}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}}_{\text{\small{$\eqqcolon \beta$}}}\Big\}\Big)}_{\text{\small{ $= 0$ for ${j^\bullet} \notin T$, since $d_{j^\bullet} \leq \alpha$ (by definition) and $\alpha \leq \beta$}}}\\
=&\ \sum_{i \in I^=} b_i + \sum_{{j^\bullet} \in T} \Bigg(\hspace{2ex}\sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} - \hspace{1ex} \min\Big\{d_{j^\bullet}, \hspace{1ex} \sum_{\mathclap{\hspace{3ex}i \in \Gamma^+({j^\bullet})\setminus I^<}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} \Big\}\Bigg)\\
=&\ \sum_{i \in I^=} b_i - \sum_{{j^\bullet} \in T}\min\Big\{d_{j^\bullet} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}, \hspace{2ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\cap I^=}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\Big\} \overset{\eqref{I_1 not overdemanded}}{\geq} 0.
\end{align*}
Therefore $\widetilde{C}$ is a min cut too.
It remains to show that $\widetilde{C} \subsetneq C$.
This follows directly by the definitions:
$\widetilde{C} \cap (N' \cup N'') \subseteq T = C \cap (N' \cup N'')$ and $\widetilde{C} \cap \Omega = I^< \subsetneq I = C \cap \Omega$, since $I^=$ is not empty.
\end{proof}
\begin{observation}
The proof of Theorem~\ref{thm:algo_computes_minimal_competitive_prices} does not use that the \ref{alg:price-raising} starts at prices zero.
As long as $p_0(i) \leq p^*(i)$ for all $i \in \Omega$, the proof works.
Thus, the algorithm finds the minimum competitive price vector $\boldsymbol{p}^*$ if it starts at prices $\boldsymbol{p}_0 \leq \boldsymbol{p}^*$.
\end{observation}
\subsection{The auction returns market-clearing prices}\label{section:allocation}
To complete the proof of Theorem~\ref{thm:algo_computes_minimal_market-clearing_prices} it remains to show that the \ref{alg:price-raising} computes market-clearing prices, i.e., that an allocation exists where $D = \min \{ \sum_{i \in \Omega} b_i, \sum_{j \in N} d_j \}$ is sold.
\begin{theorem}\label{theorem:market-clearing}
Given prices $\boldsymbol{p}^*$ computed by the \ref{alg:price-raising}, there exists an allocation $\boldsymbol{x}^* \in \mathbb{Z}_+^{\Omega \times N}$ such that:
\begin{enumerate}
\item Any buyer $j \in N$ gets a preferred bundle, i.e., $\boldsymbol{x}^*_{\bullet j} \in \mathbb{Z}_+^{\Omega}$ with $\boldsymbol{x}^*_{\bullet j} \in D_j(\boldsymbol{p}^*)$. \label{property:pref_bundle}
\item If there is an item which is not sold, it has price zero, i.e., $\sum_{j \in N} x^*_{ij} = b_i$ for all $i \in \Omega$ with $p^*(i) > 0$.
\item As much as possible is sold, i.e., $\sum_{i \in \Omega} \sum_{j \in N} x^*_{ij} = D \comment{\min \{ \sum_{i \in \Omega} b_i,\ \sum_{j \in N} d_j \}}$.
\end{enumerate}
\end{theorem}
To show this theorem, we will use the following notation.
An allocation $\bar{\boldsymbol{x}}$ is an \emph{extension} of $\boldsymbol{x}$ if $\bar{x}_{ij} \geq x_{ij}$ for all $j \in N$ and $i \in \Omega$.
A flow $f$ in $G(\boldsymbol{p})$ \emph{induces} an allocation $\boldsymbol{x}^f$ by defining $x_{ij} = f_{ij'} + f_{ij''}$.
Note that an allocation induced by a flow in $G(\boldsymbol{p})$ always distributes subsets of a preferred bundle w.r.t.\ prices $\boldsymbol{p}$.
\begin{proof}
We construct an allocation with the desired properties by using the flow that we obtained during the \ref{alg:price-raising} which uses the \ref{alg:flow_update} as described in \Cref{sec:flow_updates}.
The constructed allocation $\boldsymbol{x}^*$ will be induced by the max flow in $G(\boldsymbol{p}^*)$, thus property \ref{property:pref_bundle} is fulfilled by definition.
We extend $\boldsymbol{x}^*$ to an allocation where every item with a positive price is sold.
For $i \in \Omega$ with $\sum_{j \in N} x_{ij}^* < b_i$ we assign the remaining items to buyers $j$ who demanded items of this objects in the last iteration where the price on $i$ was increased, i.e., in the last iteration with $i \in I$.
More precisely, we assign it to those buyers for which the flow was removed.
Since the price is not increased in later iterations, the object is still in $\Omega_j'''(\boldsymbol{p}^*)$.
By \Cref{lemma:flow_demand} we know that the removed flow did not exceed $d_j - d_{j'}(\boldsymbol{p}^*) - d_{j''}(\boldsymbol{p}^*)$, since $d_{j'}+d_{j''}$ just decreases during the algorithm.
Hence, the demand is not exceeded if we assign items according to the removed flow.
This means all items with a positive price are assigned.
This is due to the fact that objects in $I$ are fully assigned, since $I$ is given by a left-most min cut.
The flow on $(b_i, t)$ remains if we do not remove flow through $i$ which can only happen if $i \in I$.
Thus, all items $i$ with a positive price are assigned, either induced by the max flow or by the removed-flow in the last iteration with $i \in I$.
It remains to extend this assignment to an allocation where as much as possible is sold.
All unsold items have price $0$ and all players with left over demand have payoff $0$ on all these items.
This means we get a complete bipartite graph in which every maximum flow assigns either all remaining objects or satisfies all demands.
Since all prices and payoffs are 0, any such induced allocation can just be added to the computed assignment.
\end{proof}
Note that the allocation can also be computed by the \ref{alg:allocation}.
We mainly use the described procedure to prove that there is an allocation with the desired properties.
This answers the question why the algorithm returns market-clearing prices.
\begin{proof}[Proof of \Cref{thm:algo_computes_minimal_market-clearing_prices}]
The prices are the minimum competitive prices by \Cref{thm:algo_computes_minimal_competitive_prices}.
By \Cref{theorem:market-clearing} there is an associated stable allocation where $D \comment{\min \{\sum_{i \in \Omega} b_i, \sum_{j \in N} d_j \}}$ is sold and where each item with positive price is sold.
This existence together with the construction of the network $H(\boldsymbol{p}^*)$ gives us that the \ref{alg:allocation} computes such an allocation.
The existence of an allocation where as much as possible is sold also implies that the minimum competitive prices computed by the \ref{alg:price-raising} are buyer-optimal Walrasian prices.
\end{proof}
\section{Monotonicity}\label{sec:monotonicity}
The \ref{alg:price-raising} determines for each instance\comment{consisting of demands $(d_j)_{j \in N}$, supplies $(b_i)_{i\in \Omega}$, and valuations $(v_{ij})_{i\in \Omega, j\in N}$} the unique component-wise minimum Walrasian price vector $\boldsymbol{p}^*$.
In this section, we analyze the monotonicity of the auction with respect to changes of the demand and supply.
In particular, we show that, as intuitively expected, the auction is monotone in the sense that the returned prices can only increase if the demand increases or the supply decreases.
\begin{theorem}\label{thm:monotonicity}
Given an instance with valuations $\boldsymbol{v}$, demands $\boldsymbol{d}$, supplies $\boldsymbol{b}$ and the corresponding buyer-optimal Walrasian prices $\boldsymbol{p}$, consider a second instance with the same valuation functions but increased demands and decreased supplies, i.e., demands $\boldsymbol{d}^{\text{new}}$ with $0 \leq d_j \leq d^{\text{new}}_j$ for all $j \in N$ and supplies~$\boldsymbol{b}^{\text{new}}$ with $0 \leq b^{\text{new}}_i \leq b_i$ for all $i \in \Omega$, and let $\boldsymbol{p}^{\text{new}}$ denote its buyer-optimal Walrasian prices.
Then we have $p(i) \leq p^{\text{new}}(i)$ for all $i \in \Omega$ with $b^{\text{new}}_i > 0$.
\end{theorem}
The proof idea is that prices remain competitive when the supply increases or the demand decreases. Since for additive valuation functions with demand minimum competitive prices are unique and market-clearing, the buyer-optimal Walrasian prices are smaller or equal to any competitive prices.
To prove the theorem we show two lemmas, analyzing the change of the demand and the supply separately.
\begin{lemma}\label{lem:monotonicity_demand}
Let $\boldsymbol{d}$ and $\boldsymbol{d}^{\text{new}}$ be two demand vectors with $d_j \leq d^{\text{new}}_j$ for all $j \in N$ (here it is possible that $d_j = 0$ for some $j \in N$).
Then the buyer-optimal Walrasian prices $\boldsymbol{p}$ at demand $\boldsymbol{d}$ are not greater than the buyer-optimal Walrasian prices $\boldsymbol{p}^{\text{new}}$ at demand $\boldsymbol{d}^{\text{new}}$, i.e., $p(i)\leq p^{\text{new}}(i)$ for all $i \in \Omega$.
\end{lemma}
\begin{proof}
Consider an integral max flow $f'$ at prices $\boldsymbol{p}^{\text{new}}$ in the auxiliary flow network $G(\boldsymbol{p}^{\text{new}})$.
Recall that flow $f'$ corresponds to a feasible allocation from buyers to their preferred bundles.
Now adapt this flow as follows.
For each buyer $j$ with $d_j < d^{\text{new}}_j$, among the paths going through $j'$ or $j''$, select one with a currently lowest payoff and reduce flow on that path, until flow through $j'$ and $j''$ gets reduced to $d_j$.
This procedure terminates with a flow meeting demands $d_j$.
Furthermore, since we still allocate the items with the highest payoff to a buyer, each buyer is allocated to a preferred bundle at prices $\boldsymbol{p}^{\text{new}}$ at demand $\boldsymbol{d}$.
Thus, $\boldsymbol{p}^{\text{new}}$ is a competitive price vector for demand $\boldsymbol{d}$.
By Theorem~\ref{thm:algo_computes_minimal_competitive_prices} it follows that $\boldsymbol{p}$ is not only the minimum Walrasian price vector but also the component-wise minimum competitive one.
Thus, we get $p(i) \leq p^{\text{new}}(i)$ for all $i \in \Omega$.
\end{proof}
Next we show that the minimum competitive prices are bigger if the supply is smaller.
\begin{lemma}\label{lem:monotonicity_supply}
Let $\boldsymbol{b}$ and $\boldsymbol{b}^{\text{new}}$ be two supply vectors with $b^{\text{new}}_i \leq b_i$ for all $i \in \Omega$ (here it is possible that $b^{\text{new}}_i=0$ for some $i \in \Omega$).
Then for the corresponding buyer-optimal Walrasian prices $\boldsymbol{p}$ and $\boldsymbol{p}^{\text{new}}$, it holds that $p(i) \leq p^{\text{new}}(i)$ for all $i$ with $b^{\text{new}}_i > 0$.
\end{lemma}
\begin{proof}
Assume without loss of generality that $\boldsymbol{b}$ and $\boldsymbol{b}^{\text{new}}$ only differ in the supply of object $\ell$ by one item.
We fix the allocation computed by the minimum competitive prices at supply $\boldsymbol{b}^{\text{new}}$ and we consider two cases.
First, we consider the case $b^{\text{new}}_\ell > 0$.
Given the assigned bundles, we analyze the behavior of the buyers when the additional item of object $\ell$ arrives.
If there is a buyer $j$ who is not assigned to one of her preferred bundles at supply $\boldsymbol{b}$, we know that she is the only one who is assigned to items of object $\ell$, since otherwise the prices $\boldsymbol{p}^{\text{new}}$ are not competitive.
Thus, all other buyers are assigned to one of their preferred bundles at supply $\boldsymbol{b}$.
Hence we can change the preferred bundle of buyer $j$ by assigning one more item of $\ell$ to her, if necessary by omitting the least profitable item.
This change does not harm the other buyers, thus in the new allocation everyone is assigned to a preferred bundle at prices $\boldsymbol{p}^{\text{new}}$.
Thus, prices $\boldsymbol{p}^{\text{new}}$ are competitive for the instance with supply $\boldsymbol{b}$.
The prices $\boldsymbol{p}$ are the minimum competitive prices by Theorem~\ref{thm:algo_computes_minimal_competitive_prices}, which yields $p(i) \leq p^{\text{new}}(i)$.
Next, we consider the case $b^{\text{new}}_\ell=0$.
We adapt the prices $\boldsymbol{p}^{\text{new}}$ to prices $\bar{\boldsymbol{p}}$ by setting $\bar{p}(i) = p^{\text{new}}(i)$ for $i \in \Omega \setminus \{\ell\}$ and $\bar{p}(\ell) = \max_{j \in N} v_{\ell j} +1$.
Thus, no buyer wants to buy an item of object $\ell$.
Therefore, the given allocation is an assignment of buyers to one of their preferred bundles at prices $\bar{\boldsymbol{p}}$ for supply $\boldsymbol{b}$.
Using again that $\boldsymbol{p}$ is the minimum competitive price vector at supply $\boldsymbol{b}$, we get $p(i) \leq \bar{p}(i) = p^{\text{new}}(i)$ for all $i \in \Omega \setminus \{\ell\}$.
\end{proof}
\begin{proof}[Proof of \Cref{thm:monotonicity}]
For a given modified instance we can construct an intermediate instance where only the demand is changed and the supply remains as in the original instance.
With \Cref{lem:monotonicity_demand} this implies that prices only increase compared to the original instance.
Now applying \Cref{lem:monotonicity_supply} to the intermediate instance gives the statement of the theorem.
\end{proof}
The monotonicity allows for faster re-optimization by starting with the old Walrasian price vector.
\begin{corollary}
Given prices $\boldsymbol{p}$ we can compute $\boldsymbol{p}^{\text{new}}$ by applying at most $\|\boldsymbol{p}-\boldsymbol{p}^{\text{new}}\|_{\infty}$ iterations of the \ref{alg:price-raising} with start prices $p(i)$ for all $i \in \Omega$ with $b^{\text{new}}_i > 0$.
\end{corollary}
\Cref{thm:algo_computes_minimal_market-clearing_prices} allows starting with any initial price vector which is in every component at most as large as the minimum competitive price vector.
Thus, Theorem~\ref{thm:monotonicity} allows us to start the \ref{alg:price-raising} at the price $p(i)$ for all $i \in \Omega$ with $b^{\text{new}}_i>0$.
Murota et al.~\cite{murota2013computing} show that the number of iterations is then bounded by $\max\{p(i) - p^{\text{new}}(i)\mid i \in \Omega,\ b^{\text{new}}_i > 0\}$.
However, the following example shows that we cannot bound $\|\boldsymbol{p}-\boldsymbol{p}^{\text{new}}\|_{\infty}$ even if the demand or supply is only slightly changed:
\begin{example}
Consider an instance with two buyers and two objects.
The valuation of both buyers are $(M, M)$, the demand for both is two and the supply of both objects is two.
In this instance $\boldsymbol{p} = (0, 0)$ are buyer-optimal competitive prices.
Now assume the demand of one buyer is increased by one.
In this case, the unique buyer-optimal prices are $\boldsymbol{p}^{\text{new}} = (M, M)$ and thus $\|\boldsymbol{p}-\boldsymbol{p}^{\text{new}}\|_{\infty} = M$.
The same happens if the supply of one item is decreased by one.
\end{example}
It is worth pointing out that monotonicity is not guaranteed if the valuation functions are not gross substitute.
\begin{example}
Consider an instance with two buyers $N = \{1, 2\}$ and two objects $\Omega = \{\alpha, \beta\}$.
The supply of each object is $1$, the demand of both buyers is unbounded.
The valuations are not given element-wise but as a set function by
\[
v_i(\emptyset) = v_i(\{\alpha\}) = v_i(\{\beta\}) = 0, \quad v_i(\Omega) = 1
\]
for $i \in N$.
The valuations exhibit \emph{complementarities}, i.e., buyers want to buy items only in a bundle together with the other object; otherwise the items have no value to the buyers (think of left-hand gloves and right-hand gloves).
Gross substitutes always have the no-complementarities condition (see \cite{gul1999walrasian}).
Any price vector that yields a total price of $1$, together with an allocation that gives both objects to the same buyer is at equilibrium.
However, if we reduce the supply of either object to $0$, the unique Walrasian price vector is $(0, 0)$ as each object on its own is worthless to both buyers.
\end{example}
\section{Comparison to existing literature}\label{sec:comparison}
The goal of this section is to show the connection between our algorithm and the existing work.
In the first part we consider the connection to ascending auctions for strong gross substitute valuations.
It turns out that computing a left-most min cut in our network directly corresponds to finding the inclusion-wise minimal set defining a steepest descent direction of the Lyapunov function in case of additive valuation functions with demand.
In the second part we address the question whether the computed prices can be considered as VCG prices (see below for definition).
While this is known to hold for single-unit demands, it does not hold for general gross substitute valuations. Unfortunately, for additive valuations with demand, no mechanism based on prices per item can determine VCG prices, as we show later on.
\subsection{Comparison to Ascending Auction}
Note that in our model the Lyapunov function can be rewritten to
\[
L(\boldsymbol{p}) = \max \sum_{j \in N} \sum_{i \in \Omega} (v_{ij} - p(i)) x_{ij} + \sum_{i \in \Omega} b_i \cdot p(i)
\]
subject to $\sum_{i \in \Omega} x_{ij} \leq d_j$ and $x_{ij} \in [0, b_i]$ for all $j \in N$ and all $i \in \Omega$.
\begin{proposition}\label{prop:difference_lyapunovfunction}
In our model with additive valuation functions with demand the difference of the Lyapunov function in an augmentation step equals the difference between the capacity of the $s$-leaving arcs and the min cut value.
More formally
\[
L(\boldsymbol{p}) - L(\boldsymbol{p} + \boldsymbol{\chi}_X)=\hspace{2ex}\sum_{\mathclap{{j^\bullet} \in \Gamma^-(X)}} \hspace{1ex}\max \Big\{0, d_{j^\bullet} - \hspace{1ex}\sum_{\mathclap{ \hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus X}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} \Big\} - \sum_{i \in X} b_i.
\]
\end{proposition}
The first sum is the sum over the differences $V_j(\boldsymbol{p}) - V_j(\boldsymbol{p} + \boldsymbol{\chi}_X)$ for each buyer.
The difference corresponds to the total amount of items that $j$ wants to buy from $X$ under prices $\boldsymbol{p}$ without any alternative outside of $X$.
This yields the equation by definition of the Lyapunov function.
\begin{lemma}\label{lemma:comparison_murota}
Given prices $\boldsymbol{p}$, the overdemanded set $I$ determined by the left-most min cut in $G(\boldsymbol{p})$ minimizes $L(\boldsymbol{p} + \boldsymbol{\chi}_X)$ among all $X \subseteq \Omega$.
\end{lemma}
\begin{proof}
A set $X \subseteq \Omega$ minimizes $L(\boldsymbol{p} + \boldsymbol{\chi}_X)$ if and only if it maximizes
\[
L(\boldsymbol{p}) - L(\boldsymbol{p} + \boldsymbol{\chi}_X) = \sum_{j \in N} (V_j(\boldsymbol{p}) - V_j(\boldsymbol{p} + \boldsymbol{\chi}_X)) - \sum_{i \in X} b_i.
\]
Now we consider again the constructed auxiliary networks $G(\boldsymbol{p})$ and $G(\boldsymbol{p} + \boldsymbol{\chi}_X)$ and the induced changes in capacities.
With Proposition~\ref{prop:difference_lyapunovfunction}, we obtain that $X \subseteq \Omega$ maximizes
\begin{equation}\label{eq:lyapunov_network}
\sum_{{j^\bullet} \in \Gamma^-(X)} \max\{0, d_{j^\bullet} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus X}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\} - \sum_{i \in X} b_i.
\end{equation}
Given an $X$ that minimizes $L(\boldsymbol{p} + \boldsymbol{\chi}_X)$, we construct the cut
\[
C_X = \{s\} \cup \{{j^\bullet} \in N' \cup N'' \mid d_{j^\bullet} > \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus X}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} \} \cup X.
\]
The structure of the cut, determined by our algorithm from set $I$, is given by
\[
C = \{s\} \cup \{{j^\bullet} \in N' \cup N'' \mid d_{j^\bullet} > \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} \} \cup I.
\]
We can show that $C_X$ is also a min cut:
\begin{align*}
0 &\geq \capa(C) - \capa(C_X)\\
&= \sum_{i \in I} b_i + \sum_{{j^\bullet} \in N' \cup N''} \hspace{-1ex} \min\{d_{j^\bullet}, \hspace{2ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet}) \setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex} \} - \sum_{i \in X} b_i - \sum_{{j^\bullet} \in N' \cup N''} \hspace{-1ex} \min\{d_{j^\bullet}, \hspace{2ex} \sum_{\mathclap{\hspace{3ex}i \in \Gamma^+({j^\bullet}) \setminus X}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\}\\
&= \sum_{i \in I} b_i + \sum_{{j^\bullet} \in N' \cup N''} \bigg(d_{j^\bullet} - \max\{0, d_{j^\bullet} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet}) \setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\} \bigg) \\
&\phantom{=} - \sum_{i \in X} b_i - \sum_{{j^\bullet} \in N' \cup N''} \bigg(d_{j^\bullet} - \max\{0, d_{j^\bullet} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex}i \in \Gamma^+({j^\bullet}) \setminus X}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\} \bigg)\\
&= \bigg(\sum_{{j^\bullet} \in N' \cup N''} \hspace{-1ex} \max\{0, d_{j^\bullet} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet})\setminus X}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\} - \sum_{i \in X} b_i \bigg) - \bigg(\sum_{{j^\bullet} \in N' \cup N''} \hspace{-1ex} \max\{0, d_{j^\bullet} - \hspace{1ex} \sum_{\mathclap{\hspace{3ex} i \in \Gamma^+({j^\bullet}) \setminus I}} \hspace{1ex} c_{{j^\bullet} i} \hspace{1ex}\} - \sum_{i \in I} b_i\bigg)\\
&\geq 0,
\end{align*}
where the first inequality follows from the fact that $C$ is a min cut and the last inequality follows from~\eqref{eq:lyapunov_network}, i.e., that $X$ maximizes the term.
Hence, $C_X$ is a min cut.
Moreover, the choice $X = I = C \cap \Omega$ minimizes $L(\boldsymbol{p} + \boldsymbol{\chi}_X)$, since equality holds in the chain of inequalities.
\end{proof}
\subsection{VCG prices}\label{sec:VCG}
When assuming that every object is owned by a seller and the revenue of the seller is the sum of prices she collects from buyers, then every Walrasian equilibrium is socially optimal (see e.g. \cite{leme2017gross}).
``Socially optimal" means that the utility of the buyers plus the utility of the sellers is maximum among all allocation and price combinations $(\boldsymbol{p}, \boldsymbol{x})$.
This observation follows since in a Walrasian equilibrium $(\boldsymbol{p}^*, \boldsymbol{x}^*)$ every buyer maximizes her payoff given prices $\boldsymbol{p}^*$.
The sum of the prices cancels out since the sellers get what the buyers pay.
Therefore, the Walrasian equilibrium is socially optimal.
A famous mechanism in auction theory is the VCG mechanism (see for an introduction \cite{roughgarden2016cs269i} and \cite{vickrey1961counterspeculation,clarke1971multipart,groves1973incentives} for the original papers).
It takes bids or valuation functions of the buyers and computes an allocation and prices.
The VCG mechanism is essential for auction theory, since it has two nice properties.
First, the computed allocation, is socially optimal and second, the mechanism is truthful.
This means, for a buyer it never pays off to lie, i.e., when reporting the true valuations a buyer $j$ is never worse off than with reporting a fake valuation.
Moreover, there exist some reported valuations of the other buyers where it is strictly better for buyer $j$ to report her true valuations than to lie.
\paragraph{Single-unit matching markets.}
As introduced in \Cref{sec:intro}, a famous special case of our model is the classical matching market (in the literature also commonly referred to as housing market) model, where buyers have unit demand.
What is remarkable here is that the buyer-optimal market clearing prices which can be computed by an ascending auction coincide with VCG prices.
For a good overview on these topics, we refer to the book \cite{easley2010} or the short but self-contained paper by Kern et al. \cite{kern2016}.
This is interesting as an ascending auction (as e.g. presented here) does not necessarily get all the information about buyers' valuations which would be necessary when na\"ively computing VCG prices.
Note that this property does not hold if we go beyond the unit-demand setting as shown in \cite{gul2000english}, where indeed the ascending auction cannot distinguish between different valuations because the complete valuation is not communicated to the auctioneer, who thus cannot accurately determine VCG prices.
We show that for additive valuations with demand, no prices that are based on a per object price can coincide with the VCG prices.
But before, we show that the very natural approach to copy buyers and give them unit demand does not lead to VCG prices.
Let us revisit \Cref{ex:1} from \Cref{sec:intro} and observe that the copy method is not truthful, i.e. it pays off to lie, and thus does not lead to VCG prices.
For the buyer it would be strictly better to report valuation $\boldsymbol{v}_{\bullet 1} = (1, 1)$.
\begin{proposition}
In a market where buyers have additive valuations with demand there exists no mechanism to determine prices per object that coincide with the VCG prices.
\end{proposition}
The following example shows that we need prices per bundle to obtain VCG prices, so the proposition holds.
\begin{example}
Let $N = \{1, 2, 3\}$ and $\Omega = \{\alpha, \beta\}$ with demands $d_1 = d_2 = 2, d_3 = 1$, supplies $b_\alpha = 3, b_\beta = 2$, and valuations $\boldsymbol{v}_1 = (3, 1)$, $\boldsymbol{v}_2 = (2, 0)$, and $\boldsymbol{v}_3 = (0, 1)$.
It is easy to see that both feasible allocations (buyer~1 or~2 gets two items of object $\alpha$, the other one gets one item of each object, buyer~3 gets one item of object $\beta$ in both allocations) are socially optimal.
Say buyer~1 gets two items of $\alpha$, buyer 2 gets $\alpha$ only once.
Then the net effect of buyer~1 on buyer~2 (i.e., the cost that buyer~1 imposes on buyer~2 by her presence) is $2$, so the only possible price for object $\alpha$ is $1$.
Since buyer~2 also has to purchase an item of $\alpha$ once, object $\beta$ needs to be subsidized (i.e., assigned a price of $-1$) to give her a total cost of $0$.
However, this would give buyer 3 total cost of $-1$, which is not VCG.
\end{example}
As an immediate observation from the last example, we can also see that it would have been better for buyer~2 to submit her preferred bundles according to a false valuation function $\tilde{\boldsymbol{v}}_2 = (0, 0)$, which would have resulted in prices $\boldsymbol{p} = (0, 0)$ and the same allocation.
Hence, she would have gotten a payoff of $2$.
The prices we would have obtained by the auction with truthful reporting would have been $(2, 0)$ which yields a payoff of $0$ for buyer~2.
Thus, the ascending auction does not incentivize truthful reporting of demand sets.
\section{Adapted step-length algorithm}\label{sec:adapted_step_length}
One natural approach to speed up the computation of buyer-optimal prices is to increase the length of the augmentation steps.
In the \ref{alg:price-raising}, this means: given a left-most min cut, increase the prices of the corresponding objects until the min cut changes.
In the more general case of strong substitute valuation functions, this means a steepest descent direction of the Lyapunov function is used as long as it remains the steepest descent direction (see \cite{shioura2017algorithms} Section~3, Theorem~4.17).
The fact that increasing the prices on a left-most min cut as long as possible is indeed a special case of the algorithm of Shioura follows by Lemma~\ref{lemma:comparison_murota}.
The step length in this algorithm is determined by a binary search.
This is possible by the following proposition of Shioura \cite{shioura2017algorithms}.
\begin{proposition}[\cite{shioura2017algorithms} Proposition 4.16]\label{prop:shioura_monotonie}
In an ascending auction, whenever a steepest descent direction of the Lyapunov function becomes infeasible one of the following two things happens:
either the support of the direction increases, or the slope with which the Lyapunov function changes decreases.
\end{proposition}
For us, this means whenever the set determined by a cut is not the inclusion-wise minimal maximum overdemanded set any longer, then either the support of the cut increases or the overdemandedness decreases.
Due to the resulting monotonicity we can apply binary search to determine the step length.
This observation allows us to bound the number of price raising steps as done by \cite{shioura2017algorithms} in a similar way for the general setting.
\begin{algorithm}
\SetAlgoRefName{Adapted Step Length Algorithm}
\KwIn{Buyer with valuations and demand, objects with supply}
\KwOut{Buyer optimal Walrasian prices}
$p \coloneqq 0$ \\
$f$ max flow in network $G(p)$\\
$C$ left most min cut in $G(p)$\\
\While{$\val(f) < D_p$}{
$\alpha_{up} = v_{\max}$\\
$\alpha_{lo} = 1$ \\
$\alpha = \lfloor \tfrac{\alpha_{up} + \alpha_{lo}}{2}\rfloor$\\
\While{$\alpha_{up} - \alpha_{lo} > 0$}{
Adapt network and flow to price $p + \alpha \cdot \boldsymbol{\chi}_C$\\
Use breadth first search (BFS) in residual network\\
\If{min cut is equal to C}{
$\alpha_{lo}= \alpha$
}
$\alpha_{up}=\alpha$
}
Set $p = p + \alpha \cdot \boldsymbol{\chi}_C$\\
Adapt network and flow to $p$\\
Augment flow $f$ as long as possible in $G(p)$\\ (for each augmentation step we use BFS in residual network and augment along the path found)\\
$C$ new left most min cut (this is all nodes reachable by BFS in residual network)
}
\Return{$p$}
\caption{ }
\label{alg:adapted_step_length}
\end{algorithm}
\begin{lemma}
The running time of an ascending flow auction with adapted step length as described in \ref{alg:adapted_step_length} is given by $\mathcal{O}(\log(v_{\max}) \abs{\Omega}^2 D_0 \abs{N})$, where $v_{\max} = \max_{j \in N, i \in \Omega} v_{ij}$.
\end{lemma}
\begin{proof}
To prove the statement we will show two claims from which it follows directly.
We start by bounding the time of an update step.
As an update step, we summarize a flow augmentation (by a value of one), a min cut computation, and an update of the network and the flow from one iteration to the next.
In the next step we will bound the total number of update steps needed.
Note that the number of update steps does not correspond to the iterations of the \texttt{while-loop} in \ref{alg:adapted_step_length}.
\begin{claim}
Every update step takes time at most $\mathcal{O}(\abs{N}\abs{\Omega})$.
\end{claim}
\begin{subproof}
To increase the flow by one unit (or more), we use a breadth first search (BFS) in the residual network.
The same is true for the computation of a min cut.
Given some graph $G=(V,E)$, BFS needs $\mathcal{O}(\abs{V} + \abs{E})$ time.
In our graph $\abs{V}$ is in $\mathcal{O}(\abs{N}+ \abs{\Omega})$ and $\abs{E}$ is in $ \mathcal{O}(\abs{N}\abs{\Omega})$, implying that BFS runs in time $\mathcal{O}(\abs{N}\abs{\Omega})$.
The adaption of the network runs in time $\mathcal{O}(\abs{N}\abs{\Omega})$, since
first of all this is the time needed to compute a preferred bundle per player and second,
to adapt the network every arc ($\mathcal{O}(\abs{N}\abs{\Omega})$) is checked and then in constant time the flow is adapted (see \ref{alg:flow_update}).
\end{subproof}
Now, we bound the number of update steps.
\begin{claim}
The number of update steps is bounded by $\mathcal{O}(\log(v_{\max}) \abs{\Omega} D_0)$.
\end{claim}
\begin{subproof}
In total the auction finds some prices $p^*$ and some flow $f^*$ such that $\val(f^*) = D_{p^*}$ or in other words $\val(f^*) - D_{p^*} = 0$.
It starts with the all $0$-flow and $D_{\boldsymbol{0}}$.
During the algorithm $\val(f)-D_p$ is non-increasing (see \Cref{coro:demand-flow_value_non_decreasing}).
If it decreases in an update step, it decreases by at least one.
Using \Cref{prop:shioura_monotonie} we can show that this value decreases at least once every $\mathcal{O}(\log(v_{\max}) \abs{\Omega})$ update steps.
Given some network and some flow, if we can augment the flow, clearly the value $\val(f)-D_p$ decreases.
So assume it is not possible to augment the flow.
With one update step we can recognize this situation and compute a min cut giving an inclusion-wise minimal maximum overdemanded set.
It is possible that we adapt the network in the next update step, but in the resulting graph, the adapted flow is already maximum, i.e. the min cut does not change.
Using a binary search, we can find in $\mathcal{O}(\log(v_{\max}))$ time a point where the min cut changes.
It is still possible that the left-most min cut changes, but the value of the min cut and thus of the flow remains the same.
Using \Cref{prop:shioura_monotonie}, we know that the left-most min-cut changes monotonically.
By the structure of the network, the left-most min cut is determined by the objects in the cut.
Thus, a left-most min cut with the same value can be seen at most $\abs{\Omega}$ times.
Thus, after $\mathcal{O}(\log(v_{\max}) \abs{\Omega})$ update steps, the flow needs to be augmentable.
\end{subproof}
This finishes the proof as the algorithm does at most $\mathcal{O}(\log(v_{\max}) \abs{\Omega} D_0)$ update steps of cost $\mathcal{O}(\abs{N}\abs{\Omega})$ each.
\end{proof}
In contrast, the fastest known algorithm so far is presented by Murota et al. \cite{murota2013computing}.
They showed that their algorithm needs $\|\boldsymbol{p}^*\|_\infty$ iterations, where $\boldsymbol{p}^*$ is the buyer-optimal Walrasian price.
This can potentially be sped up with the adapted step length procedure.
Each iteration of their algorithm has cost $\mathcal{O}(\abs{N} \abs{\Omega}^4 \log(U) \log(\abs{N}\abs{\Omega}U))$ where $U$ is the maximum number of copies of an object.
The cost per iteration follows by \cite[Theorem 1.3]{murota2013computing} and the number of iterations by \Cref{prop:shioura_monotonie} (or, in more detail, \cite{shioura2017algorithms}).
Note that the running time is not comparable to our algorithm.
\section{Conclusion and outlook}
We present a network interpretation of an ascending auction for a multi-unit market where buyers have additive valuations with demand.
We show that by iteratively raising the prices on a left-most min cut we can compute buyer-optimal Walrasian prices via an ascending auction.
The new part here is the simple and efficient flow-based algorithm to determine the sets on which prices should be raised in the ascending auction, namely the minimal maximum overdemanded sets.
For the special case of unit demand a nice matching based algorithm was known to compute these sets.
For the general case of strong gross substitute valuations, in prior literature, this question was either not addressed or the computation was done by using tools from convex analysis.
We are currently working on a more natural and direct approach to compute the minimal maximum overdemanded sets also in the general setting.
With our approach we are, moreover, able to reuse computations from previous iterations to speed up the computation.
Combining this with an algorithm using adapted step length allows for an improved runtime analysis for the complete auction.
Still the resulting running time is not polynomial but only pseudo-polynomial.
While buyer-optimal Walrasian prices can be computed directly by using an LP, it remains open whether there is an iterative ascending auction that runs in polynomial time. Note that when using an LP approach, the buyers need to reveal their whole valuation function, while they only reveal their preferred bundles in the more natural iterative auction.
Our approach enabled us to show that the minimum Walrasian prices coincide with the minimum competitive prices for additive valuations with demands.
This connection seems very natural but was never discussed before.
It is the main part of our proof to show monotonicity properties, i.e., that buyer-optimal Walrasian prices can only increase when the supply decreases or the demand increases.
We are working on achieving a similar result for the general case of strong gross substitute valuations.
Also in this case the connection between minimum Walrasian prices and minimum competitive prices seems intuitive and would imply monotonicity results.
Lastly, the number of iterations necessary to reach buyer-optimal Walrasian prices for a slightly perturbed instance is not polynomially bounded for the perturbations we considered.
It is an interesting question whether there are perturbations where the algorithm reaches buyer-optimal Walrasian prices again with only constantly many, or only polynomially many update steps.
\end{document} |
\begin{document}
\title{On a generalization of the Howe-Moore property}
\renewcommand{Abstract}{Abstract}
\begin{abstract}
We define a Howe-Moore property relative to a set of subgroups. Namely, a group $G$ has the Howe-Moore property relative to a set $\mathcal{F}$ of subgroups if for every unitary representation $\pi$ of $G$, whenever the restriction of $\pi$ to any element of $\mathcal{F}$ has no non-trivial invariant vectors, the matrix coefficients vanish at infinity. We prove that a semisimple group has the Howe-Moore property relative to the family of its factors.
\end{abstract}
\section{Introduction}
In \cite{HOWEMOORE}, Howe and Moore discovered a very interesting property of connected, non-compact, simple Lie groups with finite center: whenever they act ergodically on a probability space by preserving the measure, the action is automatically mixing. This property, rephrased purely in terms of unitary representations has since been called the \textbf{Howe-Moore} property. Later, other topological groups were proved to enjoy this property.
In \cite{CIOBO}, a very beautiful paper, Ciobotaru synthesizes the proofs of all known cases of groups having the Howe-Moore property, giving a unified proof.
In this paper, we generalize further the unified proof of \cite{CIOBO} so that it also applies to products and, in particular, generalizes the situation of products of Lie groups considered in \cite[Theorem 1.1, p. 81]{BEKKAMAYER}).
\begin{comment}
Now, some products of groups having the Howe-Moore property also happen to satisfy a weakening of the Howe-Moore property (see \cite[Theorem 1.1, p. 81]{BEKKAMAYER}). In this paper, we discuss this phenomenon by noticing that the unified proof of \cite{CIOBO} can be, up to slight changes, generalized to prove statements of this kind.
\end{comment}
\section{Statement of the results}
\begin{comment}
The exposition and the proofs follow the lines from \cite{CIOBO}.
\end{comment}
Let $G$ be a topological group.
\begin{nota}
If $g \in G^\nn$, let us write $\lim_{n \to \infty} g_n = \infty$ if for every compact subset $K$ of $G$, there is an integer $N$ such that for any integer $n$ such that $n \geq N$, $g_n \not \in K$.
If $f : G \rightarrow \cc$, if $a \in \cc$, we write $\lim_{g \to \infty} f(g) = a$ when we have \[\forall \epsilon > 0,\ \exists K \subset G,\quad K\mbox{ is compact and}\ \forall g \not \in K,\ \vert f(g) - a \vert \leq \epsilon.\]
\end{nota}
\begin{defi}\textbf{(Cartan decomposition)}
We say that a triplet $(K_1,A^+,K_2)$ is a \textbf{Cartan decomposition} of $G$ if the following conditions are satisfied:
\begin{enumerate}
\item $K_1$ and $K_2$ are compact subsets of $G$,
\item $A^+$ is an abelian subsemigroup $G$, that is, $\forall a_1,a_2 \in A^+,\ a_1 a_2 = a_2 a_1 \in A^+$ and
\item $G = K_1A^+K_2$.
\end{enumerate}
\end{defi}
\begin{nota}
If $a \in G^\nn$, we set \[U^+_{a} := \{g \in G \tq \lim_{n \to \infty} a_n^{-1}\ g\ a_n = e\}\ \mbox{and}\] \[U^-_{a} := \{g \in G \tq \lim_{n \to \infty} a_n\ g\ a^{-1}_n = e\}.\]We call them the \textbf{positive and negative contracting subgroups} associated to $a$.
\end{nota}
\begin{defi}\textbf{(Mautner's property)}
Let $\cali{F}$ be a set of subgroups of $G$, and $A$ a subset of $G$. We say that $(G,A)$ has \textbf{Mautner's property} relative\footnotemark[1] to $\cali{F}$ if \[\forall a \in A^\nn\ \left(\lim_{n \to \infty} a_n = \infty\right) \Longrightarrow \left(\exists F \in \cali{F},\ \exists b \ \mbox{subsequence of}\ a, \quad F \subseteq \overline{\langle U^+_{\mathbf{b}},U^-_{\mathbf{b}}\rangle}\right).\]
\end{defi}
\footnotetext[1]{If $\cali{F} = \{G\}$, we omit ``relative to $\cali{F}$".}
\begin{comment}
Avant suggestion de Christophe
\begin{defi}\textbf{(Mautner's property)}
Let $\cali{F}$ be a set of subgroups of $G$, and $A$ a subset of $G$. We say that $(G,A)$ has \textbf{Mautner's property} relative to $\cali{F}$ \[\forall a \in A^\nn\ \left(\lim_{n \to \infty} a_n = \infty\right) \Longrightarrow \left(\exists F \in \cali{F},\ \exists b \ \mbox{subsequence of}\ a, \quad F \subseteq \overline{\langle U^+_{\mathbf{b}},U^-_{\mathbf{b}}\rangle}\right).\]We say that $(K_1,A^+,K_2)$ is a \textbf{Cartan-Mautner decomposition} of $G$ relative\footnotemark[2] to $\cali{F}$ if $(K_1,A^+,K_2)$ is a Cartan decomposition of $G$ and if $(G,A^+)$ has Mautner's property relative to $\cali{F}$.
\end{defi}
\end{comment}
\begin{rema} In \cite{CIOBO}, it is proved that the following groups have Cartan decompositions $(K_1,A^+,K_2)$ such that $(G,A^+)$ has the Mautner property:
\begin{enumerate}
\item simple algebraic groups over a non-archimedean local field;
\item subgroups of the group of automorphisms of a $d$-biregular tree for $d\geq 3$ that are topologically simple and that act $2$-transitively on the boundary of the tree;
\item noncompact, connected, semisimple Lie groups with a finite center.
\end{enumerate}
\end{rema}
\begin{nota}
If $\pi : G \rightarrow U(\cali{H})$ is a unitary representation of $G$ and $F$ is a subgroup of $G$, we denote by\[\fix(\pi,F) := \{\phi \in \cali{H} \tq \forall g \in F,\quad \pi(g)\phi = \phi\}.\]
\end{nota}
\begin{defi}\textbf{(Relative Howe-Moore property)}
Let $\cali{F}$ be a set of subgroups of $G$. We say that $G$ has the \textbf{Howe-Moore property} relative\footnotemark[2] to $\cali{F}$ if \[\forall \pi : G \rightarrow U(\cali{H}),\quad \left(\forall F \in \cali{F},\quad \fix(\pi,F) = \{0\}\right) \Longrightarrow \left(\forall \phi,\psi \in \cali{H},\quad \lim_{g \to \infty} \langle \pi(g)\phi,\psi\rangle = 0\right).\]
\end{defi}
\footnotetext[2]{As for Mautner's property, if $\cali{F} = \{G\}$, we omit ``relative to $\cali{F}$".}
\begin{rema} In \cite{CLUCORLOUTESVAL}, one can find a ``relative Howe-Moore property", but the one we consider in the present note is different.
\end{rema}
Our main result is the following.
\begin{thmnonnum} Let $\cali{F}$ be a set of subgroups of $G$. If $G$ admits a Cartan decomposition $(K_1,A^+,K_2)$ such that $(G,A^+)$ has the Mautner property relative to $\cali{F}$, then it satisfies the Howe-Moore property, relative to $\cali{F}$.
\end{thmnonnum}
\begin{rema} In the case where $\cali{F} = \{G\}$, then the theorem is just \cite[Theorem 1.2, p. 2]{CIOBO}.
\end{rema}
The following consequence is useful.
\begin{comment}
\footnotetext[3]{Of course, we identify $G_i$ with $\{e\} \times \cdots \times \{e\} \times G_i \times \{e\} \times \cdots \times \{e\}$.}
\end{comment}
\begin{coro}\label{corollaire1} Let $G_1,...,G_N$ be groups having Cartan decompositions $(K_{i,1},A^+_i,K_{i,2})$ such that for all $i$, $(G_i,A^+_i)$ has the Mautner property. Then the product $G:= \Pi_i G_i$ has the Howe-Moore property, relative to $\{G_1,\cdots ,G_N\}$.
\end{coro}
The Howe-Moore property is often used to deduce mixing from ergodicity. The following obvious corollary states the analog result for the relative Howe-Moore property.
\begin{coro}\label{corollaire2} Let $G_1, \cdots, G_N$ be topological groups admitting Cartan decompositions $(K_{i,1},A^+_i,K_{i,2})$ such that for all $i$, $(G_i,A^+_i)$ has the Mautner property. Let $G := G_1 \times \cdots \times G_N$, and let $G \curvearrowright (X,\mu)$ a measure-preserving action on a probability space such that the restriction to each of the $G_i's$ is ergodic. Then the action $G \curvearrowright (X,\mu)$ is mixing.
\end{coro}
As an application, we spell out the following corollary.
\begin{coro}\label{reseauirreductibleergodicite} Let $G_1,\cdots, G_N$ be topological groups having Cartan decompositions $(K_{i,1},A^+_i,K_{i,2})$ such that for each $i$, $(G_i,A^+_i)$ has the Mautner property. Let $G := G_1 \times \cdots \times G_N$, and let $\Gamma$ be an irreducible lattice in $G$. Then the action $G \curvearrowright G/\Gamma$ is mixing.
\end{coro}
\begin{rema} The theorem and Corollary \ref{corollaire2} were already known, in the case $G$ is a semisimple group with finite center (see \cite[Theorem 1.1, p. 81 and Theorem 2.1, p. 89]{BEKKAMAYER} for a proof using Lie theory technology). In the approach we propose, Lie theory is only needed to prove that the factors satisfy the Howe-Moore property. We therefore provide an elementary shortcut for a part of their proof. Moreover, our proof is more general and applies to other topological groups.
\end{rema}
\begin{remer} We would like to address many thanks to Christophe Pittet for his useful help and advice.
\end{remer}
\section{Proofs}
\subsection{Useful facts and notation}
We recall here some tools we need for the proofs.
\begin{nota}
A sequence $g \in G^\nn$ is said to be \textbf{bounded} if $\exists K,\ \forall n\in \nn, \quad g_n \in K$.
\end{nota}
\begin{fact}
If $G$ is locally compact, second countable, every unbounded sequence has a subsequence that goes to infinity.
If $G$ is locally compact, second countable, $f : G \rightarrow \cc$ and $a \in \cc$, we have the following sequential characterization \[\displaystyle\lim_{g \to \infty} f(g) = a \Leftrightarrow \left(\forall g \in G^\nn, \lim_{n \to \infty} g_n = \infty \Rightarrow \lim_{n \to \infty} f(g_n) = a\right).\]
\end{fact}
When we write $\pi : G \rightarrow U(\hilb)$, it is implicit that it is both a morphism and that it is continuous for the strong operator topology (and therefore, a \textbf{unitary representation}), and that $\hilb$ is a complex, separable Hilbert space.
The following fact easily follows from the sequential weak operator compactness of the unit ball in the space of bounded operators.
\begin{fact}\label{limitcommutnormal}
Let $T = (T_n)_{n \in \nn}$ be a sequence of normal operators of norm $1$ on a Hilbert space such that $\forall n,m\in \nn, T_n T_m = T_m T_n$. Then $T$ has a subsequence, that converges, in the weak operator topology, to a normal operator that commutes with all the $T_n's$.
\end{fact}
\begin{comment}
\begin{proof}[Proof]
Let $A$ be a countable, dense subset of the unit ball of $\hilb$. Because $[-1,1]^{A^2}$ is sequentially compact, there is an increasing $\phi : \nn \rightarrow \nn$ such that for all $\xi_1,\xi_2 \in A$, $\langle T_{\phi(n)}\xi_1,\xi_2\rangle$ converges to a number $e(\xi_1,\xi_2)$. We will successively extend $e$ but will keep calling the extension $e$.
For $\xi \in \hilb$, the map \[\begin{array}{rcl}
e(\xi,\cdot) : A &\rightarrow &[-1,1]\\
\nu &\mapsto &e(\xi,\nu)\\
\end{array}\]is $1$-lipschitz, hence uniformly continuous, so it extends to a unique uniformly continuous map $e(\xi,\cdot)$ from $B_1(\hilb)$ to $[-1,1]$. In the same fashion, for all $\nu \in \hilb$, the map\[\begin{array}{rcl}
e(\cdot,\nu) : A &\rightarrow &[-1,1]\\
\xi &\mapsto &e(\xi,\nu)\\
\end{array}\]extends to a unique uniformly continuous map $e(\cdot,\nu)$ from $B_1(\hilb)$ to $[-1,1]$. So $e$ has a unique uniformly continuous extension from $A\times A$ to $B_1(\hilb) \times B_1(\hilb)$. Let us extend it to $\hilb^2$ by \[\forall \xi,\nu \in \hilb^*,\quad e(\xi,\nu) := \Vert \xi \Vert \Vert \nu \Vert e\left(\frac{\xi}{\Vert \xi \Vert},\frac{\nu}{\Vert \nu \Vert}\right).\]It is straightforward to check that $e$ is a hermitian form on $\hilb$ such that for all $\xi,\nu \in \hilb$, \[\vert e(\xi,\nu) \vert \leq \Vert \xi \Vert \Vert \nu \Vert.\]According to Riesz' theorem, there is a bounded operator $E$ such that for all $\xi,\nu \in \hilb$, $e(\xi,\nu) = \langle E\xi,\nu\rangle$. Now, let $m \in \nn$, $\xi,\nu \in \hilb$. Then \[\begin{array}{rcl}
\langle ET_m \xi,\nu\rangle &= &\lim_{n \to \infty} \langle T_{\phi(m)}T_n \xi,\nu\rangle\\
&= &\lim_{n \to \infty} \langle T_n T_{\phi(m)} \xi,\nu\rangle\\
&= &\lim_{n \to \infty} \langle T_{\phi(m)} \xi, T^*_m \nu\rangle\\
&= &\langle E \xi,T^*_m \nu\rangle\\
&= &\langle T_m E \xi,\nu\rangle\\
\end{array}\]from what we deduce that $E$ commutes with all the $T_m's$.
\end{proof}
\end{comment}
\subsection{Proof of the theorem}
The proof of the following lemma is obvious.
\begin{lem}\label{ssgroupcontractproduit} If $a = (a^1,\cdots,a^N) \in (G_1 \times \cdots \times G_N)^\nn$, then $U^+_a = U^+_{a^1} \times \cdots \times U^+_{a^N}$ (and this is also valid for $U^-$).
\end{lem}
Let $\pi : G \rightarrow U(\hilb)$ be a unitary representation.
\begin{lem}{\rm \cite[Lemma 2.9]{CIOBO}}
\label{coefficientmautner}
Let $(K_1,A^+,K_2)$ be a Cartan decomposition of $G$. If \[
\exists \phi,\psi \in \hilb\setminus \{0\},\ \exists g \in G^\nn,\quad (\langle \pi(g_n)\phi,\psi\rangle)_{n \in \nn} {\rm\ doesn't\ converge\ to}\ 0,\]then \[\exists \phi,\psi \in \hilb\setminus \{0\},\ \exists a \in (A^+)^\nn,\quad (\langle \pi(a_n)\phi,\psi\rangle)_{n \in \nn} {\rm\ doesn't\ converge\ to}\ 0.\]
\end{lem}
\begin{lem}{\rm \cite[Lemma 2.8]{CIOBO}}
\label{coefficientdiagonal}
Let $g \in G^\nn$. If \[\exists \phi,\psi \in \hilb\setminus \{0\},\quad (\langle \pi(g_n)\phi,\psi\rangle)_{n \in \nn} {\rm\ doesn't\ converge\ to}\ 0,\]then
\[\exists \phi\in \hilb\setminus \{0\},\quad (\langle \pi(g_n)\phi,\phi\rangle)_{n \in \nn} {\rm\ doesn't\ converge\ to}\ 0.\]
\end{lem}
\begin{comment}
\begin{lem}{\rm \cite[Lemma 2.12]{CIOBO}}
Soit $g \in G^\nn$, $\phi \in \hilb\setminus\{0\}$ et $\phi_0 \in \hilb$. Si $\pi(g_n)\phi$ converge faiblement vers $\phi_0$, alors $\phi_0$ est fixé par $U^+_g$.
\end{lem}
\end{comment}
\begin{lem}\label{ssgroupefermefixe} If $\phi \in \hilb$, the set $\{g \in G \tq \pi(g) \phi = \phi\}$ is a closed subgroup.
\end{lem}
\begin{proof}[Proof] It is a subgroup because $\pi$ is a morphism, and it is closed because $\pi$ is strongly continuous.
\end{proof}
We extract the following lemma out of \cite[Lemma 3.1]{CIOBO} for the sake of clarity.
\begin{lem}\label{mautnerplusplus} Let $g \in G^\nn$ such that $\forall n,m \in \nn, g_n g_m = g_m g_n$. Let $\phi \in \hilb\setminus\{0\}$ such that $(\langle \pi(g_n)\phi,\phi\rangle)_{n \in \nn}$ doesn't converge to $0$. Then there is $\phi_0 \in \hilb\setminus\{0\}$, fixed by $U^+_{g}$ and by $U^-_g$.
\end{lem}
\begin{proof}[Proof] Up to extraction, we can assume that $(\pi(g_n))_{n \in \nn}$ converges, for the weak operator topology, to a normal operator $E$, which commutes with the $\pi(g_n)'s$, according to Fact \ref{limitcommutnormal}.
Because of the weak operator convergence of the operators, $\langle E\phi,\phi\rangle \not = 0$, which implies that $E\phi \not = 0$. Let us prove that $E\phi$ is fixed by $U^\pm_g$.
Let $u \in U^+_g$, and $\psi \in \hilb$. We have
\[\begin{array}{rcl}
\vert \langle \pi(u)E\phi - E\phi,\psi \rangle \vert &= &\vert\langle E\pi(u)\phi - E\phi,\psi \rangle \vert\\
&= &\displaystyle\left\vert \lim_{n \to \infty} \langle\pi(g_n)\pi(u)\phi - \pi(g_n)\phi,\psi\rangle\right\vert\\
&= &\displaystyle\left\vert \lim_{n \to \infty} \langle\pi(g_nug^{-1}_n)\pi(g_n)\phi - \pi(g_n)\phi,\psi\rangle\right\vert\\
&= &\displaystyle\left\vert \lim_{n \to \infty} \left\langle\left(\pi(g_nug^{-1}_n) - \id\right)\pi(g_n)\phi,\psi\right\rangle\right\vert\\
&= &\displaystyle\left\vert \lim_{n \to \infty} \left\langle\pi(g_n)\phi,\left(\pi(g_nug^{-1}_n) - \id\right)^*\psi\right\rangle\right\vert\\
&= &\displaystyle\left\vert \lim_{n \to \infty} \left\langle\pi(g_n)\phi,\left(\pi(g^{-1}_nu^{-1}g_n) - \id\right)\psi\right\rangle\right\vert\\
&\leq &\lim_{n \to \infty} \Vert \pi(g_n)\phi \Vert \cdot \Vert \left(\pi(g^{-1}_nu^{-1}g_n) - \id\right)\psi\Vert\\
&= &\lim_{n \to \infty} \Vert \phi \Vert \cdot \Vert \left(\pi(g^{-1}_nu^{-1}g_n) - \id\right)\psi\Vert\\
{\small \mbox{($u^{-1} \in U^+_g$)}}&\to &0\\
\end{array}\]This being true for all $\psi$, we therefore have $\pi(u)E\phi = E\phi$. The same argument applies to $u \in U^-_g$.
\end{proof}
\begin{proof}[Proof of the theorem]
Let us prove that if there is $\phi, \psi \in \hilb$ such that we don't have \[\lim_{g \to \infty} \langle \pi(g)\phi,\psi\rangle = 0,\] then there is $F \in \cali{F}$ and a vector $\phi_0 \in \hilb \setminus\{0\}$ fixed by $\pi(F)$.
So, let $\phi,\psi \in \hilb$ be as such. There is a sequence $g \in G^\nn$ that goes to infinity such that $(\langle\pi(g_n)\phi,\psi\rangle)_{n \in \nn}$ doesn't converge to $0$. Up to extraction, we can assume that there exists $F \in \cali{F}$ such that $F \subseteq \overline{\langle U^+_g,U^-_g\rangle}$. According to Lemma \ref{coefficientmautner} and Lemma \ref{coefficientdiagonal}, we can assume that $(\langle\pi(g_n)\phi,\phi\rangle)_{n \in \nn}$ doesn't converge to $0$, and that $g \in (A^+)^\nn$. According to Lemma \ref{mautnerplusplus}, there is $\phi_0 \in \hilb \setminus\{0\}$ that is fixed by $U^\pm_g$. According to Lemma \ref{ssgroupefermefixe}, $\phi_0$ is, in fact, fixed by $\overline{\langle U^+_g,U^-_g\rangle}$, and therefore, by $F$.
\end{proof}
\subsection{Proofs of the corollaries}
The proof of Corollary \ref{corollaire1} is an obvious application of the following lemma.
\begin{lem} Let $G_1,...,G_N$ be topological groups such that for each $i$, $G_i$ has a Cartan decomposition $(K_{i,1}, A^+_i,K_{i,2})$, and $(G_i,A^+_i)$ has the Mautner property. Then \[(K_{1,1} \times \cdots \times K_{n,1}, A^+_1 \times \cdots \times A^+_n, K_{1,2} \times \cdots \times K_{n,2})\] is a Cartan decomposition of $G := G_1 \times \cdots \times G_N$ such that $(G,A^+_1 \times \cdots \times A^+_n)$ has the Mautner property relative to $\{G_1,...,G_N\}$.
\end{lem}
\begin{proof}[Proof] It is clear that the announced triplet is a Cartan decomposition of $G$. We just have to prove that $(G,A^+_1\times \cdots \times A^+_N)$ satisfies Mautner's property, relative to $\{G_1,...,G_N\}$. Let us denote $A^+ := A^+_1 \times \cdots \times A^+_N$. Let $a = (a^1,\cdots, a^N) \in (A^+)^\nn$ such that $\lim_{n \to \infty} a_n = \infty$. The set $\{i \in \{1,...,N\} \tq (a^i_n)_{n \in \nn}\ {\rm is\ not\ bounded}\}$ is not empty, unless $a$ is itself bounded, but it isn't by hypothesis. Let $j$ be such that $(a^j_n)_{n \in \nn}$ is unbounded. Then there is an increasing $h_1 : \nn \rightarrow \nn$ such that $(a^j_{h_1(n)})_{n \in \nn}$ goes to infinity in $G_j$. By hypothesis on $G_j$, there is an increasing $h_2 : \nn \rightarrow \nn$ such that if we denote $b^j := (a^j_{h_1(h_2(n))})_{n \in \nn}$, then $\overline{\langle U^+_{b^j},U^-_{b^j}\rangle } = G_j$. We then have, by Lemma \ref{ssgroupcontractproduit} \[\overline{\langle U^+_{b},U^-_{b}\rangle } \supseteq \{1\}\times \cdots \times \{1\} \times G_j \times \{1\} \times \cdots \times \{1\}.\]
\end{proof}
To a measure preserving action of a topological group on a probability space $X$, one can associate a unitary representation of the group in $U(L^2(X))$ (called the Koopman representation) such that the action is ergodic if and only if the only invariant vectors of the representation are the constants, and such that the mixing is equivalent to the vanishing at infinity of all matrix coefficients of the subrepresentation on the subspace of functions of zero integral. This said, the proof of Corollary \ref{corollaire2} is obvious.
The proof of Corollary \ref{reseauirreductibleergodicite} goes as follows.
\begin{proof}[Proof of Corollary \ref{reseauirreductibleergodicite}] Thanks to Corollary \ref{corollaire2}, it is enough to check that for every $i$, $G_i \curvearrowright G/\Gamma$ is ergodic. According to \cite[Corollary 2.2.3, p. 18]{ZIMMER}, $G_i \curvearrowright G/\Gamma$ is ergodic if and only if $\Gamma \curvearrowright G/G_i$ is ergodic. But this action is ergodic if and only if the image of $\Gamma$ in $G_1\times \cdots \times \widehat{G_i} \times \cdots \times G_N$ is dense, and this is precisely the case when $\Gamma$ is irreducible.
\end{proof}
\end{document} |
\begin{document}
\title{$\fz$-finite distributions on $p$-adic groups}
\author{Avraham Aizenbud}
\address{Avraham Aizenbud,
Faculty of Mathematics and Computer Science, Weizmann
Institute of Science, POB 26, Rehovot 76100, Israel }
\email{aizenr@gmail.com}
\urladdr{http://www.wisdom.weizmann.ac.il/~aizenr}
\author{Dmitry Gourevitch}
\address{Dmitry Gourevitch, Faculty of Mathematics and Computer Science, Weizmann
Institute of Science, POB 26, Rehovot 76100, Israel }
\email{dimagur@weizmann.ac.il}
\urladdr{http://www.wisdom.weizmann.ac.il/~dimagur}
\author{Eitan Sayag}
\address{Eitan Sayag,
Department of Mathematics,
Ben Gurion University of the Negev,
P.O.B. 653,
Be'er Sheva 84105,
ISRAEL}
\email{eitan.sayag@gmail.com}
\address{Alexander Kemarsky,
Mathematics Department, Technion - Israel
Institute of Technology, Haifa, 32000 Israel}
\email{alexkem@tx.technion.ac.il}
\keywords{Bernstein center, fuzzy balls, \DimaB{special balls}, wavefront set, spherical character, \DimaB{relative character}, Harish-Chandra-Howe germ expansion}
\subjclass[2010]{20G05, 20G25, 22E35, 46F99}
\date{\today}
\maketitle
\begin{abstract}
For a real reductive group $G$,
the center $\fz(\cU(\fg))$ of the universal enveloping algebra of the Lie algebra $\fg$ of $G$ acts on the space of distributions on $G$.
This action proved to be very useful (see e.g. \cite{HCBul,HCReg,Sha,Bar}).
Over non-Archimedean local fields, one can replace the action of $\fz(\cU(\fg))$ by the action of the Bernstein center $\fz$ of $G$, i.e. the center of the category of smooth representations.
However, this action is not well studied. In this paper we provide some tools to work with this action and prove the following results.
\begin{itemize}
\item The wavefront set of any $\fz$-finite distribution $\xi$ on $G$ over any point $g\in G$ lies inside the nilpotent cone of $T_g^*G \cong \fg$.
\item Let $H_1,H_2 \subset G$ be symmetric subgroups. Consider the space $\cJ$ of $H_1\times H_2$-invariant distributions on $G$. We prove that the $\fz$-finite distributions in $\cJ$ form a dense subspace. In fact we prove this result in wider generality, where the groups $H_i$ are spherical subgroups of certain type and the invariance condition is replaced by equivariance.
\end{itemize}
Further we apply those results to density and regularity of \DimaB{relative} characters.
The first result can be viewed as a version of Howe's expansion of characters.
The second result can be viewed as a spherical space analog of
a classical theorem on density of characters of \DimaB{finite length} representations.
It can also be viewed as a spectral version of Bernstein's localization principle.
In the Archimedean case, the first result is well-known and the second remains open.
\end{abstract}
\tableofcontents
\section{Introduction}\label{sec:intro}
Let $\mathbf{G}$ be a reductive group defined over a non-Archimedean local field $F$. Let $G:=\mathbf{G}(F)$ \EitanA{be the corresponding $l$-group} and let $\cS(G)$ be the space of locally constant compactly supported functions on $G$. Let $\fz:=\fz(G):=\operatorname{End}_{G \times G}(\cS(G))$ denote the Bernstein center (see \Cref{subsec:Ber}). The action of $\fz$ on $\cS(G)$ gives rise to the dual action on the space of distributions $\cS^*(G)$.
In this
paper we study $\fz$-finite distributions, i.e. distributions $\xi$ such that \EitanA{$\dim (\fz \cdot \xi)< \infty $}.
\subsection{Wavefront set of $\fz$-finite distributions}
Our first result concerns the wavefront set of such distributions.
For $x\in G$ let $WF_{x}(\xi)$ denote the intersection of the wavefront set of $\xi$ with the cotangent space $T_x^*G$ (see \Cref{subsec:WF}).
In \Cref{sec:WF} we prove
\begin{introthm}\label{thm:main}
Suppose that $F$ has characteristic zero. Let $\xi \in \cS^*(G)$ be a $\fz$-finite distribution. Then for any $x\in G$ we have
\begin{equation}
WF_{x}(\xi) \subset \cN
\end{equation}
where $\cN \subset \fg^*$ is the nilpotent cone, and we identify \DimaA{the Lie algebra} $\fg$ with $T_xG$ using the right action\footnote{Since $\cN$ is invariant by conjugation it does not matter whether we use the right or the left action.}.
\end{introthm}
Our main tool is the theory of \DimaB{special} balls. This theory was developed for $\mathbf{G}= \operatorname{GL}_n$ in \cite{S} \DimaB{(where these balls were called fuzzy balls)}, using some ideas from \cite{H,H2}. In \Cref{subsec:PrelFuzzy,sec:balls} we recall the relevant part of this theory and adapt it to general reductive groups.
\DimaA{
\begin{introremark}
We need the characteristic zero assumption since we use the exponentiation map in order to identify a neighborhood of zero in $\fg$ with a neighborhood of the unit element in $G$. For $G=\operatorname{GL}_n$ one can use the map $X \mapsto \mathrm{Id} + X$ (as in \cite{S}) and drop the assumption on the characteristic. \RamiE{It is likely that for other classical groups one can use the Cayley map, and considerably weaken the assumption on the characteristic. The general case can be possibly treated using \cite[Appendix A]{AS}.}
\end{introremark}
}
\subsection{Density of $\fz$-finite distributions}
\DimaB{
The next results of this paper depend on closed subgroups of $G$.
We will require some conditions on these subgroups. We will describe those conditions in \Cref{defn:FinMult} below. If a subgroup $H\subset G$
satisfies these conditions we will call the pair \Rami{$(G,H)$} \textit{a \Rami{pair of finite type}}. Conjecturally, if $F$ has characteristic zero then this holds for all spherical pairs. As explained below in \Cref{subsubsec:FinGen}, many cases of this conjecture follow from \Cref{sec:FinGen} and \cite[Theorem A]{AAG}, based on \cite[Theorem 5.1.5]{SV}, and \cite{Del}. Those cases include all symmetric pairs of reductive groups.
\begin{introthm}[see \Cref{sec:Dense} below] \label{thm:Dense}
\Rami{Let $H_1,H_2\subset G$ be two (closed) subgroups and $\chi_i$ be characters of $H_i$.}
Consider the two-sided action of $H_1\times H_2$ on $G$ and let $$\cI:=\cS^*(G)^{(H_1\times H_2,\chi_1\times\chi_2)}$$ be the space of $(H_1\times H_2,\chi_1\times\chi_2)$-equivariant distributions on $G$. Note that the Bernstein center $\fz$ acts on $\cI$.
Assume that the \Rami{pairs $(G,H_i)$ are of finite type}.
Then the space of $\fz$-finite distributions in $\cI$ is dense in $\cI$.
\end{introthm}
}
\subsection{Applications}
\DimaB{In this subsection we continue to work in the notation and assumptions of \Cref{thm:Dense}.}
Important examples of $\fz$-finite distributions in $\cI$ are $(H_{1}\times H_{2},\chi_1\times \chi_2)$-\DimaB{relative} characters of \DimaB{finite length} representations (see \Cref{def:SphChar}). It turns out that those examples are exhaustive. Namely, we have the following proposition.
\begin{introprop}[see \Cref{sec:DistChar} below]\label{prop:DistChar}
Any $\fz$-finite distribution in $\cI$ is an $(H_1\times H_2, \chi_1\times \chi_2)$-\DimaB{relative} character of some \DimaB{finite length} representation of $G$.
\end{introprop}
Together with \Cref{thm:Dense} it implies
\begin{introcor}\label{cor:SphCharDense}
The space of $(H_1\times H_2, \chi_1\times \chi_2)$-\DimaB{relative} characters of \DimaB{finite length} representations of $G$ is dense in $\cI$.
\end{introcor}
\Cref{thm:main} provides a simple proof of the easy part of Harish-Chandra's regularity theorem \cite{HC_sub,H2}, namely the regularity of the character on the set of regular semi-simple elements. In \Cref{sec:reg} we generalize this result to the realm of spherical pairs. For that, we introduce the notion of $H_1\times H_2$-cotoric elements and prove
the following result.
\begin{introcor}\label{thm:reg}
Suppose that $F$ has characteristic zero, \DimaB{and $H_i$ are $F$-points of algebraic groups $\mathbf{H}_i\subset \mathbf{G}$.} Let $\xi \in \cI$ be a $\fz$-finite distribution.
Then $\xi$ is smooth in the neighborhood of any $H_1\times H_2$-cotoric element.
\end{introcor}
This result generalizes the main result of \cite[\S 5]{RR}, since if $\EitanA{H:=}H_1=H_2$ is a symmetric subgroup then the $H$-regular semisimple elements are cotoric (see \Cref{lem:SymSmooth}).
\EitanA{Combining \Cref{thm:main,thm:Dense} we obtain the following tool to study invariant distributions}:
\begin{introcor}\label{cor:WF}
The subspace of distributions in $\cI$ whose wavefront set at any point is contained in the nilpotent cone in the dual Lie algebra $\fg^*$ is dense in $\cI$.
\end{introcor}
\subsection{Related results}
The germ at the unit element of the character of an irreducible representation of $G$
\EitanA{can be presented} as a linear combination of
Fourier transforms of invariant measures of nilpotent orbits. This was shown in \cite{H} for $\mathbf{G}=\operatorname{GL}_n$ and in \cite{HCQ} for general $\mathbf{G}$. This cannot be naively generalized to the case of symmetric pairs, since the nilpotent orbital integrals are not defined for symmetric spaces in general. However, in \cite[\S 7]{RR} it is shown that the germ at the unit element of a \DimaB{relative} character is a Fourier transform of a distribution supported on the nilpotent cone.
\Cref{thm:main} can be viewed as a version of these results, which gives less information but works in wider generality. Namely, it
implies that the germ of \DimaB{any relative character of any finite length representation} is a Fourier transform of a distribution supported near the nilpotent cone.
Distributions \EitanA{arising in} representation theory are often $\fz$-finite. In the Archimedean case (where $\fz$ means the center of the universal enveloping algebra of the Lie algebra) this was widely exploited. For example it was used to prove the Harish-Chandra regularity theorem (\cite{HCBul,HCReg}),
uniqueness of Whittaker models (\cite{Sha}) and Kirillov's conjecture
(\cite{Bar}). Recently, it was used in \cite{JSZ} to prove uniqueness of Ginzburg-Rallis models and in \cite{AG_ZR} to show non-vanishing of Bessel-like functions. However, in the non-Archimedean case there were no tools that use finiteness of distributions under the Bernstein center. This work provides such a tool.
A classical result (see \cite[\S A.2]{DKV} and \cite[Appendix]{Kaz})
says that characters of \DimaB{finite length} representations span a dense subspace of the space of conjugation-invariant distributions on $G$. One can view \Cref{cor:SphCharDense} as the relative counterpart of this result.
One can attempt to generalize \Cref{thm:Dense} in the following direction. Let an $l$-group $G$ act on an $l$-space $X$, and let $\cE$ be a $G$-equivariant sheaf on $X$. Let a complex commutative algebra $A$ act on $\cS(X,\cE)$. Let $\cI:=\cS^*(X,\cE)^G$ be the space of invariant distributional sections of $\cE$. Assume that $A$ preserves $\cI$. Is the space of $A$-finite distributions in $\cI$ dense in $\cI$? Another important special case of this question is the case when $A=\cS(Y)$ where $Y$ is some $l$-space and the action of $A$ on $\cS(X)$ is given by a map from $X$ to $Y$ \DimaB{and $G$ acts on the fibers of this map}.
In this case the positive answer is given by Bernstein's localization principle \cite[\S 1.4]{BerP}.
Thus, one can view \Cref{thm:Dense} as a spectral version of Bernstein's localization principle.
The Archimedean analogs of \Cref{thm:Dense} as well as of Bernstein's localization principle are not known in general.
\subsection{Tools developed in this paper}
\subsubsection{Pairs of finite type}\label{subsubsec:FinGen}
\Rami{
\begin{introdefn}\label{defn:FinMult}
Let $H<G$ be a closed subgroup and $\chi$ be its character.
We say that the pair $(G,H)$ has \emph{finite type} if
for any character $\chi$ of $H$ and any compact open
subgroup $K<G$, the module $(\operatorname{ind}_{H}^{G}\chi)^K$
over the Hecke algebra
$\mathcal{H}_K(G)$ is
finitely generated. \DimaB{Here, $\operatorname{ind}_{H}^{G}\chi$ denotes the compact induction.}
\end{introdefn}
In \Cref{sec:FinGen} we give the following criterion for pairs to be of finite type.
\begin{introtheorem}[cf. \Cref{thm:ExpFinGen}]\label{thm:FinGen}
Let $H$ be a closed subgroup of $G$. Let $\mathbf{P}$ be a minimal parabolic subgroup of $\mathbf{G}$ and $P=\mathbf{P}(F)$. Suppose that $H$ has finitely many orbits on $G/P$.
Suppose that for any
irreducible smooth representation $\rho$ of $G$ and any character $\chi$ of $H$ we have
\begin{equation} \label{eq:FinMult}
\dim\operatorname{Hom}_{H}(\rho|_{H}, \chi) < \infty .
\end{equation}
Then the pair $(G,H)$ is of finite type.
\end{introtheorem}
\begin{introrem}$\,$
\begin{enumerate}
\item In fact, \Cref{thm:ExpFinGen} gives a more precise statement, which deduces finite generation of $(\operatorname{ind}_{H}^{G}\chi)^K$ from formula \eqref{eq:FinMult} for a specific character derived from $\chi$. One can strengthen other results of this paper in a similar way. However, this will require longer bookkeeping that we chose to avoid.
\item An incomplete version of \Cref{thm:ExpFinGen} appeared in \cite{AAG}, see \Cref{rem:AAG} for more details.
\item The condition \eqref{eq:FinMult} is proven in \cite{Del} and \cite[Theorem 5.1.5]{SV} for many cases, including arbitrary symmetric pairs over a field with characteristic different from 2.
\end{enumerate}
\end{introrem}}
\subsubsection{Representations generated by $\fz$-finite distributions}
In order to prove \Cref{prop:DistChar} we prove the following lemma:
\begin{introlem}[see \Cref{sec:DistChar}]\label{lem:adm}
\Rami{Let $(G,H)$ be a pair of finite type. Let $\chi$ be a character of $H$.
Consider the left action of $H$ on $G$ and let}
$\xi \in \cS^*(G)^{(H,\chi)}$ be an $(H,\chi)$-equivariant $\DimaA{\fz}$-finite distribution. Then \RamiB{both $\cS(G)*\xi$ and $\xi*\cS(G)$} are \DimaB{finite length} representations of $G$.
\end{introlem}
This lemma implies the following corollary:
\begin{introcor}\label{cor:adm}
Let $\xi \in \cS^*(G)$ be a $\fz$-finite distribution. Then $\RamiB{\cS(G)*\xi * \cS(G)}$ is a \DimaB{finite length} representation of $G\times G$.
\end{introcor}
\subsubsection{Fuzzy balls}
The theory of \DimaB{special} balls was developed in \cite{S} based on \cite{H,H2} for $\mathbf{G}=GL_n$. This theory implies that any irreducible representation is annihilated by a certain collection of elements of the Hecke algebra. In \Cref{subsec:PrelFuzzy,sec:balls} we adapt this statement to representations of a general reductive group (see \Cref{thm:FuzzyFin}).
\subsubsection{Relations between convolution and exponentiation}
The exponentiation maps an open neighborhood $U$ of zero in the Lie algebra of $G$ to $G$. This gives rise to a map of the algebra $\cS(U)$ of smooth compactly supported functions on $U$ (with respect to convolution) to the Hecke algebra of $G$. Unfortunately, this map is not a homomorphism. In \Cref{prop:FouExp} we show that it does behave as a homomorphism on certain pairs of functions.
\subsection{Idea of the proof}
\subsubsection{Sketch of the proof of \Cref{thm:main}}
We first analyze the representation generated by $\xi$ under the two-sided action of the Hecke algebra $\cH(G)$, which has \DimaB{finite length} by \Cref{cor:adm}.
Then we use the theory of \DimaB{special} balls, that produces, for any \DimaB{finite length} representation, a large collection of elements in the Hecke algebra $\cH(G)$ that annihilate it. Those elements will also annihilate $\xi$. In other words, for certain $e_B \in \cH(G)$ we have the following vanishing of convolutions
\begin{equation}\label{=:van}
e_B * \xi=0
\end{equation}
Next we want to linearize this information. For this we use the exponentiation map and \Cref{prop:FouExp}. Unfortunately, \Cref{prop:FouExp} is not directly applicable to the pair $(e_B,\xi)$. However, we use the vanishing \eqref{=:van}
to construct other vanishing convolutions, to which \Cref{prop:FouExp} is applicable. Thus we get that certain convolutions on the Lie algebra vanish. Those vanishings imply the desired restriction on the wave front set.
\subsubsection{Sketch of the proof of \Cref{thm:Dense}}
Let us assume for simplicity that $\chi_i$ are trivial and $H_i$ are unimodular.
To prove \Cref{thm:Dense} we first note that $\cI$ is dual to the space $\cS(G)_{H_1\times H_2}$ of $(H_1\times H_2)$-coinvariants of $\cS(G)$. We can decompose $\cS(G) $ to a direct sum with respect to Bernstein blocks. This leads to a decomposition of $\cS(G)_{H_1\times H_2}$. The finite type assumption implies that each summand is finitely generated over $\DimaA{\fz}$. Thus Artin-Rees Lemma and Hilbert's Nullstellensatz imply that the space of $\DimaA{\fz}$-finite functionals on those summands is dense in the space of arbitrary functionals.
For technical reasons, it is more convenient to work with unions of Bernstein blocks which correspond to compact open subgroups of $G$ than with individual Bernstein blocks.
\subsection{Future applications}
We believe that \Cref{cor:WF} can be used in order to prove the following analog of Harish-Chandra's density theorem \cite[Theorem 3.1]{HCQ}.
\begin{introconj}
Suppose that $G$ is quasisplit. Let $B$ be a Borel subgroup of $G$, $U$ be the nilradical of $B$, $\psi$ be a non-degenerate character of $U$, $H\subset G$ be a reductive spherical subgroup and $X=G/H$. Let $\cO$ be the union of all open $B$-orbits in $X$.
Then the sum of the one-dimensional spaces $\cS^*(Ux)^{U,\psi}$, where $x$ ranges over $\cO$, is dense in $\cS^*(X)^{U ,\psi }$.
\end{introconj}
In \DimaB{the subsequent} paper \cite{AGK} we prove a non-Archimedean analog of \cite{AG_ZR}, which we consider as a step towards this conjecture. Namely, we use \Cref{thm:main} in order to prove that under certain conditions on $H$ any $\fz$-finite distribution $\xi \in \cS^*(X)^{U,\psi }$ which is supported in the complement to $\cO$ vanishes.
In \DimaB{the subsequent} work \cite{AGM} we prove that the set of cotoric elements is open and dense in $G$ if $H_1,H_2$ are spherical subgroups. By
\Cref{thm:reg} this implies that $H_1\times H_2$- \DimaB{relative} characters are smooth almost everywhere.
In fact, in \cite{AGM} we show that the dimension of the variety
$$ \fS=\EitanA{\{(g,\alpha) \in G \times \fg^*\, \mid\, \alpha \text{ is nilpotent, } \langle \alpha, \fh_1 \rangle =0, \ \langle \alpha, \operatorname{Ad}^*(g)(\fh_2) \rangle =0\}
}\subset T^*G$$
equals the dimension of $G$. \Cref{thm:main} implies that the wavefront set of any $H_1\times H_2$- \DimaB{relative} character lies in $\fS$. Thus we obtain a certain version of holonomicity for \DimaB{relative} characters.
\subsection{Structure of the paper}
In \Cref{sec:prel} we give the necessary preliminaries
on the Bernstein center.
In \Cref{sec:DistChar} we prove \Cref{lem:adm} and deduce \Cref{prop:DistChar}.
In \Cref{sec:Dense} we prove \Cref{thm:Dense}.
In \Cref{sec:WF} we prove \Cref{thm:main}. In \Cref{subsec:WF,subsec:PrelFuzzy} we give the necessary preliminaries on wavefront set and on \DimaB{special} balls. In \Cref{sec:PfWF} we deduce \Cref{thm:main} from two main ingredients, which we prove in \Cref{subsec:PfEigenFinSupp,subsec:pfFouExp}.
In \Cref{subsec:PfEigenFinSupp} we prove the vanishing \eqref{=:van}. In \Cref{subsec:pfFouExp} we prove \Cref{prop:FouExp} that states that exponentiation commutes with convolution in certain cases.
In \Cref{sec:reg} we prove \Cref{thm:reg} and \Cref{lem:SymSmooth}, which allows us to specialize \Cref{thm:reg} to the symmetric pair case and thus obtain a generalization of \cite[\S 5]{RR}.
In \Cref{sec:balls} we prove the statements on \DimaB{special} balls that were formulated without proof in \Cref{subsec:PrelFuzzy}.
In \Cref{sec:FinGen} we prove \Cref{thm:FinGen}.
\section{Preliminaries}\label{sec:prel}
\subsection{Conventions}
The following conventions will be used throughout the paper.
\begin{itemize}
\item Fix a non-Archimedean local field $F$.
\item All the algebraic groups and algebraic
varieties that we consider are defined over $F$.
We will use bold letters, e.g. $\mathbf{G},\mathbf{X}$ to denote algebraic groups and varieties defined over $F$, and their non-bold versions to denote the $F$-points of these varieties, considered as $l$-spaces or $F$-analytic manifolds (in the sense of \cite{Ser}).
\item We will use capital Latin letters to denote $F$-analytic groups and algebraic groups, and the corresponding Gothic letters to denote their Lie algebras.
\item For an $l$-group $H$
\Rami{
\begin{itemize}
\item let $\cM(H)$ denote the category of smooth representations of $H.$
\item Let $\Delta_H$ denote the modular character of $H$, \DimaB{i.e. the quotient of the right Haar measure by the left one.}
\item If $H$ acts on an $l$-space $X$ and $x\in X$, we denote by $H_x$ the stabilizer of $x$.
\item If $V$ is a representation of $H$ we denote by $V_H$ the space of coinvariants
$$V_H:=V/({\operatorname{Span}}\{v-hv\, | \, v\in V, \, h\in H\}).$$
\end{itemize}
}
\item Fix a reductive group $\mathbf{G}$.
\item
\RamiB{ For a sheaf $\cF$ \DimaB{of $\mathbb{C}$-vector spaces }on an $l$-space $X$ we denote by $\cS(X,\cF)$ the space of compactly-supported sections and by $\cS^*(X,\cF)$ the dual space.}
\item \DimaB{For a compact open subgroup $K<G$ we denote by $\cH_K(G)$ the corresponding Hecke algebra.}
\end{itemize}
\subsection{Bernstein center}\label{subsec:Ber}
In this subsection we review the basics of the theory of the Bernstein center from \cite{BD}.
\begin{defn}
The \emph{Bernstein center} $\DimaA{\fz}:=\fz(G)$ is the algebra of endomorphisms of the identity functor of the category $\cM(G)$ of smooth representations of $G$.
\end{defn}
\begin{defn}\label{def:split}
Let $K<G$ be a compact open subgroup.
\RamiB{For $V\in \cM(G)$ denote by $V^{(K)}$ the subrepresentation generated by its $K$-fixed vectors. Denote also $$\cM_K(G):= \{V \in \cM(G)\, | V \text{ = }V^{(K)}\}$$} and $$\cM_{K} (G)^{\bot}:=
\{V \in \cM(G)\, | V^K = 0\}.$$
We have a functor $\mathcal{P}_K( V):=V^K$ from $\cM(G)$ to the category $\cM(\cH_K(G))$ of modules over $\cH_K(G)$.
We call $K$ a \emph{splitting subgroup} if the category $\cM(G)$ is the direct sum of the categories $\cM_{K}(G)$ and $\cM_{K}(G)^{\bot}$, and the functor $\mathcal{P}_K:\cM_{K}(G) \to \cM(\cH_K(G))$ is an equivalence of categories. \end{defn}
\EitanA{
\begin{remark}
Recall that an abelian category $\mathcal{A}$ is a direct sum of two abelian subcategories $\mathcal{B}$ and $\mathcal{C}$, if every object of $\mathcal{A}$ is isomorphic to a direct sum of an object in $\mathcal{B}$ and an object in $\mathcal{C}$, and, furthermore, that there are no non-trivial
morphisms
between objects of $\mathcal{B}$ and $\mathcal{C}$.
\end{remark}
}
\begin{thm}[\cite{BD}]\label{thm:Ber}
$\,$
\begin{enumerate}
\item The center of the algebra $\operatorname{End}_G(\cS(G))$ of $G$-endomorphisms of $\cS(G)$ is the algebra $\operatorname{End}_{G\times G}(\cS(G)) $ and the natural morphism from $\DimaA{\fz}$ to this center is an isomorphism.
\item \label{it:split} The set of splitting subgroups defines a basis at 1 for the topology of $G$.
\item For any splitting open compact subgroup $K\subset G$ we have
\begin{enumerate}
\item \label{1} The center $\fz(\cH_K(G))$ of the $K$-Hecke algebra is a direct summand of the Bernstein center $\DimaA{\fz}$. In particular, the natural map $\DimaA{\fz}\to \fz(\cH_K(G))$ is onto.
\item \label{2} \DimaA{The algebra} $\cH_K(G)$ is finitely generated \RamiB{as a module} over its center $\fz(\cH_K(G))$, and thus also over $\DimaA{\fz}$.
\item \label{3} The algebra $\fz(\cH_K(G))$ is finitely generated over $\mathbb{C}$ and has no nilpotents.
\end{enumerate}
\end{enumerate}
\end{thm}
\section{$\fz$-finite distributions and \DimaB{relative} characters}\label{sec:DistChar}
\subsection{\DimaB{Finite length} representations, $\fz$-finite distributions and proof of \Cref{lem:adm}}
\RamiB{We start with several criteria for admissibility of smooth representations. For these criteria we will need the following definition.
\begin{defn}
We say that a smooth representation $\pi$ of $G$ is
\begin{itemize}
\item \emph{locally finitely generated} if for any compact open subgroup $K\subset G$ the module $\pi^K$ is finitely generated over the Hecke algebra $\cH_K(G)$,
\item \emph{$\fz$-finite} if there exists an ideal $I \subset \fz$ of finite codimension that acts by zero on $\pi$.
\end{itemize}
\end{defn}
\begin{lem}\label{lem:ZfinAdm}
Let $\pi \in \cM(G)$ be a $\fz$-finite smooth representation. Assume that for any compact open subgroup $K\subset G$ the space $\pi^K$ is finite-dimensional.
\EitanA{Then $\pi$ has finite length.}
\end{lem}
\begin{proof}
It is enough to show that $\pi\subset \cM_K(G)$ for some splitting subgroup $K\subset G$. Let $I \subset \fz$ be an ideal of finite codimension that acts by zero on $\pi$.
For any splitting $K$ denote by $i_K \subset \fz$ the idempotent that acts by identity on $\cM_K(G)$ and by zero on $\cM_{K} (G)^{\bot}$. Let $j_K$ be the image of $i_K$ in $\fz/I$. Since $\fz/I$ is finite-dimensional there exists a splitting $K$ such that $j_K=j_{K'}$ for any splitting subgroup $K'\subset K$, thus $\pi^{K'}\subset\pi^{(K')}=\pi^{(K)}$ and thus, by \Cref{thm:Ber}\eqref{it:split}, $\pi=\pi^{(K)}$.
\end{proof}
\begin{cor}\label{cor:zLocFinGenAdm}
Any $\fz$-finite locally finitely generated $\pi \in \cM(G)$ \DimaB{has finite length}.
\end{cor}
\begin{proof}
By \Cref{lem:ZfinAdm} and \Cref{thm:Ber}\eqref{it:split} it is enough to show that $\pi^K$ is finite-dimensional for any splitting subgroup $K\subset G$. This follows from \Cref{thm:Ber}(\ref{1},\ref{2}).
\end{proof}
\begin{prop}\label{prop:LocFinAdm}
Let $\pi \in \cM(G)$ be locally finitely generated. Then
\begin{enumerate}[(i)]
\item \label{it:AdmQuo}any $\fz$-finite quotient $\rho$ of $\pi$ \DimaB{has finite length},
\item \label{it:AdmSub}any $\fz$-finite subrepresentation $\rho$ of $\widetilde{\pi}$ \DimaB{has finite length}.
\end{enumerate}
\end{prop}
\begin{proof}
Part \eqref{it:AdmQuo} follows from \Cref{cor:zLocFinGenAdm}. To prove part \eqref{it:AdmSub} denote by $\rho_{\bot} \subset \pi$ the joint kernel of all the functionals in $\rho$. Then $\rho \subset (\rho_{\bot})^{\bot}\cong\widetilde{\pi/\rho_{\bot}}$. Since $\pi/\rho_{\bot}$ \DimaB{has finite length} by part \eqref{it:AdmQuo}, we get that $\rho$ \DimaB{has finite length}.
\end{proof}
\begin{proof}[Proof of \Cref{lem:adm}]
$\,$
\begin{enumerate}[(i)]
\item \label{it:VKFin} \emph{Proof that $\cS(G)*\xi$ \DimaB{has finite length}.}\\
Consider the natural epimorphism $\cS(G)\twoheadrightarrow \cS(G)*\xi$. It is easy to see that there exists a character $\chi'$ of $H$ such that this epimorphism factors through $\operatorname{ind}_H^G(\chi')$. Since $(G,H)$ has finite type, $\operatorname{ind}_H^G(\chi')$ is locally finitely generated and thus, by \Cref{prop:LocFinAdm}\eqref{it:AdmQuo},
$\cS(G)*\xi$ \DimaB{has finite length}.
\item \emph{Proof that $\xi*\cS(G)$ \DimaB{has finite length}.}\\
Let $G$ act on itself by $g \cdot x = xg^{-1}$. This gives rise to an action of $G$ on $\cS^*(G)^{(H,\chi)}$. Let $\cF$ be the natural equivariant sheaf on $X= G/H$ such that $\cS^*(G)^{(H,\chi)}\cong\cS^*(X,\cF)$. Consider $\xi$ as an element in $\cS^*(X,\cF)$. Then
$$\xi*\cS(G) \hookrightarrow \widetilde{\cS(X,\cF)}=\widetilde{\operatorname{ind}_H^G(\chi'')}$$
for some character $\chi''$ of $H$, and \Cref{prop:LocFinAdm}\eqref{it:AdmSub} implies that
$\xi*\cS(G)$ \DimaB{has finite length}.
\end{enumerate}
\end{proof}
}
\subsection{\DimaB{Relative} characters and proof of \Cref{prop:DistChar}}$\,$
Let us recall the definition of \DimaB{relative} character.
\begin{defn}\label{def:SphChar}
Let $(\pi,V)$ be a \DimaB{finite length} representation of $G$. Let $(\tilde \pi,\tilde V) $ be its smooth contragredient. Let $H_1,H_2 \subset G$ be subgroups and $\chi_1,\chi_2$ be their characters. Let $l_{1}\in(V^*)^{H_1,\chi_1^{-1}}$ and $l_{2}\in(\tilde V^*)^{H_2,\chi_2^{-1}}$ be equivariant functionals. The \emph{\DimaB{relative} character} $\Xi^{\pi}_{l_1,l_2} \in \cH(G)^*$ is the generalized function on $G$ given by
\begin{equation}
\Xi^{\pi}_{l_1,l_2}(f) := \langle l_2 , \pi^*(f) l_1 \rangle.
\end{equation}
We refer to such \DimaB{relative} characters as $(H_1\times H_2, \chi_1\times \chi_2)$-\DimaB{relative} characters of $\pi$.
\end{defn}
Since we can identify $\cI$ with the space $(\cH(G)^*)^{H_{1}\times H_{2},\chi_1\times \chi_2}$ of invariant generalized functions, we can consider the \DimaB{relative} character as an element in $\cI$.
\begin{lem}[see \Cref{subsec:FinDual}]\label{lem:admFin}
Let $(G,H)$ be a pair of finite type. Let $V$ be a \DimaB{finite length} representation of $G$ and $\chi$ be a character of $H$. Then $\dim V_{(H,\chi)}< \infty$.
\end{lem}
\begin{proof}[Proof of \Cref{prop:DistChar}]
Let $\xi\in \cI$. Consider the pullback of $\xi$ to $G\times G$ under the multiplication map. This gives us a $G$-invariant bilinear form $B$ on $\cH(G)$. Let $\cL$ be its left kernel and $\cR$ be its right kernel, $M:=\cL\backslash \cH(G)$ and $N:=\cH(G)/\cR$. \Rami{We consider the right $G$-module $M$ as a left one using the anti-involution $g \mapsto g^{-1}$. We get} a non-degenerate pairing between $M$ and $N$. \Cref{lem:adm} implies that $M$ and $N$ \DimaB{have finite length} and thus $M=\tilde N$. We can consider the form $B$ as an element in $(M_{H_1,\Rami{\chi_1}}\otimes N_{H_2,\Rami{\chi_2}})^*$.
\Rami{Since the pairs $(G,H_i)$ are of finite type}, \Cref{lem:admFin} implies that $M_{H_1,\Rami{\chi_1}}$ and $N_{H_2,\Rami{\chi_2}}$ are finite-dimensional and thus
$$(M_{H_1,\chi_1}\otimes N_{H_2,\chi_2})^*\cong (M_{H_1,\chi_1})^*\otimes (N_{H_2,\chi_2})^*\cong (M^*)^{H_1,\chi_1^{\Rami{-1}}}\otimes (N^*)^{H_2,\chi_2^{\Rami{-1}}}.$$
Therefore $B$ defines an element in $(M^*)^{H_1,\chi_1^{\Rami{-1}}}\otimes (N^*)^{H_2,\chi_2^{\Rami{-1}}}$ which can be written in the form $B=\sum_{i=1}^k l_1^i \otimes l_2^i$.
Let $$l_1:=(l_1^1,\dots,l_1^k)\in ((M^k)^*)^{H_1,\chi_1^{\Rami{-1}}},\,l_2:=(l_2^1,\dots,l_2^k)\in ((N^k)^*)^{H_2,\chi_2^{\Rami{-1}}}.$$
It is easy to see that $$\xi=\sum_{i=1}^k \Xi^M_{l_1^i,l_2^i}=\Xi^{M^k}_{l_1,l_2}.$$
\end{proof}
\section{Density of $\fz$-finite distributions}\label{sec:Dense}
\setcounter{lemma}{0}
For the proof of \Cref{thm:Dense} we will need the following
\RamiB{lemma}.
\Dima{
\begin{lem}\label{lem:(K)}
Let $H<G$ be a closed subgroup and $\chi$ be a character of $H$.
Then there exists a character $\chi'$ of $H$ such that for any
$V\in \cM(G)$ and any splitting subgroup $K\subset G$ \DimaB{(see \Cref{def:split})} we have
$$(V^{(K)})_{(H,\chi)}\cong (\operatorname{ind}_H^G\chi')^K \otimes _{\cH_K(G)} V^K .$$
Here we consider the left ${\cH_K(G)}$-module $(\operatorname{ind}_{H}^G\chi')^K $ as a right one using the anti-involution $g \mapsto g^{-1}.$\end{lem}
\begin{proof}
First note that $V^{(K)} \cong \cH(G) \otimes _{\cH_K(G)} V^K,$ where the action of $G$ is given by the left action on $\cH(G)$.
\RamiB{
Let $H$ act on $G$ from the left and $G$ act on itself by $g\cdot x = x g^{-1}$. This gives an action of $G$ on $\cH(G)_{(H,\chi)}$. It is easy to see that we have an isomorphism $\cH(G)_{(H,\chi)}\cong (\operatorname{ind}_H^G\chi')$ for some character $\chi'$ of $H$.
Now}
$$(V^{(K)})_{(H,\chi)}\cong \cH(G)_{(H,\chi)} \otimes _{\cH_K(G)} V^K \cong \operatorname{ind}_H^G\chi' \otimes _{\cH_K(G)} V^K \cong (\operatorname{ind}_H^G\chi')^K \otimes _{\cH_K(G)} V^K.
$$
\end{proof}
}
\begin{lem}\label{lem:comalg}
Let $A$ be a unital commutative algebra finitely generated over $\mathbb{C}$. Let $M$ be a finitely generated $A$-module, and $M^*$ denote the space of all $\mathbb{C}$-linear functionals on $M$. Then the space of $A$-finite vectors in $M^*$ is dense in $M^*$.
\end{lem}
\begin{proof}
It is enough to show that the intersection of the kernels of all $A$-finite functionals on $M$ is zero. Let $v$ be an element of this intersection, $\fm\subset A$ be any maximal ideal and $i$ be any integer. Then $M/ \fm^iM$ is finite-dimensional over $\mathbb{C}$ and thus any functional on it defines an $A$-finite functional on $M$. \EitanA{Such a}
functional vanishes on $v$, and thus the image of $v$ in $M/ \fm^iM$ is zero. We conclude that $v$ belongs to the space $\bigcap_{\fm} \bigcap_{i} (\fm^iM)$, which is zero by the Artin-Rees lemma.
\end{proof}
\Dima{
\begin{proof}[Proof of \Cref{{thm:Dense}}]
Denote $X_i:=G/H_i$. For some line bundle $\cF_1$ on $X_1$ we have
$$\cI \cong \cS^*(X_1,\cF_1)^{(H_2,\chi_2)} \cong (\cS(X_1,\cF_1)_{(H_2,\chi_2^{-1})})^*.$$
Thus it is enough to show that for any $f\in \cS(X_1,\cF_1)_{(H_2,\chi_2^{-1})}$ such that $\langle \xi, f\rangle=0$ for any $\DimaA{\fz}$-finite distribution $\xi \in \cI,$ we have $f=0$. Let $f$ be like that. Let $K<G$ be a splitting open compact subgroup that fixes a representative of $f$ in $\cS(X_1,\cF_1)$. Note that $V:=\cS(X_1,\cF_1)=\operatorname{ind}_{H_{1}}^G\chi_1'$ for some character $\chi_1'$ of $H_1$. \RamiB{Since $K$ is a splitting subgroup, $V^{(K)}$ is a direct summand of $V$ as a $G$-representation. Hence} $M:=(V^{(K)})_{(H_2,\chi_2^{-1})}$ is a direct summand of $V_{(H_2,\chi_2^{-1})}$ as a $\DimaA{\fz}$-module which contains $f$. Therefore it is enough to show that the space of $\DimaA{\fz}$-finite vectors in $M^*$ (which by \Cref{thm:Ber}\eqref{1} equals the space of $\fz(\cH_K(G))$-finite vectors in $M^*$) is dense in $M^*$. By \Cref{lem:(K)}, \RamiC{there exists a character $\chi_2'$ of $H_2$ such that}
$$
M=(\operatorname{ind}_{H_2}^G\chi_{2}')^K \otimes _{\cH_K(G)} V^K = (\operatorname{ind}_{H_2}^G\chi_{2}')^K \otimes _{\cH_K}(\operatorname{ind}_{H_{1}}^G\chi_{1}')^K,
$$
where we consider the left ${\cH_K(G)}$-module $(\operatorname{ind}_{H_2}^G\chi_{2}')^K $ as a right one using the anti-involution $g \mapsto g^{-1}$.
The assumption implies that $(\operatorname{ind}_{H_i}^G\chi_{i}')^K $ are finitely generated over $\cH_K(G)$. By \Cref{thm:Ber}\eqref{2} this implies that they are also finitely generated over $\fz(\cH_K(G))$.
Thus $M$ is also finitely generated over $\fz(\cH_K(G))$.
The assertion follows now from \Cref{lem:comalg} in view of \Cref{thm:Ber}\eqref{3}.
\end{proof}}
\section{Wavefront set of $\fz$-finite distributions and the proof of \Cref{thm:main}}\label{sec:WF}
\setcounter{lemma}{0}
In this section we assume that $F$ has characteristic zero.
\subsection{Preliminaries on wave front set}\label{subsec:WF}
\RamiD{
In this section we give an overview of the theory of the wave front set as developed by D.~Heifetz \cite{Hef}, following L.~H\"ormander (see \cite[\S 8]{Hor}). }\RamiD{For simplicity we ignore here the difference between distributions and generalized functions.}
\begin{defn}\label{def:wf}$ $
\begin{enumerate}
\item
Let $V$ be a finite-dimensional vector space over $F$.
Let $f \in C^{\infty}(V^*)$ and $w_0 \in V^*$. We say that $f$ \emph{vanishes asymptotically in the direction of} $w_0$
if there exists $\rho \in \cS(V^*)$ with $\rho(w_0) \neq 0$ such that the function $\varphi \in C^\infty(V^* \times F)$ defined by $\varphi(w,\lambda):=f(\lambda w) \cdot \rho(w)$ is compactly supported.
\item
Let $U \subset V$ be an open set and $\xi \in \cS^*(U)$. Let $x_0 \in U$ and $w_0 \in V^*$.
We say that $\xi$ is \emph{smooth at} $(x_0,w_0)$ if there exists a compactly supported non-negative function $\rho \in \cS(V)$ with $\rho(x_0)\neq 0$ such that the Fourier transform $\cF^*(\rho \cdot \xi)$ vanishes asymptotically in the direction of
$w_0$.
\item
The complement in $T^*U$
of the set of smooth pairs $(x_0, w_0)$ of $\xi$ is called the
\emph{wave front set of} $\xi$ and denoted by $WF(\xi)$.
\item For a point $x\in U$ we denote $WF_x(\xi):=WF(\xi)\cap T^*_xU$.
\end{enumerate}
\end{defn}
\begin{remark}
Heifetz defined $WF_{\Lambda}(\xi )$ for any open subgroup $\Lambda$ of $F^{\times}$ of finite index. Our definition above is slightly different from the definition in \cite{Hef}. They relate by
\begin{equation*}
WF(\xi)\setminus(U \times \{0\})= WF_{F^{\times}}(\xi).
\end{equation*}
\end{remark}
\RamiD{
\begin{prop}
[{see \cite[Theorem 8.2.4]{Hor} and \cite[Theorem 2.8]{Hef}}]\label{prop:SubPull}
\label{submrtion}
Let $U \subset F^m$ and $V \subset F^n$ be open subsets. Suppose that $f: U \to V$ is an analytic submersion\footnote{\DimaB{\emph{i.e.} the differential of $f$ is surjective.}}. Then for any
$\xi \in \cS^*(V)$, we have $$WF(f^*(\xi)) \subset f^*(WF(\xi)):=\left\{ (x,v) \in T^*U \vert \, \exists w\in WF_{f(x)}(\xi) \text{ s.t. } d_{f(x)}^*f(w)=v \right \} .$$
\end{prop}
\begin{corollary} \label{iso}
Let $V, U \subset F^n$ be open subsets. Let $f: V \to U$ be an
analytic isomorphism. Then for any $\xi \in \cS^*(V)$ we have
$WF(f^*(\xi)) = f^*(WF(\xi))$.
\end{corollary}}
\begin{corollary}\label{cor:bundle}
Let $X$ be an $F$-analytic manifold\footnote{In the classical
sense of \cite{Ser} and not in the sense of rigid geometry or Berkovich geometry.}. We can define the wave front set of any distribution in
$\cS^*(X)$, as a subset of the cotangent bundle $T^*X$.
\end{corollary}
We will need the following standard properties of the wavefront set.
\begin{lem}\label{lem:WFSmooth}
Let $X$ be an $F$-analytic manifold and
$\xi \in \cS^*(X)$ be a distribution on it.
\begin{enumerate}
\item \label{it:smooth} Let $x\in X$. Assume that $WF_x(\xi)=\{0\}$. Then $\xi$ is smooth at $x$, i.e. there exists an analytic embedding $\varphi:U \hookrightarrow X$ from an open neighborhood $U$ of the origin in $F^n$ to $X$ such that
$\varphi(0)=x$ and $\varphi^*(\xi)$ coincides with a Haar measure.
\item \label{it:equiv} \cite[Theorem 4.1.5]{Aiz} Let an $F$-analytic group $H$ act analytically on $X$. Suppose that $\xi$ changes under the action of $H$ by some character of $H$. Then $$WF(\xi) \subset \{(x,v) \in T^*X|v(\fh x)=0 \},$$
where $\fh x$ denotes the image of the differential of the action map $h \mapsto hx$.
\end{enumerate}
\end{lem}
\subsection{Preliminaries on \DimaB{special} balls}\label{subsec:PrelFuzzy}
The notions of \DimaB{special} balls and admissible balls were defined in \cite{S} \DimaB{(under the name \emph{fuzzy balls})} for $\mathbf{G}=GL_n$. Here we generalize them to arbitrary reductive groups, using the standard theory of exponentiation.
\DimaA{
\begin{notn}
Let $\cO$ be the ring of integers in $F$. Fix a uniformizer $\varpi \in \cO$ and
denote $q:=|\varpi|^{-1}$.
\end{notn}
}
We start with the following standard lemma on exponentiation.
\RamiF{\begin{lemma}\label{lem:exp}
There exists a lattice (i.e. a free finitely-generated $\cO$-submodule of full rank) $\cL \subset \fg$, a compact open subgroup $K\subset G$ and an analytic diffeomorphism $\exp:\cL \to K$ such that
\begin{enumerate}
\item \label{it:hom} For any $x \in \cL, \, \exp|_{\cO \cdot x} $ is a group homomorphism.
\item \label{it:norm} $\frac{d}{dt}\exp(tx)|_{t=0}=x.$
\item \label{it:BCH} For any $X\in \varpi^{m}\cL, Y \in \varpi^{n}\cL$ we have $$\exp^{-1}(\exp(X)\exp(Y))-X-Y \in \varpi^{m+n}\cL.$$
\end{enumerate}
\end{lemma}
For completeness we will indicate the proof of this lemma in \Cref{subsec:BCH}.
\begin{remark}
The conditions \eqref{it:hom} and \eqref{it:norm} define the map $\exp$ uniquely. \end{remark}
}
We fix such an $\cL$. Fix also an additive character $\psi$ of $F$ that is trivial on $\cO$ \DimaB{and non-trivial on $\varpi^{-1}\cO$}.
\begin{defn} $\,$
\begin{itemize}
\item For a vector space $V$ over $F$ and a lattice $\Lambda\subset V$ denote $\Lambda^{\bot}:=\{y\in V^* \, \vert \, \forall x\in \Lambda,\, \, \langle x , y\rangle \in \cO\} \subset V^*$.
\RamiF{
\item For a set $B=a + \Lambda \subset \fg^*$ define a subset $K_B:=\exp(\Lambda^{\bot}) \subset G$. Define also a function $\eta_B$ on $K_B$ by $\eta_B(\exp(x))=\psi(\langle a , x \rangle)$. Note that $K_B$ and $\eta_B$ depend only on the set $B$ and not on its presentation as $a + \Lambda$.
\item An \emph{admissible ball} is a set $B \subset \fg^*$ of the form $a + \Lambda,$ where $\Lambda\supset \cL^{\bot}$ is a lattice such that
$K_B$ is a group and $\eta_B$ is its character.
Define $e_B \in \cH(G)$ to be the measure $\eta_B e_{K_B}$, where $e_{K_B}$ is the normalized Haar measure on $K_B$.
}
\item An admissible ball $B$ is called \emph{nilpotent} if it intersects the nilpotent cone $\cN\subset \fg^*$.
\item For an element $x \in \fg^*$ we define $|x|:=\min\{|\alpha| \, \vert x \in \alpha \cL^{\bot}, \alpha \in F\}$.
\item A \DimaB{special} ball of radius $r\geq 1$ is a set $B \subset \fg^*$ of the form $c + \alpha \cL^{\bot}$, where $\alpha \in F, \, c \in \fg^*, |\alpha|=r$ and either $|c| =r^2$ or $|c\DimaA{\varpi}|=r^2$. \RamiC{It is easy to see that any \DimaB{special} ball is an admissible ball.}
\item For $Y\in \fg^*$ we denote by $B(Y)$ the unique \DimaB{special} ball containing $Y$ \DimaB{(see \Cref{lem:BallDisj})}.
\item Denote the set of all \DimaB{special} balls by $\mathfrak{F}$.
\item $\cL_n:=\DimaA{\varpi}^{n}\cL,\, K_n:=\exp(\cL_n)$ for $n \ge 0$.
\end{itemize}
\end{defn}
In \Cref{sec:balls} we give more details about admissible and \DimaB{special} balls and prove the following fundamental statements.
\begin{thm}\label{thm:fuzzy decomp}
Let $(\pi,V)$ be a smooth representation. Then $\{\pi(e_B)\}_{B\in \mathfrak{F}}$ form a full family of mutually orthogonal projectors, i.e.
\begin{enumerate}
\item \label{thm:fuzzy decomp:orthproj} for any $B,C \in \mathfrak{F}$ we have $$ e_{B}e_{C}= \begin{cases}e_B & B=C, \\
0 & B\neq C. \\
\end{cases}$$
\item \label{thm:fuzzy decomp:full} $$ \displaystyle V = \bigoplus_{B\in \mathfrak{F}} \pi(e_B)V.$$
\end{enumerate}
\end{thm}
\begin{thm}\label{thm:FuzzyFin}
For any finitely generated smooth representation $\pi$ there exist only finitely many non-nilpotent \DimaB{special} balls $B$ such that $\pi(e_B)\neq 0$.
\end{thm}
\begin{lemma}\label{lem:FouFuzz}
Let $B$ be an admissible ball and let $1_B\in \cS(\fg^*)$ denote the characteristic function of $B$. Let $\cF(1_B)$ denote the Fourier transform of $1_B$\DimaB{, considered as a measure on $\fg$}. Then $\cF(1_B)=\exp^{*}(e_B )$.
\end{lemma}
\subsection{Proof of \Cref{thm:main}}\label{sec:PfWF}
We will need some preparations.
\begin{prop}[\RamiC{see \Cref{subsec:PfEigenFinSupp}}]\label{prop:EigenFinSupp}
Let $\xi \in \cS^*(G)$ be a $\fz$-finite distribution. Then there exists a compact subset $D\subset \g^*$ such that for any non-nilpotent \DimaB{special} ball $B\subset \g^*\setminus D$ we have $e_{B}*\xi= 0$.
\end{prop}
The following is a straightforward computation.
\begin{lemma}
Let $B:=a+ \alpha \RamiE{\cL^{\bot}}$ be an admissible ball\EitanA{, with $|\alpha|^{2}>|a|$}. Let $\DimaA{S}$ be the set of all \DimaB{special} balls contained in $B$. Then $e_B=\sum_{C \in \DimaA{S}}e_C$.
\end{lemma}
\RamiC{The last two statements give us the following corollary.}
\begin{cor}\label{cor:Bxi0}
Let $\xi \in \cS^*(G)$ be a $\fz$-finite distribution. Then there exists a compact subset $D\subset \g^*$ s.t. for any non-nilpotent admissible ball of the form $B:=a+ \alpha\RamiE{\cL^{\bot}}\subset \g^*\setminus D$\EitanA{, with $|\alpha|^{2}>|a|$,}
we have $e_{B}*\xi= 0$.
\end{cor}
\begin{prop}[\RamiC{See \Cref{subsec:pfFouExp}}]\label{prop:FouExp}
Let \EitanA{$n,l>0$ and let} $B=a+ \RamiE{\varpi^{-n}\cL^{\bot}}$ be an admissible ball. Assume that $|a|=\RamiE{q^{n+l}}$. Then for any $\xi \in \cS^*(\exp(\DimaA{\varpi}^l\cL))$ we have $$\exp^*(e_B *\xi)=\exp^*(e_B)*\exp^*(\xi).$$ \end{prop}
\begin{proof}[Proof of \Cref{thm:main}]
Since any shift of $\xi$ is also $\fz$-finite, we can assume that $x$ is the unit element $1\in G$. Thus it is enough to show that $WF_0(\exp^*(\xi))\subset \cN.$
\RamiE{Let $Y \in \fg^*$ be non-nilpotent. Then there exists $m$ such that for all big enough $\alpha \in F$ the set $\alpha (Y+\DimaA{\varpi}^m\cL^{\bot})$ is a non-nilpotent admissible ball. Let $B:=Y+\DimaA{\varpi}^m\cL^{\bot}$.
There exists $l$ such that $\varpi^{l-m}Y\in \cL^{\bot}$.
Let $\varphi$ be the characteristic function of $\DimaA{\varpi}^{l}\cL$ and $\mu$ be the characteristic function of $K_{l}:=\exp(\DimaA{\varpi}^{l}\cL)$. Let $\zeta:=\mu\xi$ and $\eta:=\varphi\, \exp^*(\xi)=\exp^*(\zeta)$. We have to show that for all big enough $\alpha \in F$ we have $\cF(\eta)|_{\alpha B}=0$.
By \Cref{cor:Bxi0}, for all big enough $\alpha \in F$ we have $$\RamiC{e_{\alpha B} * \zeta =0}.$$}
By \Cref{prop:FouExp} for all big enough $\alpha \in F$ we have: $$\RamiC{\exp^{*}(e_{\alpha B} * \zeta )}=\exp^{*}(e_{\alpha B} )*\eta =\cF^{-1}(\cF(\exp^{*}(e_{\alpha B} ))\cF(\eta)).$$
\Cref{lem:FouFuzz} implies now that $\cF(\eta)|_{\alpha B}=0$ for all big enough $\alpha \in F$.
\end{proof}
\subsection{Proof of \Cref{prop:EigenFinSupp}}\label{subsec:PfEigenFinSupp}
\begin{proof}[Proof of \Cref{prop:EigenFinSupp}]
Let $\pi:=\cS(G)*\xi*\cS(G)$. By \Cref{cor:adm}, $\pi$ is a \DimaB{finite length} representation of $G\times G$ and thus, by \Cref{thm:FuzzyFin}, there exists a finite set $X$ of \DimaB{special} balls of $G\times G$ such that $\pi(e_{\cB})=0$ for a non-nilpotent $\cB \notin X$. Let $D$ be the union of the projections of the balls in $X$ to the first coordinate. It is easy to see that for any non-nilpotent \DimaB{special} ball $B\subset\g^* \setminus D$ and any \DimaB{special} ball $C, \, B\times C \notin X$ and thus for any $f \in \cS(G)$ we have
$$e_{B}*\xi*f*e_{C}=
e_{B}*e_{B}*\xi*f*e_{C}=\pi(e_{B\times C})(e_{B}*\xi*f)=0$$
By \Cref{thm:fuzzy decomp}, $$e_{B}*\xi*f= \sum_{C,C'\in \mathfrak{F}} e_{C'}*e_{B}*\xi*f*e_C= \sum_{C\in \mathfrak{F}} e_{B}*\xi*f*e_C,$$
where the sum goes over all \DimaB{special} balls in $\g^*$.
This implies $e_{B}*\xi*f=0$. Since this holds for any $f \in \cS(G), \, e_{B}*\xi$ vanishes.
\end{proof}
\subsection{Proof of \Cref{prop:FouExp}}\label{subsec:pfFouExp}
$\,$
\RamiF{ From standard properties of the exponential map (see \Cref{lem:exp}\eqref{it:BCH}) we obtain the following corollary.}
\begin{cor}\label{cor:exp}$\,$
\begin{enumerate}[(i)]
\item \label{it:exp}
For any natural number $n$ and any $a \in \cL$ we have
$$ \exp(a+\cL_n)=\exp(a)\exp(\cL_n)=\exp(a)K_{n}$$
\item \label{it:expL}
Let $e_{K_0}$ be the Haar probability measure on $K_0$. Then $\exp^*(e_{K_0})$ is the Haar probability measure on $\cL$.
\item \label{it:expbar}
Let $n$ and $l$ be natural numbers. By \eqref{it:exp} we can define $\overline{\exp}:\cL_0/\cL_{n+l}\to K_0/K_{n+l}$. Let $\alpha,\beta$ be measures on $K_0/K_{n+l}$ such that $\alpha$ is supported on $K_l/K_{n+l}$ and $\beta$ is supported on $K_n/K_{n+l}$. Then $$\overline{\exp}^*(\alpha *\beta)=\overline{\exp}^*(\alpha)*\overline{\exp}^*(\beta)$$
\end{enumerate}
\end{cor}
\begin{proof}[Proof of \Cref{prop:FouExp}]$\,$
\begin{enumerate}[Step 1.]
\item \label{FouExp:1} Proof for the case $l=0$.\\
In this case for any $\DimaB{b}\in \cL_0$ we have $$(e_{B} * \xi)|_{\exp(\DimaB{b})K_n}=(e_{K_n} * \xi)|_{\exp(\DimaB{b})K_n}=\left(\int_{\exp(\DimaB{b})K_n}\xi\right)(\#K_0/K_n) e_{K_0}|_{\exp(\DimaB{b})K_n}.$$
Also,
$$(\exp^*(e_B)*\exp^*(\xi))|_{\DimaB{b}+\cL_n}= \left(\int_{\DimaB{b}+\cL_n}\exp^*(\xi)\right)(\#\cL_0/\cL_n) \mu_{\cL_0}|_{\DimaB{b}+\cL_n},$$
where $\mu_{\cL_0}$ is the Haar probability measure on $\cL_0$.
By \Cref{cor:exp}\eqref{it:exp}, $$\exp^{-1}({\exp(\DimaB{b})K_n})=\DimaB{b}+\cL_n \text{ and } \int_{\exp(\DimaB{b})K_n}\xi=\int_{\DimaB{b}+\cL_n}\exp^*(\xi).$$
Thus, by \Cref{cor:exp}\eqref{it:expL}, $$\exp^*(e_{K_0}|_{\exp(\DimaB{b})K_n})=\mu_{\cL_0}|_{\DimaB{b}+\cL_n}.$$
We get
\begin{multline*}
\exp^*(e_B*\xi)|_{(\DimaB{b}+\cL_n)}=\exp^*((e_B*\xi)|_{(\exp(\DimaB{b})K_n)})=\\
=\exp^*\left(\left(\int_{\exp(\DimaB{b})K_n}\xi\right)(\#K_0/K_n) e_{K_0}|_{\exp(\DimaB{b})K_n}\right)=\\=\left(\int_{\exp(\DimaB{b})K_n}\xi\right)(\#K_0/K_n)\exp^*(e_{K_0}|_{\exp(\DimaB{b})K_n})= \\ =\left(\int_{\exp(\DimaB{b})K_n}\xi\right)(\#K_0/K_n)\mu_{\cL_0}|_{\DimaB{b}+\cL_n}= \left(\int_{\DimaB{b}+\cL_n}\exp^*(\xi)\right)(\#\cL_0/\cL_n)\mu_{\cL_0}|_{\DimaB{b}+\cL_n}=\\=(\exp^*(e_B)*\exp^*(\xi))|_{\DimaB{b}+\cL_n}.
\end{multline*}
\item Proof for the general case.\\ Denote by $p_{\cL}$ and $p_K$ the natural projections $\cL_0\to \cL_0/\cL_{n+l}$ and $K_0\to K_0/K_{n+l}$. There exist measures \RamiC{$\beta$ and $\alpha$ on $ K_0/K_{n+l}$ such that $e_{K_{n+l}}*\xi = p_K^*(\beta)$ and $e_B=p_K^*(\alpha)$. Clearly $\operatorname{Supp} (\beta)\subset K_l/K_{n+l}$ and $\operatorname{Supp} (\alpha)\subset K_n/K_{n+l}$.
We have
\begin{equation}\label{FouExp:=1}
\exp^*(e_B*\xi)=\exp^*(e_B*e_{K_{n+l}}*\xi)=\exp^*(p_K^*(\alpha)*p_K^*(\beta))=
\exp^*(p_K^*(\alpha*\beta)).
\end{equation}
From the commutative diagram
\begin{equation}\label{diag}\xymatrix{\parbox{20pt}{$\cL_0$}\ar@{->}^{\exp}[r]\ar@{->}^{p_{\cL}}[d]&
\parbox{20pt}{$K_0$}\ar@{->}^{p_K}[d]\\
\parbox{40pt}{$\cL_0/\cL_{n+l}$}\ar@{->}^{\overline{\exp}}[r] &
\parbox{40pt}{$K_0/K_{n+l}$}}
\end{equation}
we have
\begin{equation}\label{FouExp:=2}
\exp^*(p_K^*(\alpha*\beta))=p_{\cL}^*(\overline{\exp}^*(\alpha*\beta)).
\end{equation}
By \Cref{cor:exp}\eqref{it:expbar}
we have
\begin{equation}\label{FouExp:=3}
p_{\cL}^*(\overline{\exp}^*(\alpha*\beta))= p_{\cL}^*(\overline{\exp}^*(\alpha) * \overline{\exp}^*(\beta)) =
p_{\cL}^*(\overline{\exp}^*(\alpha))* p_{\cL}^*(\overline{\exp}^*(\beta)).
\end{equation}
Applying the diagram \eqref{diag} again we get
\begin{equation}\label{FouExp:=4}
p_{\cL}^*(\overline{\exp}^*(\alpha))* p_{\cL}^*(\overline{\exp}^*(\beta))=\exp^*(p_K^*(\alpha))* \exp^*(p_K^*(\beta))=\exp^*(e_B)*\exp^*(e_{K_{n+l}}*\xi).
\end{equation}
Applying Step~\ref{FouExp:1} twice we have
\begin{multline}\label{FouExp:=5}
\exp^*(e_B)*\exp^*(e_{K_{n+l}}*\xi)
=\exp^*(e_B)*\exp^*(e_{K_{n+l}})*\exp^*(\xi)=\\=\exp^*(e_{K_{n+l}})*\exp^*(e_B)*\exp^*(\xi)=\exp^*(e_{K_{n+l}}*e_B)*\exp^*(\xi)=\exp^*(e_B)*\exp^*(\xi).
\end{multline}
Combining \eqref{FouExp:=1} and \eqref{FouExp:=2}--\eqref{FouExp:=5} we get $\exp^*(e_B*\xi)=\exp^*(e_B)*\exp^*(\xi)$.
}
\end{enumerate}
\end{proof}
\subsection{Regularity of invariant $\fz$-finite distributions at cotoric elements and proof of \Cref{thm:reg}}\label{sec:reg}
\setcounter{lemma}{0}
In this section we prove a generalization of \Cref{thm:reg}. We will need the following notion.
\begin{defn}
Let $\mathbf{H_1},\mathbf{H_2} < \mathbf{G}$ be algebraic subgroups. We say that an element $g\in G$ is $H_1 \times H_2$-\emph{cotoric} if the conormal space to $H_1gH_2$ at $g$ intersects trivially the nilpotent cone of $\fg^*$.
\end{defn}
\begin{lem}\label{lem:SymSmooth}
\DimaB{Let $\mathbf{H}$ be an open subgroup of the group of fixed points of an involution $\theta$ of $\mathbf{G}$.}
Let $g\in G$ be an $\mathbf{H} \times \mathbf{H}$-regular semisimple element, i.e. an element such that the double coset $\mathbf{H} g \mathbf{H}$ is closed and of maximal dimension. Then $g$ is $\mathbf{H} \times \mathbf{H}$-cotoric.
In particular, the set of cotoric elements contains an open dense subset of $\mathbf{G}$.
\end{lem}
\begin{proof}
Let $\sigma$ be the anti-involution given by $\sigma(g):=\theta(g^{-1})$. Let $(\mathbf{H}\times \mathbf{H})_g$ be the stabilizer of $g$ with respect to the two-sided action of $ \mathbf{H} \times \mathbf{H}$, and $N_{\mathbf{H}g \mathbf{H},g}^{\mathbf{G}}$ be the normal space to the double coset $\mathbf{H}g \mathbf{H}$ at $g$ in $\mathbf{G}$. Since $g$ is $\mathbf{H} \times \mathbf{H}$-regular semisimple, the Luna slice theorem (see e.g. \cite[Theorem 5.4]{Dre}) implies that $(\mathbf{H}\times \mathbf{H})_g$ acts trivially on $N_{\mathbf{H}g \mathbf{H},g}^{\mathbf{G}}$.
Let $x=g\sigma(g)$. \DimaB{By \cite[Proposition 7.2.1(ii)]{AG_HC}, the pair consisting of the group $(\mathbf{H}\times \mathbf{H})_g$ and its action on $N_{\mathbf{H}g \mathbf{H},g}^{\mathbf{G}}$ is isomorphic to the pair consisting of the centralizer $\mathbf{H}_x$ and its adjoint action on the centralizer $\mathfrak{g}_x^{\sigma}$ of $x$ in the space $\mathfrak{g}^{\sigma}$ of fixed points of $\sigma$ in $\fg$. Since $g$ is $\mathbf{H}\times\mathbf{H}$-semisimple, \cite[Proposition 7.2.1(i)]{AG_HC} shows that $x$ is a semisimple element of $\mathbf{G}$. Thus $\mathbf{G}_x$ is a reductive group.}
Now, assume that $g$ is not cotoric. Then, using a non-degenerate $\theta$-invariant and $\mathbf{G}$-invariant quadratic form on $\fg$ \DimaB{(see e.g. \cite[Lemma 7.1.9]{AG_HC})}, we can find a nilpotent element $\alpha \in \mathfrak{g}_x^{\sigma}$. Using the Jacobson-Morozov theorem for symmetric pairs (see e.g. \cite[Lemma 7.1.11]{AG_HC}), for some $t\neq 1\in F$ we can find an element $h\in H_x$ such that $ad(h)(\alpha)=t\alpha$. This contradicts the fact that $H_x$ acts trivially on $\mathfrak{g}_x^{\sigma}$.
\end{proof}
In view of \Cref{lem:WFSmooth}, \Cref{thm:main} gives us the following corollary.
\begin{cor}\label{cor:sm}
Let $\mathbf{H_1},\mathbf{H_2} < \mathbf{G}$ be algebraic subgroups. Let $\chi_i$ be characters of $H_i$, and let $\xi$ be an $(H_1\times H_2,\chi_1\times \chi_2)$-equivariant $\DimaA{\fz}$-finite distribution. Let $x\in G$ be an $H_1 \times H_2$-\emph{cotoric} element. Then $\xi$ is smooth at $x$.
\end{cor}
In view of \Cref{lem:SymSmooth} this corollary implies \Cref{thm:reg}.
\appendix
\section{Fuzzy balls (joint with Alexander Kemarsky) }\label{sec:balls}
\setcounter{lemma}{0}
In this section we prove the statements on admissible balls and \DimaB{special} balls formulated in \Cref{subsec:PrelFuzzy}. \RamiC{We} follow \cite[\S 4 and \S 5.1]{S}. Throughout the section we assume that $F$ has characteristic zero.
\RamiF{
\subsection{The exponential map and proof of \Cref{lem:exp}}\label{subsec:BCH}
It is enough to prove \Cref{lem:exp} for $\mathbf{G}=\operatorname{GL}_n$. Consider the power series $$\mathcal{E}xp(X):=\sum_{k=0}^{\infty}a_k X^k:=\sum_{k=0}^{\infty} X^k/k! \text{ and } \mathcal{L}og(X):=\sum_{k=0}^{\infty}b_k (X-1)^k:=\sum_{k=1}^{\infty} (-1)^{k-1}(X-1)^k/k,$$
where $X\in \operatorname{Mat}_{n\times n}(F)$. The Baker-Campbell-Hausdorff formula is the following equality of power series
\begin{multline} \label{=BCH}
\mathcal{L}og(\mathcal{E}xp(X+Y))-X-Y=\\=\sum_{n=1}^{\infty}\sum_{|i|+|j|= n}\left(c_{ij} ad^{i_1}_X \circ ad^{j_1}_Y \cdots ad^{i_k}_X \circ ad^{j_k}_Y(X) +d_{ij} ad^{i_1}_Y \circ ad^{j_1}_X \cdots ad^{i_k}_Y \circ ad^{j_k}_X(Y) \right),
\end{multline}
where $i=(i_1,\dots,i_{k})$ and $j=(j_1,\dots,j_{k})$ are multi-indices and $c_{ij},d_{ij}\in \mathbb{Q}\subset F$ are certain constants. Let \EitanA{$\alpha_n:=\max(|a_n|,|b_n|,\max_{|i|+|j|=n}(|c_{ij}|),\max_{|i|+|j|=n}(|d_{ij}|))$}. It is well known that for some constant $C>1$ and all $n\geq 1$ we have $\alpha_n\leq C^n$. Define $\cL:=\{X\in \operatorname{Mat}_{n\times n}(F) \, \vert \, |X_{ij}|<C^{-1}\}$. It is easy to see that the power series $\mathcal{E}xp$ converges on $\cL$. We define $\exp$ to be the corresponding analytic map, and $K$ to be $\exp(\cL)$. Finally, it follows from \eqref{=BCH} that $(\cL,K,\exp)$ satisfy the requirements \eqref{it:hom}-\eqref{it:BCH}.
$\Box$
}
\subsection{Proof of \Cref{thm:fuzzy decomp} }\label{subsec:ballsDecomp}
We start with the following easy lemma.
\begin{lemma}\label{lem:BallDisj} Let $\cB$ denote the collection of all \DimaB{special} balls. Then $\fg^*$ decomposes as a disjoint union
$$\mathfrak{g}^* = \coprod_{B\in\cB} B.$$
\end{lemma}
\begin{proof}
Let $X \in \mathfrak{g}^*$. If $|X| \le 1$, then $X \in \RamiE{\cL_1^\bot}$. If $|X| = q^m > 1$, then
$X \in \RamiE{X+\cL^{\bot}_{[\frac{m}{2}]}} $. Thus $\mathfrak{g}^* = \bigcup B$. Let $\RamiE{B_1 = X+\cL_m^{\bot},\,
B_2 = Y+\cL_n^{\bot}}$ be \DimaB{special} balls and suppose that $z = X + l_1 = Y+l_2 \in B_1 \cap B_2$.
Then $|z| = |X| = |Y|$, so $m=n$. Let $Y+l' \in B_2$. We can rewrite this element as
$$Y+l' = Y+l_2 + l'-l_2 = z + (l'-l_2) \in X + \cL_m^{\perp} = B_1.$$
We have obtained $B_2 \subset B_1$ and clearly by the same argument applied to $B_1$, we obtain
$B_1 \subset B_2$. Therefore, $B_1 = B_2$.
\end{proof}
\RamiF{Let $(\pi,V)$ be a smooth representation.
The following lemma is straightforward.}
\begin{lemma}\label{lem:proj}
For an admissible ball $B$, the image of $\pi(e_B)$ consists of $(\eta_B^{-1},K_B)$-equivariant vectors in $V$, i.e.
$$\pi(e_B)V = \left\{ v \in V: \pi(k)v = \eta_B^{-1}(k) v \;\; \forall k\in K_B \right\}.
$$ Moreover, $e_B$ is a projection, that is $e_B=e_B^2$.
\end{lemma}
\begin{lemma}\label{lem:orthproj}
Let $B_1, B_2$ be distinct \DimaB{special} balls. Then $e_{B_1} e_{B_2} = 0$.
\end{lemma}
\begin{proof}
Suppose $B_1 \ne B_2$ and $e_{B_1}e_{B_2} \ne 0$. Then for any $a \in K :=
K_{B_1} \cap K_{B_2}$ we have $$\eta_{B_1}(a^{-1})e_{B_1} e_{B_2}=
ae_{B_1} e_{B_2} = e_{B_1}a e_{B_2} = \eta_{B_2}(a^{-1})
e_{B_1}e_{B_2}. $$
We get $\eta_{B_1}|_K = \eta_{B_2}|_K$. Now, if $K_{B_1} = K_{B_2}$, then
$B_1 = B_2$, a contradiction. Otherwise we can assume $K_{B_1} \subset K_{B_2}$,
but then the character $\eta_{B_1}$ is a restriction of $\eta_{B_2}$ from the bigger
group $K_{B_2}$, thus $B_1$ and $B_2$ intersect and thus by \Cref{lem:BallDisj} they coincide, which again is a contradiction.
\end{proof}
\begin{lemma}\label{lem:BigBallVan}
Let $v \in V^{K_N}$ and $B$ be a \DimaB{special} ball with radius bigger
than $q^N$. Then $\pi(e_B)v = 0$.
\end{lemma}
\begin{proof}
$$\pi(e_B)v = \int_{K_B} \eta_B(k) \pi(k)v dk = \left(\int_{K_B} \eta_B(k) dk \right)v = 0.$$
\end{proof}
\begin{lemma}\label{lem:FuzzyNonVan}
For every $0 \neq v \in V$, there exists a \DimaB{special} ball $B$, such that
$\pi(e_B)v \neq 0$.
\end{lemma}
\begin{proof}
Let $0 \ne v \in V$. If $v \in V^{K_0}$ then $v \in V(B)$ for $B = 0 +\cL^{\bot}$.\\
Suppose $v \not \in V^{K_0}$. Let $n \ge 1$ be the minimal $n$ such that
$v \in V^{K_{2n}},
v \not \in V^{K_{2n-2}}$. Thus the group $A = K_n / K_{2n}$ acts on the finite dimensional
space $W$ generated in $V$ by the orbit $K_n v$. Note that $K_n/K_{2n} \simeq \cL_n/ \cL_{2n}$ and
by \RamiF{standard properties of the exponential map (see \Cref{lem:exp}\eqref{it:BCH})}
the group $\cL_n/\cL_{2n}$ is commutative.
Thus, the group $A$ is a commutative finite group.
The space $W$ can be decomposed as a direct sum of one-dimensional characters of $A$. For a character $\chi$ of $A$ and $w \in W$,
let $w(\chi) \in W$ be the projection of $w$ to the $\chi$-isotypic component of $W$.
Then
$v =\sum_{\chi \in A^{*}} v(\chi)$, and thus there exists a character $\chi$ of $A$ with $v(\chi)\neq 0$ and $\chi|_{K_{2n-2}/K_{2n}}\neq 1$. Lift $\chi$ to a character $\eta$ of $K_n$ and note that there exists a unique \DimaB{special} ball $B$ with $K_B=K_{n}$ and $\eta_B=\eta$. Then $\pi(e_B)v=v(\chi)\neq 0$.
\end{proof}
\begin{proof}[Proof of \Cref{thm:fuzzy decomp}]
Part \eqref{thm:fuzzy decomp:orthproj} follows from \Cref{lem:proj,lem:orthproj}. To prove part \eqref{thm:fuzzy decomp:full} take $0 \neq v \in V$, and let $w = \sum_{\DimaA{ B \in \mathfrak{F}}} \pi(e_B)v$. By \Cref{lem:BigBallVan} we know that the
sum is well-defined. By \Cref{lem:proj,lem:orthproj}, $\pi(e_B)(v-w) = 0$ for all \DimaB{special} balls $B$. By \Cref{lem:FuzzyNonVan}, it
follows that $v = w$.
\end{proof}
\subsection{Proof of \Cref{thm:FuzzyFin}}\label{subsec:ballsFin}
\begin{defn} A \emph{\DimaB{special} set} is a finite union of \DimaB{special} balls. For a \DimaB{special} set $T = \cup B_i$, denote
$e_T := \sum e_{B_i}$.
\end{defn}
\begin{lemma}\label {lem: fuzzy adjoint}
Let $T$ and $S$ be two \DimaB{special} sets in $\mathfrak{g}^*$ and let $g\in G$. Let $(\pi,V)$ be a smooth representation of $G$. Suppose that $\pi(e_T) \pi(g) \pi(e_S) \ne 0$. Then
$ad(g)S \cap T \ne \emptyset$.
\end{lemma}
\begin{proof}
By linearity we reduce to the case where $$T=B=X + \cL_m^{\bot} \text{ and } S=B'=X' + \cL_{m'}^{\bot}$$ are \DimaB{special} balls. Let
$$K= K_B=\exp(\cL_m), \,K' = K_{B'}=\exp(\cL_{m'}),\,\eta = \eta_B, \eta' = \eta_{B'}$$ We first check that
$$\eta(a) = \eta'(g^{-1}ag) $$ for all $a \in K \cap gK'g^{-1}$. Indeed, let
$v \in V$ be such that $\pi(e_B) \pi(g) \pi(e_B')(v) \ne 0$. Then for all $a \in K \cap gK'g^{-1}$
we have
\begin{equation}\label{eq:1}
\eta(a)^{-1} \pi(e_B) \pi(g) \pi(e_{B'})(v) = \pi(a) \pi(e_B )\pi(g) \pi(e_{B'})(v) =
\pi(e_B) \pi(a) \pi(g) \pi(e_{B'})(v),
\end{equation}
since $\pi(e_B) \pi(a) = \pi(a) \pi(e_B)$ for $a \in K$. On the other hand,
\begin{equation}\label{eq:2}
\pi(e_B) \pi(a) \pi(g) \pi(e_{B'})(v) = \pi(e_B) \pi(g) \pi(g^{-1}ag) \pi(e_{B'})(v) =
\eta'^{-1}(g^{-1}ag) \pi(e_B)\pi(g) \pi(e_{B'})(v).
\end{equation}
Combining equations \eqref{eq:1} and \eqref{eq:2}, we obtain $\eta(a)
= \eta'(g^{-1}ag)$ for every $a \in K \cap gK'g^{-1}$.
Therefore, $$ \psi_0\left(X(\log(a))\right) = \psi_0
\left(X'(Ad(g^{-1})\log(a))\right) = \psi_0\left( (Ad(g)X')(\log(a)) \right). $$ \\
We see that for $b \in \cL_m \cap Ad(g)\cL_{m'}$,
$$(Ad(g)X' - X)(b) \in \mathcal{O}.$$ Thus,
$$ Ad(g)X' - X \in (\cL_m \cap Ad(g)\cL_{m'})^{\perp} = \cL_m^{\perp} +
Ad(g)\cL_{m'}^{\perp},$$
that is, there exist $u \in \cL_m^{\perp}$ and $v \in \cL_{m'}^{\perp}$ such that
$$ Ad(g)X' - X = u + Ad(g)v.$$
Hence $Ad(g)(X'-v) = X + u \in X +\cL_m^{\bot}=B$.
\end{proof}
The following Lemma due to Howe plays a central role in the proof of \Cref{thm:FuzzyFin}.
\begin{lemma}[{\cite[Lemma 12.2]{HCDBS}}]\label{lem:comp}
Let $S \subset \mathfrak{g}$ be compact. There exists a compact subset $S_1$ such that $$Ad(G)S
\subset S_1 + \mathcal{N}.$$
\end{lemma}
\begin{proof}[Proof of \Cref{thm:FuzzyFin}]
Suppose $V$ is generated by $v_1,...,v_n$ and for each $i$ pick all \DimaB{special} balls
$B_{ij}$ such that $\pi(e_{B_{ij}})(v_i) \ne 0$. Note that
by \Cref{lem:BigBallVan} for every $v \in V$, there are only finitely
many \DimaB{special} balls $B$ such that $\pi(e_B)v \ne 0$. Let $S = \cup B_{ij}$.
By \Cref{thm:fuzzy decomp} $\pi(e_S)v_i=v_i$.
Since $S$ is compact, \Cref{lem:comp} implies
$$Ad(G)S \subset \cL_{m}^{\bot} + \mathcal{N}$$ for some large $m$.
Let $B$ be a \DimaB{special} ball such that $\pi(e_B) \ne 0.$ Let us show that there exists $g \in G$ such
that $\pi(e_B) \pi(g) \pi(e_S) \ne 0$.
Indeed, suppose on the contrary that $\pi(e_B) \pi(g) \pi(e_S) = 0$ for every
$g \in G$. Let $v$ be such that $\pi(e_B)v \neq 0$ and write $$v = \sum_{j=1,1 \le i_j \le n }^{k} c_j \pi(g_j) v_{i_j}. $$
Then $$\pi(e_B)(v) = \sum_{j=1,1 \le i_j \le n }^{k} c_j \pi(e_B) \pi(g_j) v_{i_j}
= \sum_{j=1,1 \le i_j \le n }^{k} c_j \pi(e_B) \pi(g_j) \pi(e_S) v_{i_j} = 0, $$
and we obtain a contradiction.
By Lemma~\ref{lem: fuzzy adjoint} $$Ad(g)S \cap B \ne \emptyset.$$
In particular $B \cap (\cL_{m}^{\bot} + \mathcal{N}) \ne \emptyset.$ Suppose $B = X+\cL_n^\bot$ with $n \ge m$ and
let $Y \in B \cap (\cL_{m}^{\bot}+ \mathcal{N})$. Then $Y = X + l = l' + \nu$, where $l \in \cL_n^{\perp}, l' \in \cL_m^{\perp}$, and
$\nu \in \mathcal{N}$. In particular, $\nu = X + (l-l') \in X + \cL_n^{\perp}$, so $\nu \in B \cap \mathcal{N}$. \\
We have obtained that every \DimaB{special} ball $B$ that acts on $V$ as non-zero and has big enough radius is
a nilpotent \DimaB{special} ball. Since the number of \DimaB{special} balls with a bounded radius is finite, we obtain that
all but finitely many non-nilpotent balls act on $V$ as zero.
\end{proof}
\subsection{Proof of \Cref{lem:FouFuzz}}\label{subsec:PfFouFuzz}
We follow \cite[\S 5.1]{S}.
Assume that $ B = B(X,L)$.
Note that $\exp^*(e_B) = f \mu$, where $\mu$ is the Haar measure on $L$, normalized such
that $\mu(L)=1$ and $f$ is a function given by $f(y) =\psi_0(\langle y,X\rangle) 1_L(y)$.
Then $$\cF(f\mu)(Z) = \int_{y \in L} \psi_0(\langle y,X+Z\rangle) d\mu(y).$$
The last integral is an integral of an additive character on an additive group. Such an integral is zero, unless
the character is trivial. In our case this means that the integral is zero, unless
$X+Z \in L^{\perp}$, which happens if and only if $-Z \in X + L^{\perp}$ and in that case the integral equals $1$.
Therefore, $\cF(f\mu)(Z) = 1_{X +L^{\perp}}(-Z)$. As $\cF \circ \cF = -Id$ (under the identification
$\mathfrak{g} \simeq \mathfrak{g}^{**}$), we get that $\cF(1_B) = \exp^*(e_B)$, as claimed.
\section{Finite Generation of Hecke Modules (by A. Aizenbud and D. Gourevitch)} \label{sec:FinGen}
In this section we prove \Rami{a stronger version of \Cref{thm:FinGen}. For its formulation we will need the following definition.
\begin{defn}$ $
Let $(G,(H,\chi))$ be a twisted pair, i.e. $H <G$ is a (closed) subgroup, and $\chi$ is its character.
\begin{enumerate}
\item Denote by $D_{G/H}$ the $G$-equivariant sheaf of smooth measures on $G/H$ and by $\Delta_{G/H}$ its fiber at $[1]\in G/H$, considered as a character of $H$.
Note that $\Delta_{G/H}=(\Delta_{G})|_H\cdot\Delta_{H}^{-1}=\Delta_{H}^{-1}$.
\item We define the \emph{dual} of the twisted pair $(G,(H,\chi))$ to be the pair $(G,(H,\DimaA{\hat \chi}))$, where $\DimaA{\hat \chi}=\Delta_{G/H}\chi^{-1}$. \RamiC{Note that $\DimaA{\hat{ \hat \chi}}=\chi.$}
\end{enumerate}
\end{defn}
The following theorem clearly implies \Cref{thm:FinGen}.
\begin{theorem}\label{thm:ExpFinGen}
Let $(G,(H,\chi))$ be a twisted pair.
Let $\mathbf{P}$ be a minimal parabolic subgroup of $\mathbf{G}$ and $P=\mathbf{P}(F)$. Suppose that $H$ has finitely many orbits on $G/P$.
Suppose that for any
irreducible smooth representation $\rho$ of $G$ we have
\begin{equation} \label{eq:FinMultExp}
\dim\operatorname{Hom}_{H}(\rho|_{H}, \DimaA{\hat \chi}) < \infty .
\end{equation}
Then for any open compact subgroup $K$ of $G$ the module $\operatorname{ind}_H^G(\chi)^K$ over the Hecke algebra $\cH_K(G)$ is finitely generated.
\end{theorem}
}
Let us now give an overview of the argument.
In Lemma \ref{VKFinGen} we present a criterion, due to Bernstein, for the finite generation of spaces of $K$-invariants. The proof of the criterion uses the theory of Bernstein Center. \Dima{Using this criterion we \RamiC{introduce a notion} of twisted pairs of finite type \RamiC{(see \Cref{def:Fin}\eqref{Fin:type})} which is equivalent to the \RamiC{local} finite generation of $\operatorname{ind}_H^G(\chi)$.
Bernstein's} criterion is given in terms of all parabolic subgroups of $G$. This allows us to define an intermediate notion of finite cuspidal type \RamiC{(see \Cref{def:Fin}\eqref{Fin:CuspType})}, which means that the criterion holds for the group $G$ as a parabolic subgroup of itself.
Then we introduce duality between twisted pairs. We prove that
\Dima{condition \eqref{eq:FinMultExp}
implies that }the dual pair $(G,(H, \DimaA{\hat \chi}))$ is of finite cuspidal type (see \Cref{subsec:CuspFinType}). We use a simple trick (\Cref{cor:CuspMultType}) to imply that the pair $(G,(H,\chi))$ is itself of finite cuspidal type.
In order to analyze the condition of Lemma \ref{VKFinGen} for all parabolic subgroups of $G$ we introduce the notion of a descendant of the pair $(G,(H,\chi))$ and prove that if all the descendants are of finite cuspidal type then the pair itself is of finite type (see \Cref{SecPfFinGen}).
Thus it remains to show that if the conditions of \Cref{thm:FinGen} hold for a twisted pair then they hold for all its descendants. This we do in \Cref{subsec:desc.cusp}, using a homological algebra argument.
\begin{rem}\label{rem:AAG}
The argument here is an adaptation of a similar argument in \cite{AAG} that dealt with the case of trivial $\chi$.
However, the argument in \cite{AAG} did not take into account the modular characters of various groups that appear along the way. As a result, it is not valid for non-unimodular $H$ and even for unimodular $H$ it is not complete. The gap in the original argument is filled \RamiC{mainly} by the proof of \Cref{cor:CuspMultType}.
\end{rem}
\subsection{Preliminaries} \lbl{subsec:prel}
\begin{notn}
For a subgroup $H<G$ we denote by $\operatorname{ind}_H^G: \cM(H) \to \cM(G)$ the compactly supported induction functor and by $\operatorname{Ind}_H^G: \cM(H) \to \cM(G)$ the full induction functor. \RamiC{For $\pi \in \cM(G)$ denote by $\widetilde{\pi}:=(\pi^*)^{\infty}$ the smooth contragredient representation. Note that for any character $\chi$ of $H$ we have $\widetilde{\operatorname{ind}_H^G(\chi)}=\operatorname{Ind}_H^G(\DimaA{\hat \chi})$.}
\end{notn}
\begin{defn}
Let $\mathbf{P}<\mathbf{G}$ be a parabolic subgroup with unipotent radical $\mathbf{U}$, and let $\mathbf{M}:=\mathbf{P}/\mathbf{U}$.
Such $\mathbf{M}$ is called a Levi subquotient of $G$.
Note that every representation of $M$ can be considered as a representation of $P$ using the quotient morphism $P \twoheadrightarrow M$.
Define:
\begin{enumerate}
\item The Jacquet functor $r^G_{M}:\cM(G) \to \cM(M)$ by $r^{G}_{M}(\pi):=(\EitanA{\Delta_{G/P}^{\frac{1}{2}}}\cdot\pi|_{P})_{U}$.
Note that $r^G_{M}$ is defined for any representation of $P$.
\item The parabolic induction functor $i^G_{M}:\cM(M) \to \cM(G)$ by $i^G_{M}(\tau):=\operatorname{ind}_{P}^{G}(\EitanA{\Delta_{G/P}^{-\frac{1}{2}}}\tau)$.
\end{enumerate}
Note that $i^{G}_{M}$ is right adjoint to $r^{G}_{M}$.
A representation $\pi$ of $G$ is called cuspidal if $r^{G}_{M}(\pi)=0$ for any Levi subquotient $\mathbf{M}$ of $\mathbf{G}$.
\end{defn}
It is well-known that $i^G_M$ and $r^G_M$ are exact functors.
\begin{definition}
A smooth representation $V$ of $G$ is
called \emph{compact} if for any $v \in V$ and $\xi \in
\widetilde{V}$ the matrix coefficient function defined by
$m_{v,\xi}(g):= \xi(gv)$ is a compactly supported function on $G$.
\end{definition}
\begin{theorem}[Bernstein-Zelevinsky]\lbl{CompProj}
Any compact representation of $G$ is
a projective object in the category $\cM(G)$.
\end{theorem}
\begin{definition}$\,$
\begin{enumerate}
\item Denote by $G^1$ the preimage in $G$ of the maximal compact
subgroup of $G/[G,G]$.
\item Denote by $Z(G)$ the center of $G$ and denote $G_0:=G^1Z(G)$.
\item \RamiC{We call } a complex character of $G$ \emph{unramified} if it is trivial
on $G^1$. We denote the set of all unramified
characters by $\Psi_G$. Note that $G/G^1$ is a lattice and therefore we can identify $\Psi_G$ with $(\mathbb{C}^{\times})^n$. This defines a structure of algebraic variety on $\Psi_G$.
\item For any smooth representation $\rho$ of $G$ we denote
$\Psi(\rho):= \operatorname{ind}_{G^1}^G(\rho|_{G^1})$. Note that $\Psi(\rho) \simeq \rho \otimes \cO(\Psi_G),$
where $G$ acts only on the first factor, but this action depends on the second factor.
This identification gives a structure of $\cO(\Psi_G)$-module on $\Psi(\rho)$.
\end{enumerate}
\end{definition}
\begin{theorem}[Harish-Chandra]\lbl{CuspComp}
Let $V$ be a cuspidal representation
of $G$. Then $V|_{G^1}$ is a compact representation of $G^1$.
\end{theorem}
\begin{corollary} \lbl{CuspProj}
Let $\rho$ be a cuspidal
representation
of $G$. Then\\
(i) $\rho|_{G^1}$ is a projective object in the category
$\cM(G^1)$.\\
(ii) $\Psi(\rho)$ is a projective object in the category
$\cM(G)$.
\end{corollary}
\begin{proof}
(i) \RamiC{Follows from \Cref{CuspComp,CompProj}}.\\
(ii) note that $$\operatorname{Hom}_G(\Psi(\rho),\pi) \cong
\operatorname{Hom}_{G/G^1}(\cO(\Psi_G),\operatorname{Hom}_{G^1}(\rho,\pi)),$$
for any representation $\pi$. Therefore the functor $\pi \mapsto
Hom_G(\Psi(\rho),\pi)$ is a composition of two exact functors and
hence is exact.
\end{proof}
We will use Bernstein's second adjointness theorem.
\begin{thm}[{\cite{BerSec} or \cite[Theorem 3]{Bus}}]\label{thm:2adj}
\RamiC{Let $\mathbf{P}\subset \mathbf{G}$ be a parabolic subgroup and let $\overline{\mathbf{P}}$ be an opposite parabolic subgroup. Let $\mathbf{M}$ be the Levi quotient of $\mathbf{P}$} and let $\overline{r}^G_{M}:\cM(G) \to \cM(M)$ denote the Jacquet functor defined using $\overline{P}$.
Then $\overline{r}^G_{M}$ is right adjoint to $i^G_{M}$. In particular, $i^G_{M}$ maps projective objects to projective ones and hence for any irreducible cuspidal
representation $\rho$
of $M$,
$i^G_{M}(\Psi(\rho))$ is a projective object of $\cM(G)$.
\end{thm}
We now present a criterion, due to Bernstein, for \RamiC{local} finite generation.
\begin{lemma}[{\cite[Lemma 2.1.10]{AAG}}] \lbl{VKFinGen}
Let $V\in \cM(G)$. Suppose that for any
parabolic $P<G$ and any irreducible cuspidal representation $\rho$
of $M$ (where $M$ denotes the reductive quotient
of $P$),
$ \operatorname{Hom}_{G}(i^G_{M}(\Psi(\rho)),V)$ is a finitely generated
module over $\cO(\Psi_M)$. Then $V^K$ is a finitely generated
module over $\fz(\mathcal{H}_K(G))$, for any compact open
subgroup $K<G$.
\end{lemma}
The theory of Bernstein center gives us the following Lemma:
\begin{lem}\label{lem:Noeth}
Let $V$ be a smooth finitely generated representation of $G$. Let $W \subset V$ be a subrepresentation.
Then $W$ is finitely generated.
\end{lem}
\begin{proof}
Let $v_1, \dots, v_n$ be the generators of $V$. By \Cref{thm:Ber}\eqref{it:split} we can choose a splitting subgroup $K \subset G$ s.t. $v_i \in V^K$. Then $V\in \cM_K(G)$ and $V^K$ is finitely generated over $\cH_K(G)$. Hence $W\in \cM_K(G)$ and thus it is enough to show that $W^K$ is finitely generated over $\cH_K(G)$.
By \Cref{thm:Ber}\eqref{2} $\cH_K(G)$ is finite over its center $\fz(\mathcal{H}_K(G))$. So $V^K$ is finitely generated over $\fz(\cH_K(G))$. From \Cref{thm:Ber}\eqref{3} it follows that $\fz(\cH_K(G))$ is Noetherian, and thus $W^K$ is finitely generated over $\fz(\cH_K(G))$.
\end{proof}
\subsection{Finite multiplicity and duality of twisted pairs}\label{subsec:FinDual}
Let $(G,(H,\chi))$ be a twisted pair.
\begin{defn}\label{def:Fin}
We say that the pair $(G,(H,\chi))$ \begin{enumerate}
\item \label{Fin:mult} has \emph{finite multiplicities} \RamiC{(resp. \emph{finite cuspidal multiplicities})} if for any irreducible \RamiC{(resp. cuspidal irreducible)} representation $\pi$ of $G$, $$\dim\operatorname{Hom}(\operatorname{ind}_{H}^G(\chi),\pi)<\infty.$$
\item \label{Fin:type} has \emph{finite type} if for any
parabolic \RamiC{$\mathbf{P}<\mathbf{G}$} and any irreducible cuspidal representation $\rho$
of $M$ (where $\mathbf{M}$ denotes the reductive quotient of $\mathbf{P}$),
$$ \operatorname{Hom}_{G}(i^G_{M}(\Psi(\rho)),\operatorname{ind}_{H}^G(\chi))$$ is a finitely generated
module over $\cO(\Psi_M)$.
\item \label{Fin:CuspType} has \emph{finite cuspidal type} if for any irreducible cuspidal representation $\rho$
of $G$,
$ \operatorname{Hom}_{G}(\Psi(\rho),\operatorname{ind}_{H}^G(\chi))$ is a finitely generated
module over $\cO(\Psi_G)$.
\item is $F$-\emph{spherical}\footnote{
If $char F=0$ and $G$ is quasi-split over $F$ then $(G,H)$ is an $F$-spherical pair if and only if
it is a spherical pair of algebraic groups.
} if for any
parabolic subgroup $\mathbf{P} \subset \mathbf{G}$, there is a finite number of
double cosets in $P \setminus G / H$.
\end{enumerate}
\end{defn}
The following lemma helps to connect multiplicities to duality.
\begin{lem}\label{lem:tilde}
Let $\pi,\rho \in \cM(G)$ and assume that $\rho$ has \DimaB{finite length}. Then the natural morphism $\operatorname{Hom}(\pi,\rho) \to \operatorname{Hom}(\widetilde{\rho},\widetilde{\pi}) $ is an isomorphism.
\end{lem}
\begin{proof}
By \Cref{thm:Ber} we can choose a splitting subgroup $K$ such that $\rho^K$ generates $\rho$. Then
$$
\operatorname{Hom}(\pi,\rho) \cong \operatorname{Hom}_{\cH_K(G)}(\pi^K,\rho^K)\cong \{v \in (\pi^K)^*\otimes \rho^K \, \vert \, \forall a \in \cH_K(G), \, (a\otimes 1 - 1 \otimes a)v = 0\}.$$
Here we consider the standard action of $\cH_K(G)^{opposite}\otimes \cH_K(G)$ on $(\pi^K)^*\otimes \rho^K$. Also
\begin{multline*}\operatorname{Hom}(\widetilde{\rho},\widetilde{\pi})\cong\operatorname{Hom}_{\cH_K}((\rho^{*})^{K},(\pi^*)^K) \cong \operatorname{Hom}_{\cH_K}((\rho^K)^*,(\pi^{K})^*)\cong\rho^K \otimes (\pi^*)^K \cong \\ \cong\{v \in \rho^K \otimes (\pi^K)^* \, \vert \, \forall a \in \cH_K(G), \, (a\otimes 1 - 1 \otimes a)v = 0\}
\end{multline*}
This easily implies the assertion.
\end{proof}
Using Frobenius reciprocity we obtain the following corollary.
\begin{cor}\label{lem:DualFinMult}
Let $\pi$ be a \DimaB{smooth} representation of $G$ of \DimaB{finite length}. Then
$$\dim\operatorname{Hom}_G(\operatorname{ind}_{H}^G(\chi),\pi)=\dim\operatorname{Hom}_H(\tilde \pi, \DimaA{\hat \chi})$$
\end{cor}
\RamiA{\Cref{lem:admFin} follows from \Cref{lem:DualFinMult} and the next lemma.
\begin{lem}\label{lem:FinTypeFinMult}
If $(G,(H,\chi))$ has \EitanA{finite (cuspidal)} type then it has \EitanA{finite (cuspidal)} multiplicities.
\end{lem}
\begin{proof}
Let $\pi$ be an irreducible (cuspidal) representation of $G$. By \Cref{thm:Ber} we can choose a splitting compact open subgroup $K<G$ s.t. $\pi^K \neq 0$. Then $$\operatorname{Hom}_{G}(\operatorname{ind}_H^G(\DimaA{\hat \chi}),\pi) = \operatorname{Hom}_{\cH_K(G)}((\operatorname{ind}_H^G(\DimaA{\hat \chi}))^K,\pi^K).$$ Since $(\operatorname{ind}_H^G(\DimaA{\hat \chi}))^K$ is finitely generated, this implies that $\dim \operatorname{Hom}_{G}(\operatorname{ind}_H^G(\DimaA{\hat \chi}),\pi)<\infty$.
\end{proof}
}
In view of \Cref{VKFinGen,lem:DualFinMult}, \Dima{\Cref{thm:ExpFinGen}} is equivalent to the following one.
\begin{thm}\label{thm:EqFinGen}
If $(G,(H,\chi))$ is $F$-spherical and has finite multiplicity then it has finite type.
\end{thm}
\subsection{Descent Of
Finite Multiplicity} \lbl{subsec:desc.cusp}
\begin{notation}
Let $(G,(H,\chi))$ be a twisted pair.
Let \RamiC{$\mathbf{P}<\mathbf{G}$ be
a parabolic subgroup and $\mathbf{M}$ be its Levi quotient. Let $\mathbf{\overline{P}}<\mathbf{G}$ be
a parabolic subgroup opposite to $\mathbf{P}$ and $\mathbf{\overline{U}}$ be its unipotent radical.} Let $X:=G/H$.
\Rami{Let $\cF$ be the natural $G$-equivariant sheaf on $X$ such that the stalk at $[1]$ coincides with $\chi$ as a representation of $H$.}
Let $x \in X$. It is easy to see that there exists a geometric quotient $A^{x} = \overline{U}\backslash\overline{P}x$. Denote by $\cF^{x}$ the natural $M$-equivariant sheaf on $A^{x}$ such that $\overline{r}_M^G(\cS(\overline{P}x, \cF|_{\overline{P}x}))=\cS(A^{x},\cF^{x})$.
Suppose that $\cF^{x}\neq 0$. Let $y$ be the image of $x$ in $A^{x}$. We denote its stabilizer in $M$ by $H^x_M$, and we consider the fiber $(\cF^{x})_y$ as a character of $H^x_M$, and denote it by $\chi_M^x$.
We say that the twisted pair $(M, (H_M^x,\chi_M^x))$ is a \emph{$P$-descendent} of the twisted pair $(G,(H,\chi))$. We will say that descendants $(M, (H_M^x,\chi_M^x))$ and $(M, (H_M^{x'},\chi_M^{x'}))$ are equivalent if $x$ and $x'$ belong to the same $P$-orbit.
\end{notation}
The following \RamiC{version of the Bernstein-Zelevinsky geometric} lemma follows from the exactness of $\overline{r}_M^G$.
\begin{lem}\label{lem:geo}
\RamiC{Let $\mathbf{P}<\mathbf{G}$ be
a parabolic subgroup and $\mathbf{M}$ be its Levi quotient.} Let $(G,(H,\chi))$ be an $F$-spherical twisted pair. Then $ \operatorname{ind}_H^G(\chi)$ has a finite filtration such that $$Gr(\overline{r}_M^G( \operatorname{ind}_H^G(\chi)))\simeq\bigoplus_{i} \operatorname{ind}_{H_i}^M(\chi_i), $$
where $(M,(H_i,\chi_i))$ ranges over all the $P$-descendants of $(G,(H,\chi))$ up to equivalence.
\end{lem}
The goal of this subsection is to prove the following lemma.
\begin{lemma} \lbl{FinMultCuspDesc}
Let $(G,(H,\chi))$ be an $F$-spherical pair of finite multiplicity. Let $P<G$ be a parabolic
subgroup and $M$ be its Levi quotient. Let $(M,(H',\chi'))$ be a descendant of $(G,(H,\chi))$. Then $(M,(H',\chi'))$ is an $F$-spherical pair of finite cuspidal multiplicity.
\end{lemma}
\begin{remark}
One can easily show that the converse statement to this lemma is also true. Namely, if all the descendants of the pair have finite cuspidal multiplicity then the pair has finite multiplicity. This also implies that in this case all the descendants have finite multiplicity.
However we will not use these facts.
\end{remark}
We will need the following lemmas.
\begin{lemma} \lbl{LinAlg}
Let $M$ be an l-group and $V,W$ be smooth representations of $M$ such that
that $\dim \operatorname{Hom}(V,W) < \infty$.
Let $0=F^0V \subset ... \subset F^{n-1}V \subset F^nV=V$ be a
finite filtration of $V$ by subrepresentations. Suppose that for
any $i$, either $$\dim \operatorname{Hom}(F^iV/F^{i-1}V,W) = \infty$$ or $$\text{both }\dim\operatorname{Hom}(F^iV/F^{i-1}V,W)< \infty \text{ and }\dim \operatorname{Ext}^{1}(F^iV/F^{i-1}V,W) <
\infty.$$ Then $\dim\operatorname{Hom}(F^iV/F^{i-1}V,W) < \infty$ for any $i$.
\end{lemma}
\begin{proof}
We prove by a decreasing induction on $i$ that $\dim\operatorname{Hom}(F^iV,W)<\infty$, and, therefore, $\dim\operatorname{Hom}(F^iV/F^{i-1}V,W)<\infty$ and by the conditions of the lemma $$\dim \operatorname{Ext}^{1}(F^iV/F^{i-1}V,W)<\infty.$$ Consider the short exact sequence
$$0 \to F^{i-1}V \to F^iV \to F^iV/F^{i-1}V \to 0,$$
and the corresponding long exact sequence
$$...\leftarrow \operatorname{Ext}^1(F^iV/F^{i-1}V,W) \leftarrow \operatorname{Hom}(F^{i-1}V,W) \leftarrow \operatorname{Hom}(F^iV,W) \leftarrow \operatorname{Hom}(F^iV/F^{i-1}V,W) \leftarrow 0.$$
In this sequence $\dim \operatorname{Ext}^{1}(F^iV/F^{i-1}V,W) < \infty$ and $\dim \operatorname{Hom}
(F^iV,W) < \infty$, and hence $\dim \operatorname{Hom}(F^{i-1}V,W) < \infty$.
\end{proof}
\begin{lemma}
\lbl{FinDimH1H0}
Let $(G,(H,\chi))$ be a twisted pair.
Let $\rho$ be an irreducible cuspidal representation of
$G$. Suppose that $\dim \operatorname{Hom}(\operatorname{ind}_H^G(\chi), \rho)< \infty$. Then $\dim \operatorname{Ext}^1(\operatorname{ind}_H^G(\chi), \rho)< \infty$.
\end{lemma}
For the proof we will need the following straightforward lemma.
\begin{lemma} \lbl{AFG}
Let $L$ be a lattice.
Let $V$ be a linear
space. Let $L$ act on $V$ by a character. Then
$$\operatorname{H}_1(L,V) = \operatorname{H}_0(L,V) \otimes_{\mathbb{C}}(L \otimes_{\mathbb{Z}}\mathbb{C}).$$
\end{lemma}
\begin{proof}[Proof of \Cref{FinDimH1H0}]
By \Cref{lem:tilde} $$\operatorname{Ext}^i(\operatorname{ind}_H^G(\chi)\RamiC{,\rho}) \cong \operatorname{Ext}^i(\widetilde{\rho},\operatorname{Ind}_H^G(\DimaA{\hat \chi})).$$ By Frobenius reciprocity $$\operatorname{Ext}^i(\widetilde{\rho},\operatorname{Ind}_H^G(\DimaA{\hat \chi}))\cong \operatorname{Ext}^i_H(\widetilde{\rho},\DimaA{\hat \chi}).$$ Let $I:=H\cap G^1$ and $J:=H\cap G_0$. Note that
$$ \operatorname{Ext}^i_H(\widetilde{\rho},\DimaA{\hat \chi} )\cong \operatorname{Ext}^i_H(\widetilde{\rho}\otimes \DimaA{\hat \chi}^{-1}, \mathbb{C}) \cong \operatorname{Ext}^i_{H/I}((\widetilde{\rho}\otimes \DimaA{\hat \chi}^{-1})_{I}, \mathbb{C}),$$
where the last isomorphism follows from \Cref{CuspProj}. Now\RamiC{, since $H/J$ is finite,}
$$ \operatorname{Ext}^i_{H/I}((\widetilde{\rho}\otimes \DimaA{\hat \chi}^{-1})_{I}, \mathbb{C}) \cong \operatorname{Hom} _{H/J}(\operatorname{H}_i(J/I,(\widetilde{\rho}\otimes\DimaA{\hat \chi}^{-1})_{I}),\mathbb{C}),$$
which implies the assertion by \Cref{AFG}.
\end{proof}
Now we are ready to prove Lemma \ref{FinMultCuspDesc}.
\begin{proof}[Proof of Lemma \ref{FinMultCuspDesc}]
Clearly $(M,H')$ is $F$-spherical. It remains to prove that
\begin{equation}\label{eq:CuspDesMult}
\dim \operatorname{Hom}(\operatorname{ind}_{H'}^{M}(\chi'),\tau)<\infty,
\end{equation} for any irreducible cuspidal representation $\tau$ of $M$.
Since $(G,(H,\chi))$ has finite multiplicity, we have $\dim \operatorname{Hom}(\operatorname{ind}_H^G(\chi),\pi)<\infty$ for any irreducible $\pi\in \cM(G)$. Thus for any irreducible $\tau \in \cM(M)$ we have
$$\dim \operatorname{Hom}_G(\operatorname{ind}_H^G(\chi),\overline{i}_{M}^G(\tau))<\infty.$$
Thus $$\dim \operatorname{Hom}_M(\overline{r}^G_{M}(\operatorname{ind}_H^G(\chi)),\tau)<\infty.$$ By \Cref{lem:geo},
there exists a filtration on $\overline{r}^G_{M}(\operatorname{ind}_H^G(\chi))$ such that $Gr^i(\overline{r}^G_{M}(\operatorname{ind}_H^G(\chi)))=\operatorname{ind}_{H_i}^M(\chi_i)$ where $(M,(H_i,\chi_i))$ ranges over all the descendants of $(G,(H,\chi))$ up to equivalence. In particular we can assume that for some $i_0$, we have $H_{i_0}=H',\, \chi_{i_0}=\chi'$.
By \Cref{FinDimH1H0}, this filtration satisfies the conditions of \Cref{LinAlg} and thus \eqref{eq:CuspDesMult} holds.
\end{proof}
\subsection{Finite cuspidal type} \label{subsec:CuspFinType}
Let us now prove the following cuspidal analogue of \Cref{thm:EqFinGen}.
\begin{thm}\label{thm:EqCuspFinGen}
If $(G,(H,\DimaA{\hat \chi}))$ has \EitanA{finite cuspidal} multiplicity, then $(G,(H,\chi))$ has \EitanA{finite cuspidal} type.
\end{thm}
We will need several lemmas.
\begin{lemma}\label{lem:CharExt}
Let $A$ be a locally compact group and $B$ be a closed subgroup. Suppose that $A=BZ(A)$. Then any character of $B$ can be lifted to $A$.
\end{lemma}
\begin{proof}
Taking quotient by the kernel of the character we reduce to the case of abelian $A$. In this case the statement is \cite[Theorem 5]{Dix}.
\end{proof}
\begin{lemma}
\label{G1}
Let $(G,H)$ be an $F$-spherical pair, and denote $\widetilde{H}=HZ(G)\cap G^1$. Let $\chi$ be a character of $H$. Suppose that for any smooth
(respectively cuspidal) irreducible representation $\rho$ of
$G$ we have $$\dim\operatorname{Hom}_{H}(\rho|_{H}, \chi) < \infty$$ Then
for any smooth (respectively cuspidal) irreducible representation
$\rho$ of $G$ and for every character $\psi$ of $\widetilde{H}$ whose restriction to $H\cap G^1$ coincides with $\chi$, we have
$$\dim\operatorname{Hom}_{\widetilde{H}}(\rho|_{\widetilde{H}}, \psi) < \infty.$$
\end{lemma}
\begin{proof}
Let $\rho$ be a smooth (respectively cuspidal) irreducible
representation of $G$. Using Lemma \ref{lem:CharExt} extend $\chi$ to a character $\chi'$ of $HZ(G)$. Let $\varphi$ be a character of $\widetilde{H}$ whose restriction to $H\cap G^1$ is trivial.
We have to show that $$\dim \operatorname{Hom}_{\widetilde{H}}\left (\rho|_{\widetilde{H}}, \varphi\chi' \right) < \infty.$$ We have
\[
\operatorname{Hom}_{\widetilde{H}}\left (\rho|_{\widetilde{H}},\varphi\chi' \right ) =
\operatorname{Hom}_{H Z(G)\cap G_0} \left (\rho|_{(H Z(G))\cap G_0}, \operatorname{Ind}_{\widetilde{H}}^{H Z(G)\cap G_0}\varphi\chi'\right).
\]
Since $$HZ(G)\cap G_0=\widetilde{H}Z(G)\cap G_0=\widetilde{H}Z(G),$$
the subspace of $\operatorname{Ind}_{\widetilde{H}}^{(H Z(G))\cap G_0}\varphi\chi'$ that transforms under $Z(G)$ according to the central character of $\rho$ is at most one dimensional. If this subspace is $0$, then the lemma is clear. Otherwise, denote it by $\tau$. Since $H\cap G^1$ is normal in $HZ(G)$, we get that the restriction of $\operatorname{Ind}_{\widetilde{H}}^{(H Z(G))\cap G_0}\varphi$ to $H\cap G^1$ is trivial, and thus that $\tau|_{H\cap G^1}=\chi'_{H\cap G^1}$. Hence
\begin{multline*} \operatorname{Hom}_{\widetilde{H}}\left (\rho|_{\widetilde{H}}, \varphi\chi' \right )=
\operatorname{Hom}_{(H Z(G))\cap G_0} \left (\rho|_{(H Z(G))\cap G_0}, \tau\right)=\\=\operatorname{Hom}_{H\cap G_0} \left (\rho|_{H \cap G_0}, \tau|_{H\cap G_0}\right)
=\operatorname{Hom}_{H}\left(\rho|_{H},\operatorname{Ind}_{H\cap G_0}^{H}\tau|_{H\cap G_0}\right).
\end{multline*}
Since $H / H\cap G_0$ is finite and abelian, we have
$$\operatorname{Ind}_{H\cap G_0}^{H}(\tau|_{H\cap G_0})=\chi\left(\bigoplus_{i=1}^N \chi_i\right)$$
where $\chi_i$ are characters of $H$, s.t. $(\chi_i)|_{H\cap G^1}=1$. \RamiC{By \Cref{lem:CharExt}} the characters $\chi_{i}$ can be extended to characters of $G$, because $H/(H\cap G^1)$ is a sub-lattice of $G/G^1$. Denoting the extensions by $\Theta_{i}$, we get that
\[
\operatorname{Hom}_{H}\left(\rho|_{H},\chi\chi_i\right)=\operatorname{Hom}_{H}\left((\rho\otimes\Theta_{i}^{-1})|_{H},\chi\right),
\]
but $\rho\otimes\Theta_{i}^{-1}$ is again a smooth (respectively cuspidal) irreducible representation of $G$, so this last space is finite-dimensional.
\end{proof}
\begin{lemma} \lbl{CA}
Let $A$ be a commutative unital Noetherian algebra without zero divisors and let $K$ be its field of fractions. Let $K^\mathbb{N}$ be the space of all sequences of elements of $K$. Let $V$ be a finite dimensional subspace of $K^\mathbb{N}$ and let $M:=V \cap A^\mathbb{N}$. Then $M$ is finitely generated.
\end{lemma}
\begin{proof} Since $A$
does not have zero divisors, $M$ injects into $K^\mathbb{N}$. There is a number $n$ such that the projection of $V$ to $K^{\{1,\ldots n\}}$ is injective. Therefore, $M$ injects into $A^{\{1,\ldots n\}}$, and, since $A$ is Noetherian, $M$ is finitely generated.
\end{proof}
\begin{lemma}\label{fg}
Let $L$ be an $l$-group, and let $L' \subset L$ be an open normal subgroup of $L$ such that $L/L'$ is a lattice. Let $\rho$ be a smooth representation of $L$ of countable dimension. Suppose that for any character $\chi$ of $L$ whose restriction to $L'$ is trivial we have $$\dim\operatorname{Hom}_{L}(\rho, \chi) < \infty.$$
Consider $\operatorname{Hom}_{L}(\rho, \cS(L/L'))$ as a representation of $L$, where $L$ acts by $((hf)(x))([y])=(f(x))([y h])$. Then this representation is finitely generated.
\end{lemma}
\begin{proof} By assumption, the action of $L$ on $\operatorname{Hom}_{L}(\rho,\cS(L/L'))$ factors through $L/L'$. Since $L/L'$ is discrete, $\cS(L/L')$ is the group algebra $\mathbb{C}[L/L']$. We want to show that $\operatorname{Hom}_{L}(\rho,\mathbb{C}[L/L'])$ is a finitely generated module over $\mathbb{C}[L/L']$.
Let $\mathbb{C}(L/L')$ be the fraction field of $\mathbb{C}[L/L']$. Choosing a countable basis for the vector space of $\rho$, we can identify any $\mathbb{C}$-linear map from $\rho$ to $\mathbb{C}[L/L']$ with an element of $\mathbb{C}[L/L']^\mathbb{N}$. Moreover, the condition that the map intertwines the action of $L/L'$ translates into a collection of linear equations that the tuple in $\mathbb{C}[L/L']^\mathbb{N}$ should satisfy. Hence, $\operatorname{Hom}_{L}(\rho,\mathbb{C}[L/L'])$ is the intersection of the $\mathbb{C}(L/L')$-vector space $\operatorname{Hom}_{L}(\rho,\mathbb{C}(L/L'))$ and $\mathbb{C}[L/L']^\mathbb{N}$. By Lemma \ref{CA}, it suffices to prove that $\operatorname{Hom}_{L}(\rho,\mathbb{C}(L/L'))$ is finite dimensional over $\mathbb{C}(L/L')$.
Since
$L$ is separable, and $\rho$ is smooth and of countable dimension,
there are only countably many linear equations defining $\operatorname{Hom}_{L}(\rho,\mathbb{C}(L/L'))$; denote them by $\varphi_1,\varphi_2,\ldots\in\left(\mathbb{C}(L/L')^\mathbb{N}\right)^*$. Choose a countable subfield $K\subset\mathbb{C}$ that contains all the coefficients of the elements of $\mathbb{C}(L/L')$ that appear in any of the $\varphi_i$'s. If we define $W$ as the
$K(L/L')$-linear subspace of
$K(L/L')^\mathbb{N}$
defined by the $\varphi_i$'s, then $\operatorname{Hom}_{L}(\rho,\mathbb{C}(L/L'))=W\otimes_{K(L/L')} \mathbb{C}(L/L')$, so $\dim_{\mathbb{C}(L/L')}\operatorname{Hom}_{L}(\rho,\mathbb{C}(L/L'))=\dim_{K(L/L')}W$.
Since $L/L'$ is a lattice generated by, say, $g_1,\ldots,g_n$, we get that $K(L/L')=K(t_1^{\pm 1},\ldots,t_n^{\pm 1})=K(t_1,\ldots,t_n)$. Choosing elements $\pi_1,\ldots,\pi_n\in\mathbb{C}$ such that $tr.deg_K(K(\pi_1,\ldots,\pi_n))=n$, we get an injection $\iota$ of
$K(L/L')$ into $\mathbb{C}$. As before, we get that if we denote the $\mathbb{C}$-vector subspace of $\mathbb{C}^\mathbb{N}$ cut by the equations $\iota(\varphi_i)$ by $U$, then $\dim_{K(L/L')}W=\dim_{\mathbb{C}}U$. However, $U$ is isomorphic to $\operatorname{Hom}_{L}(\rho,\chi)$, where $\chi$ is the character of $L/L'$ such that $\chi(g_i)=\pi_i$. By assumption, this last vector space is finite dimensional.
\end{proof}
Now we are ready to prove Theorem \ref{thm:EqCuspFinGen}.
\begin{proof}[Proof of Theorem \ref{thm:EqCuspFinGen}]
Let $\rho$ be an irreducible cuspidal representation of $G$. By \Cref{lem:DualFinMult} we know that $\dim \operatorname{Hom}_{H} (\rho, \chi) < \infty$. We need to show that $
\operatorname{Hom}_{G}(\Psi(\rho),\operatorname{ind}_{H}^{G}\chi)
$ is finitely generated over $\cO(\Psi_G)$.
We have
$$
\operatorname{Hom}_{G}(\Psi(\rho),\operatorname{ind}_{H}^{G}\chi) =
\operatorname{Hom}_{G^1}(\rho,\operatorname{ind}_{H}^{G}\chi).$$
Here we consider the space $\Phi:=\operatorname{Hom}_{G^1}(\rho,\operatorname{ind}_{H}^{G}\chi)$ with
the natural action of $G$. Note that $G^1$ acts trivially and
hence this action gives rise to an action of $G/G^1$, which gives
the $\cO(\Psi_G)$-module structure. Let $\Theta:=\operatorname{Hom}_{G^1}(\rho,\operatorname{Ind}_{HZ(G)}^{G}\operatorname{ind}_{H}^{HZ(G)}\chi)$.
Clearly $\Phi \subset \Theta$. Thus, by \Cref{lem:Noeth}, it is enough to show that $\Theta$ is finitely generated over $G$.
Denote $H':=H \cap
G^1$ and $H'':= (H Z(G))\cap G^1$.
Consider the subspace $$V:=\operatorname{Hom}_{G^1}(\rho, \operatorname{Ind}_{H''}^{G^1}(\operatorname{ind}_{H'}^{H''}(\chi|_{H'}))) \subset \Theta.$$ It generates
$\Theta$ as a representation of $G$,
and therefore also as an $\cO(\Psi_G)$ - module. Note that $V$ is $H$-invariant. Therefore it is enough to show that $V$ is finitely generated over $H.$
By Frobenius reciprocity we have $V \cong \operatorname{Hom}_{H''}(\rho,\operatorname{ind}_{H'}^{H''}(\chi|_{H'}))$.
By Lemma \operatorname{Re}f{lem:CharExt} $\chi$ can be extended to a character $\chi'$ of $H Z(G)$. Thus $$\operatorname{ind}_{H'}^{H''}(\chi|_{H'}) \cong \chi'|_{H''}\cS(H''/H').$$
Let $\rho':=\chi'|_{H}^{-1}\rho|_{H}$. Then
$$ V \cong \operatorname{Hom}_{H''}(\rho',\cS(H''/H')).$$
Under this isomorphism, the action $\Pi$ of $H$ on $V$ is given by $$((\Pi(h)(f))(v))([k])=f(\rho'(h^{-1})v)([h^{-1}kh]),$$
where $h\in H, \, f\in \operatorname{Hom}_{H''}(\rho',\cS(H''/H')), \, v \in \rho', \, k \in H'', \, [k]=kH'\in H''/H'.$
Let $\Xi$ be the action of $H''$ on $\operatorname{Hom}_{H''}(\rho',\cS(H''/H'))$ as described in \Cref{fg}, i.e. $$((\Xi(h)(f))(v))([k])=f(v)([kh]).$$
Let us show that $\operatorname{Hom}_{H''}(\rho',\cS(H''/H'))$ is finitely generated w.r.t. the action $\Xi$. By \Cref{fg} it is enough to show that
\begin{equation}\label{=H''Fin}
\dim \operatorname{Hom}_{H''}(\rho', \theta) < \infty
\end{equation}
for any character $\theta$ of $H''$ with trivial restriction to $H'$. Note that $\operatorname{Hom}_{H''}(\rho', \theta)\cong \operatorname{Hom}_{H''}(\rho, \chi' \theta)$. Thus \eqref{=H''Fin} follows from the hypothesis $\dim \operatorname{Hom}_{H}(\rho, \chi) < \infty$ in view of
\Cref{G1} and we have shown that $\operatorname{Hom}_{H''}(\rho',\cS(H''/H'))$ is finitely generated w.r.t. the action $\Xi$.
Now it is enough to show that for any $h \in H''$ there exist an $h' \in H$ and a scalar $\alpha$ s.t. $$\Xi(h)=\alpha \Pi(h').$$ In order to show this let us decompose $h$ as a product $h=zh'$ where $h' \in H$ and $z\in Z(G)$. Now
\begin{multline*}
((\Xi(h)(f))(v))([k])=f(v)([kh])=f(h^{-1}v)([h^{-1}kh])=f(h^{'-1}z^{-1}v)([h^{'-1}kh'])=\\=
\alpha f(h'^{-1}v)([h^{'-1}kh'])= \alpha((\Pi(h')(f))(v))([k]),
\end{multline*}
where $\alpha$ is the scalar with which
$z^{-1}$ acts on $\rho'$.
Thus $V$ is finitely generated over $H$, hence $\Phii$ and $\Theta$ are finitely generated over $G$ and $\operatorname{Hom}_{G}(\Psi(\rho),\operatorname{ind}_{H}^{G}\chi)$ is finitely generated over $\cO(\Psi_G)$.
\end{proof}
\begin{corollary}\label{cor:CuspMultType}
If $(G,(H, \chi))$ has finite cuspidal multiplicity, then $(G,(H,\chi))$ has finite cuspidal type.
\end{corollary}
\begin{proof}
Assume \EitanA{that} $(G,(H, \chi))$ has finite cuspidal multiplicity. By \Cref{thm:EqCuspFinGen} the twisted pair $(G,(H, \DimaA{\hat \chi}))$ has finite cuspidal type.
\RamiA{ By
\Cref{lem:FinTypeFinMult} the pair $(G,(H, \DimaA{\hat \chi}))$ has finite cuspidal multiplicity. Applying \Cref{thm:EqCuspFinGen} again we obtain that $(G,(H,\chi))$ has finite cuspidal type.}
\end{proof}
\subsection{Proof of \Cref{thm:EqFinGen}} \lbl{SecPfFinGen} $\, $
Let $\mathbf{P} < \mathbf{G}$ be a parabolic subgroup and $\mathbf{M}$ be the Levi quotient of $\mathbf{P}$. Let $\rho$ be a cuspidal representation of $M$. We have to show that $\operatorname{Hom}(i_M^G(\Psi(\rho)),\operatorname{ind}_H^G(\chi))$ is finitely generated over $\cO(\Psi_M)$. By the second adjointness theorem (\Cref{thm:2adj}), we have $$\operatorname{Hom}(i_M^G(\Psi(\rho)),\operatorname{ind}_H^G(\chi))=\operatorname{Hom}((\Psi(\rho)),\bar r_M^G(\operatorname{ind}_H^G(\chi))).$$
By \Cref{lem:geo} the representation $\bar r_M^G(\operatorname{ind}_H^G(\chi))$ has a filtration s.t. $$Gr_i(\bar r_M^G(\operatorname{ind}_H^G(\chi)))=\operatorname{ind}_{H_i}^M(\chi_i)$$ where $(M,(H_i,\chi_i))$ are the descendants of $(G,(H,\chi))$.
Since $i_M^G(\Psi(\rho))$ is a projective object (\Cref{thm:2adj}), this gives us a filtration on $\operatorname{Hom}((\Psi(\rho)),\bar r_M^G(\operatorname{ind}_H^G(\chi)))$ with $$Gr_i\operatorname{Hom}((\Psi(\rho)),\bar r_M^G(\operatorname{ind}_H^G(\chi)))=\operatorname{Hom}((\Psi(\rho)),\operatorname{ind}_{H_i}^M(\chi_i)).$$
So it remains to show that $(M,(H_i,\chi_i))$ are of finite cuspidal type. This follows from \Cref{FinMultCuspDesc,cor:CuspMultType}.
\end{document} |
\begin{document}
\centerline{}
\selectlanguage{english}
\title{Failure of Wiener's property for positive definite periodic
functions}
\selectlanguage{english}
\author[A. Bonami]{Aline Bonami}
\email{aline.bonami@univ-orleans.fr}
\author[S. Revesz]{Szil\'ard Gy. R\'ev\'esz}
\email{revesz@renyi.hu}
\address[A. Bonami]{F\'ed\'eration Denis Poisson. MAPMO-UMR 6628,
D\'epartement de Math\'ematiques, Universit\'e d'Orl\'eans, 45067
Orl\'eans Cedex 2, France}
\address[S. Revesz]{
R\'enyi Institute of Mathematics, Hungarian Academy of Sciences,
Budapest, P.O.B. 127, 1364 Hungary.}
\begin{abstract}
\selectlanguage{english}
We say that Wiener's property holds for the exponent $p>0$ if we
have that whenever a positive definite function $f$ belongs to
$L^p(-\varepsilon,\varepsilon)$ for some $\varepsilon>0$, then $f$
necessarily belongs to $L^p(\mathbb T)$, too. This holds true for $p\in
2\mathbb N$ by a classical result of Wiener.
Recently various concentration results were proved for idempotents
and positive definite functions on measurable sets on the torus.
These new results enable us to prove a sharp version of the
failure of Wiener's property for $p\notin 2\mathbb N$. Thus we obtain
strong extensions of results of Wainger and Shapiro, who proved
the negative answer to Wiener's problem for $p\notin 2\mathbb N$.
\selectlanguage{francais}
\noindent{\bf Contre-exemples \`a la
propri\'et\'e de Wiener pour les fonctions p\'eriodi--ques
d\'efinies-positives.}
\noindent{\bf R\'esum\'e.} On dit que l'exposant $p$
poss\`ede la propri\'et\'e de Wiener si toute fonction
p\'eriodique d\'efinie-positive qui est de puissance $p$-i\`eme
int\'egrable au voisinage de $0$ l'est sur un intervalle de
p\'eriode. C'est le cas des entiers pairs, d'apr\`{e}s un r\'{e}sultat
classique de Wiener.
Nous avons r\'ecemment obtenu des ph\'enom\`enes de concentration
des polyn\^omes idempotents ou d\'efinis-positifs sur un ensemble
mesurable du tore qui nous permettent de donner une version forte
du fait que les exposants $p\notin 2\mathbb N$ n'ont pas la
propri\'et\'e de Wiener, am\'eliorant ainsi les r\'esultats de
Wainger et Shapiro.
\end{abstract}
\selectlanguage{english}
\maketitle
\section{Introduction}\label{sec:intro}
Let $f$ be a periodic integrable function which is positive
definite, that is, has non negative Fourier coefficients. Assume
that it is bounded (in $\|\cdot\|_\infty$) in a neighborhood of
$0$. Then it necessarily belongs to $L^\infty(\mathbb T)$, too. In fact,
its maximum is obtained at $0$ and, as $f(0)=\sum_k
\widehat{f}(k)$, $f$ has an absolutely convergent Fourier series.
The same question can be formulated in any $L^p$ space. Actually,
the following question was posed by Wiener in a lecture, after he
proved the $L^2$ case. We refer to \cite{Sh} for the story of this
conjecture, see also \cite{L}, \cite{Sh} and \cite{W}.
\begin{problem}[Wiener]\label{Wienerproblem} Let $1\le p<\infty$.
Is it true, that if for some $\varepsilon>0$ a positive definite
function $f\in L^p(-\varepsilon,\varepsilon)$, then we necessarily
have $f\in L^p(\mathbb T)$, too?
\end{problem}
The observation that the answer is positive if $p\in 2\mathbb N$ has
been given by Wainger \cite{Wa}, as well as by Erd\H os and Fuchs
\cite{EF}. We refer to Shapiro \cite{Sh} for the proof, since the
constant given by his proof is in some sense optimal, see
\cite{L,L2}. Generalizations in higher dimension may be found in
\cite {Hl} for instance. It was shown by Shapiro \cite{Sh} and
Wainger \cite{Wa} that the answer is to the negative for all other
values of $p$. Negative results were obtained for groups in e.g.
\cite{F} and \cite{L}.
There is even more evidence that the Wiener property must hold
when $p=2$ and we prescribe large gaps in the Fourier series of
$f$. Indeed, in this case by well-known results of Wiener and
Ingham, see e.g. \cite{W,Z}, we necessarily have an essentially
uniform distribution of the $L^2$ norm on intervals longer than
the reciprocal of the gap, even without the assumption that $f$ be
positive definite. As Zygmund pointed out, see the Notes to
Chapter V \S 9, page 380 in \cite{Z}, Ingham type theorems were
not known for $p\ne 2$, nevertheless, one would feel that
prescribing large gaps in the Fourier series should lead to better
control of the global behavior by means of having control on some
subset like e.g. $(-\varepsilon,\varepsilon)$. So the analogous
Wiener question can be posed restricting to positive definite
functions having gaps tending to $\infty$. However, we answer it
negatively as well. In this strong form the question, to the best
of our knowledge, has not been dealt with yet. Also we are able to
replace the interval $(-\varepsilon, +\varepsilon)$ by any
measurable symmetric subset $E$ of the torus of measure $|E|<1$.
Neither extension can be obtained by a straightforward use of the
methods of Shapiro and Wainger.
\section{$L^2$ results and concentration of integrals}\label{sec:concentration}
We use the notation $\mathbb T:=\mathbb R/\mathbb Z$ for the torus. Then
$e(t):=e^{2\pi i t}$ is the usual exponential function adjusted to
interval length $1$, and we denote $e_h$ the function $e(hx)$.
The set of positive definite trigonometrical polynomials is the
set
\begin{equation}\label{eq:posdefpol}
{\mathcal T}^{+}:=\left\{ \sum_{k\in H}a_k e_k ~:~ H\subset \mathbb Z \quad
(\textrm{or}~~ \mathbb N), ~~ \# H< \infty, \quad a_k\geq 0 ~(k\in H)
\right\}
\end{equation}
For obvious reasons of being convolution idempotents, the set
\begin{equation}\label{eq:idempotents}
\PP:=\left\{ \sum_{h\in H}e_h ~:~ H\subset \mathbb Z \quad (\textrm{or}
~~\mathbb N), ~~ \# H< \infty \right\}
\end{equation}
is called the set of \emph{(convolution-)idempotent exponential
(or trigonometric) polynomials}, or just \emph{idempotents} for
short.
Note that multiplying a polynomial by an exponential $e_K$ does
not change its absolute value, and the property of belonging to
$\PP$ or ${\mathcal T}^{+}$ is not changed either. Therefore, it suffices to
consider polynomials with nonnegative spectrum, i.e. $H\subset
\mathbb N$ in \eqref{eq:posdefpol} and \eqref{eq:idempotents}.
Also note that for a positive definite function the function
$|f|$ is necessarily even. This is why we consider $0$-symmetric
(or, just symmetric for short) intervals or sets, (alternatively,
we could have chosen to restrict to $[0,1/2)$ instead of $\mathbb T$).
Let us first state the theorem on positive definite functions in
$L^2$. Recall that the direct part is attributed to Wiener, with
the constant given by Shapiro in \cite{Sh}. The converse seems to
be well known (see \cite{L,L2}), except, may be, for the fact that
counter-examples may be given by idempotents. The fact that the
Wiener property fails for arbitrary measurable sets is, to the
best of our knowledge, new.
\begin{theorem}[Wiener, Shapiro] \label{th:shapiro} For $p$ an
even integer, for $0<a<1/2$ and for $f\in {\mathcal T}^{+}$, we have the
inequality
\begin{equation}\label{shapiro}
\frac 1{2a}\int_{-a}^{+a}|f|^p\geq \frac 12
\int_{-1/2}^{+1/2}|f|^p.
\end{equation}
Moreover, the constant $1/2$ cannot be replaced by a smaller one,
even when restricting to idempotents. Indeed, for each integer
$k>2$, for $a<1/k$ and for $b>1/k$, there exists an idempotent $f$
such that $\int_{-a}^{+a}|f|^p\leq
b\times \int_{-1/2}^{+1/2}|f|^p$.
\end{theorem}
\begin{proof} We refer to Shapiro for the proof of the
inequality \eqref{shapiro}.
To show sharpness of the constant, let us now give an example,
inspired by the examples of \cite{DPQ}. We take $f:=D_n*\mu_k$,
where $D_n$ is the Dirichlet kernel, defined here as
\begin{equation}\label{eq:Dndef}
D_n(x):=\sum_{\nu=0}^{n-1} e(\nu x) = e^{\pi i(n-1)x/2}
\frac{\sin(\pi n x)}{\sin(\pi x)},
\end{equation}
and $\mu_k$ is the mean of Dirac masses at each $k$-th root of
unity. Both have Fourier coefficients $0$ or $1$, so that $f$ is
an idempotent. Only one of the point masses of $\mu_k$ lies inside
the interval $(-a,+a)$ and one can see that the ratio between
$\int_{-a}^{+a}|f|^p$ and $\int_{-1/2}^{+1/2}|f|^p$ tends to
$1/k$ when $n$ tends to infinity.
\end{proof}
\begin{remark} The interval $(-a,+a)$ cannot be replaced by a
measurable set $E$ having $0$ as a density point, even if $|E|$ is
arbitrarily close to $1$. Indeed, assume that the complement of
$E$ is the union (modulo $1$) of all intervals of radius $1/l^3$
around all irreducible rational numbers $k/l$, with $k$ different
from $0$ and $l>L$. Then $E$ has the required properties, while,
for the same idempotent $f:=D_n*\mu_l$, the ratio between $\int_E|f|^p$ and
$\int_{-1/2}^{+1/2}|f|^p$ tends to $1/l$ when $n$ tends to infinity.
We get our conclusion noting that $l$ may be arbitrarily
large.
\end{remark}
Let us now consider the $p$-concentration problem, which comes
from the following definition.
\begin{definition}
Let $p>0$, and $\mathcal F$ be a class of functions on $\mathbb T$. We
say that for the class $\mathcal F$ there is $p$-concentration if
there exists a constant $c>0$ so that for any symmetric measurable
set $E$ of positive measure one can find an idempotent
$f\in{\mathcal F}$ with
\begin{equation}\label{eq:Lpconcentration}
\int_E |f|^p \geq c \int_\mathbb T |f|^p.
\end{equation}
\end{definition}
The problem of $p$-concentration on the torus for idempotent
polynomials has been considered in \cite{DPQ}, \cite{DPQ2},
\cite{CRMany}. It was essentially solved recently in \cite{BR}.
Also, the weaker question of concentration of $p^{\textrm th}$
integrals of positive definite functions has been dealt with
starting with the works \cite{DPQ,DPQ2}. In this respect we have
proved the following result, see \cite[Theorem 48]{BR}. We will
only state that part of the theorems of \cite{BR} that we will
use.
\begin{theorem}\label{th:concentration} For all $0<p<\infty$, $p$
not an even integer, whenever a $0$-symmetric measurable set $E$
of positive measure $|E|>0$ is given, then to all $\varepsilon>0$
there exists some positive definite trigonometric polynomial
$f\in{\mathcal T}^{+}$
so that
\begin{equation}\label{eq:concentration}
\int_{^cE} |f|^p \leq\varepsilon \int_\mathbb T |f|^p.
\end{equation}
Moreover, $f$ can be taken with arbitrarily large prescribed gaps
between frequencies of its Fourier series.
\end{theorem}
\begin{remark} The same result is also proved for open
symmetric sets and idempotents, and for measurable sets and
idempotents when $p>1$.
\end{remark}
Theorem \ref{th:concentration} allows us to see immediately that
there is no inequality like \eqref{shapiro} for $p$ not an even
integer. What is new, compared to the results of Shapiro and
Wainger, is the fact that this is also the case if $f$ has
arbitrarily large gaps, and that we can replace intervals
$(-a,+a)$ by arbitrary measurable sets of measure less than $1$.
We will give a different statement in the next section for $E$ an
open set, and also show a strong version of the negative solution of
Wiener's problem.
\section{Negative results in Wiener's problem}\label{sec:results}
Let us start with somewhat strengthening the previous theorem for
open sets, which we obtain by an improvement of the methods of
Shapiro in \cite{Sh}.
\begin{theorem}\label{th:strong-conc} For all $0<q\leq p<2$, whenever a $0$-symmetric open set $E$
of positive measure $|E|>0$ is given, then for all $\varepsilon>0$
there exists some positive definite trigonometric polynomial
$f\in{\mathcal T}^{+}$
so that
\begin{equation}\label{eq:strong-conc}
\int_{^cE} |f|^p \leq\varepsilon \left (\int_\mathbb T
|f|^q\right)^{p/q}.
\end{equation}
The same is valid for $q<p$ with $p$ not an even integer, provided
that $q$ is sufficiently close to $p$, that is $q>q(p)$, where
$q(p)<p$.
\end{theorem}
The construction is closely related to the failure of the
Hardy--Littlewood majorant property. We do not know whether, for $p>2$
not an even integer, that is $2k<p<2k+2$, we can take $q(p)=2k$.
Due to Theorem \ref{th:shapiro}, we cannot take $q(p)<2k$. We do
not know either whether the next statement is valid for functions
with arbitrary large gaps.
\begin{proof}
Let us first assume that $p<2$. Then, for $D_n$ the Dirichlet
kernel with $n$ sufficiently large depending on $\varepsilon$,
there exists a choice of $\eta_k=\pm 1$ such that
$$\|D_n\|_p\leq \varepsilon \|\sum_{k=0}^n \eta_k e_k\|_q.$$
Indeed, if it was not the case, taking the $q$-th power,
integrating on all possible signs and using Khintchine's
Inequality, we would find that $c\varepsilon\sqrt n \leq
\|D_n\|_p\leq Cn^{1-\frac 1p}$ ($p>1$), $c\varepsilon\sqrt n \leq
\|D_n\|_1\leq C \log n$ and $c\varepsilon\sqrt n \leq
\|D_n\|_p\leq C$ ($0<p<1$) which leads to a contradiction.
We assume that $E$ contains $I\cup (-I)$, where $I:=(\frac kN,
\frac{k+1}N)$, and denote $$g(t):=\sum_{k=0}^n \eta_k
e_k(t)\hspace{2cm} G(t):=D_n(t).$$ Let $\Delta$ be a triangular
function based on the interval $ (-\frac 1{2N}, +\frac{1}{2N})$,
that is, $\Delta(t):=\left (1-2N|t|\right)_+ $. We finally
consider the function
$$f(t):=\Delta(t-a)g(2Nt)+\Delta(t+a)g(2Nt)+2\Delta(t)G(2Nt),$$ where
$a$ is the center of the interval $I$. Then an elementary
computation of Fourier coefficients, using the fact that $\Delta$
has positive Fourier coefficients while the modulus of those of
$g$ and $G$ are equal, allows to see that $f$ is positive
definite. Let us prove that one has (\ref{eq:strong-conc}). The
left hand side is bounded by $\frac 2N \|G\|_p^p$, while $
\int_\mathbb T |f|^q $ is bounded below by $\frac 1{2N}\|g\|_q^q- \frac
2N \|G\|_q^q$. We conclude the proof choosing $n, N$ sufficiently
large.
Let us now consider $p>2$ not an even integer. Mockenhaupt and
Schlag in \cite{MS} have given counter-examples to the
Hardy--Littlewood majorant conjecture, which are based on the following
property: for $j>p/2$ an odd integer, the two trigonometric
polynomials
$$g_0:=(1+e_j)(1- e_{j+1})\hspace{2cm} G_0:=(1+e_j)(1+ e_{j+1})$$
satisfy the inequality $\|G_0\|_p<\|g_0\|_p$. By continuity, this
inequality remains valid when $p$ is replaced by $q$ in the right
hand side, with $q>q(p)$, for some $q(p)<p$. By a standard Riesz
product argument, for $K$ large enough, as well as $N_1,
N_2,\cdots N_K$, depending on $\varepsilon$, the functions
$$g(t):=g_0(t)g_0(N_1t)\cdots g_0(N_Kt)\ \ \mbox{\rm and}\ \
G(t):=G_0(t)G_0(N_1t)\cdots G_0(N_Kt)$$ satisfy the inequality
$$\|G\|_p\leq \varepsilon \|g\|_q.$$
From this point the proof is identical.
\end{proof}
We can now state in two theorems the counter-examples that we
obtain for the Wiener conjecture when $p$ is not an even integer.
\begin{theorem}\label{th:noWiener} Let $0<p<\infty$, and $p\notin
2\mathbb N$. Then for any symmetric, measurable set $E\subset\mathbb T$ with
$|E|>0$ and any $q<p$, there exists a function $f$ in the Hardy
space $H^q(\mathbb T)$ with positive Fourier coefficients, so that its
pointwise boundary value $f^*$ is in $L^p(^cE)$ while $f^*\notin
L^p(\mathbb T)$. Moreover, $f$ can be chosen with gaps tending to
$\infty$ in its Fourier series.
\end{theorem}
Here
$H^q(\mathbb T)$ denotes the space of periodic distributions $f$ whose
negative coefficients are zero, and such that the function $f_r$
are uniformly in $L^q(\mathbb T)$ for $0<r<1$, where
$$f_r(t):=\sum_{n }\hat f(n)r^{|n|} e^{2i\pi n t}.$$
Moreover, the norm (or quasi-norm) of $f$ is given by
$$\|f\|_{H^q(\mathbb T)}^q:=\sup_{0<r<1}\int_0^1|f_r|^q.$$
It is well known that, for $f\in H^q(\mathbb T)$, the functions $f_r$
have an a. e. limit $f^*$ for $r$ tending to $1$. The function
$f^*$, which we call the pointwise boundary value, belongs to
$L^q(\mathbb T)$. When $q\geq 1$, then $f$ is the distribution defined
by $f^*$, and $H^q(\mathbb T)$ coincides with the subspace of functions
in $L^q(\mathbb T)$ whose negative coefficients are zero. In all cases
the space $H^q(\mathbb T)$ identifies with the classical Hardy space
when identifying the distribution $f$ with the holomorphic
function $\sum_{n\geq 0 }\hat f(n)z^n$ on the unit disc. This
explains the use of the term of boundary value.
The function $f\in H^q$ is said to have gaps (in its Fourier
series) tending to $\infty$ whenever the Fourier series of $f$ can
be written as $\sum_{k=0}^\infty a_k e^{2i\pi n_k x},$ where $n_k$
is an increasing sequence such that $n_{k+1}-n_k\to \infty$ with
$k$.
In contrast to this theorem, recall that for $n_k$ a
\emph{lacunary} series, if the Fourier series is in $L^p(E)$ for
some measurable set $E$ of positive measure, then the function $f$
belongs to all spaces $L^q(\mathbb T)$, see \cite{Z}. This has been
generalized by Miheev \cite{M} to $\Lambda(p)$ sets for $p>2$: if
$f$ is in $L^p(E)$, then $f$ is in the space $L^p(\mathbb T)$. See also
the expository paper \cite{BD}.
\begin{proof}
The key of the proof is Theorem \ref{th:concentration}. Remark
that we can assume that $p>q>1$. Indeed, $f^\ell$ is a positive
definite function when $f$ is, and counter-examples for some $p>1$
will lead to counter-examples for $p/\ell$. Now, let us take a
sequence $E_k$ of disjoint measurable subsets of $E$ of positive
measure, such that $|E_k|<2^{-\alpha k}$, with $\alpha$ to be
given later and let $f_k$ be a sequence of positive definite
trigonometric polynomials such that
\begin{equation}\label{first}
\int_{ \mathbb T\setminus E_k} |f_k|^p \leq 2^{-kp } \int_{\mathbb T} |f_k|^p .
\end{equation}
Moreover, we assume that $f_k$'s have gaps larger than $k$. Using
H\"older's inequality, we obtain
\begin{align*}\label{eq:smallernorm}
\int_{\mathbb T}|f_k|^q \leq 2^{-\alpha (1-q/p) k}\left(\int_{E_k}
|f_k|^p\right)^{q/p}+\left(\int_{\mathbb T\setminus E_k}
|f_k|^p\right)^{q/p} \leq 2\times 2^{-{kq}}\left (\int_{\mathbb T}
|f_k|^p\right)^{q/p},
\end{align*}
if $\alpha$ is chosen large enough. Finally, we normalize the
sequence $f_k$ so that $\int_{\mathbb T} |f_k|^p=2^{\frac k{2}}$, and
take
\begin{equation}\label{series}
f(x):=\sum_{k\geq 1} e^{2i\pi m_k x}f_k(x),
\end{equation}
where the $m_k$ are chosen inductively sufficiently increasing, so
that the condition on gaps is satisfied. The series is convergent
in $L^q(\mathbb T)$ and in $L^p(^cE)$, and the limit $f$ has its Fourier
series given by \eqref{series}. Now, let us prove that $f$ is
not in $L^p(\mathbb T)$. Since the $E_j$'s are disjoint,
$$
\| f\|_{p}\geq \| f\|_{L^p(E_k)} \geq \| f_k\|_{p} - \sum_{j} \|
f_j\|_{L^p(^cE_j)} \geq 2^{\frac k2} - \sum_{j>0} 2^{-\frac j2},
$$ which allows us to conclude.
\end{proof}
Using Theorem \ref{th:strong-conc} instead of Theorem
\ref{th:concentration}, we have the following.
\begin{theorem}\label{th:strong-noWiener}\begin{itemize}\item[(i)]
Let $p>2$, with $p\notin 2\mathbb N$, and let $\ell\in\mathbb N$ such that
$2\ell<p<2(\ell +1)$. Then, for any symmetric open set
$U\subset\mathbb T$ with $|U|>0$ and $q>q(p)$, there exists a positive
definite function $f\in L^{2\ell}(\mathbb T)$, whose negative
coefficients are zero, such that $f\notin L^q(\mathbb T)$ while $f$ is
in $L^p(^cU)$.
\item[(ii)]
Let $0<p<2$. Then
for any symmetric open set $U\subset\mathbb T$ with $|U|>0$
and any $s<q<p$, there exists a function $f$ in the Hardy space
$H^{s}(\mathbb T)$ with non negative Fourier coefficients, so that
$f\notin H^q(\mathbb T)$ while $f^*$ is in $L^p(^cU)$.
\end{itemize}
\end{theorem}
\begin{proof} Let us first prove $(i)$. We can assume that $^cU$ contains a neighborhood of $0$. So, by Wiener's property,
if $f$ is integrable and belongs to $L^p(^cU)$, then $f$ is in
$L^{2\ell}(\mathbb T)$. Let us prove that there exists such a function,
whose Fourier coefficients satisfy the required properties, and
which does not belong to $L^q(\mathbb T)$. The proof follows the same
lines as in the previous one. By using Theorem
\ref{th:strong-conc}, we can find positive definite polynomials
$f_k$ such that $\|f_k\|_q=2^{k/2}\to \infty$, while $\|
f_k\|_{L^p(^cU_k)}\leq 2^{-k}$ with $U_k\subset U$ disjoint and of
sufficiently small measure, so that
$\sum\|f_k\|_{L^p(^cU)}<\infty$. As before, the function $
f:=\sum_{k\geq 1} e_{ m_k}f_k$ will have the required properties.
Let us now consider $1\leq p<2$, from which we conclude for
$(ii)$: if $p< 1$, we look for a function of the form $f^{\ell}$,
with $f$ satisfying the conclusions for $\ell p$, with $\ell$ such
that $1\leq \ell p<2$. We can assume that $q< 1$. We proceed as
before, with $f_k$'s given by Theorem \ref{th:strong-conc}, such
that $\|f_k\|_q=2^{k/2}$ and $\| f_k\|_{L^p(^cU_k)}\leq
2^{-k/2}$. The $U_k$'s are assumed to be disjoint and of small
measure, so that $\sum_k \|f_k\|_{H^s}^s<\infty$. It follows
that $f\in H^s(\mathbb T)$. Remark that
$f$ is not a function, in general, but a distribution. Recall that
$f^*$ is the boundary value of the corresponding holomorphic
function. We write as before
$$
\| f\|_{H^q(\mathbb T)}^q\geq \| f^*\|_{L^q(U_k)}^q \geq \| f_k\|_{q}^q
- \sum_{j} \| f_j\|_{L^q(^cU_j)}^q \geq 2^{\frac {kq}2} -
\sum_{j>0} 2^{-\frac {jq}2},
$$ which allows us to conclude that $f$ is not in
$H^q(\mathbb T)$.
\begin{remark} As Wainger in \cite{Wa}, we can prove a little more:
the function $f$ may be chosen such that $\sup_{r<1}|f_r|$ is in
$L^p(^cU)$. Let us give the proof in the case $(i)$. We can assume
that $U$ may be written as $I\cup(-I) $
for some interval $I$. Let $J$ be the interval of same center and
length half, and take $f$ constructed as wished, but for the open
set $J\cup(-J)$. Finally, write $f=\phi+\psi$, with $\phi:=f\reb{Change }i_{^c\left(J\cup
(-J)\right)}$. Then using the maximal theorem we know that $\sup_{r<1}|\phi_r|\in
L^p(\mathbb T)$, while the Poisson kernel $P_t(x-y)$ is uniformly bounded
for $x\notin U$ and $y\in J\cup(-J)$, so that $\sup_{r<1}|\psi_r|$ is uniformly bounded outside $U$.
In the case $(ii)$, the proof is more technical, $f$ being only a
distribution. We use the fact that derivatives
of the Poisson kernel $P_t(x-y)$ are also uniformly bounded
for $x\notin U$ and $y\in J\cup(-J)$.
\end{remark}
\end{proof}
\end{document} |
\begin{document}
\title{Reliable Frequency Regulation through Vehicle-to-Grid:\ Encoding Legislation with Robust Constraints}
\begin{abstract}
Vehicle-to-grid increases the low utilization rate of privately owned electric vehicles by making their batteries available to electricity grids. We formulate a robust optimization problem that maximizes a vehicle owner's expected profit from selling primary frequency regulation to the grid and guarantees that market commitments are met at all times for all frequency deviation trajectories in a functional uncertainty set that encodes applicable legislation. Faithfully modeling the energy conversion losses during battery charging and discharging renders this optimization problem non-convex. By exploiting a total unimodularity property of the uncertainty set and an exact linear decision rule reformulation, we prove that this non-convex robust optimization problem with functional uncertainties is equivalent to a tractable linear program.
Through extensive numerical experiments using real-world data, we quantify the economic value of vehicle-to-grid and elucidate the financial incentives of vehicle owners, aggregators, equipment manufacturers, and regulators. We find that the prevailing penalties for non-delivery of promised regulation power are too low to incentivize vehicle owners to honor their promises toward grid operators.
\textbf{Keywords:} Vehicle-to-Grid, Frequency Regulation, Energy Storage, Energy Economics, Robust Optimization, Continuous-Time Linear Programming.
\end{abstract}
\section{Introduction}
Replacing internal combustion engine vehicles with electric vehicles reduces urban air pollution and mitigates climate change if electricity is generated from renewable sources~\citep{DS94}. In general, privately owned vehicles are a vastly underutilized resource. Vehicle usage data collected by the US~\cite{NHTS17} shows that on an average day over $90$\% of all privately owned vehicles are parked at any one time---even during peak rush hour. Since electricity grids require storage capacity to integrate increasing amounts of intermittent wind and solar power, electric vehicle owners could capitalize on their batteries by offering storage to the electricity grid when their vehicles are parked. \cite{WK97} term this idea \textit{vehicle-to-grid}.
R\'eseau de transport d'\'electricit\'e (RTE), Europe's largest transmission system operator, expects to need an additional flexible generation and electricity storage capacity of $10$GW to $20$GW by 2035. This corresponds to $7.5$\% to $15$\% of the total French electricity generation capacity in 2017~(RTE~\citeyear{RTE17b, RTE17c}). If electric vehicles were to provide some of this flexibility, then the vehicles and the electricity grid could share the costs of electric vehicle batteries. \cite{WK05a} and \cite{LN19} have identified \textit{primary frequency regulation}\footnote{Primary frequency regulation is also known as primary frequency control and as frequency containment reserves.} as one of the most profitable flexibility services for vehicle-to-grid. Electric vehicles that provide this service must maintain a continuous power flow to the vehicle battery that is proportional to the deviation of the instantaneous grid frequency from its nominal value (\textit{e.g.}, 50Hz in Europe). As primary frequency regulation is the first flexibility service used to stabilize the electricity network after disturbances~\citep{YR07a}, its provision must be highly reliable. However, RTE questions the reliability of vehicle-to-grid~(RTE~\citeyear{RTE17b}). The~\cite{EU17} has recently addressed this concern by defining a minimum level of reliability that electric vehicles and other providers of frequency regulation must guarantee. Specifically, it demands that providers must be able to deliver regulation power for all frequency deviation trajectories with certain characteristics.
Adopting the perspective of a vehicle owner, we formulate an optimization model for determining the bidding strategy on the regulation market that maximizes the expected profit from selling primary frequency regulation to the transmission system operator under the reliability constraints imposed by the European Commission. These constraints must hold {\em robustly} for all frequency deviation trajectories in an uncertainty set consistent with applicable legislation. As these trajectories constitute continuous-time functions, we are confronted with a robust optimization problem \rev{with functional uncertainties}. Moreover, the impossibility of simultaneously charging and discharging the battery---which amounts to dissipating energy through conversion losses and could be profitable when the battery is full and there is a reward for down-regulation (see, {\em e.g.}, \cite[p.~84]{JAT15})---renders the optimization problem non-convex. The main theoretical contribution of this paper is to show that the resulting non-convex robust optimization problem \rev{with functional uncertainties} is equivalent to a tractable linear program. Specifically, this paper makes the following methodological contributions to robust optimization (see \cite{AB09} for a textbook introduction).
\begin{itemize}
\item We introduce new {\em uncertainty sets in function spaces} that capture those frequency deviation trajectories for which regulation providers must be able to deliver all promised regulation power. These uncertainty sets are reminiscent of the {\em budget uncertainty sets} by \cite{DB04} in finite-dimensional spaces, and their construction is inspired by EU legislation.
\item By leveraging a {\em total unimodularity property} of the proposed uncertainty sets and an {\em exact linear decision rule reformulation}, we prove that the worst-case frequency deviation scenarios in all (convex or non-convex) robust constraints of the vehicle owner's optimization problem can be found by solving continuous linear programs, which can be viewed as variants of the so-called {\em separated continuous linear programs} introduced by \cite{EA83}.
\item By demonstrating that all these continuous linear programs are solved by piecewise constant frequency deviation trajectories, we show that the vehicle owner's robust optimization problem in continuous time is equivalent to a {\em robust optimization problem in discrete time}. In doing so, we use more direct proof techniques than \cite{MP95}, who derived sufficient conditions under which the solutions of separated continuous linear programs are piecewise constant.
\item The robust optimization problem obtained by time discretization is still non-convex. Using the structural properties of its (discretized) uncertainty sets and of its objective and constraint functions, however, we can prove that it is equivalent to a {\em linear} robust optimization problem that can be reformulated as a {\em tractable linear program} via standard techniques.
\end{itemize}
To our best knowledge, robust optimization models with uncertainty sets embedded in function spaces have so far only been considered in the context of robust control, where the primary goal is to develop algorithms for evaluating conservative approximations \citep{BH11}, \rev{and in the context of robust continuous linear programming, where the primary goal is to reduce robust to {\em non}-robust continuous linear programs, which can be addressed with existing algorithms \citep{ghate20}. In contrast, we study here a non-convex robust optimization problem with functional uncertainties that admits a lossless time discretization and can be reformulated {\em exactly} as a tractable linear program. Remarkably, the state-of-the-art methods for solving the deterministic counterparts of this robust optimization problem are based on methods from mixed-integer linear programming. To our best knowledge, we thus describe the first class of practically relevant mixed-integer linear programs that simplify to standard linear programs through robustification.
}
As the emerging linear programs are amenable to efficient numerical solution, we are able to perform extensive numerical experiments based on real-world data pertaining to the French electricity system. We define the~\emph{value of vehicle-to-grid} as the profit from selling primary frequency regulation relative to a baseline scenario in which the vehicle owner does not offer grid services. \rev{As our optimization model faithfully captures effective legislation, it enables us to quantify the true value of vehicle-to-grid. This capability is relevant for understanding the economic incentives of different stakeholders such as vehicle owners, aggregators, equipment manufacturers, and regulators. The model developed in this paper enables us to assess} how the value of vehicle-to-grid depends on the penalties for non-delivery of promised regulation power, the size of the uncertainty set, and the vehicle's battery, charger, and mileage. We thus contribute to the growing literature on the impact of contract parameters on electricity storage~\citep{GB17, NS19}. The main insights drawn from our computational experiments
can be summarized as follows.
\begin{itemize}
\item Based on 2016--2019 data, we show that the value of vehicle-to-grid attainable with a bidding strategy that is {\em guaranteed} to satisfy all reliability requirements is around $100$\EUR{} per year \rev{and vehicle}. Earlier studies based on anticipative bidding strategies that may violate the legal requirements in practice have estimated this value to be four times higher~\citep{PC15, OB19}.
\item We find a similar value of vehicle-to-grid as \cite{PC15} and \cite{OB19} if the vehicle owner risks financial penalties for ignoring the legal reliability requirements. This suggests that \emph{current penalties are too low} to incentivize vehicle owners to respect the law.
\item \rev{We show that the value of vehicle-to-grid saturates at daily plug times above 15 hours. Thus, maximal profits from frequency regulation can be reaped even if the vehicle is disconnected from the grid up to 9 hours per day. This means that vehicle owners still enjoy considerable flexibility as to when to drive, which could help to promote the adoption of vehicle-to-grid.}
\end{itemize}
Beyond vehicle-to-grid, this paper contributes to the literature on the optimal usage of energy storage assets. The value of a storage asset is usually identified with the profit that can be generated through arbitrage by trading the stored commodity on spot or forward markets. If trading is restricted to the spot market and prices are Markovian, it is known that the asset's value is maximized by a basestock policy \citep{NS10}. If the commodity is also traded on forward markets, then the high dimensional models of forward curve evolution lead to intractable Markov decision processes that can be addressed with approximate dynamic programming methods \citep{SN15}. For systems of interconnected storage assets with large capacities such as hydroelectric reservoirs, medium-term planning over several months or years is necessary. The resulting optimization problems are traditionally addressed with stochastic dynamic programming \citep{WY85} or stochastic dual dynamic programming \citep{PP91}. Alternatively, \cite{GP05} use a two-layer dynamic programming method to optimize the participation of a hydroelectric reservoir in a spot market, where the inner layer maximizes the expected revenues over a stage, which comprises several trading intervals, for a fixed mean and variance of water release over the stage, while the outer layer optimizes the mean and variance of water release over the stages. More recently, \cite{LWM13} combine ideas from stochastic dual dynamic programming and approximate dynamic programming for optimizing the forward trading decisions of hydro storage~systems.
Unlike traditional centralized storage assets, decentralized storage assets such as electric vehicles are usually connected to distribution rather than transmission grids. This means that they face retail and not wholesale electricity prices. While wholesale prices are determined by market mechanisms and thus stochastic, retail prices are often regulated and thus deterministic. Another major difference is that it may take several days to fully charge or discharge centralized storage assets such as hydropower plants, whereas the batteries of electric vehicles can be fully charged and discharged in just a few hours. A daily planning horizon is therefore sufficient for optimizing their usage. In addition, typical vehicle owners can anticipate their driving needs at most one day in advance. One can thus solve the storage management problem in a receding horizon fashion.
The state-of-charge of a vehicle battery depends non-linearly on the power in- and outflows, which leads to non-convex optimization models. If the battery is merely used for arbitrage and market prices are non-negative, then these optimization models admit exact convex relaxations. Conversely, if the battery is used for frequency regulation or if market prices can fall below zero, then a non-convex constraint is needed to prevent the models from dissipating energy by simultaneously charging and discharging the battery~\citep{YZ16}. If energy conversion losses are negligible and the battery state-of-charge is thus linear in the power flows, then one can model the provision of frequency regulation through adjustable uncertainty sets. Such an approach has been proposed by \cite{XZ17} for frequency regulation with building appliances. A stochastic dynamic programming scheme for optimizing the charging and discharging policy of an electric vehicle with linear battery dynamics is proposed by \cite{JD14}. If energy conversion losses are significant, however, one may still approximate the state-of-charge by a linear decision rule of the uncertain frequency deviations~\citep{JW13}. \cite{ES12} study a similar model under the assumption of perfect foresight.
In practice, several hundreds or thousands of electric vehicles must be aggregated to be able to bid enough reserve power to qualify for participation in the frequency regulation market. \cite{CG09b}, \cite{SH10}, \cite{GW18} and \cite{YZ20} develop frameworks for controlling the batteries of aggregated vehicles, while the design of contracts between aggregators and vehicle owners is examined by \cite{SH11} and \cite{GB17}. The policy implications for the market entry of electric vehicle aggregators are investigated by \cite{OB18b}. Yet the study of vehicle-to-grid schemes for individual vehicles remains relevant because they constitute important building blocks for aggregation schemes and because they still pose many challenges---especially when it comes to faithfully modeling all major sources of uncertainty.
The model developed in this paper is most closely related to the discrete-time robust optimization models by \cite{EY17} and \cite{EN19}, which capture the uncertainty of the frequency deviations through simplicial uncertainty sets that cover all empirical frequency deviation scenarios. However, these uncertainty sets may fail to include unseen future frequency deviation scenarios and are inconsistent with applicable EU legislation. While \cite{EY17} disregard energy conversion losses, \cite{EN19} account for them heuristically and test the resulting charging and discharging policies experimentally on a real battery. Heuristics are also common in pilot projects that demonstrate the use of vehicle-to-grid for frequency regulation \citep{SV13, SV20}.
The model proposed in this paper relies on three simplifying assumptions that we justify below.
Our first key assumption is that the provision of frequency regulation has no negative impact on battery lifetime---even though the fear of battery degradation has been identified as a major obstacle to the widespread adoption of vehicle-to-grid \citep{EEVC17}. To justify this assumption, we point out that the impact of vehicle-to-grid on battery longevity is not yet well understood. In fact, \cite{MD17} claim that such degradation is severe, while \cite{KU17} claim that vehicle-to-grid may actually extend battery lifetime. In \citep{KU18}, the authors of these two studies reconcile their contradictory findings by concluding that the impact of vehicle-to-grid depends on the operating conditions of the battery, such as its temperature and variations in its state-of-charge. We further justify our no-degradation assumption by restricting the battery state-of-charge to lie within 20\% and 80\% of the nominal battery capacity. \cite{AT18} suggests these restrictions as a rule of thumb for extending the lifetime of common lithium-ion batteries, and \cite{TS17} adopt similar rules to optimize recharging policies of electric vehicles. Models that account for battery degradation are studied by \cite{GH16} and \cite{PC19}.
Our second key assumption is that vehicle owners can specify time and energy windows for their driving needs one day in advance. This assumption makes sense for commuters who adhere to predictable daily routines, for example.
The third key assumption is that \rev{the vehicle owners are price takers who influence neither the market prices nor the grid frequency.
This assumption is reasonable because one vehicle may cover at most several kilowatts of the 700~megawatts required for frequency regulation in France. A model of a regulation provider influencing the grid frequency is described by~\cite{PM09}.}
The paper proceeds as follows. Section~\ref{sec:Prob_Des} formulates the vehicle owner's decision problem for a single day as a non-convex robust program with functional uncertainties. In Sections~\ref{sec:time_dis} and~\ref{sec:LPR} we show that this problem can be reformulated equivalently as a non-convex robust program with vectorial uncertainties and even as a tractable linear program, respectively.
Section~\ref{sec:multi-stage} formulates a decision problem that looks several days into the future and shows that the resulting multistage model is still equivalent to a linear program.
Numerical experiments are discussed in Section~\ref{sec:NumEx}, and policy insights are distilled in Section~\ref{sec:conclusions}. All proofs are relegated to the appendix.
\paragraph*{Notation.} All random variables are designated by tilde signs. Their realizations are denoted by the same symbols without tildes. Vectors and matrices are denoted by lowercase and uppercase boldface letters, respectively. For any $z \in \mathbb{R}$, we define $[ z ]^+ = \max \{z,0\}$ and $[z]^- = \max\{-z,0\}$ such that $z=[z]^+-[z]^-$. The intersection of a set~$\set{A} \subseteq \mathbb R^d $ with~$\mathbb R^d_+$ is denoted by~$\set{A}^+$. For any closed intervals $\set{T},\set{U} \subseteq \mathbb R$, we define~$\set{L}(\mathcal{T},\set{U})$ as the space of all Riemann integrable functions~$f: \mathcal{T} \to \set{U}$, and we denote the intersection of \rev{a set}~$\set{B} \subseteq \set{L}(\mathcal{T},\mathbb R)$ with~$\set{L}(\mathcal{T},\mathbb R_+)$ as~$\set{B}^+$.
\section{Problem Description}\label{sec:Prob_Des}
Consider an electric vehicle whose state at any time~$t$ is characterized by the amount of energy~$y(t)$ stored in its battery and the instantaneous power consumption for driving~$d(t)$. We require that $y(t)$~is never smaller than~$\ubar{y}$ and never larger than~$\bar{y}$. To mitigate battery degradation, we set these limits to~20\% and~80\% of the nominal battery capacity, respectively. The battery interacts with the power grid through a bidirectional charger with charging efficiency~$\eta^+ \in (0,1]$ and discharging efficiency $\eta^- \in (0,1]$, where an efficiency of~$1$ corresponds to a lossless energy~conversion between the grid and the battery. The charger is further characterized by its maximum power consumption~$\bar{y}^+(t)$ from the grid and its maximum power provision to the grid~$\bar{y}^-(t)$. The power the battery can charge or discharge is therefore limited by~$\eta^+\bar{y}^+(t)$ and~$\frac{1}{\eta^-}\bar{y}^-(t)$, respectively. Note that~$\bar{y}^+(t)$ and~$\bar{y}^-(t)$ depend on the charger to which the vehicle is connected at time~$t$. When the vehicle is not connected to any charger, \textit{e.g.}, when it is driving, then both~$\bar{y}^+(t)$ and~$\bar{y}^-(t)$ must vanish. A stationary battery can be modeled by setting $d(t)=0$ and keeping $\bar{y}^+(t)$ and $\bar{y}^-(t)$ constant for all $t$.
In order to charge the battery at time $t$, the vehicle owner may buy power~$x^b(t)$ from the local utility at a known time-varying price~$p^b(t)$ as is the case under dynamic pricing schemes or day/night tariffs. In addition, she may also use the vehicle battery to earn extra revenue by providing primary frequency regulation, which can be viewed as an insurance bought by the transmission system operator (TSO) to balance unforeseen mismatches of electricity demand and supply in real time~\citep{JDG10}. If there is more supply than demand, the frequency of the power grid rises. Conversely, if there is more demand than supply, the frequency falls. A battery owner offering regulation power~$x^r(t)$ at time~$t$ is obliged to
\rev{change}
her nominal power consumption~$x^b(t)$ from the grid by~$\delta(t) x^r(t)$, where~$\delta(t)$ quantifies the normalized deviation of the instantaneous grid frequency~$f(t)$ from its nominal value~$f_0$~\citep{RTE09}. Formally, we have
\begin{equation*}
\delta(t) = \left\{ \begin{array}{ll}
+1 & \text{if } f(t) > f_0 + \Delta f, \\
\frac{f(t)-f_0}{\Delta f} & \text{if } f_0 - \Delta f \leq f(t) \leq f_0 + \Delta f, \\
-1 & \text{if } f(t) < f_0 - \Delta f,
\end{array}\right.
\end{equation*}
where $\Delta f > 0$ is a threshold beyond which all promised regulation power must be delivered.
The TSO contracts frequency regulation as an insurance over a prescribed planning horizon of length~$T$, \textit{e.g.}, one day. \rev{The planning horizon is subdivided into trading intervals~$\mathcal{T}_k=[ (k-1)\Delta t, k \Delta t )$ for all~$k \in \mathcal{K} = \{1,\ldots,K\}$, where $K = \frac{T}{\Delta t} \in \mathbb{N}$. In the French electricity market, for example, the length $\Delta t$ of a trading interval is $30$~minutes.
The TSO requests the vehicle owner to announce the market decisions}
$x^b(t)$ and~$x^r(t)$ before the beginning of the planning horizon, \textit{e.g.}, one day ahead at noon~\citep{RTE17}.
\rev{These decisions need to be piecewise constant over the trading intervals.}
The TSO compensates the vehicle owner for the frequency regulation~$x^r(t)$ made available at the {\em availability price}~$p^a(t)$ and charges her for the increase~$\delta(t) x^r(t)$ in her power consumption at the {\em delivery price}~$p^d(t)$. Note that this charge becomes negative (\textit{i.e.}, it becomes a remuneration) if $\delta(t)$ is negative. In summary, the vehicle owner's total cost over the planning horizon~$\mathcal{T} = [0,T]$ amounts to
\begin{equation*}
\int_0^T p^b(t) x^b(t) - \left( p^a(t) \rev{-} \delta(t) p^d(t)\right) x^r(t) \, \mathrm{d}t.
\end{equation*}
The impact of providing frequency regulation on the battery state-of-charge depends on how the vehicle owner adjusts the power consumed from and the power injected into the grid to achieve the desired net power consumption $x^b(t) + \delta(t) x^r(t)$. The most energy-efficient way is to avoid unnecessary energy conversion losses resulting from simultaneously charging and discharging. Sometimes, however, such losses can be attractive, for example if the battery is almost full and receives a request for down-regulation~($ \delta(t) > 0$). \cite{YZ16} show that energy losses can also be attractive when electricity prices are negative. Since common chargers are not able to simultaneously charge and discharge, we forbid this option and set the charging rate to
\begin{subequations}
\begin{equation}
y^+\left(x^b(t),x^r(t),\delta(t)\right) = \left[ x^b(t) + \delta(t) x^r(t) \right]^+ \label{eq:y+}
\end{equation}
and the discharging rate to
\begin{equation}
y^-\left(x^b(t),x^r(t),\delta(t)\right) = \left[ x^b(t) + \delta(t) x^r(t) \right]^-. \label{eq:y-}
\end{equation}
\end{subequations}
\begin{Rmk}
When operating a vehicle fleet, some vehicles could charge while others discharge, which suggests that the regulation profits achievable with $n$~vehicles may exceed the regulation profit of a single vehicle multiplied by~$n$. In this paper, we focus on the case $n=1$.
$\Box$
\end{Rmk}
The power exchanged with the grid and the power needed for driving determine the battery state-of-charge at any time $t$ via the integral equation
\begin{equation}
\label{eq:Inte}
y\left(x^b,x^r,\delta,y_0,t \right) = y_0 + \int_{0}^{t} \eta^+ y^+\left(x^b(t'),x^r(t'),\delta(t')\right) - \frac{y^-\left(x^b(t'),x^r(t'),\delta(t')\right)}{\eta^-} - d(t') \, \mathrm{d}t',
\end{equation}
where~$y_0$ represents the state-of-charge at time~$0$. For later use, we establish here some basic properties of the battery state-of-charge.
\begin{Prop}\label{Prop:y}
Holding all other factors fixed, the battery state-of-charge $y(x^b,x^r,\delta,y_0,t)$ is concave nondecreasing in $x^b$, concave in $x^r$, concave nondecreasing in $\delta$, and affine nondecreasing in $y_0$.
\end{Prop}
At the time when the vehicle owner needs to choose and report the market commitments $x^b(t)$ and $x^r(t)$, she has no knowledge of the uncertain future frequency deviations $\delta(t)$ and the delivery prices $p^d(t)$ at time $t \in \set{T}$.
In addition, she has no means to predict the battery state-of-charge $y_0$ at the beginning of the planning horizon, which depends on market commitments chosen on the previous day and on the uncertain frequency deviations to be revealed until time $0$. By contrast, the availability prices~$p^a(t)$ for~$t \in \mathcal{T}$ can be assumed to be known at the planning time. In practice, these prices are determined by an auction. As the vehicle owner bids an offer curve expressing~$x^r(t)$ as a function of~$p^a(t)$ for any~$t \in \mathcal{T}$, it is as if the availability prices were known upfront.\footnote{The bidding process is described at \url{https://www.entsoe.eu/network_codes/eb/fcr/}.} \rev{Next, we describe the information that is available about the uncertain problem parameters~$\delta$, $p^d$, and~$y_0$.}
We first discuss the uncertainty in the frequency deviations, which limits the amount of reserve power that can be sold on the market. Indeed, the vehicle owner must ensure that the battery state-of-charge will never drop below $\ubar{y}$ or exceed $\bar{y}$ when the TSO requests down-regulation ($\delta(t) < 0$) or up-regulation ($\delta(t) > 0$), respectively, for a prescribed set of conceivable frequency deviation scenarios. Otherwise, the vehicle owner may not be able to honor her market commitments, in which case the TSO may charge a penalty or even ban her from the market.
The TSO defines under what conditions regulation providers must be able to deliver the promised regulation power, keeping in mind that extreme frequency deviations are uncommon. Indeed, between 2015 and 2018 the frequency deviation $\delta(t)$ has never attained its theoretical maximum of~$1$ or its theoretical minimum of~$-1$ in the French market.\footnote{The French TSO publicizes frequency measurements at \url{http://clients.rte-france.com/}.} In the following, we thus assume that the vehicle owner needs to guarantee the delivery of regulation power \emph{only} for frequency deviation scenarios within the uncertainty set
\begin{equation*}
\mathcal{D} = \left\{ \delta \in \mathcal{L} \left( \mathcal{T}, \left[-1,1\right] \right) :
\int_{\left[t-\Gamma \right]^+}^{t} \left\vert \delta(t') \right\vert \, \mathrm{d}t' \leq \gamma \quad \forall t \in \mathcal{T}\right\}
\end{equation*}
parametrized by the duration~$\Gamma \in \mathbb R_+$ of a \emph{regulation cycle} and the duration~$\gamma \in \mathbb R_+$ of an \emph{activation period}. Throughout this paper, we assume that $0 < \gamma \leq \Gamma \leq T$. By focusing on frequency deviation scenarios in~$\mathcal{D}$, one stipulates that consecutive extreme frequency deviations $\delta(t) \in \{-1,1\}$ can occur at most over one activation period within each regulation cycle. The \emph{activation ratio}~$\gamma/\Gamma$ can thus be interpreted as the percentage of time during which the vehicle owner must be able to deliver all committed reserve power.
\begin{Rmk}\label{Rmk:setD}
Note that the uncertainty set $\mathcal{D}$ grows with $\gamma$ and shrinks with $\Gamma$.
$\Box$
\end{Rmk}
Besides displaying favorable computational properties, the uncertainty set~$\mathcal{D}$ has conceptual appeal because it formalizes the delivery guarantee rules prescribed by the~\cite{EU17}. These rules stipulate that the ``\textit{minimum activation period to be ensured by} [frequency regulation] \textit{providers }[is not to be] \textit{greater than $30$~or smaller than $15$~minutes.}'' This guideline prompts us to set~$\gamma = 30$~minutes. The EU further demands that regulation providers ``\textit{shall ensure the recovery of} [their] \textit{energy reservoirs as soon as possible, within $2$~hours after the end of the alert state.}'' This means that, although there may be several activation periods of $30$~minutes within any $2.5$~hour interval, the regulation provider only has to cover one of them. Thus, we set~$\Gamma = 2.5$~hours.
In the following, we compare the empirical distribution of the daily variance of~$\delta$ between the years 2017 and 2019 with the maximum variance that can be achieved by any hypothetical frequency deviation scenario $\delta \in \mathcal{D}$ for a planning horizon of one day. By slight abuse of notation, we define the variance of a frequency deviation scenario $\delta$ with respect to zero as $\mathrm{Var}(\delta) = \frac{1}{T} \int_{0}^{T} \delta(t)^2 \, \mathrm{d}t$. This is justified because the TSO protects the system against \emph{unforeseen} demand and supply fluctuations, which means that the frequency deviations should be unbiased and thus vanish on average. Indeed, the empirical frequency deviations have an average of $5.98\cdot 10^{-4}$.
\begin{Prop}\label{Prop:Var} If $\delta \in \mathcal{D}$, then $\mathrm{Var}(\delta) \leq \ceil{T/\Gamma} \gamma/T$.
\end{Prop}
Figure~\ref{fig:Daily_StdDev} shows that if $T=1$ day, $\gamma = 30$ minutes, and $\Gamma = 2.5$ hours, then the maximum standard deviation of any~$\delta \in \mathcal{D}$ exceeds the maximum empirical standard deviation by a factor of~$2.5$. Thus, $\mathcal{D}$ contains extreme frequency deviation scenarios with unrealistically high variance.
\begin{figure}
\caption{Empirical cumulative distribution function (cdf) of the daily standard deviation of~$\delta$ and the maximum standard deviation of any scenario in~$\hat{\mathcal{D}}$.}
\label{fig:Daily_StdDev}
\end{figure}
The optimization model developed below not only involves the conservative uncertainty set $\mathcal{D}$ compatible with the guidelines of the European Commission but also a smaller uncertainty set
\begin{equation*}
\set{\hat{D}} = \left\{ \delta \in \mathcal{L} \left( \mathcal{T}, \left[-1,1\right] \right) :
\int_{[t - \hat{\Gamma}]^+}^t \left\vert \delta(t') \right\vert \, \mathrm{d}t' \leq \hat{\gamma} ~\forall t \in \mathcal{T} \right\}
\end{equation*}
parametrized by $\hat{\Gamma} \geq \Gamma$ and $\hat{\gamma} \leq \gamma$. This uncertainty set contains only frequency deviation scenarios that are likely to materialize under normal operating conditions. Note that $\hat{\mathcal{D}}$ is obtained from $\mathcal{D}$ by inflating $\Gamma$ to $\hat{\Gamma}$ and shrinking $\gamma$ to $\hat{\gamma}$. By Remark~\ref{Rmk:setD}, we may thus conclude that $\hat{\mathcal{D}}$ is indeed a subset of $\mathcal{D}$. While the pessimistic uncertainty set $\mathcal{D}$ is used to enforce the stringent delivery guarantees imposed by the European Commission, the more optimistic uncertainty set $\hat{\mathcal{D}}$ is used to model a softer reachability guarantee for the terminal state-of-charge. In the numerical experiments we will set $\hat{\Gamma} = T = 1$~day and $\hat{\gamma} = \gamma = 30$~minutes. By Proposition~\ref{Prop:Var}, the variance of all frequency deviation scenarios in $\hat{\mathcal{D}}$ is therefore bounded above by $\Delta t/T = 1/48$. Empirically, this threshold exceeds the variance of the frequency deviation on $99.2\%$~of all days in the years from 2017 to 2018.
Next, we discuss the uncertainty in the initial battery state-of-charge $y_0$. Recall that $y_0$ is uncertain at the time when $x^b$ and $x^r$ are chosen because it depends on how much regulation energy must be provided until the beginning of the planning horizon. This quantity depends itself on uncertain frequency deviations that have not yet been revealed. We assume that the vehicle owner constructs two confidence intervals $\set{Y}_0 = [\ubar{y}_0, \bar{y}_0]$ and $\hat{\set{Y}}_0 = [\ubar{\hat{y}}_0, \hat{\bar{y}}_0]$ for $y_0$, either taking into account all frequency deviations under which she must imperatively be able to deliver regulation power or only those frequency deviations that are likely to occur under normal operating conditions.
The only assumption we make about the uncertainty in the delivery price $p^d$ is that the vehicle owner can reliably estimate the \emph{expected} regulation price $p^r(t) = p^a(t) - \E[\tilde\delta(t) \tilde p^d (t)]$.
We are now ready to formalize the vehicle owner's decision problem for selecting the market decisions \rev{$x^b$ and $x^r$}. The primary objective is to minimize the expected cost
\begin{align}
c(x^b,x^r) = \E \int_{\mathcal{T}} p^b(t) x^b(t) - \left( p^a(t) - \tilde \delta(t) \tilde p^d(t) \right) x^r(t) \, \mathrm{d}t = \int_{\mathcal{T}} p^b(t) x^b(t) - p^r(t) x^r(t) \, \mathrm{d}t, \label{eq:obj}
\end{align}
while ensuring that $x^b$ and $x^r$ are robustly feasible across all frequency deviation scenarios $\delta \in \mathcal{D}$ and initial battery states $y_0 \in \set{Y}_0$. Mathematically, the charging rate $y^+(x^b(t),x^r(t),\delta(t))$, the discharging rate $y^-(x^b(t),x^r(t),\delta(t))$, and the battery state-of-charge $y(x^b,x^r,\delta,y_0,t)$ must therefore satisfy the robust constraints
\begin{empheq}[right=\empheqrbrace \text{$\forall t \in \mathcal{T} \text{, } \forall \delta \in \mathcal{D} \text{, } \forall y_0 \in \set{Y}_0 \text{.}$}]{align*}
y^+(x^b(t),x^r(t),\delta(t)) \leq \bar{y}^+(t), \qquad & y(x^b,x^r,\delta, y_0, t) \leq \bar{y},\\
y^-(x^b(t),x^r(t),\delta(t)) \leq \bar{y}^-(t), \qquad& y(x^b,x^r,\delta, y_0, t) \geq \ubar{y}
\end{empheq}
As the vehicle owner continues to use the vehicle for driving and for offering grid services after the end of the planning horizon, the battery should end up in a state that is ``\textit{conducive to satisfactory future operations}'' \citep{WY85}. \rev{Consequently, the vehicle owner aims to steer $y(x^b,x^r,\delta,y_0,T)$ to a desirable state-of-charge~$y$. We assume that the cost-to-go of any~$y\in [\ubar y, \bar y]$ is quantified by a convex and piecewise affine value function~$\varphi(y)=\max_{n \in \set{N}} \{ a_n y + b_n\}$ determined by~$a_n,b_n\in\mathbb R$ for all~$n \in \set{N} = \{1, \ldots, N\}$. In Section~\ref{sec:multi-stage}, we will present a principled approach to calibrate these coefficients by solving a dynamic programming problem over multiple periods of length~$T$.}
As $y_0$ and $\delta$ are uncertain, \rev{the terminal state-of-charge is also uncertain}. To trade off present versus future costs, it is therefore reasonable to minimize \rev{$\varphi(y(x^b,x^r,\delta,y_0,T))$} in view of the worst of all scenarios $\delta \in \hat{\mathcal{D}}$ and $y_0 \in \hat{\set{Y}}_0$. This can be achieved by adding the term \rev{$\max_{\delta \in \hat{\mathcal{D}}} \max_{y_0 \in \hat{\set{Y}}_0} \varphi(y(x^b,x^r,\delta,y_0,T))$} to the objective function~\eqref{eq:obj}.
In summary, the vehicle owner's decision problem can be cast as \rev{the following robust optimization problem with continuous (functional) uncertain parameters,}
\begin{equation}
\tag{R}
\label{pb:Rc}
\begin{array}{>{\displaystyle}c*3{>{\displaystyle}l}}
\min_{x^b, x^r \in \mathcal{X}} & \multicolumn{3}{>{\displaystyle}l}{c(x^b,x^r) + \max_{\delta \in \set{\hat{D}}, \, y_0 \in \set{\hat{Y}}_0} \rev{\varphi(y(x^b,x^r,\delta,y_0,T))}} \\
\rm{s.t.} & y^+(x^b(t),x^r(t),\delta(t)) &\leq \bar{y}^+(t) & \forall \delta \in \mathcal{D},~ \forall t \in \mathcal{T} \\
& y^-(x^b(t),x^r(t),\delta(t)) &\leq \bar{y}^-(t) & \forall \delta \in \mathcal{D},~ \forall t \in \mathcal{T} \\
& y(x^b,x^r,\delta,y_0,t) &\leq \bar{y} & \forall \delta \in \mathcal{D},~\forall t \in \mathcal{T},~ \forall y_0 \in \set{Y}_0 \\
& y(x^b,x^r,\delta,y_0,t) &\geq \ubar{y} & \forall \delta \in \mathcal{D},~ \forall t \in \mathcal{T},~ \forall y_0 \in \set{Y}_0,
\end{array}
\end{equation}
\rev{where~$\mathcal X$ denotes the set of all functions in~$\mathcal{L}(\mathcal{T}, \mathbb R_+)$ that are constant on the trading intervals.} Using the conservative uncertainty sets $\mathcal{D}$ and $\set{Y}_0$ in the constraints ensures that the delivery guarantee dictated by the European Commission can be fulfilled. Failing to fulfill this guarantee might lead to exclusion from the regulation market. In contrast, there are no drastic consequences of \rev{reaching an undesirable state-of-charge at time~$T$}. Hence, we use the less conservative uncertainty sets $\hat{\mathcal{D}}$ and~$\hat{\set{Y}}_0$ in the objective function to steer the terminal state-of-charge toward a desirable value under all reasonably likely frequency deviation scenarios. The use of different uncertainty sets in the same model has previously been proposed in robust portfolio insurance problems \citep{SZ11}.
Recall from Proposition~\ref{Prop:y} that the function $y(x^b,x^r,\delta,y_0,t)$ is concave in the decision variables~$x^b$ and~$x^r$. Upper bounds on this function thus constitute non-convex constraints.
This implies that~\eqref{pb:Rc} represents a non-convex robust optimization problem with functional uncertain parameters. In general, such problems are severely intractable.
\rev{\begin{Rmk}[Uncertain driving patterns]\label{rmk:driving_patterns}
Although model~\eqref{pb:Rc} assumes deterministic driving patterns, it readily extends to uncertain driving times and distances. If it is only known that the vehicle will drive at some time within a prespecified interval, then the vehicle owner must not plan on exchanging any electricity with the grid during that interval. Similarly, if it is only known that the vehicle will drive some distance within a certain range, then the vehicle owner must plan with the low end of the range for the constraint on the maximum state-of-charge and with the high end of the range for the constraint on the minimum state-of-charge. The worst-case driving times and distances are thus independent of the vehicle owner's decisions and can be determined \emph{ex-ante}.
$\Box$
\end{Rmk}}
\section{Time Discretization}\label{sec:time_dis}
\rev{In order to derive a lossless time discretization of the frequency deviation scenarios in problem~\eqref{pb:Rc}, we assume from now on} that the power demand for driving and the maximum charge and discharge power of the vehicle charger remain constant over the trading intervals. This assumption is justified because a vehicle that is both driving and parking in the same trading interval cannot offer constant market bids and is therefore unable to participate in the electricity market. Although the power demand for driving may fluctuate wildly, the battery state-of-charge cannot increase while the vehicle is driving, and therefore the power consumption for driving can be averaged over trading intervals without loss of generality. Note that we do \emph{not} assume the frequency deviation scenarios~$\delta$ to remain constant over the trading intervals. In practice $\delta$ may fluctuate on time scales of the order of milliseconds, and averaging out the frequency deviations across a trading interval could result in a dangerous oversimplification of reality. \rev{This phenomenon is illustrated in the following example.
\begin{Ex}[Risks of ignoring intra-period fluctuations]
\label{ex:discretization}
As the market decisions~$x^b$ and~$x^r$, the power demand~$d$ and the charging limits~$\bar y^+$ and~$\bar y^-$ are piecewise constant, one might be tempted to replace the frequency deviation signal~$\delta$ with a piecewise constant signal obtained by averaging~$\delta$ over the trading intervals.
As we will see, however, averaging~$\delta$ relaxes the battery state-of-charge constraints.
Decisions~$x^b$ and~$x^r$ that are {\em in}feasible under the true signal may therefore appear to be feasible under the averaged signal. Hence, replacing the true signal with the averaged signal could make it impossible for the vehicle owner to honor her market commitments.
As a simple example, assume that~$x^b(t) = 0$ and $x^r(t)=x^r_1>0$ are constant and that the true frequency deviation signal averages to~0 over the first trading interval~$[0,\Delta t]$, that is, $\frac{1}{\Delta t}\int_0^{\Delta t} \delta(t) \, \mathrm{d}t = 0$. The left chart of Figure~\ref{fig:avg} visualizes two such signals, which display a small and a high total variation and are denoted by $\delta^{(1)}$ and~$\delta^{(2)}$, respectively. The constant signal equal to their (vanishing) average over~$[0,\Delta t]$ is denoted by~$\delta^{(3)}$. If~$\delta^{(1)}$ reflects reality but is incorrectly replaced with~$\delta^{(3)}$, we are led to believe that the state-of-charge will remain constant at~$y_0$. In reality, however, the battery dissipates the amount~$\Delta\eta \, x^r_1\Delta t/2$ of energy over the first trading interval, where~$\Delta \eta = \frac{1}{\eta^-} - \eta^+ \ge 0$, and the state-of-charge temporarily rises above~$y_0$ by~$\eta^+x^r_1\Delta t/2$. If~$\delta^{(2)}$ reflects reality, on the other hand, then the repeated charging and discharging of the battery still dissipates energy. See the right chart of Figure~\ref{fig:avg} for a visualization. While scenario~$\delta^{(1)}$ is contrived for maximum impact, scenario~$\delta^{(2)}$ rapidly fluctuates around~0 and thus captures a stylized fact that one would expect to see in reality. This example suggests that finding the minimum or the maximum of the state-of-charge over the entire planning horizon and over all signals~$\delta\in \mathcal{D}$ should be non-trivial because intra-period fluctuations {\em do} matter. As a further complication, note that the constraints of the uncertainty set~$\mathcal{D}$ couple the frequency deviations across time.
$\Box$
\begin{figure}
\caption{Frequency deviation signals (left) and their state-of-charge trajectories (right).}
\label{fig:avg}
\end{figure}
\end{Ex}
}
\rev{We will now argue that, in spite of Example~\ref{ex:discretization}, $\mathcal{D}$ and~$\hat\mathcal{D}$ can be restricted to contain only piecewise constant frequency deviation signals {\em without} relaxing problem~\eqref{pb:Rc}.}
To formalize the reasoning about piecewise constant functions, we introduce a lifting operator $L:\mathbb{R}^K \to \set{L}(\mathcal{T},\mathbb{R})$ that maps any vector $\bm{v} \in \mathbb R^K$ to a piecewise constant function $L\bm{v}$ with $K$ pieces defined through $(L \bm{v} )(t) = v_k$ if $t \in \mathcal{T}_k$, $k \in \mathcal{K} \rev{ = \{1, \ldots, K\}}$. We also introduce the adjoint operator $L^\dagger: \set{L}(\mathcal{T},\mathbb{R}) \to \mathbb{R}^K$ that maps any function $w\in\set{L}(\mathcal{T},\mathbb R)$ to a $K$-dimensional vector $L^\dagger w$ defined through $(L^\dagger w)_k = \frac{1}{\Delta t} \int_{\mathcal{T}_k}w(t) \, \mathrm{d}t$ for all $k \in \mathcal{K}$. Note that $L$ and $L^\dagger$ are indeed adjoint to each other because $\int_{\mathcal{T}} (L \bm{v})(t) w(t) \, \mathrm{d}t = \bm{v}^\top L^\dagger(w) $ for all $\bm{v} \in \mathbb{R}^K$ and $w \in \set{L}(\mathcal{T},\mathbb{R})$. Mathematically, we impose from now on the following assumption.
\rev{
\begin{Ass}\label{Ass:cst}
The functions $d$, $\bar{y}^+$ and $\bar{y}^-$ are piecewise constant, that is, there exist $\bm{d}, \bar{\bm{y}}^+, \bar{\bm{y}}^- \in \mathbb{R}^K$ such that $d = L\bm{d}$, $\bar{y}^+ = L \bar{\bm{y}}^+$ and $\bar{y}^- = L \bar{\bm{y}}^-$.
\end{Ass}
}
Next, we introduce a discretized uncertainty set
\begin{equation*}
\mathcal{D}_\mathcal{K} = \left\{ \bm{\delta} \in \left[-1,1\right]^K : \sum\limits_{l=1+ \left[k - \Gamma/\Delta t\right]^+}^{k} \vert \delta_l \vert \leq \frac{\gamma}{\Delta t} ~ \forall k \in \mathcal{K} \right\}
\end{equation*}
reminiscent of $\mathcal{D}$, where $\Gamma/\Delta t$ and $\gamma/\Delta t$ count the trading intervals within a regulation cycle and an activation period, respectively. Similarly, we define a smaller discretized uncertainty set~$\hat{\mathcal{D}}_\mathcal{K} \subseteq \mathbb R^K$ reminiscent of $\hat{\mathcal{D}}$, which is obtained from $\mathcal{D}_{\mathcal{K}}$ by replacing $\Gamma$ with $\hat{\Gamma}$ and $\gamma$ with $\hat{\gamma}$. In the remainder we impose the following divisibility assumption.
\begin{Ass}\label{Ass:div}
The parameters $\Gamma$, $\gamma$, $\hat{\Gamma}$ and $\hat{\gamma}$ are (positive) multiples of $\Delta t$.
\end{Ass}
The discretized uncertainty sets are of interest because of the following proposition.
\begin{Prop}\label{Prop:D}
The following statements hold.
\begin{enumerate}
\item[(i)] $L\mathcal{D}_{\mathcal{K}} \subseteq \mathcal{D}$ and $L^\dagger \mathcal{D} = \mathcal{D}_{\mathcal{K}}$.
\item[(ii)] $L\mathcal{D}_{\mathcal{K}}^+ \subseteq \mathcal{D}^+$ and $L^\dagger \mathcal{D}^+ = \mathcal{D}^+_\mathcal{K}$.
\end{enumerate}
\end{Prop}
Next, we define the finite-dimensional feasible set $\set{X}_\mathcal{K} = L^\dagger \set{X}$.
As $\set{X}$ contains only piecewise constant functions, we have $\set{X} = L \set{X}_\mathcal{K}$. We further define the cost function $c_\mathcal{K}(\bm{x^b}, \bm{x^r}) = c(L \bm{x^b},L \bm{x^r})$, which is linear in $\bm{x^b} \in \mathbb{R}^K$ and $\bm{x^r} \in \mathbb{R}^K$. In addition, for any $k \in \mathcal{K}$ we define the function
\begin{align}
y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right) & = y\left(L\bm{x^b}, L\bm{x^r}, L \bm{\delta}, y_0, k \Delta t\right) \notag \\
& = y_0 + \Delta t \sum_{l=1}^{k} \Big( \eta^+ y^+(x^b_l,x^r_l,\delta_l) - \frac{1}{\eta^-} y^-(x^b_l,x^r_l,\delta_l) - d_l \Big),
\end{align}
which represents the battery state-of-charge at the end of period~$k$ under the assumption that \rev{both} the market bids and the frequency deviations are piecewise constant.
\begin{Prop}\label{Prop:yk}
Holding all other factors fixed, $y_k(\bm{x^b},\bm{x^r},\bm{\delta},y_0)$ is concave nondecreasing in $\bm{x^b}$, concave in $\bm{x^r}$, concave nondecreasing in $\bm{\delta}$, and linear nondecreasing in $y_0$ for any $k \in \mathcal{K}$.
\end{Prop}
We are now ready to define the discrete-time counterpart of the robust optimization problem~\eqref{pb:Rc}.
\begin{equation}
\tag{R$_\mathcal{K}$}
\label{pb:R}
\begin{array}{>{\displaystyle}c*3{>{\displaystyle}l}}
\min_{\bm{x^b},\bm{x^r} \in \set{X}_\mathcal{K}} & \multicolumn{3}{>{\displaystyle}l}{c_\mathcal{K}(\bm{x^b},\bm{x^r}) + \max_{\bm{\delta} \in \set{\hat{D}}_\mathcal{K}, y_0 \in \set{\hat{Y}}_0} \rev{\varphi(y_K(\bm{x^b},\bm{x^r},\bm{\delta},y_0))}} \\
\text{s.t.} & y^+(x^b_k,x^r_k,\delta_k) &\leq \bar{y}^+_k & \forall \bm{\delta} \in \mathcal{D}_\mathcal{K}, ~\forall k \in \mathcal{K} \\
& y^-(x^b_k,x^r_k,\delta_k) &\leq \bar{y}^-_k & \forall \bm{\delta} \in \mathcal{D}_\mathcal{K}, ~\forall k \in \mathcal{K} \\
& y_{k}(\bm{x^b},\bm{x^r},\bm{\delta},y_0) &\leq \bar{y} & \forall \bm{\delta} \in \mathcal{D}_\mathcal{K}, ~\forall k \in \mathcal{K} \cup \{0\}, ~\forall y_0 \in \set{Y}_0 \\
& y_{k}(\bm{x^b},\bm{x^r},\bm{\delta},y_0) &\geq \ubar{y} & \forall \bm{\delta} \in \mathcal{D}_\mathcal{K}, ~\forall k \in \mathcal{K} \cup \{0\}, ~\forall y_0 \in \set{Y}_0
\end{array}
\end{equation}
Unlike the original problem~\eqref{pb:Rc}, the discrete-time counterpart~\eqref{pb:R} constitutes a standard robust optimization problem that involves only finite-dimensional uncertain parameters. For this reason, there is hope that \eqref{pb:R} is easier to solve than~\eqref{pb:Rc}.
\begin{Th}[Lossless time discretization]\label{th:time}
The problems~\eqref{pb:Rc} and~\eqref{pb:R} are equivalent.
\end{Th}
\rev{The equivalence of~\eqref{pb:Rc} and~\eqref{pb:R} is perhaps surprising in view of Example~\ref{ex:discretization}. It means that the worst-case frequency deviation scenarios are piecewise constant even though intra-period fluctuations matter. Theorem~\ref{th:time} is proved by showing that the four robust constraints in~\eqref{pb:Rc} with functional uncertainties are equivalent to the corresponding robust constraints in~\eqref{pb:R} with vectorial uncertainties and that the worst-case terminal cost functions in~\eqref{pb:Rc} and~\eqref{pb:R} coincide. For example, the equivalence of the first (second) robust constraints in~\eqref{pb:Rc} and~\eqref{pb:R} follows from the observation that, for any fixed~$t\in\mathcal T$, the left hand side of the first (second) constraint in~\eqref{pb:Rc} is maximized by a scenario~$\delta \in\mathcal{D}$ with~$\delta(t)=1$ ($\delta(t)=-1$), which exists by the definition of~$\mathcal{D}$. The last two robust constraints in~\eqref{pb:Rc} are nonlocal as they depend on the entire frequency deviation scenario~$\delta$ and not only on its value at a particular time. Thus, they are significantly more intricate. The robust upper bound on the state-of-charge can be reformulated as an upper bound on $\max_{t\in\mathcal T} \max_{\delta\in \mathcal{D}}y(x^b,x^r,\delta,y_0,t)$. By using Propositions~\ref{Prop:y} and~\ref{Prop:D}, one can then show that the maximum over~$t\in\mathcal T$ must be attained at~$t=k\Delta t$ for some~$k\in\mathcal K\cup\{0\}$ and that for any such~$t$ the state-of-charge can be expressed as an integral of~$\delta$ against a piecewise constant function. Thus, averaging~$\delta$ across the trading intervals has no impact on the state-of-charge, which in turn allows us to focus on piecewise constant scenarios without restricting generality.
The robust lower bound on the state-of-charge in~\eqref{pb:Rc} can be reformulated as a lower bound on $\min_{t\in\mathcal T} \min_{\delta\in \mathcal{D}}y(x^b,x^r,\delta,y_0,t)$, which appears to be intractable because the optimization problem over~$\delta$ minimizes a concave function over a convex feasible set and is therefore non-convex. Classical robust optimization provides no general recipe for handling such constraints even if the uncertain parameters are finite-dimensional, and state-of-the-art research settles for deriving approximations \citep{roos2018approximation}. By exploiting a continuous total unimodularity property of the uncertainty set~$\mathcal{D}$ facilitated by Assumption~\ref{Ass:div}, we first prove that the minimum of~$y(x^b,x^r,\delta,y_0,t)$ over~$\mathcal{D}$ is attained by a frequency deviation trajectory that takes only values in~$\{-1,0\}$. Next, we demonstrate that there exists an affine function of~$\delta$ that matches~$y(x^b,x^r,\delta,y_0,t)$ for all trajectories~$\delta\in\mathcal{D}$ valued in~$\{-1,0\}$ and for all~$t\in\mathcal T$. In the language of robust optimization, the state-of-charge~$y(x^b,x^r,\delta,y_0,t)$ can be viewed as an analysis variable that adapts to the uncertainty~$\delta$, and the corresponding affine function constitutes a decision rule approximating~$y(x^b,x^r,\delta,y_0,t)$. Decision rule approximations almost invariably introduce approximation errors \citep[\S~14]{AB09}.
However, the affine decision rule proposed here is error-free because it coincides with~$y(x^b,x^r,\delta,y_0,t)$ for all scenarios~$\delta\in\mathcal{D}$ valued in~$\{-1,0\}$ that may attain the worst case in~$\min_{t\in\mathcal T} \min_{\delta\in \mathcal{D}}y(x^b,x^r,\delta,y_0,t)$. Using this decision rule in an elaborate sensitivity analysis, we can finally prove that the minimum over~$t\in\mathcal T$ must be attained at~$t=k\Delta t$ for some~$k\in\mathcal K\cup\{0\}$ and that for any such~$t$ the state-of-charge can be expressed as an integral of~$\delta$ against a piecewise constant function. Thus, we may focus again on piecewise constant scenarios without restricting generality. The full proof of Theorem~\ref{th:time} can be found in Appendix~\ref{Apx:Proofs}.
The new robust optimization techniques developed to prove Theorem~\ref{th:time} are of independent interest as they provide exact tractable reformulations for certain adjustable robust optimization problems with functional or vectorial uncertain parameters, where the embedded optimization problems over the uncertainty realizations are non-convex. We also note that the embedded optimization problems over~$\delta\in\mathcal{D}$ in problem~\eqref{pb:Rc} can be viewed as variants of the so-called {\em separated continuous linear programs} introduced by \cite{EA83}. The proof of Theorem~\ref{th:time} shows that these problems are solved by piecewise constant frequency deviation scenarios that can be computed efficiently, thereby extending the purely existential results by \cite{MP95}. Our results are also orthogonal to those by~\cite{ghate20}, who proves that {\em robust} separated continuous linear programs with budget uncertainty sets are equivalent to standard separated continuous linear programs.
}
Even though the non-convex robust optimization problem~\eqref{pb:Rc} \rev{with functional uncertainty} admits a lossless time discretization, its discrete-time counterpart~\eqref{pb:R} still constitutes a non-convex robust optimization problem and thus appears to be hard. In the next section, however, we will show that~\eqref{pb:Rc} can be reformulated as a tractable linear program by exploiting its structural properties.
\section{Linear Programming Reformulation}
\label{sec:LPR}
In order to establish the tractability of the non-convex robust optimization problem~\eqref{pb:Rc}, it is useful to reformulate its time discretization~\eqref{pb:R} as the following \emph{linear} robust optimization problem, where all constraint functions are bilinear in the decision variables and the uncertain parameters.
\begin{equation}
\tag{R$'_\mathcal{K}$}
\label{pb:LR}
\begin{array}{>{\displaystyle}c*3{>{\displaystyle}l}}
\min & c_\mathcal{K}(\bm{x^b}, \bm{x^r}) + \rev{z} \\
\text{s.t.}
& \multicolumn{3}{*1{>{\displaystyle}l}}{\rev{
\bm{x^b}, \bm{x^r} \in \set{X}_{\mathcal{K}}, ~ \bm m \in \mathbb{R}^K
}}\\
& \multicolumn{2}{*1{>{\displaystyle}l}}{
x^r_k + x^b_k \leq \bar{y}^+_k,
\quad
x^r_k - x^b_k \leq \bar{y}^-_k}
& \forall k \in \mathcal{K} \\
& \multicolumn{1}{*1{>{\displaystyle}l}}{
m_k - \eta^+ x^r_k \geq 0,
\quad
m_k - \frac{1}{\eta^-} x^r_k + \Delta \eta \, x^b_k} & \geq 0
& \forall k \in \mathcal{K} \\
& \bar{y}_0 + \Delta t \sum_{l=1}^{k} \eta^+ \big( x^b_l + \delta_l x^r_l \big) - d_l & \leq \bar{y} & \forall \bm \delta \in \mathcal{D}_\mathcal{K}^+,~\forall k \in \mathcal{K} \cup \{0\} \\
& \ubar{y}_0 + \Delta t \sum_{l=1}^{k} \eta^+ x^b_l - m_l \delta_l - d_l & \geq \ubar{y} & \forall \bm \delta \in \mathcal{D}_\mathcal{K}^+,~\forall k \in \mathcal{K} \cup \{0\} \\
& \rev{\hat{\bar{y}}_0 + \Delta t \sum_{k=1}^{K} \eta^+ \big( x^b_k + \delta_k x^r_k \big) - d_k} & \leq \rev{\frac{z-b_n}{a_n}}
& \forall \bm \delta \in \hat \mathcal{D}^+_\mathcal{K},~ \rev{\forall n \in \mathcal{N}_+} \\
& \rev{\hat{\ubar{y}}_0 + \Delta t \sum_{k=1}^{K} \eta^+ x^b_k - m_k \delta_k - d_k} & \geq \rev{\frac{z-b_n}{a_n}} & \forall \bm \delta \in \hat \mathcal{D}_\mathcal{K}^+,~ \rev{\forall n \in \mathcal{N}_-}\\
& \rev{z\ge b_n } & & \forall n\in\mathcal{N}_0
\end{array}
\end{equation}
Here, $\Delta \eta = \frac{1}{\eta^-} - \eta^+ \ge 0$ is used as a shorthand for the reduction in the battery state-of-charge resulting from first charging and then discharging one unit of energy as seen from the grid. \rev{In addition, we set $\set{N}_+ = \{n \in \set{N}: a_n > 0 \}$, $\set{N}_- = \{n \in \set{N}: a_n < 0 \}$ and $\set{N}_0 = \set{N} \setminus (\mathcal{N}_+\cup\mathcal{N}_-)$.}
\begin{Th}[Lossless linearization]\label{th:lr}
The problems~\eqref{pb:R} and~\eqref{pb:LR} are equivalent.
\end{Th}
\rev{The proof of Theorem~\ref{th:lr} critically relies on the exact affine decision rule approximation discovered in the proof of Theorem~\ref{th:time}. Note that} the linear robust optimization problem~\eqref{pb:LR} still appears to be difficult because each robust constraint must hold for all frequency deviation scenarios in an uncountable uncertainty set~$\mathcal{D}_{\mathcal{K}}^+$ or~$\hat \mathcal{D}^+_\mathcal{K}$ and therefore corresponds to a continuum of ordinary linear constraints. Fortunately, standard robust optimization theory~\citep{AB04, DB04} allows us to reformulate~\eqref{pb:LR} as the tractable linear program
\begin{equation}
\label{pb:LP}
\tag{R$^{''}_\mathcal{K}$}
\begin{array}{*1{>{\displaystyle}c}*2{>{\displaystyle}l}}
\min & \multicolumn{2}{>{\displaystyle}l}{c_\mathcal{K}(\bm{x^b}, \bm{x^r}) + \rev{z}} \\
\text{s.t.}
& \multicolumn{2}{>{\displaystyle}l}{
\bm{x^b}, \bm{x^r} \in \set{X}_\mathcal{K},~z\in\mathbb R, ~
\bm m, \bm \lambda^+, \bm \lambda^-, \bm \theta^+, \bm \theta^- \in \mathbb R^K_+,~
\bm \Lambda^+, \bm \Lambda^-, \bm \Theta^+, \bm \Theta^- \in \mathbb{R}^{K\times K}_+
} \\
& \multicolumn{1}{>{\displaystyle}l}{
x^r_k + x^b_k \leq \bar{y}^+_k,
\quad
x^r_k - x^b_k
\leq \bar{y}^-_k} & \forall k \in \mathcal{K} \\
& \multicolumn{1}{>{\displaystyle}l}{
m_k \geq \eta^+ x^r_k,
\quad
m_k \geq \frac{1}{\eta^-}x^r_k - \Delta \eta \, x^b_k }
& \forall k \in \mathcal{K} \\
& \sum_{l=1}^k \Delta t \left( \eta^+x^b_l + \Lambda^+_{k,l} - d_l \right) + \gamma \Theta^+_{k,l} \leq \bar{y} - \bar y_0 & \forall k \in \mathcal{K} \cup\{0\}\\
&\sum_{l = 1}^k \Delta t \left( \eta^+x^b_l - \Lambda^-_{k,l} - d_l \right) - \gamma \Theta^-_{k,l} \geq \ubar{y} - \ubar y_0 & \forall k \in \mathcal{K} \cup\{0\}\\
& \rev{\sum_{k \in \mathcal{K}} \Delta t \left( \eta^+x^b_k + \lambda^+_{k} - d_k \right) + \hat \gamma \theta^+_{k} \leq \frac{z-b_n}{a_n} - \hat{\bar y}_0} & \rev{\forall n \in \mathcal{N}_+} \\
& \rev{\sum_{k \in \mathcal{K}} \Delta t \left( \eta^+x^b_k - \lambda^-_{k} - d_k \right) - \hat \gamma \theta^-_{k} \geq \frac{z-b_n}{a_n} - \hat{\ubar y}_0} & \rev{\forall n \in \mathcal{N}_-} \\
& \rev{b_n\leq z} & \rev{\forall n\in\mathcal{N}_0}\\
& \Lambda^+_{k,l} + \sum\limits_{i = l}^{I(k,l)} \Theta^+_{k,i} \geq \eta^+ x^r_l, \quad
\Lambda^-_{k,l} + \sum\limits_{i = l}^{I(k,l)} \Theta^-_{k,i} \geq m_l & \forall k,l \in \mathcal{K}:\; l \leq k \\
& \lambda^+_{k} + \sum\limits_{i = k}^{\hat I(K,k)} \theta^+_{i} \geq \eta^+ x^r_k, \quad
\lambda^-_{k} + \sum\limits_{i = k}^{\hat I(K,k)} \theta^-_{i} \geq m_k & \forall k \in \mathcal{K},
\end{array}
\end{equation}
where $I(k,l) = \min\{k, l+ \Gamma/\Delta t - 1\}$ and $\hat I(k,l) = \min\{k, l+ \hat \Gamma/\Delta t -1\}$.
\begin{Th}[Linear programming reformulation]\label{th:lp}
The problems~\eqref{pb:LR} and~\eqref{pb:LP} are equivalent.
\end{Th}
The conversion of the robust optimization problem~\eqref{pb:LR} to the linear program~\eqref{pb:LP} comes at the expense of introducing $4K^2 + 4K$ dual variables. For a daily planning horizon with half-hourly resolution, this amounts to introducing 9,408~additional continuous variables. Overall, the linear program~\eqref{pb:LP} involves $4K^2 + 7K + 1$ variables and $\rev{K^2 + 9 K + N + 2}$~constraints, that is, its size scales quadratically with~$K$. In conjunction, Theorems~\ref{th:time}--\ref{th:lp} imply that the non-convex robust optimization problem~\eqref{pb:Rc} \rev{with continuous uncertain parameters} can be reduced without any loss to the tractable linear program~\eqref{pb:LP}, which is amenable to efficient numerical solution with state-of-the-art linear programming solvers such as CPLEX or Gurobi.
\rev{
\begin{Rmk}[Robustification reduces complexity]
A striking property of the robust optimization model~\eqref{pb:Rc} is that it is much {\em easier} to solve than the underlying deterministic model, which would assume precise knowledge of the frequency deviation scenario~$\delta$. Indeed, the textbook formulation of the deterministic model requires continuous decision variables to represent~$y^+(x^b(t),x^r(t),\delta(t))$ and~$y^-(x^b(t),x^r(t),\delta(t))$ and a binary decision variable to model their complementarity for every~$t~\in~\set T$ \cite[p.~85]{JAT15}. This results in a large-scale mixed-integer linear program even if~$\set T$ is discretized. In contrast, the robust optimization model~\eqref{pb:Rc} is equivalent to the tractable linear program~\eqref{pb:LP}. To our best knowledge, we have thus discovered the first practically interesting class of optimization problems that become dramatically easier through robustification.
$\Box$
\end{Rmk}
}
\section{Multi-Stage Extensions}\label{sec:multi-stage}
\rev{Model~\eqref{pb:Rc} described in Section~\ref{sec:Prob_Des} looks only one day ahead and accounts for the future usage of the vehicle only through the cost-to-go function~$\varphi$. We will now show that this static decision problem readily extends to a dynamic model that looks~$H$ days into the future. For ease of exposition, we assume that the market bids for day~$h\in\mathcal H=\{0,\ldots,H-1\}$ are due at midnight of the previous day, which implies that the initial state-of-charge~$y_{h}$ on day~$h$ is precisely known.
However, this assumption can be relaxed. A vehicle owner aiming to minimize worst-case expected costs (where the expectation is taken with respect to the prices, and the worst-case is taken with respect to the frequency deviation scenarios in~$\hat \mathcal{D}$) across~$H$ days solves the robust dynamic program
\begin{equation}
\label{eq:dynamic-problem}
\varphi_h(y_h)
= \min_{(x_h^b, x_h^r) \in \set{X}_{\text{R}}(y_h)} c(x_h^b, x_h^r)
+ \max_{\delta_h \in \hat{\mathcal{D}}}
\varphi_{h+1}\big(y(x_h^b, x_h^r, \delta_h, y_h, T)\big)\quad \forall h\in\mathcal H.
\end{equation}
Here, $\set{X}_{\text{R}}(y_h)$ denotes the feasible set of the single-stage problem~\eqref{pb:Rc} with uncertainty sets~$\mathcal{D}$ and~$\set Y_0=\{y_h\}$. In principle, $\set{X}_{\text{R}}(y_h)$ could be empty for some~$y_h$. Under the reasonable assumption that each night the vehicle is plugged in long enough for the battery to be fully charged, however, $\set{X}_{\text{R}}(y_h)$ is guaranteed to be non-empty. In this case, problem~\eqref{eq:dynamic-problem} remains feasible on all days~$h\in\set H$ irrespective of previous market decisions and even when facing a frequency deviation scenario~$\delta\notin\hat\mathcal{D}$. The cost-to-go functions~$\varphi_h$, $h\in\mathcal H$, are defined recursively through~\eqref{eq:dynamic-problem}. In practice, it proves useful to initialize the recursion by~$\varphi_{H}(y_H) = p^\star \vert y_H - y^\star \vert$ for some target state-of-charge~$y^\star$ and penalty parameter~$p^\star\geq 0$, which can be calibrated to historical data via cross-validation (see Section~\ref{sec:backtest}).
\begin{Prop}\label{Prop:phi}
The cost-to-go function~$\varphi_h$ is convex and piecewise affine for every~$h\in\mathcal H$.
\end{Prop}
By Proposition~\ref{Prop:phi}, problem~\eqref{eq:dynamic-problem} is structurally equivalent to problem~\eqref{pb:Rc} for any~$h\in\set H$. Theorems~\ref{th:time}, \ref{th:lr} and~\ref{th:lp} thus imply that~\eqref{eq:dynamic-problem} can be recast as a tractable linear program whose right hand side coefficients depend affinely on~$y_h$. Computing the function value and an arbitrary subgradient of~$\varphi_h$ at a fixed~$y_h$ is therefore tantamount to solving a linear program.
This insight suggests that convex piecewise affine bounds on the cost-to-go function~$\varphi_h$ can be constructed by solving a series of linear programs. If~$\varphi_{h+1}$ is precisely known for some~$h\in\set H$, for example, then a coarse upper bound is obtained by evaluating~$\varphi_h$ at~$\ubar y$ and~$\bar y$ and by linearly interpolating the two function values. A coarse lower bound is given by the pointwise maximum of the two tangents of~$\varphi_h$ constructed from the function values and subgradients at~$\ubar y$ and~$\bar y$. The difference between these coarse bounds is maximal at the kink of the lower bound. To improve both bounds, we can then break the interval~$[\ubar y, \bar y]$ apart at the kink and construct separate upper and lower bounds on the two resulting subintervals by repeating the above procedure. Iteratively partitioning the subinterval on which the gap between the bounds is maximal yields increasingly tight convex piecewise affine bounds that approximate~$\varphi_h$ uniformly on~$[\ubar y, \bar y]$ to any precision. In each iteration one has to solve a linear program akin to~\eqref{pb:LP} in order to compute the value and a subgradient of~$\varphi_h$ at a new anchor point. Note that the outlined procedure remains applicable if~$\varphi_{h+1}$ in~\eqref{eq:dynamic-problem} is replaced with a convex piecewise affine bound, in which case one has only access to an {\em inexact} oracle for the values and subgradients of~$\varphi_h$. In this case the approximation errors accumulate over the dynamic programming recursions.
In reality, the market bids for day~$h$ are due around noon and not at midnight of day~$h-1$. As a consequence, the exact state-of-charge~$y_h$ at the beginning of day~$h$ is unknown at stage~$h$, that is, at the time when~$(x^b_h,x^r_h)$ must be chosen. Instead, only the confidence intervals~$[\ubar{\hat y}_0,\hat{\bar y}_0]$ and~$[\ubar y_0,\bar y_0]$ for~$y_h$ are available, which can be constructed from the current state-of-charge and from~$(x^b_{h-1},x^r_{h-1})$; see Section~\ref{sec:Prob_Des}. In a more realistic multi-stage model, the information available at stage~$h$ is therefore encoded by the state vector~$(\ubar{\hat y}_0, \hat{\bar y}_0, \ubar y_0, \bar y_0)$, and thus the cost-to-go functions have four arguments, while the state transition functions have four components.
By generalizing Theorems~\ref{th:time}, \ref{th:lr} and~\ref{th:lp} as well as Proposition~\ref{Prop:phi}, one can still show that all cost-to-go functions are convex and piecewise affine and that their values and subgradients can be computed by solving linear programs akin to~\eqref{pb:LP}. While tedious, the proofs of these generalized results require no fundamentally new ideas and are thus omitted for brevity. As the cost-to-go functions have four arguments, the construction of convex piecewise affine upper and lower bounds is more challenging but still possible via the robust dual dynamic programming algorithm by~\citet{AG19}.
Numerical experiments in Section~\ref{sec:ex_2stage} suggest that the benefits of solving a multi-stage instead of a single-stage problem are negligible in practice because electric vehicles can always be charged overnight. We emphasize, however, that the robust optimization models and techniques developed in this paper may also be useful to optimize the operation of other energy storage devices that are characterized by slower dynamics and therefore necessitate a proper multi-stage approach.
}
\section{Numerical Experiments}\label{sec:NumEx}
In the following, we first describe how the vehicle owner's decision problem is parametrized from data, and we explain the backtesting procedure that is used to assess the performance of a given bidding strategy. Next, we present numerical results and discuss policy implications. All experiments are run on an Intel i7-6700 CPU with $3.40$GHz clock speed and $64$GB of RAM. All linear programs are solved with GUROBI~$\rev{9.1.2}$ using its PYTHON interface. In order to ensure the reproducibility of our experiments, we provide links to all data sources and make our code available at
\url{www.github.com/lauinger/reliable-frequency-regulation-through-vehicle-to-grid}.
\subsection{Model Parametrization}\label{sec:Params}
The French transmission system operator (RTE) publishes availability and delivery prices and frequency measurements.\footnote{\url{http://clients.rte-france.com/}} There have been two policy changes in frequency regulation since 2015. While the availability prices were historically kept constant throughout the year, they change on a weekly basis since mid-January 2017 and on a daily basis since July 2019. At this point, the pricing mechanism also changed from a pay-as-bid auction to a clearing price auction. The average availability price over all years from 2015 to 2019 amounts to $0.8$cts/kWh, but the yearly average \emph{decreased} in 2017 and 2018, and \emph{increased} again in 2019 to pre-2017 levels. \rev{For all practical purposes we may assume that the expected regulation price $p^r(t) = p^a(t) + \E[\tilde\delta(t) \tilde p^d (t)]$ coincides with the availability price~$p^a(t)$ because the realized regulation price~$p^a(t) + \delta(t) p^d(t)$ oscillates rapidly around the availability price~$p^a(t)$ due to intra-day fluctuations of the frequency-adjusted delivery prices; see Figure~\ref{fig:Reserve price} in Appendix~\ref{Apx:Data}.
In fact, $\delta(t) p^d(t)$ empirically averages to~$-2.36 \cdot 10^{-5}$\EUR~over all 10s intervals from 2015 to 2019.}
We further identify the utility prices~$p^b(t)$ with the residential electricity prices charged by Electricité de France (EDF), the largest European electricity provider. These prices exhibit six different levels corresponding to peak- and off-peak hours on high, medium and low price days. High price days can occur exclusively on work days between November and March, whereas medium price days can occur on all days except Sundays. Low-price days can occur year-round. The peak hours are defined as the hours from 6~am to 10~pm on work days, and all the other hours are designated as off-peak hours. The prices corresponding to each type of day and hour are regulated and published in the official French government bulletin.\footnote{Journal Officiel de la République Fran\c{c}aise: \url{https://www.legifrance.gouv.fr/}} Over the past five years, these prices have not changed more than three times per year. On each day, RTE announces the next day's price levels by 10:30~am. The average utility price over the years from 2015 to 2019 amounts to $14$cts/kWh and thus exceeds the average availability price by an order of magnitude.
When simulating the impact of the market decisions on the battery state-of-charge, it is important to track the frequency signal with a high time resolution. In fact, the \cite{EU17} requires regulation providers to adjust the power flow between the battery and the grid every ten seconds in order to ensure that it closely matches $x^b(t) + \delta(t) x^r(t)$ for all $t \in \mathcal{T}$. This means that regulation providers need to measure the frequency deviation $\delta(t)$ at least every ten seconds. Hence, we use a sampling rate of~$100$mHz when simulating the impact of the market decisions on the battery state-of-charge. This contrasts with previous studies, which used sampling rates below $20$mHz \citep{SH10, ES12, JD14, FW19}. Recall from Section~\ref{sec:Prob_Des} that the frequency deviation $\delta(t)$ depends on the nominal grid frequency $f_0 = 50$Hz and on the normalization constant $\Delta f = 200$mHz. Moreover, recall that the uncertainty set~$\mathcal{D}$ is parametrized by $\gamma = 0.5$h and $\Gamma = 2.5$h in order to respect the delivery guarantee rules prescribed by the European Commission, while the less conservative uncertainty set~$\hat \mathcal{D}$ is parametrized by $\hat \gamma = 0.5$h and $\hat \Gamma = 24$h as described in the discussion of Figure~\ref{fig:Daily_StdDev}.
The vehicle data is summarized in Table~\ref{tab:Params} in Appendix~\ref{Apx:Data}. The chosen parameter values are representative for commercially available midrange vehicle-to-grid-capable electric vehicles such as the 2018~Nissan Leaf, and they are in line with experimental measurements of charging and discharging efficiencies. For example, \cite{EA17} find that charging and discharging efficiencies $\eta^+$ and $\eta^-$, respectively, vary between $64$\% and $88$\% for a $\text{LiPF}_\text{6}$ cobalt battery with a nominal voltage of~$345$V and a nominal capacity of~$106$Ah.
\rev{We assume that the vehicle owner reserves the time windows from 7~am to 9~am and from 5~pm to 7~pm on workdays and from 8~am to 8~pm on weekends and public holidays for driving.} At all other times, the car is connected to a bidirectional charging station. We also assume that the car's yearly mileage amounts to 10,000km, which approximately matches the French average of 13,000km~\citep{CGDD10}. Hence, the car travels about \rev{27km} per day.
\rev{Even though this distance is easily covered within one hour, it makes sense to reserve extended time slots for driving. Indeed, the vehicle owner may not be able to (nor wish to) pinpoint the exact driving times one day in advance. It may also be impossible to find bidirectional charging stations on weekend trips.} With a standard vehicle efficiency of~$0.2$kWh/km, the car thus consumes~$2,000$kWh per year.
\subsection{Backtesting Procedure and Baseline Strategy}\label{sec:backtest}
In our experiments, we assess the performance of different bidding strategies over different test datasets covering one of the years between 2015 and 2019. A bidding strategy is any procedure that computes on each day at noon a pair of market decisions $x^b$ and $x^r$ for the following day. We call a strategy \emph{non-anticipative} if it determines the market decisions using only information observed in the past. In addition, we call a strategy \emph{feasible} if it allows the vehicle owner to honor all market commitments for all frequency deviation scenarios within the uncertainty set~$\mathcal{D}$.
To measure the profit generated by a particular strategy over one year of test data, we use the following backtesting procedure. On each day at noon we compute the market decisions for the following day. We then use the actual frequency deviation data between noon and midnight and the market decisions for the current day to calculate the true battery state-of-charge at midnight. Next, we use the frequency deviation data of the following day to calculate the revenue from selling regulation power to the TSO, which is subtracted from the cost of buying electricity for charging the battery. \rev{If the strategy is infeasible and the vehicle owner is not able to deliver all promised regulation power even though the realized frequency deviation trajectory falls within the uncertainty set~$\mathcal{D}$, then she pays a penalty. The penalty at time~$t$ is set to~$k_{\mathrm{pen}}\cdot p^a(t)\cdot(x^r(t) - x^r_\mathrm{d}(t))$, where $x^r_\mathrm{d}(t)$ denotes the maximum amount of regulation power that could have been offered without risking an infeasibility, and
the penalty factor~$k_{\mathrm{pen}}$ ranges from~$3$ to~$10$.\footnote{For example, \cite{RTE17} sets $k_{\mathrm{pen}} = 5$.} Repeated offenses may even lead to market exclusion.}
For simplicity, in each experiment we either assume that the vehicle owner pays a penalty corresponding to a fixed value of $k_{\mathrm{pen}}$ for every offense or is excluded from the regulation market directly upon the first offense.
\rev{If the battery is depleted during a trip, any missing energy needed for driving is acquired at a high price~$p^y$ from a public fast charging station. We assume that~$p^y$ accounts for the price of energy as well as for the opportunity cost of the time lost in driving to the charging station and waiting to be serviced. In our experiments we set $p^y$ either to 0.75\EUR/kWh
(which corresponds to typical energy prices offered by the European fast charging network Ionity\footnote{\url{https://ionity.eu/en/}}),
to~7.5\EUR/kWh or to~75\EUR/kWh.}
The procedure described above is repeated on each day, and the resulting daily profits are accumulated over the entire test dataset.
Our baseline strategy is to determine the next day's market decisions by solving the robust optimization problem~\eqref{pb:Rc} \rev{with terminal cost function~$\varphi(y) = p^\star \vert y - y^\star \vert$, where the calibration of~$y^\star$ and~$p^\star$ is described below. Thus, \eqref{pb:Rc} is equivalent to an instance of the linear program~\eqref{pb:LP}.} This problem is updated on each day because \rev{the driving pattern~$d$ as well as} the market prices~$p^b$ and~$p^r$ change, and because the uncertainty sets $\set{Y}_0$ and $\hat{\set{Y}}_0$ for the state-of-charge at midnight depend on the state-of-charge at noon and on the market commitments between noon and midnight that were chosen one day earlier. The baseline strategy is feasible thanks to the robust constraints in~\eqref{pb:Rc}, which ensure that regulation power can be provided for all frequency deviation scenarios in~$\mathcal{D}$.
The parameters $p^\star$ and $y^\star$ are kept constant throughout each backtest. \rev{Specifically, we set $p^\star = \frac{3+k}{40}$\EUR/kWh for some $k = 1,\ldots,9$ and $y^\star = (\frac{\bar{y} + \ubar{y}}{2} + l)$kWh for some $l = 0,\ldots,5$.} Every tuple~$(p^\star, y^\star)$ encodes a different bidding strategy. Given a training dataset comprising one year of frequency measurements and market prices, we compute the cumulative profit of each strategy via the backtesting procedure outlined above, and we choose the tuple~$(p^\star,y^\star)$ that corresponds to the winning strategy. This strategy is non-anticipative if the year of the training dataset precedes the year of the test dataset. Table~\ref{tab:Tuning} shows that selecting $p^\star$ and $y^\star$ non-anticipatively on a historical training dataset has low regret relative to selecting these parameters anticipatively on the test dataset. Here, the regret is defined as the ratio of the absolute difference and the arithmetic mean of the cumulative profits generated by the anticipative and the non-anticipative strategies tuned on the test and training datasets, respectively. We always use the year immediately prior to the test dataset as training dataset.
Table~\ref{tab:Tuning} shows that from 2017 onward, perhaps surprisingly, anticipative parameter tuning has no advantage over non-anticipative tuning.
From now on, we assume that~$p^\star$ and~$y^\star$ are tuned non-anticipatively using the year of training data immediately prior to the test dataset. Additional robustness checks reveal that the cumulative profit (evaluated on 2019 data) is relatively insensitive to the choice of~$p^\star$ and~$y^\star$ within the suggested search grid, which indicates that its resolution is sufficiently high. Details are omitted for the sake of brevity.
\begin{table}[t!]
\centering
\caption{Calibration of $p^\star$ and $y^\star$.}
\label{tab:Tuning}
\begin{tabular}{c|ccc|ccc|c}
Test & \multicolumn{3}{c}{Anticipative Calibration} & \multicolumn{3}{|c|}{Non-Anticipative Calibration} & Regret \\
Dataset & \multicolumn{1}{c}{$p^\star$ (\EUR/kWh)} & \multicolumn{1}{c}{$\frac{y^\star - \ubar y}{\bar y - \ubar y}$ (\%)} & \multicolumn{1}{c}{Profit (\EUR)} & \multicolumn{1}{|c}{$p^\star$ (\EUR/kWh)} & \multicolumn{1}{c}{$\frac{y^\star - \ubar y}{\bar y - \ubar y}$ (\%)} & \multicolumn{1}{c|}{Profit (\EUR)} & (\%) \\
\midrule
$2015$ & \rev{$0.125$} & \rev{$56.7$} & \rev{$-74$} & n/a & n/a & n/a & n/a \\
$2016$ & $0.15$ & \rev{\vline} & \rev{$-101$} & \rev{$0.15$} & \rev{$56.7$} & \rev{$-112$} & $10$ \\
$2017$ & \vline & \vline & \rev{$-169$} & \rev{\vline} & \rev{\vline} & \rev{$-169$} & $0$ \\
$2018$ & \vline & \vline & \rev{$-190$} & \vline & \vline & \rev{$-190$} & \vline \\
$2019$ & $0.15$ & $56.7$ & \rev{$-153$} & $0.15$ & $56.7$ & \rev{$-153$} & 0 \\
\bottomrule
\end{tabular}
\end{table}
{\color{black}
\subsection{Futility of Solving a Multi-Stage Model}\label{sec:ex_2stage}
The baseline strategy described in Section~\ref{sec:backtest} looks only one day into the future. We now show numerically that the added value of solving a dynamic model that looks~$H>1$ days into the future is negligible. Specifically, we compare the baseline strategy against a dynamic strategy, which computes the market commitments on each day in a receding horizon fashion by solving model~\eqref{eq:dynamic-problem} with~$H=2$ and~$\varphi_2(y_2) = p^\star \vert y_2 - y^\star \vert$, assuming that the electricity prices and the driving patterns for both subsequent days are known upfront. To compute the out-of-sample profits, we use the backtesting procedure outlined in Section~\ref{sec:backtest} but assume that the market bids are due at midnight when the initial state-of-charge~$y_0$ is known. As explained in Section~\ref{sec:multi-stage}, this assumption greatly simplifies the computation of the dynamic strategy. We define the \emph{value of vehicle-to-grid} under a particular bidding strategy as the cumulative \emph{excess} profit of that strategy with respect to a simplified strategy that does not participate in the reserve market. This simplified strategy solves problem~\eqref{pb:Rc} under the additional constraint $x^r = 0$. When examining the value of vehicle-to-grid over the years from 2017 to 2019, we find that the dynamic strategy outperforms the baseline strategy only by~$2.1$\%.
Hence, the dynamic strategy does not generate significantly higher revenues even though it has the advantage of knowing the electricity prices and driving patterns {\em two} days in advance (which would not be the case in reality). We also emphasize that {\em both} strategies benefit from the assumption that~$y_0$ is known upfront. However, we conjecture that both strategies benefit equally from this information relaxation and that the dynamic strategy thus still fails to outperform the baseline strategy when the market bids are due at noon and~$y_0$ is uncertain. This reasoning justifies our use of a {\em static} baseline strategy that looks merely one day into the future.}
\subsection{Experiments: Set-up, Results and Discussion}
\label{sec:restuls-discussion}
In the remainder, we distinguish six different simulation scenarios. The nominal scenario uses the parameters of Table~\ref{tab:Params} in Appendix~\ref{Apx:Data} for both training and testing. All other scenarios are based on slightly modified parameters. Specifically, we consider a lossless energy conversion scenario, which trains and tests the baseline strategy under the assumption that $\eta^+=\eta^-=1$. A variant of this scenario assumes lossless energy conversion in training but tests the resulting strategy under the nominal values of $\eta^+$ and $\eta^-$. We also consider two scenarios with weaker robustness guarantees that replace the uncertainty set~$\mathcal{D}$ in the training phase with its subset~$\hat{\mathcal{D}}$. The resulting bidding strategy can be infeasible because it may fail to provide the legally required amount of reserve power. The two scenarios do not differ in training but impose different sanctions for infeasibilities in testing. In the first of the two scenarios the vehicle owner is immediately excluded from the reserve market upon the first infeasibility, thus losing the opportunity to earn money by offering grid services for the rest of the year. \rev{In the second scenario, the vehicle owner is penalized by $k_\mathrm{pen} \cdot p^a(t)$
with $k_\mathrm{pen}= 5$ for energy that is missing for frequency regulation (see also Section~\ref{sec:backtest}) and by $ p^y = 0.75$\EUR{}/kWh
for energy that is missing for driving.} Finally, we consider a scenario in which the vehicle is only equipped with a unidirectional charger, that is, we set $\bar y^-=0$. Thus, the vehicle is unable to feed power back into the grid. Requests for up-regulation ($\delta(t)<0$) can therefore only be satisfied by consuming less energy, which is possible only if $\delta (t) x^r(t)\leq x^b(t)$.
\begin{figure}
\caption{\rev{Value of vehicle-to-grid in 2019 in different simulation scenarios.}}
\label{fig:Results}
\end{figure}
Figure~\ref{fig:Results} visualizes the value of vehicle-to-grid in 2019 as a function of time for each of the six simulation scenarios. We first observe that the value of vehicle-to-grid in the lossless energy conversion scenario amounts to \rev{165\EUR{}} at the end of the year and thus significantly exceeds the respective value of \rev{138\EUR{}} in the nominal scenario. Using a perfectly efficient vehicle charger would thus have boosted the value of vehicle-to-grid by~\rev{$20$\%} in 2019. This is not surprising because a perfect charger prevents costly energy conversion losses. Note also that the scenario with misspecified efficiency parameters results in almost the same value of vehicle-to-grid as the nominal scenario, which suggests that misrepresenting~$\eta^+$ or~$\eta^-$ in training has a negligible effect on the test performance. However, the underlying bidding strategy is~\emph{not} guaranteed to be feasible because it neglects energy conversion losses. Even though this strategy happens to remain feasible throughout~2019, it bears the risk of financial penalties or market exclusion. The two bidding strategies with weakened robustness guarantees initially reap high profits by aggressively participating in the reserve market, but they already fail in the first half of January to fulfill all market commitments. If infeasibilities are sanctioned by market exclusion, the cumulative excess profit thus remains flat after this incident. If infeasibilities lead to financial penalties, on the other hand, the cumulative excess profit drops sharply below zero near the incident but recovers quickly and then continues to grow steadily. As only a few other mild infeasibilities occur in 2019, the end-of-year excess profit of this aggressive bidding strategy still piles up to \rev{296\EUR, which is more than twice the excess profit in the nominal scenario.}
We conclude that the current level of financial penalties is too low to deter vehicle owners from making promises they cannot honor. Finally, with a unidirectional charger, the 2019 value of vehicle-to-grid falls to \rev{$15$\EUR{}}, which is only~$11$\% of the respective value with a bidirectional charger.
\begin{figure}
\caption{\rev{Value of vehicle-to-grid in 2019 for different penalty parameters.}}
\label{fig:Penalties}
\end{figure}
As the bidding strategy with a weakened robustness guarantee can earn high profits when infringements of the EU delivery guarantee incur only financial penalties, we carry out an additional experiment to analyze the impact of the penalty parameters~$k_{\mathrm{pen}}$ and~$p^y$ on the value of vehicle-to-grid; \rev{see Figure~\ref{fig:Penalties}. We observe that for $p^y = 0.75$\EUR/kWh, doubling the penalty factor to~$k_{\mathrm{pen}}=10$ decreases the value of vehicle-to-grid by only \rev{$4.1$\%} to~$284$\EUR{}. An additional calculation reveals that the TSO would have to set~$k_{\mathrm{pen}}\approx 240$ in order to push the value of vehicle-to-grid below that attained in the baseline scenario, which fulfills the EU delivery guarantee.}
On the other hand, for~$k_{\rm pen}=5$, a tenfold increase of~$p^y$ to $7.50$\EUR/kWh decreases the value of vehicle-to-grid by~\rev{$16.6$\% to $247$\EUR{},
and an additional tenfold increase of~$p^y$ to~$75$\EUR/kWh pushes the value of vehicle-to-grid below zero.}
\rev{Increasing~$p^y$ from $0.75$\EUR/kWh to $7.50$\EUR/kWh also reduces the number of days on which the vehicle owner does not have enough energy to drive from~7 to~2.}
\rev{Since the market prices display distinct regime shifts as European energy policies evolve, the value of vehicle-to-grid fluctuates over the years. Indeed, Figure~\ref{fig:temporal_evolution} in Appendix~\ref{Apx:results} shows that the value of vehicle-to-grid averages to $108$\EUR{} per year from~2016 to~2019 with a minimum of $76$\EUR{} in~2018.}
\begin{figure}
\caption{\rev{Value of vehicle-to-grid in 2019 as a function of the C-rate.}}
\label{fig:c-rate}
\end{figure}
In the next experiment we investigate how the value of vehicle-to-grid depends on the activation ratio~$\gamma/\Gamma$ \rev{and the battery's size and charge rate (C-rate). The C-rate is defined as the ratio of the charger power and the battery size, and thus it expresses the percentage of the battery's total capacity that can be charged within one hour.
Figure~\ref{fig:c-rate} shows that the value of vehicle-to-grid increases with the C-rate up to a saturation point that is insensitive to the battery size but decreases with the activation ratio.
In the saturation regime, the value of vehicle-to-grid increases with the battery size.
Typical electric vehicles can be fully charged overnight, within about 8~hours. The corresponding C-rate of~$0.125\mathrm{h}^{-1}$ falls within the saturation regime for both investigated activation ratios of~$0.1$ and~$0.2$. This observation has two implications. First, for typical electric vehicles the value of vehicle-to-grid cannot be increased by increasing the charger power (and thereby increasing the C-rate).
This insight contradicts previous studies by \cite{WK05a}, \cite{PC15} and \cite{OB19}, which advocate for electric vehicles with higher C-rates of 1, 0.45, and 0.37, respectively. The reason for this discrepancy is that none of the previous studies faithfully account for the EU delivery guarantee. In particular, \cite{OB19} allows the vehicle owner to anticipate future frequency deviations, and \cite{PC15} assume that the bidding strategy can be updated on an hourly basis, thereby exploiting information that is not available at the time when the market bids are collected by the TSO (\textit{i.e.}, one day in advance). By underestimating the amount of energy that the vehicle owner must be able to exchange with the TSO to satisfy all future obligations on the reserve market, these studies overestimate the amount of regulation power that can be sold, which makes potent battery chargers appear more useful than they actually are.
The second implication is that the activation ratio has a critical impact on the value of vehicle-to-grid. The incumbent storage providers of the European electricity grid, namely pumped-hydro storage power plants, have C-rates of about~$0.0125\mathrm{h}^{-1}$ \citep{EC20}, which are
significantly
smaller than the C-rates of electric vehicles. At such C-rates, the value of providing frequency regulation is the same for activation ratios of~0.1 and~0.2. To minimize competition from vehicle-to-grid, pumped-hydro storage operators may thus have an incentive to lobby for high activation ratios.}
From the perspective of a TSO, the higher the activation ratio, the larger the uncertainty set~$\mathcal{D}$ and the lower the probability of blackouts. However, Figure~\ref{fig:Daily_StdDev} suggests that an activation period of $30$~minutes is already conservative. On the other hand, the larger~$\mathcal{D}$, the harder it is for storage operators to provide regulation power, which may lead to less competition, higher market prices, and a higher total cost of frequency regulation. As the system operator is a public entity, this cost is ultimately borne by the public, and the choice of the activation ratio is a political decision.
\rev{The last two experiments study the influence of the driving time and distance on the value of vehicle-to-grid.
Figure~\ref{fig:Driving} in Appendix~\ref{Apx:results} shows that the value of vehicle-to-grid in 2019 approximately displays a concave dependence on the yearly driving distance and plateaus at 138\EUR{} for driving distances between $5{,}000$km and $10{,}000$km.
It is perhaps surprising that the value of vehicle-to-grid is {\em not} globally decreasing in the driving distance.
An explanation for this phenomenon
is provided in Appendix~\ref{Apx:results}.}
\begin{figure}
\caption{\rev{Value of vehicle-to-grid in 2019 versus daily plug time.}}
\label{fig:plug_time}
\end{figure}
Figure~\ref{fig:plug_time} shows the value of vehicle-to-grid in 2019 against the daily plug time, that is, the total amount of time during which the vehicle is connected to a bidirectional charger. In this experiment, we assume that a daily plug time of~$t_p \in [0\text{h}, 24\text{h}]$ means that cars are plugged from midnight to~$\frac{t_p}{2}$~am and from~$12\text{h} - \frac{t_p}{2}$~pm to midnight the next day. Whenever the car is not plugged, it consumes a constant amount of power such that the total consumption over the year corresponds to a mileage of 10,000~km as in the baseline experiment. We observe that the value of vehicle-to-grid increases with the daily plug time and saturates after 15~hours at a level that scales with the battery size.
Thus, a daily plug time of 15~hours suffices for offering the maximal possible amount of regulation power. Additional experiments show that for an activation ratio of 0.1 instead of 0.2, the saturation point increases to 20~hours.
\section{Conclusions}
\label{sec:conclusions}
We develop an optimization model for the decision problem of an electricity storage operator offering frequency regulation. To our best knowledge, this is the first model that faithfully accounts for the delivery guarantees required by the~\cite{EU17}. In contrast, all existing models relax the true delivery guarantee constraints and therefore risk that the electricity storage is empty (full) when a request for up- (down-)regulation arrives. In its original formulation, our model represents a non-convex robust optimization problem with functional uncertain parameters and thus appears to be severely intractable. Indeed, the state-of-the-art methods for solving the deterministic version of this problem for a single frequency deviation scenario in discrete time rely on techniques from mixed-integer linear programming. Maybe surprisingly, however, our robust optimization problem is equivalent to a tractable linear program. This is an exact result and does not rely on any approximations. To our best knowledge, we have thus discovered the first practically interesting class of optimization problems that become significantly easier through robustification.
In the numerical experiments centered around electric vehicles we restrict the planning horizon of our model to 24~hours. This is justified because the batteries of electric vehicles can be fully charged overnight and because vehicle owners may not know their driving needs several days in advance. Numerical experiments provide strong evidence that the added value of looking two or more days into the future is marginal even if future market prices and driving patterns were known.
As our optimization model faithfully captures effective legislation, it enables us to quantify the true value of vehicle-to-grid. This capability is relevant for understanding the economic incentives of different stakeholders such as vehicle owners, aggregators, equipment manufacturers, and regulators.
As for the vehicle owners, we find that their profits from frequency regulation range from~100\EUR\ to~500\EUR\ per year under typical driving patterns. It seems unlikely that such profits are sufficient for vehicle owners to forego the freedom of using their car whenever they please to. Nevertheless, some vehicle owners may choose to participate in vehicle-to-grid for idealistic reasons such as advancing the energy transition away from fossil fuels. Our numerical results also reveal that the value of vehicle-to-grid saturates at daily plug times above 15~hours. Thus, maximal profits from frequency regulation can be reaped even if the vehicle is disconnected from the grid up to 9~hours per day. This means that vehicle owners participating in vehicle-to-grid still enjoy considerable flexibility as to when to drive, which could help to promote the adoption of vehicle-to-grid.
Our results also have ramifications for aggregators, which pool multiple vehicles to offer regulation power. Indeed, aggregators may allow vehicle owners to reserve their vehicles for up to 9~hours of driving per day without sacrificing profit. This leaves vehicle owners ample freedom and reduces the probability that they exceed their driving slots. Thus, the actual number of vehicles available for frequency regulation at any point in time closely matches its prediction. This allows aggregators to place reserve market bids with small safety margins, which ultimately lowers transaction costs. In practice, aggregators can use our results as follows. On each day, they either ask drivers to schedule their driving needs for the next day, or they infer these needs from past travel patterns. Next, they solve our linear program for each car individually, which can be done efficiently by parallel computing even for large fleets with thousands of vehicles. Last, they add up the maximum regulation power each car can provide and sell it to the grid operator. This approach is easy to implement and requires few computational resources at the expense of sacrificing some optimality.
As opposed to individual vehicle owners, aggregators can decide which cars (or other flexible electric devices) to use to provide a given amount of regulation power. This has two advantages. First, battery degradation can be reduced by managing the state-of-charge of vehicles more precisely. Second, charging and discharging losses can be reduced by running vehicle chargers at their nominal operating points. In addition, aggregators may be able to trade on intraday markets, which would allow them to offer more regulation power for a given aggregate battery size. For example, if they encounter an extreme deviation trajectory, they can buy or sell electricity on the intraday market to maintain the state-of-charge within its bounds. This is risky, however, because intraday markets may not be liquid enough for aggregators to find trading partners. Especially not when the grid is already in distress, which is the case when extreme frequency deviations occur.
Equipment manufacturers design and sell bidirectional vehicle chargers. Contrary to previous studies that relax the exact delivery guarantee constraints, we find that the battery size and not the charger power is limiting the profits from frequency regulation. Manufacturers thus have no incentive to produce overly powerful bidirectional chargers.
The electricity system and the society as a whole could benefit significantly from vehicle-to-grid, which harnesses the idle storage capacities of electric vehicles and thereby reduces the need for other sources of flexible electricity supply, such as gas power plants or stationary batteries. This in turn reduces the need for imports of natural gas and critical raw materials, increases the long-term security of electricity supply, and decreases greenhouse gas emissions. Regulators may therefore want to make vehicle-to-grid more attractive by prescribing the availability and delivery prices, defining appropriate delivery guarantee requirements
and setting the penalties charged for non-compliance. Our results show that the vehicle owners' profits from frequency regulation decrease with the activation ratio and that current penalties are too low to incentivize vehicle owners to respect the law. Regulators could thus make vehicle-to-grid more attractive by decreasing the activation ratio and thereby relaxing the delivery guarantee requirements.
Given that the delivery guarantee in our nominal scenario is very restrictive, this would only slightly decrease the reliability of frequency regulation. If, at the same time, regulators were to increase the penalties for non-compliance, then the reliability of frequency regulation might even increase because more vehicle owners would honor their contractual obligations. Our new model can be used for finding the appropriate level of the penalty. Our results also show that incumbent storage operators with low charge rates may lobby against weaker delivery guarantees because these would increase their competition from vehicle-to-grid but not increase their profits.
\paragraph{Acknowledgments}
D.L. acknowledges fruitful discussions with Nadège Faul, Fran\c{c}ois Colet, Wilco Burghout, Paul Codani, Olivier Borne, Emilia Suomalainen, Ja\^{a}far Berrada, Willett Kempton, Evangelos Vrettos, and Sophie Hiriart, as well as funding from the Institut VEDECOM.
\linespread{1}
\small
\linespread{1.5}
\normalsize
\appendix
\section{Problem Data and Model Parameters}\label{Apx:Data}
\begin{table}[h!]
\centering
\caption{Parameters of the Nominal Simulation Scenario.}
\label{tab:Params}
\begin{tabular}{llrc}
Parameter & Symbol & Value & Unit \\
\midrule
\multicolumn{4}{c}{Vehicle Data}\\
Minimum State-of-Charge & $\ubar{y}$& 10 & kWh \\
Maximum State-of-Charge & $\bar{y}$ & 40 & kWh \\
Target State-of-Charge & $y^\star$ & 27 & kWh \\
Deviation Penalty & $p^\star$ & 0.15 & \EUR/kWh \\
Charging Efficiency & $\eta^+$ & 85 & \% \\
Discharging Efficiency & $\eta^-$ & 85 & \% \\
Maximum Charging Power & $\bar{y}^+$ & 7 & kW \\
Maximum Discharging Power & $\bar{y}^-$ & 7 & kW \\
Yearly Energy for Driving & & 2,000 & kWh \\
Fraction of Time Driving & & \rev{27} & \% \\
\midrule
\multicolumn{4}{c}{Grid Data}\\
Nominal frequency & $f_0$ & 50 & Hz \\
Normalization constant & $\Delta f$ & 200 & mHz\\
Average Utility Price from 2015 to 2019 & & 14.31 & cts/kWh \\
Average Availability Price from 2015 to 2019 & & \rev{0.825} & cts/kW/h \\
\midrule
\multicolumn{4}{c}{General Parameters}\\
Trading Interval & $\Delta t$ & 30 & min \\
Activation Period in $\mathcal{D}$ & $\gamma$ & 30 & min \\
Regulation Cycle in $\mathcal{D}$ & $\Gamma$ & 2.5 & h \\
Activation Period in $\hat{\mathcal{D}}$ & $\hat \gamma$ & 30 & min \\
Regulation Cycle in $\hat{\mathcal{D}}$ & $\hat \Gamma$ & 1 & day \\
Sampling Rate for Frequency Measurements & & 0.1 & Hz \\
Planning Horizon & $T$ & 1 & day \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}
\caption{Evolution of the availability and regulation prices from 2015 to 2019. The regulation price changes every~$10$s. For better visibility, we show its daily averages.}
\label{fig:Reserve price}
\end{figure}
\rev{\section{Additional Experiments and Results}\label{Apx:results}
In the first additional experiment we assess the heterogeneity of the value of vehicle-to-grid across the years~2016 to~2019. Figure~\ref{fig:temporal_evolution} visualizes the temporal evolution of the value of vehicle-to-grid and shows that it may vary between~75\EUR and~140\EUR at the end of the year. The second additional experiment compares two vehicles with uni- and bidirectional chargers.
Figure~\ref{fig:Driving} indicates that the value of vehicle-to-grid is approximately concave in the yearly mileage for both vehicles and for mileages up to~$30{,}000$km. Since a vehicle with a bidirectional charger can be used for unidirectional charging, the value of vehicle-to-grid with a bidirectional charger exceeds that of the same vehicle with a unidirectional charger. Furthermore, a higher mileage necessitates higher utility purchases~$x^b$, which has two opposing effects on regulation profits. On the one hand, power discharges become less likely because they only occur when \rev{$\delta(t) x^r(t) < -x^b(t)$}, which reduces energy conversion losses and makes the provision of frequency regulation more cost-effective. On the other hand, the effective upper bound~$\bar{y}^+(t) - x^b(t)$ on~$x^r(t)$ tightens, which reduces the amount of regulation power that can be offered on the market. The value of vehicle-to-grid peaks at~$138$\EUR for a vehicle with a bidirectional charger traveling between $5{,}000$km and $10{,}000$km per year and at~$42$\EUR for a vehicle with a unidirectional charger traveling~$30{,}000$km per year. For yearly mileages greater than~$5{,}000$km, the higher the yearly mileage, the lower the added value of a bidirectional charger. This result may be of particular interest for operators of shared electric vehicles. However, even for a yearly mileage of~$30{,}000$km, using a bidirectional instead of a unidirectional charger more than doubles the value of vehicle-to-grid. Furthermore, many drivers may prefer vehicles with internal combustion engines or fuel cells over electric vehicles to cover high yearly mileages because of their greater ranges and shorter refueling times. Therefore, owners of electric vehicles are more likely to cover yearly mileages close to the French average of $13{,}000$km.
Providing frequency regulation with a unidirectional charger would thus earn them about~$20$\EUR per year. In practice, these earnings would have to be shared with vehicle aggregators and equipment manufacturers.
\begin{figure}
\caption{\rev{Value of vehicle-to-grid from 2016 to 2019.}}
\label{fig:temporal_evolution}
\end{figure}
\begin{figure}
\caption{\rev{Value of vehicle-to-grid in 2019 vs mileage.}}
\label{fig:Driving}
\end{figure}
To complement the analysis of the nominal scenario described in Section~\ref{sec:restuls-discussion}, Figure~\ref{fig:9aug} shows the relationships between market bids, prices, driving needs, frequency deviations, and the battery state-of-charge on 9~August 2019 in a 10~second resolution. As expected, the vehicle owner charges the battery at night when utility prices are low. We also observe that the vehicle participates in the regulation market at a more or less constant level whenever it is not driving. This reduces the exposure to extreme frequency deviations at any one time. The battery state-of-charge naturally decreases when the vehicle is driving, increases when the vehicle is being charged and remains essentially constant when the vehicle provides only frequency regulation because the frequency deviations fluctuate rapidly around zero.
\begin{figure}
\caption{\rev{Frequency deviations, prices, market bids, driving needs, and state-of-charge on 9 August 2019.}}
\label{fig:9aug}
\end{figure}
}
\rev{\section{Proofs}\label{Apx:Proofs}}
\rev{This appendix contains the proofs of all theorems and propositions in the main text.}
\begin{proof}[Proof of Proposition~\ref{Prop:y}]
By definition we have
\begin{align*}
y\left(x^b,x^r,\delta,y_0,t\right) = & y_0 + \int_{0}^{t} \eta^+ \left[x^b(t') + \delta(t')x^r(t')\right]^+ - \frac{\left[x^b(t') + \delta(t')x^r(t')\right]^-}{\eta^-} -d(t') \, \mathrm{d} t' \\
= & y_0 + \int_0^t \min\left\{ \eta^+\left( x^b(t') + \delta(t') x^r(t') \right), \frac{x^b(t') + \delta(t')x^r(t')}{\eta^-} \right\} - d(t') \, \mathrm{d}t',
\end{align*}
where the second equality holds because $\eta^+ < \frac{1}{\eta^-}$. The postulated properties of $y(x^b,x^r,\delta,y_0,t)$ follow from the observation that the minimum of two (nondecreasing) affine functions is a concave (nondecreasing) function~\cite[p.~73]{SB04}.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{Prop:Var}]
We will show that
$ \max_{\delta \in \mathcal{D}} \mathrm{Var}(\delta) \leq \beta/T$, where $\beta = \ceil{T/\Gamma} \gamma$.
To this end, we note that
\begin{equation}
\label{eq:Var}
\max_{\delta \in \mathcal{D}} \mathrm{Var} (\delta) = \max_{\delta \in \mathcal{D}^+} \mathrm{Var} (\delta) \leq \max_{\delta \in \mathcal{D}_\beta} \mathrm{Var}(\delta),
\end{equation}
where $\mathcal{D}_\beta = \{ \delta \in \mathcal{L}(\mathcal{T}, [0,1]) : \int_{0}^{T} \delta(t) \, \mathrm{d}t \leq \beta \}$. The equality in~\eqref{eq:Var} holds because $\text{Var}(\delta)$ remains unchanged when~$\delta(t)$ is replaced with~$\vert \delta(t) \vert$ for every~$t \in \mathcal{T}$. Moreover, the inequality holds because $\mathcal{D}^+ \subseteq \mathcal{D}_\beta$. To see this, observe that for any $\delta \in \mathcal{D}^+$ we have
\begin{equation*}
\int_0^T \delta(t) \, \mathrm{d}t =
\sum_{n=1}^{\lceil T/\Gamma \rceil } \int_{(n-1)\Gamma}^{\min\{n\Gamma, T\}} \delta(t) \, \mathrm{d} t
\leq \ceil*{\frac{T}{\Gamma}} \revv{\gamma} = \beta.
\end{equation*}
Note that $\mathcal{D}_\beta$ constitutes a budget uncertainty set of the type introduced by~\cite{DB04}, where the uncertainty budget $\beta$ corresponds to the maximum number of regulation cycles within the planning horizon multiplied by the duration of an activation period. Thus,~$\beta$ can be viewed as the maximum amount of time within the planning horizon during which all reserve commitments must be honored. By weak duality, the highest variance of any scenario in $\mathcal{D}_\beta$ satisfies
\begingroup
\allowdisplaybreaks
\begin{align*}
\max_{\delta \in \mathcal{D}_\beta} \mathrm{Var}(\delta)
\leq & \min_{\lambda \geq 0} \max_{\delta \in \set{L}(\mathcal{T},[0,1])} \frac{1}{T} \int_{0}^{T}\delta^2(t) \, \mathrm{d}t + \lambda \left( \beta - \int_{0}^{T} \delta(t) \, \mathrm{d}t\right) \\
= & \min_{\lambda \geq 0} \lambda \beta + \max_{\delta \in \set{L}(\mathcal{T},[0,1])} \int_{0}^T \delta(t) \left( \delta(t)/T - \lambda \right) \mathrm{d}t \\
= & \min_{\lambda \geq 0} \lambda \beta + T \max_{\delta \in [0,1]} \delta \left( \delta/T - \lambda \right) \\
= & \min_{\lambda \geq 0} \lambda \beta + \max\left\{ 0,1-\lambda T \right\} \\
= & \min_{\lambda \geq 0} \max\left\{ \lambda \beta, 1 - \lambda \left( T - \beta \right) \right\} = \frac{\beta}{T}.
\end{align*}
\endgroup
The claim now follows by substituting the above result into~\eqref{eq:Var} and recalling the definition of~$\beta$.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{Prop:D}]
As for assertion~\textit{(i)} we first prove that $L \mathcal{D}_\mathcal{K} \subseteq \mathcal{D}$. To this end, select any $\bm{\delta} \in \mathcal{D}_\mathcal{K}$, and define $\delta = L \bm{\delta}$. It is easy to see that $\delta \in \set{L}(\mathcal{T}, [-1,1])$. Next, select any $t \in \mathcal{T}$. If $t \leq \Gamma$, note that
\begin{equation*}
\int_{\left[t-\Gamma\right]^+}^t \vert \delta(t') \vert \, \mathrm{d}t'
= \int_0^t \vert \delta(t') \vert \, \mathrm{d}t'
\leq \int_0^{\Gamma} \vert \delta(t') \vert \, \mathrm{d}t' = \Delta t \sum_{l=1}^{\Delta k} \vert \delta_l \vert \leq \gamma,
\end{equation*}
where the auxiliary parameter $\Delta k = \Gamma/\Delta t$ is integral thanks to Assumption~\ref{Ass:div}. The second inequality in the above expression holds because $\bm{\delta} \in \mathcal{D}_\mathcal{K}$. If $t \geq \Gamma$, on the other hand, we define $k = \lceil \frac{t}{\Delta t} \rceil$ and $\alpha = \lceil \frac{t}{\Delta t} \rceil - \frac{t}{\Delta t} \in [0,1)$. Then, we find
\begin{align*}
\int_{[t-\Gamma]^+}^t \vert \delta(t') \vert \, \mathrm{d}t'
=& \int_{t-\Gamma}^{ (k-\Delta k) \Delta t} \vert \delta(t') \vert \, \mathrm{d}t' + \int_{(k-\Delta k) \Delta t}^{ (k-1) \Delta t} \vert \delta(t') \vert \, \mathrm{d}t' + \int_{(k-1) \Delta t}^{ t} \vert \delta(t') \vert \, \mathrm{d}t' \\
= & \left(k \Delta t - t \right) \vert \delta_{k-\Delta k} \vert
+ \Delta t \sum_{l=k-\Delta k +1}^{k-1} \vert \delta_l \vert
+ \left( t - \left(k-1\right)\Delta t \right) \vert \delta_{k} \vert \\
= & \Delta t \left( \alpha \sum_{l=k-\Delta k}^{k-1} \vert \delta_l \vert
+ \left(1 - \alpha\right) \sum_{l=k- \Delta k + 1}^{k} \vert \delta_l \vert \right) \leq \gamma,
\end{align*}
where the inequality holds because $\bm{\delta} \in \mathcal{D}_\mathcal{K}$, which ensures that both $\sum_{l=k-\Delta k}^{k-1} \vert \delta_l \vert$ and $\sum_{l=k-\Delta k + 1}^{k} \vert \delta_l \vert$ are smaller or equal to~$\gamma/\Delta t$. As $t \in \mathcal{T}$ was chosen arbitrarily, this implies that $\delta \in \mathcal{D}$. In summary, we have shown that $L \mathcal{D}_\mathcal{K} \subseteq \mathcal{D}$.
Next, we show that $L^\dagger \mathcal{D} \subseteq \mathcal{D}_\mathcal{K}$. To this end, select any $\delta \in \mathcal{D}$ and define $\bm{\delta} = L^\dagger \delta$. It is easy to see that $\bm{\delta} \in [-1,1]^K$. Moreover, for any $k \in \mathcal{K}$ we have
\begin{equation*}
\sum_{l = 1 + [k - \Gamma/\Delta t]^+}^k \delta_l = \sum_{l = 1 + [k - \Gamma/\Delta t]^+}^k \frac{1}{\Delta t}\int_{\mathcal{T}_l} \delta(t') \, \mathrm{d}t' = \frac{1}{\Delta t} \int_{[k \Delta t - \Gamma]^+}^{k \Delta t} \delta(t') \, \mathrm{d}t' \leq \frac{\gamma}{\Delta t},
\end{equation*}
where the inequality holds because $\delta \in \mathcal{D}$. As $k \in \mathcal{K}$ was chosen arbitrarily, this implies that $\bm{\delta} \in \mathcal{D}_\mathcal{K}$. In summary, we have shown that $L^\dagger \mathcal{D} \subseteq \mathcal{D}_\mathcal{K}$.
Finally, we prove that $\mathcal{D}_\mathcal{K} \subseteq L^\dagger \mathcal{D}$. To this end, we observe that $L^\dagger L$ coincides with the identity mapping on $\mathbb{R}^K$. As $L \mathcal{D}_\mathcal{K} \subseteq \mathcal{D}$, this implies that
\begin{equation*}
\mathcal{D}_\mathcal{K} = L^\dagger L \mathcal{D}_\mathcal{K} \subseteq L^\dagger \mathcal{D}.
\end{equation*}
Since both $L^\dagger \mathcal{D} \subseteq \mathcal{D}_\mathcal{K}$ and $\mathcal{D}_\mathcal{K} \subseteq L^\dagger\mathcal{D}$, we have in fact shown that $L^\dagger \mathcal{D} = \mathcal{D}_\mathcal{K}$. Thus assertion~\textit{(i)} follows. Assertion~\textit{(ii)} can be proved in a similar manner. Details are omitted for brevity.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{Prop:yk}]
The proof widely parallels that of Proposition~\ref{Prop:y} and is therefore omitted.
\end{proof}
\rev{The proof of Theorem~\ref{th:time} relies on Propositions~\ref{Prop:yc}--\ref{Prop:H} below.}
\begin{Prop}\label{Prop:yc} The following equivalences hold.
\begin{subequations}
\begin{align*}
(i) \quad & y^+\left(x^b(t),x^r(t),\delta(t)\right) \leq \bar{y}^+(t) ~ \forall \delta \in \mathcal{D}, \forall t \in \mathcal{T}
\iff y^+_k\left(x^b_k,x^r_k,\delta_k\right) \leq \bar{y}^+_k ~ \forall \bm{\delta} \in \mathcal{D}_\mathcal{K}, \forall k \in \mathcal{K} \\
(ii) \quad & y^-\left(x^b(t),x^r(t),\delta(t)\right) \leq \bar{y}^-(t) ~ \forall \delta \in \mathcal{D}, \forall t \in \mathcal{T}
\iff y^-_k\left(x^b_k,x^r_k,\delta_k\right) \leq \bar{y}^-_k ~ \forall \bm{\delta} \in \mathcal{D}_\mathcal{K}, \forall k \in \mathcal{K}
\end{align*}
\end{subequations}
\end{Prop}
\begin{proof}
Assertion~$(i)$ can be reexpressed as
\begin{equation*}
\max_{t \in \mathcal{T}} \max_{\delta \in \mathcal{D}} y^+\left(x^b(t),x^r(t),\delta(t)\right) - \bar{y}^+(t) \leq 0 \iff \max_{k \in \mathcal{K}} \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y^+_k\left(x^b_k,x^r_k,\delta_k\right) - \bar{y}^+_k \leq 0.
\end{equation*}
We will prove this equivalence by showing that \rev{the left hand sides of the two inequalities are equal.}
Indeed, a direct calculation reveals that
\begin{equation}
\label{eq:yc}
\begin{aligned}
\max_{t \in \mathcal{T}} \max_{\delta \in \mathcal{D}} y^+\left(x^b(t),x^r(t),\delta(t)\right) - \bar{y}^+(t) = &
\max_{t \in \mathcal{T}} \max_{-1 \leq \delta(t) \leq 1} \left[ x^b(t) + \delta(t) x^r(t) \right]^+ - \bar{y}^+(t) \\
= & \max_{t \in \mathcal{T}} x^b(t) + x^r(t) - \bar{y}^+(t) \\
= & \max_{k \in \mathcal{K}} x^b_k + x^r_k - \bar{y}^+_k \\
= & \max_{k \in \mathcal{K}} \max_{-1 \leq \delta_k \leq 1} \left[x^b_k + \delta_k x^r_k\right]^+ - \bar{y}^+_k \\
= & \max_{k \in \mathcal{K}} \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y^+_k\left(x^b_k,x^r_k,\delta_k\right) - \bar{y}^+_k,
\end{aligned}
\end{equation}
where the first equality follows from the definition of $y^+$ in~\eqref{eq:y+} and the observation that $\{ \delta(t) : \delta \in \mathcal{D} \} = [-1,1]$, while the second equality holds because~$x^b(t) \geq 0$ and $x^r(t) \geq 0$ which implies that $\delta(t) = 1$ maximizes the instantaneous charging rate. The third equality exploits our assumption that $x^b, x^r$, and~$\bar{y}^+$ are piecewise constant functions. The fourth equality holds because~$x^b_k \geq 0$ and~$x^r_k \geq 0$, which implies that $\delta_k = 1$ maximizes the per-period charging rate. The fifth equality follows again from the definition of $y^+$ in~\eqref{eq:y+} and the observation that $\{\delta_k : \bm{\delta} \in \mathcal{D}_\mathcal{K}\} = [-1,1]$.
The proof of assertion~$(ii)$ is similar and therefore omitted.
\end{proof}
\begin{Prop}\label{Prop:ybar}
The following equivalence holds.
\begin{equation*}
y\left(x^b,x^r,\delta,y_0,t\right) \leq \bar{y}~\forall \delta \in \mathcal{D}, \forall t \in \mathcal{T}
\iff y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right) \leq \bar{y}~\forall \bm{\delta} \in \mathcal{D}_\mathcal{K},~\forall k \in \mathcal{K}\cup\{0\}
\end{equation*}
\end{Prop}
\begin{proof}
The claim follows if we can show that
\begin{equation}\label{eq:ybar}
\max_{t \in \mathcal{T}} \max_{\delta \in \mathcal{D}} y\left(x^b,x^r,\delta,y_0,t\right) = \max_{k\in\mathcal{K} \cup \{0\}} \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right).
\end{equation}
To this end, assume first that $t=k\Delta t$ for some $k \in \mathcal{K} \cup \{0\}$. In this case, we have
\begin{equation}
\label{eq:ybar_full}
\begin{aligned}
\max_{\delta \in \mathcal{D}} y\left(x^b,x^r,\delta,y_0,t\right)
= & \max_{\delta \in \mathcal{D}^+} y\left(x^b,x^r,\delta,y_0,t\right) \\
= & y_0 + \max_{\delta \in \mathcal{D}^+} \int_0^{t} \eta^+ \left( x^b(t') + \delta(t') x^r(t') \right) - d(t') \, \mathrm{d}t' \\
=& y_0 + \max_{\delta \in \mathcal{D}^+} \sum_{l=1}^{k} \int_{\mathcal{T}_l} \eta^+ \left( x^b_l + \delta(t') x^r_l \right) - d_l \, \mathrm{d}t' \\
=& y_0 + \max_{\delta \in \mathcal{D}^+} \Delta t \sum_{l=1}^{k} \eta^+ \left( x^b_l + (L^\dagger\delta)_l x^r_l \right) - d_l\\
=& y_0 + \max_{\bm{\delta} \in \mathcal{D}^+_\mathcal{K}} \Delta t \sum_{l=1}^{k} \eta^+ \left( x^b_l + \delta_l x^r_l \right) - d_l\\
= & \max_{\bm{\delta} \in \mathcal{D}^+_\mathcal{K}} y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right)
= \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right),
\end{aligned}
\end{equation}
where the first equality holds because $\delta \in \mathcal{D}$ if and only if $\vert \delta \vert \in \mathcal{D}^+$ and because $y$ is nondecreasing in~$\delta$ thanks to Proposition~\ref{Prop:y}. The second equality follows from the definitions of $y$, $y^+$, and $y^-$ and from the non-negativity of $x^b$, $x^r$ and $\delta$. The third equality exploits our assumption that $d$, $x^b$ and $x^r$ are piecewise constant. As $\delta$ is integrated against a piecewise constant function, it may be averaged over the trading intervals without changing its objective function value. The fifth equality then follows from Proposition~\ref{Prop:D}, while the sixth equality follows from the definitions of $y_k$, $y^+$ and $y^-$ and from the non-negativity of $\bm{x^b}$, $\bm{x^r}$ and $\bm{\delta}$. The seventh equality, finally, holds because $\bm{\delta} \in \mathcal{D}_\mathcal{K}$ if and only if $\vert \bm{\delta} \vert \in \mathcal{D}^+_\mathcal{K}$ and because $y_k$ is nondecreasing in~$\bm{\delta}$ thanks to Proposition~\ref{Prop:yk}.
Assume now more generally that $t \in \mathcal{T}_k$ for some $k \in \mathcal{K}$. If the vehicle is driving in trading interval~$\mathcal{T}_k$, then $\bar{y}^+(t) = \bar{y}^-(t) = 0$ for all $t\in \mathcal{T}_k$. Thus, we have
\begin{align*}
\max_{\delta \in \mathcal{D}} y\left(x^b,x^r,\delta,y_0,t\right) = & \max_{\delta \in \mathcal{D}} y\left(x^b,x^r,\delta,y_0,(k-1)\Delta t\right) - \int_{(k-1) \Delta t}^{t} d(t') \, \mathrm{d}t' \\
\leq & \max_{\delta \in \mathcal{D}} y\left(x^b,x^r,\delta,y_0,(k-1)\Delta t\right) = \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_{k-1}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right) ~\forall t \in \mathcal{T}_k,
\end{align*}
where the inequality holds because $d(t) \geq 0$ for all $t\in \mathcal{T}_k$, and the second equality follows from the first part of the proof. Alternatively, if the vehicle is parked in trading interval~$\mathcal{T}_k$, then $d(t) = 0$ for all $t\in \mathcal{T}_k$. Thus, we have
\begingroup
\allowdisplaybreaks
\begin{align*}
\max_{\delta \in \mathcal{D}} y\left(x^b,x^r,\delta,y_0,t\right) &
= \max_{\delta \in \mathcal{D}^+} y\left(x^b,x^r,\delta,y_0,t\right) \\
&= \max_{\delta \in \mathcal{D}^+} y\left(x^b,x^r,\delta,y_0,(k-1)\Delta t\right) + \int_{(k-1) \Delta t}^{t} \eta^+ y^+\left(x^b(t'),x^r(t'),\delta(t')\right) \, \mathrm{d}t' \\
& \leq \max_{\delta \in \mathcal{D}^+} y\left(x^b,x^r,\delta,y_0,k\Delta t\right) = \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}^+} y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right) = \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right)
\end{align*}
\endgroup
for all $t \in \mathcal{T}_k$, where the inequality holds because the integral is nondecreasing in~$t$, and the equalities follow from the first part of the proof. In summary, we have shown that
\begin{equation*}
\max_{\delta \in \mathcal{D}} y\left(x^b,x^r,\delta,y_0,t\right) \leq \max\left\{ \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_{k-1}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right), \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right) \right\}
\end{equation*}
for all $t \in \mathcal{T}_k$ and $k \in \mathcal{K}$. This implies that
\begin{equation*}
\max_{t \in \mathcal{T}} \max_{\delta \in \mathcal{D}} y\left(x^b,x^r,\delta,y_0,t\right) \leq \max_{k \in \mathcal{K} \cup \{0\}} \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right).
\end{equation*}
On the other hand, we have
\begin{equation*}
\max_{t \in \mathcal{T}} \max_{\delta \in \mathcal{D}} y\left(x^b,x^r,\delta,y_0,t\right) \geq
\max_{k \in \mathcal{K} \cup \{0\}} \max_{\delta \in \mathcal{D}} y\left(x^b,x^r,\delta,y_0,k\Delta t\right) =
\max_{k \in \mathcal{K} \cup \{0\}} \max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta},y_0\right),
\end{equation*}
where the equality follows from the first part of the proof. Combining the above inequalities implies~\eqref{eq:ybar}, and thus the claim follows.
\end{proof}
\begin{Prop}\label{Prop:yubar}
The following equivalence holds.
\begin{equation*}
y(x^b,x^r,\delta,y_0,t) \geq \ubar{y}~\forall \delta \in \mathcal{D}, \forall t \in \mathcal{T}
\iff y_{k}(\bm{x^b},\bm{x^r},\bm{\delta},y_0) \geq \ubar{y}~\forall \bm{\delta} \in \mathcal{D}_\mathcal{K}, \forall k \in \mathcal{K} \cup \{0\}
\end{equation*}
\end{Prop}
The proof of Proposition~\ref{Prop:yubar} \rev{is significantly more challenging than that of Proposition~\ref{Prop:ybar} because $y(x^b,x^r,\delta,y_0,t)$ is concave in~$\delta$. We make it more digestible by first proving two lemmas.}
\begin{lem}\label{lem:LDR_Opt}
If $f:\mathbb{R} \times \mathcal{T} \to \mathbb{R}$ is concave, continuous and nonincreasing in its first argument and piecewise constant on the trading intervals $\mathcal{T}_k$, $k \in \mathcal{K}$, in its second argument, then
\begin{equation*}
\min_{\delta \in \mathcal{D}^+} \int_{0}^{t} f\left(\delta(t'),t'\right) \, \mathrm{d}t' = \min_{\delta \in \mathcal{D}^+ \cap \set{L}(\mathcal{T}, \{0,1\})} \int_{0}^t f\left(\delta(t'),t'\right) \, \mathrm{d}t'\quad \forall t \in \mathcal{T}.
\end{equation*}
\end{lem}
\begin{proof}
For ease of exposition, assume first that~$t = T$ and define $t_k = \Delta t (k - 1)$ for every $k \in \mathcal{K}$. For every approximation parameter $N \in \mathbb{N}$ we define $\set{N} = \{1,\ldots,N\}$ and set
\begin{equation*}
\mathcal{T}_{k,n}^N = \left[\Delta t \left(k - 1 + \frac{n-1}{N}\right), \Delta t \left(k - 1 + \frac{n}{N}\right) \right) \quad \forall k \in \mathcal{K}, ~ \forall n \in \set{N}.
\end{equation*}
Note that the $\mathcal{T}^N_{k,n}$, $n \in \set{N}$, are mutually disjoint and that their union coincides with the $k$-th trading interval $\mathcal{T}_k$. Next, introduce a lifting operator $L_N: \mathbb{R}^{K \times N} \to \set{L}(\mathcal{T}, \mathbb{R})$ defined through $(L_N \bm{\delta})(t) = \delta_{k,n}$ if $t \in \mathcal{T}^N_{k,n}$ for $k \in \mathcal{K}$ and $n \in \set{N}$. In addition, let $L^\dagger_N : \set{L}(\mathcal{T},\mathbb{R}) \to \mathbb{R}^{K \times N}$ be the corresponding adjoint operator defined through $(L^\dagger_N \delta)_{k,n} = \frac{N}{\Delta t} \int_{\mathcal{T}^N_{k,n}} \delta(t) \, \mathrm{d}t$ for $k \in \mathcal{K}$ and $n \in \set{N}$. Using this notation, we first prove that
\begin{equation}\label{eq:Rie_Int}
\lim_{N \to \infty} \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( (L^\dagger_N \delta)_{k,n}, t_k \right) \frac{\Delta t}{N} = \int_0^T f\left(\delta(t),t\right) \, \mathrm{d}t
\end{equation}
for any fixed $\delta \in \mathcal{D}^+$. As $f$ is continuous and nonincreasing in its first argument and piecewise constant in its second argument, we have
\begin{align*}
\inf_{t \in \mathcal{T}^N_{k,n}} f\left( \delta(t), t \right) = f\bigg( \sup_{t \in \mathcal{T}^N_{k,n}} \delta(t), t_k \bigg) \leq f\left( \left(L^\dagger_N \delta\right)_{k,n}, t_k \right) \leq f\bigg( \inf_{t \in \mathcal{T}^N_{k,n}} \delta(t), t_k \bigg) = \sup_{t \in \mathcal{T}^N_{k,n}} f\left( \delta(t), t \right)
\end{align*}
for every $k \in \mathcal{K}$, $n \in \set{N}$ and $N \in \mathbb{N}$. Summing over $k$ and $n$ thus yields
\begin{equation*}
\sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} \inf_{t \in \mathcal{T}^N_{k,n}} f\left( \delta(t), t \right) \frac{\Delta t}{N} \leq \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( \left(L^\dagger_N \delta \right)_{k,n}, t_k \right) \frac{\Delta t}{N} \leq \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} \sup_{t \in \mathcal{T}^N_{k,n}} f\left( \delta(t), t \right) \frac{\Delta t}{N}
\end{equation*}
for every $N \in \mathbb{N}$. As $f(\delta(t),t)$ constitutes a composition of a continuous function with a Riemann integrable function, it is also Riemann integrable. Thus, the lower and upper Riemann sums in the above inequality both converge to $\int_0^T f(\delta(t),t) \, \mathrm{d}t$ as $N$ tends to infinity. This observation establishes~\eqref{eq:Rie_Int}. As $\delta \in \mathcal{D}^+$ was chosen arbitrarily, we may thus conclude that
\begin{equation*}
\inf_{\delta \in \mathcal{D}^+} \int_{0}^{T} f\left( \delta(t),t\right) \, \mathrm{d}t = \inf_{\delta \in \mathcal{D}^+} \lim_{N \to \infty} \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( \left(L^\dagger_N\delta\right)_{k,n}, t_k \right) \frac{\Delta t}{N}.
\end{equation*}
For the following derivations we introduce the auxiliary uncertainty set
\begin{equation*}
\mathcal{D}^+_{KN} = \left\{ \bm{\delta} \in \left[0,1\right]^{KN} : \sum_{l = 1 + [m - N \Gamma/\Delta t]^+}^{m} \delta_{l} \leq N \frac{\gamma}{\Delta t} \quad \forall m = 1,\ldots,KN \right\}
\end{equation*}
for $N \in \mathbb{N}$. By slight abuse of notation, we henceforth naturally identify any matrix $\bm \delta \in \mathbb R^{K \times N}$ with the vector obtained by concatenating the rows of $\bm \delta$. This convention allows us, for example, to write $\bm \delta \in \mathcal{D}^+_{KN}$ even if $\bm \delta$ was initially defined as a~$K \times N$-matrix. By repeating the arguments of Proposition~\ref{Prop:D}, it is easy to show that $L_N \mathcal{D}^+_{KN} \subseteq \mathcal{D}^+$ and $L^\dagger_N\mathcal{D}^+ = \mathcal{D}^+_{KN}$ for all $N \in \mathbb{N}$. Using these relations, we will now prove that
\begin{equation}\label{eq:Lim_Inf}
\inf_{\delta \in \mathcal{D}^+} \int_{0}^{T} f\left( \delta(t),t \right) \, \mathrm{d}t = \lim_{N \to \infty} \inf_{\bm{\delta} \in \mathcal{D}^+_{KN}} \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( \delta_{k,n}, t_k \right) \frac{\Delta t}{N}.
\end{equation}
To this end, select any $\epsilon > 0$ and $\delta^\star \in \mathcal{D}^+ $ with $\int_0^T f(\delta^\star(t),t) \, \mathrm{d}t \leq \inf_{\delta \in \mathcal{D}^+} \int_0^{\revv{T}} f(\delta(t),t) \, \mathrm{d}t + \epsilon$, and choose $N_\epsilon$ large enough such that
\begin{equation*}
\left \vert \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( \left(L^\dagger_N \delta^\star\right)_{k,n}, t_k \right) \frac{\Delta t}{N} - \int_0^T f\left( \delta^\star(t), t \right) \, \mathrm{d}t \right \vert \leq \epsilon \quad \forall N \geq N_\epsilon.
\end{equation*}
Note that such an $N_\epsilon$ exists thanks to~\eqref{eq:Rie_Int}. For any $N \geq N_\epsilon$, we thus find
\begin{align*}
0 \leq & \inf_{\bm{\delta} \in \mathcal{D}^+_{KN}} \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( \delta_{k,n}, t_k \right) \frac{\Delta t}{N} - \inf_{\delta \in \mathcal{D}^+} \int_{0}^{T} f\left( \delta(t),t \right) \, \mathrm{d}t \\
\leq & \inf_{\bm{\delta} \in \mathcal{D}^+_{KN}} \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( \delta_{k,n}, t_k \right) \frac{\Delta t}{N} - \int_0^T f\left( \delta^\star(t), t \right) \, \mathrm{d}t + \epsilon \\
\leq & \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( \left(L^\dagger_N \delta^\star\right)_{k,n}, t_k \right) \frac{\Delta t}{N} - \int_0^T f\left( \delta^\star(t), t \right) \, \mathrm{d}t + \epsilon \leq 2\epsilon,
\end{align*}
where the first inequality holds because $L_N\mathcal{D}^+_{KN} \subseteq \mathcal{D}^+$, the second inequality follows from the choice of $\delta^\star$, the third inequality exploits the identity $L^\dagger_N\mathcal{D}^+ = \mathcal{D}^+_{KN}$, and the fourth inequality holds because $N \geq N_\epsilon$. As $\epsilon > 0$ was chosen arbitrarily, Equation~\eqref{eq:Lim_Inf} follows.
In order to prove that
\begin{equation}\label{eq:Inf_dis}
\inf_{\delta \in \mathcal{D}^+} \int_{0}^{T} f\left( \delta(t), t \right) \, \mathrm{d}t
= \inf_{\delta \in \mathcal{D}^+ \cap \set{L}(\mathcal{T},\{0,1\})} \int_0^T f\left( \delta(t), t \right) \, \mathrm{d}t,
\end{equation}
we first observe that
\begin{align}
\inf_{\delta \in \mathcal{D}^+} \int_{0}^{T} f\left( \delta(t), t \right) \, \mathrm{d}t
= & \lim_{N \to \infty} \inf_{\bm{\delta} \in \mathcal{D}^+_{KN}} \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( \delta_{k,n}, t_k \right) \frac{\Delta t}{N} \notag \\
= & \lim_{N \to \infty} \inf_{\bm{\delta} \in \mathcal{D}^+_{KN} \cap \{0,1\}^{KN}} \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( \delta_{k,n}, t_k \right) \frac{\Delta t}{N}.\label{eq:Sum_bin}
\end{align}
Here, the first equality follows from~\eqref{eq:Lim_Inf}, and the second equality holds because $f$ is concave in its first argument, which implies that the minimum over $\delta$ is attained at a vertex of the polyhedron $\mathcal{D}^+_{KN}$. As all vertices of $\mathcal{D}^+_{KN}$ are binary by virtue of Lemma~\ref{lem:TUM} below, we can restrict $\bm{\delta}$ to $\{0,1\}^{K \times N}$ without loss of optimality. To prove~\eqref{eq:Inf_dis}, select any $\epsilon > 0$ and $N \in \mathbb{N}$ large enough such that
\begin{equation}
\label{eq:Min_inf}
\left \vert \min_{\bm{\delta} \in \mathcal{D}^+_{KN} \cap \{0,1\}^{K \times N}} \sum_{k \in \mathcal{K}} \sum_{n \in \set{N}} f\left( \delta_{k,n}, t_k \right) \frac{\Delta t}{N} - \inf_{\delta \in \mathcal{D}^+} \int_{0}^{T} f\left( \delta(t), t \right) \, \mathrm{d}t \right \vert \leq \epsilon.
\end{equation}
Note that such an $N$ exists because of~\eqref{eq:Sum_bin}. Next, let $\bm{\delta}^\star$ be a minimizer of the discrete optimization problem on the left hand side of the above expression, and set $\delta^\star = L_N \bm \delta^\star$. By~\eqref{eq:Min_inf} and because $\delta^\star$ is constant on the intervals $\mathcal{T}^N_{k,n}$, we thus have
\begin{equation*}
\left\vert \int_0^T f(\delta^\star(t),t) \, \mathrm{d}t - \inf_{\delta \in \mathcal{D}^+} \int_0^T f(\delta(t),t) \, \mathrm{d}t \right\vert \leq \epsilon.
\end{equation*}
As $L_N \mathcal{D}^+_{KN} \subseteq \mathcal{D}^+$ and $\bm \delta^\star \in \{0,1\}^{K \times N}$, we further have $\delta^\star \in \mathcal{D}^+ \cap \set{L}(\mathcal{T},\{0,1\})$. As $\epsilon$ was chosen arbitrarily, Equation~\eqref{eq:Inf_dis} follows.
If $t \in \mathcal{T}$ is a multiple of $1/(KN)$ for some $N \in \mathbb{N}$, then $f(\delta(t'),t')$ can be set to $0$ for all $t' \geq t$, and the above proof remains valid with obvious minor modifications. For any other $t \in \mathcal{T}$, the claim follows from a continuity argument. Details are omitted for brevity.
\end{proof}
\begin{lem}\label{lem:TUM}
For any $N \in \mathbb{N}$, all vertices of the polyhedron
\begin{equation*}
\mathcal{D}^+_{KN} = \left\{ \bm{\delta} \in \left[0,1\right]^{KN} : \sum\limits_{l= 1+\left[m - N\Gamma/\Delta t\right]^+}^{m} \delta_l \leq N\frac{\gamma}{\Delta t} ~ \forall m = 1,\ldots,KN \right\}
\end{equation*}
are binary vectors.
\end{lem}
\begin{proof}
The polyhedron $\mathcal{D}^+_{KN}$ can be represented more concisely as $\{\bm{\delta} \in \mathbb{R}_+^{KN}: \bm{A\delta} \leq \bm{b}\}$, where
\begin{equation*}
\bm{A} =
\begin{pmatrix}
\bm{C} \\
\bm{I}
\end{pmatrix} \in \mathbb{R}^{2KN \times KN}, \quad
\bm{b} =
\begin{bmatrix}
N\frac{\gamma}{\Delta t} \bm{1} \\
\bm{1}
\end{bmatrix} \in \mathbb{R}^{2KN}
\end{equation*}
and $\bm{C} \in \mathbb{R}^{KN \times KN}$ is defined through $C_{ij} = 1$ if~$i - N\Gamma/\Delta t < j \leq i$ and~$C_{ij} = 0$ otherwise. Here, $\bm{I}$ denotes the identity matrix and $\bm{1}$ the column vector of $1$s in $\mathbb{R}^{KN}$. By construction, $\bm{A}$ is a binary matrix where the $1$s appear consecutively in each row. Proposition~2.1 and Corollary~2.10 by \cite{GN99} thus imply that $\bm{A}$ is totally unimodular. As $\bm{b} \in \mathbb{Z}^{2KN}$ because of Assumption~\ref{Ass:div}, all vertices of~$\mathcal{D}^+_{KN}$ are integral thanks to Proposition~2.2 again by \cite{GN99}. In addition, as $\mathcal{D}^+_{KN} \subseteq [0,1]^{KN}$, the vertices of $\mathcal{D}^+_{KN}$ are in fact binary vectors.
\end{proof}
We are now ready to prove Proposition~\ref{Prop:yubar}.
\begin{proof}[Proof of Proposition~\ref{Prop:yubar}]
The claim follows if we can show that
\begin{equation}
\label{eq:yubar_start}
\min_{t \in \mathcal{T}} \min_{\delta \in \mathcal{D}} y(x^b, x^r, \delta, y_0, t) = \min_{k \in \mathcal{K} \cup \{0\}} \min_{\bm \delta \in \mathcal{D}_{\mathcal{K}}} y_k(\bm{x^b}, \bm{x^r}, \bm \delta, y_0).
\end{equation}
In the first part of the proof, we reformulate the continuous non-convex minimization problem $\min_{\delta \in \mathcal{D}} y(x^b, x^r, \delta, y_0, t)$ as a continuous linear program. To ease notation, we set $\Delta \eta = \frac{1}{\eta^-} - \eta^+ \geq 0$ and define the auxiliary functions
\begin{equation*}
\chi(\delta(t),t) = \max\left\{ \eta^+ x^r(t) \delta(t), \frac{1}{\eta^-}x^r(t)\delta(t) - \Delta \eta \, x^b(t) \right\}
\end{equation*}
and
\begin{equation*}
m(t) = \max\left\{ \eta^+ x^r(t), \frac{1}{\eta^-}x^r(t) - \Delta \eta \, x^b(t) \right\}
\end{equation*}
for all~$t \in \mathcal{T}$. The function $\chi(\delta(t),t) $ can be viewed as a {\em nonlinear decision rule} of the uncertain frequency deviation $\delta(t)$. Using these conventions, we find
\begin{equation}
\label{eq:yubar_cont_ldr}
\begin{aligned}
\min_{\delta \in \mathcal{D}} y\big(x^b,x^r,\delta,y_0,t\big)
& = \min_{\delta \in \mathcal{D}^+} y\big(x^b,x^r,-\delta,y_0,t\big) \\
&= y_0 + \min_{\delta \in \mathcal{D}^+} \int_0^{t} \eta^+ x^b(t') - \chi\left(\delta(t'),t'\right) - d(t') \, \mathrm{d}t'\\
& = y_0 + \min_{\delta \in \mathcal{D}^+ \cap \set{L}(\mathcal{T}, \{0,1\})} \int_0^{t} \eta^+ x^b(t') - \chi\left(\delta(t'),t'\right) - d(t') \, \mathrm{d}t'\\
& = y_0 + \min_{\delta \in \mathcal{D}^+ \cap \set{L}(\mathcal{T}, \{0,1\})} \int_0^{t} \eta^+ x^b(t') - m(t')\delta(t') - d(t') \, \mathrm{d}t' \\
& = y_0 + \min_{\delta \in \mathcal{D}^+} \int_0^{t} \eta^+ x^b(t') - m(t')\delta(t') - d(t') \, \mathrm{d}t',
\end{aligned}
\end{equation}
where the first equality holds because the statements $\delta \in \mathcal{D}$, $-\delta \in \mathcal{D}$ and $\vert \delta \vert \in \mathcal{D}^+$ are all equivalent and because $y$ is nondecreasing in $\delta$ thanks to Proposition~\ref{Prop:y}. The second equality follows from the definitions of $y$, $y^+$, $y^-$ and $\chi$, and the third equality is a direct consequence of Lemma~\ref{lem:LDR_Opt}, which applies because $-\chi$ is concave and nonincreasing in its first argument and, by virtue of Assumption~\ref{Ass:cst}, piecewise constant in its second argument. The fourth equality holds because $\chi(\delta(t'),t') = m(t')\delta(t')$ whenever $\delta(t') \in \{0,1\}$, and the last equality follows again from Lemma~\ref{lem:LDR_Opt}. Note that $m(t')\delta(t')$ constitutes a {\em linear decision rule} of $\delta(t)$.
In the second part of the proof we assume that $t = k\Delta t$ for some $k \in \mathcal{K} \cup \{0\}$ and show that
\begin{equation*}
\min_{\delta \in \mathcal{D}} y(x^b, x^r, \delta, y_0, t)
= \min_{\bm \delta \in \mathcal{D}_{\mathcal{K}}} y_k(\bm{x^b}, \bm{x^r}, \bm \delta, y_0).
\end{equation*}
To this end, we define $\chi_l(\delta_l) = \max\{\eta^+ x^r_l \delta_l, \frac{1}{\eta^-}x^r_l \delta_l - \Delta \eta \, x^b_l\}$ and $m_l = \max\{ \eta^+ x^r_l, \frac{x^r_l}{\eta^-} - \Delta \eta \, x^b_l \}$ for all~$l \in \mathcal{K}$. By~\eqref{eq:yubar_cont_ldr}, we thus have
\begin{equation}
\label{eq:yubar_res}
\begin{aligned}
\min_{\delta \in \mathcal{D}} y\big(x^b,x^r,\delta,y_0,k\Delta t\big)
& = y_0 + \min_{\delta \in \mathcal{D}^+} \int_0^{k \Delta t} \eta^+ x^b(t') - m(t')\delta(t') - d(t') \, \mathrm{d}t' \\
& = y_0 + \min_{\delta \in \mathcal{D}^+} \sum_{l=1}^k \int_{\mathcal{T}_l} \eta^+ x^b_l - m_l\delta(t') - d_l \, \mathrm{d}t' \\
& = y_0 + \min_{\bm{\delta} \in \mathcal{D}^+_\mathcal{K}} \Delta t \sum_{l=1}^{k} \eta^+ x^b_l - m_l\delta_l - d_l \\
& = \min_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_k \left( \bm{x^b}, \bm{x^r}, \bm{\delta}, y_0 \right),
\end{aligned}
\end{equation}
where the second equality holds because $d, x^b$ and $x^r$ are piecewise constant by virtue of Assumption~\ref{Ass:cst}, which implies that $m(t') = m_l$ for every~$t' \in \mathcal{T}_l$. The third equality then follows from Proposition~\ref{Prop:D}. The fourth equality can be proved by reversing the arguments from~\eqref{eq:yubar_cont_ldr} with obvious minor modifications. In fact, as the frequency deviation scenarios are now piecewise constant and can be encoded by finite-dimensional vectors, the proof requires no cumbersome limiting arguments as the ones developed in the proof of Lemma~\ref{lem:LDR_Opt}. We omit the details for brevity.
In the third part of the proof we assume that $t \in \mathcal{T}_k$ for some $k \in \mathcal{K}$ and show that
\begin{equation*}
\min_{t \in \mathcal{T}_k} \min_{\delta \in \mathcal{D}} y(x^b, x^r, \delta, y_0, t) = \min_{l \in \{k-1,k\}} \min_{\bm \delta \in \mathcal{D}_\mathcal{K}} y_l(\bm{x^b}, \bm{x^r}, \bm \delta, y_0).
\end{equation*}
As in the proof of Proposition~\ref{Prop:ybar}, we distinguish whether or not the vehicle is driving in period~$\mathcal{T}_k$. Specifically, if the vehicle is driving in period~$\mathcal{T}_k$, then $\bar{y}^+(t) = \bar{y}^-(t) = 0$, which implies that
\begin{align*}
\min_{\delta \in \mathcal{D}} y(x^b,x^r,\delta,y_0,t) = & \min_{\delta \in \mathcal{D}} y(x^b,x^r,\delta,y_0,(k-1)\Delta t) - \int_{(k-1) \Delta t}^{t} d(t') \, \mathrm{d}t' \\
= & \min_{\delta \in \mathcal{D}} y(x^b,x^r,\delta,y_0,k\Delta t) + \int_{t}^{k \Delta t} d(t') \, \mathrm{d}t' \\
\geq & \min_{\delta \in \mathcal{D}} y(x^b,x^r,\delta,y_0,k\Delta t) = \min_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_{k}(\bm{x^b}, \bm{x^r}, \bm{\delta},y_0).
\end{align*}
Here, the inequality holds because $d(t) \geq 0$ for all $t\in \mathcal{T}_k$, and the last equality follows from~\eqref{eq:yubar_res}. Otherwise, if the vehicle is parked in period $\mathcal{T}_k$, then $d(t) = 0$ for all $t\in \mathcal{T}_k$, and hence
\begin{equation}
\label{eq:local_sensitivity}
\begin{aligned}
& \phantom{=}\min_{\delta \in \mathcal{D}} y(x^b,x^r,\delta,y_0,t) \\
& = y_0 + \min_{\delta \in \mathcal{D}^+} \sum_{l=1}^{k-1} \int_{\mathcal{T}_l} \eta^+x^b_l - m_l \delta(t') - d_l \, \mathrm{d}t'
+ \int_{(k-1)\Delta t}^{t} \eta^+x^b_k - m_k \delta(t') \, \mathrm{d}t' \\
& = y_0 + \min_{\delta \in \mathcal{D}^+(t)}
\Delta t \sum_{l=1}^{k-1} \eta^+ x^b_l - d_l +
(t - (k - 1)\Delta t) \eta^+x^b_k - \sum_{l=1}^{k} \int_{\mathcal{T}_l} m_l \delta(t') \, \mathrm{d}t'\\
& = y_0 + \Delta t \sum_{l=1}^{k-1} \eta^+ x^b_l - d_l +
(t - (k - 1)\Delta t) \eta^+x^b_k
- \max_{\bm{\delta} \in \mathcal{D}^+_\mathcal{K}(t)} \Delta t \sum_{l=1}^{k} m_l \delta_l ,
\end{aligned}
\end{equation}
where we use the time-dependent uncertainty sets
\begin{equation*}
\mathcal{D}^+(t) = \bigg\{\delta \in \mathcal{D}^+ : \delta(t') = 0 ~\forall t' \in [t, k\Delta t]\bigg\} \quad \text{and} \quad \mathcal{D}^+_\mathcal{K}(t) = \bigg\{ \delta \in \mathcal{D}^+_\mathcal{K}: \delta_k \leq \frac{t - (k-1)\Delta t}{\Delta t} \bigg\}
\end{equation*}
to simplify the notation. The first equality in~\eqref{eq:local_sensitivity} follows from~\eqref{eq:yubar_cont_ldr} and Assumption~\ref{Ass:cst}. Note that $\delta(t')$ does not impact the objective function of the resulting minimization problem over~$\mathcal{D}^+$ for any $t' > t$. It is therefore optimal to set $\delta(t') = 0$ for all $t' \geq t$ and, in particular, for all $t' \in [t, k\Delta t]$. This restriction has no impact on the objective function but maximizes nature's flexibility in selecting harmful frequency deviations $\delta(t')$ for $t' \leq t$. Hence, the second equality in~\eqref{eq:local_sensitivity} follows. As $\delta$ is now integrated against a piecewise constant function, it may be averaged over the trading intervals without changing its objective function value. The third equality in~\eqref{eq:local_sensitivity} thus holds because~$\set{L}^\dagger \mathcal{D}^+(t) = \mathcal{D}^+_\mathcal{K}(t)$, which can be proved similarly to Proposition~\ref{Prop:D} by noting that
\begin{equation*}
( \set{L}^\dagger \delta )_k
= \frac{1}{\Delta t} \int_{(k-1)\Delta t}^{t} \delta(t') \, \mathrm{d}t' \leq \frac{t - (k-1)\Delta t}{\Delta t}
\quad \forall \delta \in \mathcal{D}^+(t).
\end{equation*}
In the following we show that~\eqref{eq:local_sensitivity} is piecewise affine in~$t$. To this end, note that the optimization problem in the last line of~\eqref{eq:local_sensitivity} can be expressed more concisely as the standard form linear program
\begin{equation}
\label{pb:SP}
\begin{array}{{>{\displaystyle}c>{\displaystyle}l}}
\min_{\bm z \geq \bm 0} & \bm c^\top \bm z \\
\rm{s.t.} & \bm A \bm z = \bm b(t),
\end{array}
\end{equation}
where $\bm z^\top = (\bm \delta^\top, \bm s^\top) \in \mathbb{R}^{K} \times \mathbb{R}^{2K}$ combines the (averaged) frequency deviations in the trading intervals with a vector of slack variables. Here, the vector $\bm c \in \mathbb{R}^{3K}$ of objective function coefficients is defined through $c_l = -m_l \Delta t$ if $l \leq k$ and $c_l = 0$ otherwise. The constraints involve the matrix
\begin{equation*}
\bm A = \begin{pmatrix}
\bm C & \bm{I} & \bm{0} \\
\bm{I}& \bm{0} & \bm{I}
\end{pmatrix} \in \mathbb{R}^{2K \times 3K},
\end{equation*}
where $\bm{C} \in \mathbb{R}^{K \times K}$ is defined through $C_{ij} = 1$ if $i - \Gamma/\Delta t < j \leq i$ and $C_{ij} = 0$ otherwise, and the vector $\bm{b}(t) \in \mathbb{R}^{2K}$ is defined through $b_l(t) = \frac{t - (k-1)\Delta t}{\Delta t}$ if $l = k+K$ and $b_l(t) = 1$ otherwise. By Lemma~\ref{lem:TUM} and Proposition~2.1 of \cite{GN99}, $\bm A$ is totally unimodular.
Note that~\eqref{pb:SP} is solvable for every $t \in \mathcal{T}_k$ because its feasible set is non-empty and compact. Next, choose any~$t_0$ in the interior of~$\mathcal{T}_k$, denote by~$\bm{B}$ an optimal basis matrix for problem~\eqref{pb:SP} at $t = t_0$, and define~$\bm{z}^\star(t) = \bm B^{-1} \bm b(t) $ for all~$t \in \mathcal{T}_k$. In the following, we will use local sensitivity analysis of linear programming to show that~$\bm z^\star(t)$ is optimal in~\eqref{pb:SP} for all~$t \in \mathcal{T}_k$. As the basis~$\bm B$ remains dual feasible when~$t$ deviates from~$t_0$, it suffices to show that
\begin{equation}
\label{eq:local_sen}
\bm{z}^\star(t) = \bm{z}^\star(t_0) + \frac{t-t_0}{\Delta t} \bm B^{-1}\bm{e}_{K+k}
\geq \bm 0 \quad \forall t \in \mathcal{T}_k,
\end{equation}
where~$\bm{e}_{K+k}$ denotes the $(K+k)$-th standard basis vector in~$\mathbb{R}^{2K}$~\citep[p.~207]{DB97}.
To this end, note that $\bm B$ is a non-singular square matrix constructed from $2K$~columns of~$\bm A$ and is therefore also totally unimodular. Moreover, $\bm B^{-1}$ is totally unimodular because pivot operations preserve total unimodularity~\citep[Proposition~2.1]{GN99}. Hence, we have~$\bm{B}^{-1}\bm{e}_{K+k} \in \{-1,0,1\}^{2K}$. By construction, we further have $\bm b(t) \in \{0,1\}^{2K}$ for $t = k \Delta t$, which implies that $\bm{z}^\star(k\Delta t) \in \mathbb{Z}^{2K}$. Evaluating~\eqref{eq:local_sen} at $t = k \Delta t$ then yields
\begin{equation*}
\bm z^\star(t_0) = \bm z^\star(k\Delta t) - \frac{k\Delta t - t_0}{\Delta t} \bm B^{-1} \bm e_{K+k},
\end{equation*}
which ensures that $\bm z^\star(k \Delta t) \ge \bm 0$. Indeed, if any component of the integral vector $\bm z^\star(k \Delta t)$ were strictly negative, it would have to be smaller or equal to~$-1$. As $t_0$ resides in the interior of~$\mathcal{T}_k$ and thus $\vert (k\Delta t - t_0)/\Delta t \vert < 1$, the corresponding component of~$\bm z^\star(t_0)$ would then also have to be strictly negative. This, however, contradicts the optimality of~$\bm z^\star(t_0)$, which implies that $\bm z^\star(t_0) \ge \bm 0$. Hence, we have $\bm z^\star(k \Delta t) \ge \bm 0$. One can use similar arguments to prove that $\bm z^\star((k-1)\Delta t) \ge \bm 0$. As $\bm z^\star(t)$ is affine in~$t$, it is indeed non-negative for all~$t \in \mathcal{T}_k$.
The above reasoning shows that~$\bm z^\star(t)$ is optimal in~\eqref{pb:SP} and that the minimum of~\eqref{pb:SP} is affine in~$t$ on~$\mathcal{T}_k$. Equation~\eqref{eq:local_sensitivity} further implies that~$\min_{\delta \in \mathcal{D}} y(x^b,x^r,\delta,y_0,t)$ is affine in~$t$ on~$\mathcal{T}_k$, and thus
\begin{equation*}\label{eq:yubar_squezze}
\min_{t \in \mathcal{T}_k} \min_{\delta \in \mathcal{D}} y(x^b,x^r,\delta,y_0,t) =
\min_{l\in\{k-1,k\}} \min_{\delta \in \mathcal{D}} y(x^b,x^r,\delta,y_0,l\mathcal{D}elta t) =
\min_{l\in\{k-1,k\}} \min_{\bm \delta \in \mathcal{D}_\mathcal{K}} y_l(\bm{x^b},\bm{x^r},\bm \delta,y_0),
\end{equation*}
where the second equality follows from~\eqref{eq:yubar_res}. As~$k \in \mathcal{K}$ was chosen arbitrarily, \eqref{eq:yubar_start} follows.
\end{proof}
\begin{Prop}\label{Prop:H} The following equality holds.
\begin{equation*}
\max_{\delta \in \set{\hat{D}}, y_0 \in \set{\hat{Y}}_0} \rev{\varphi(y(x^b,x^r,\delta,y_0,T))} = \max_{\bm{\delta} \in \set{\hat{D}}_\mathcal{K}, y_0 \in \set{\hat{Y}}_0} \rev{\varphi(y_K(\bm{x^b},\bm{x^r},\bm{\delta},y_0))}
\end{equation*}
\end{Prop}
\begin{proof}
By introducing an auxiliary epigraphical variable~$z$, we find
\begin{equation}
\begin{aligned}
\label{eq:z}
\max_{\delta \in \set{\hat{D}},y_0 \in \set{\hat{Y}}_0} \rev{\varphi(y(x^b,x^r,\delta,y_0,T))}
& = \left\{ \begin{array}{*1{>{\displaystyle}c}*1{>{\displaystyle}l}}
\min_{z} & \rev{z} \\
\rm{s.t.} & z \geq \max_{\delta \in \set{\hat{D}}, y_0 \in \set{\hat{Y}}_0} \rev{\varphi(y(x^b,x^r,\delta,y_0,T))}
\end{array}
\right. \\
& = \rev{\left\{ \begin{array}{*1{>{\displaystyle}c}*2{>{\displaystyle}l}}
\min_{z} & z \\
\rm{s.t.} & z \geq \max_{\delta \in \set{\hat{D}}, y_0 \in \set{\hat{Y}}_0}
a_n y(x^b,x^r,\delta,y_0,T) + b_n & \forall n \in \set{N}
\end{array}
\right.} \\
& = \rev{\left\{ \begin{array}{*1{>{\displaystyle}c}*2{>{\displaystyle}l}}
\min_{z} & z \\
\rm{s.t.} & z \geq \max_{\delta \in \set{\hat{D}}, y_0 \in \set{\hat{Y}}_0}
a_n y_K(\bm{x^b},\bm{x^r},\bm \delta,y_0) + b_n & \forall n \in \set{N}
\end{array}
\right.} \\
& = \max_{\bm{\delta} \in \set{\hat{D}}_\mathcal{K}, y_0 \in \set{\hat{Y}}_0} \rev{\varphi(y_K(\bm{x^b},\bm{x^r},\bm{\delta},y_0))}.
\end{aligned}
\end{equation}
\rev{The} second equality follows \rev{from the definition of $\varphi$ and the third equality follows} from Propositions~\ref{Prop:ybar} and~\ref{Prop:yubar}, which apply \rev{since} $\hat{\mathcal{D}}$ and $\hat{\mathcal{D}}_\mathcal{K}$ have the same structures as $\mathcal{D}$ and $\mathcal{D}_{\mathcal{K}}$, respectively.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{th:time}]
The claim follows immediately from Propositions~\ref{Prop:yc}--\ref{Prop:H}.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{th:lr}]
By introducing embedded optimization problems that evaluate the (decision-dependent) worst-case frequency deviation scenarios and by replacing the uncertain initial state-of-charge in each robust constraint with its (decision-\emph{in}dependent) worst-case value, \eqref{pb:R} becomes
\begin{equation}
\label{pb:R'}
\begin{array}{>{\displaystyle}c*3{>{\displaystyle}l}>{\displaystyle}c}
\min_{\bm{x^b},\bm{x^r} \in \set{X}_\mathcal{K}, z \in \mathbb{R}} & \multicolumn{4}{>{\displaystyle}l}{c_\mathcal{K}(\bm{x^b},\bm{x^r}) + \rev{z}} \\
\rm{s.t.} & \max_{\bm{\delta} \in \mathcal{D}_{\mathcal{K}}} y^+(x^b_k,x^r_k,\delta_k) &\leq \bar{y}^+_k
& \forall k \in \mathcal{K} & \hspace{0.75cm}\text{(a)} \\
& \max_{\bm{\delta} \in \mathcal{D}_{\mathcal{K}}} y^-(x^b_k,x^r_k,\delta_k) &\leq \bar{y}^-_k
& \forall k \in \mathcal{K} & \hspace{0.75cm}\text{(b)} \\
& \max_{\bm \delta \in \mathcal{D}_{\mathcal{K}}} y_{k}(\bm{x^b},\bm{x^r},\bm{\delta}, \bar y_0) &\leq \bar{y}
& \forall k \in \mathcal{K} \cup \{0\} & \hspace{0.75cm}\text{(c)} \\
& \min_{\bm \delta \in \mathcal{D}_{\mathcal{K}}} y_{k}(\bm{x^b},\bm{x^r},\bm{\delta}, \ubar y_0) &\geq \ubar{y}
& \forall k \in \mathcal{K} \cup \{0\} & \hspace{0.75cm}\text{(d)} \\
& \rev{\max_{y_0 \in \hat{\set{Y}}_0} \max_{\bm{\delta} \in \set{\hat{D}}_\mathcal{K}} \varphi\big(y_K(\bm{x^b},\bm{x^r},\bm{\delta},y_0)\big)}
& \rev{\leq z} & &\rev{\hspace{0.75cm}\text{(e)}}
\end{array}
\end{equation}
Here, the \rev{worst-case cost-to-go} has been moved from the objective function to the constraints by introducing the auxiliary epigraphical variable~$z$. To show that~\eqref{pb:R'} is equivalent to~\eqref{pb:LR}, we reuse several results derived for the proof of Theorem~\ref{th:time}. First, by Equation~\eqref{eq:yc} in the proof of Proposition~\ref{Prop:yc} the maximum charging power in~(\ref{pb:R'}a) equals
\begin{equation*}
\max_{\bm{\delta} \in \mathcal{D}_{\mathcal{K}}} y^+(x^b_k,x^r_k,\delta_k) = x^r_k + x^b_k.
\end{equation*}
Using similar arguments, it can be shown that the maximum discharging power in~(\ref{pb:R'}b) reduces to
\begin{equation*}
\max_{\bm{\delta} \in \mathcal{D}_{\mathcal{K}}} y^-(x^b_k,x^r_k,\delta_k) = x^r_k - x^b_k.
\end{equation*}
Next, Equation~\eqref{eq:ybar_full} in the proof of Proposition~\ref{Prop:ybar} reveals that, for any~$k \in \mathcal{K} \cup \{0\}$, the maximum state-of-charge in~(\ref{pb:R'}c) is given by
\begin{equation*}
\max_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_{k}\left(\bm{x^b},\bm{x^r},\bm{\delta}, \bar y_0\right)
= \bar y_0 + \max_{\bm{\delta} \in \mathcal{D}^+_\mathcal{K}} \Delta t \sum_{l=1}^{k} \eta^+ \left( x^b_l + \delta_l x^r_l \right) - d_l.
\end{equation*}
Similarly, Equation~\eqref{eq:yubar_res} in the proof of Proposition~\ref{Prop:yubar} implies that, for every~$k \in \mathcal{K} \cup \{0\}$, the minimum state-of-charge in~(\ref{pb:R'}d) amounts to
\begin{equation*}
\min_{\bm{\delta} \in \mathcal{D}_\mathcal{K}} y_k \left( \bm{x^b}, \bm{x^r}, \bm{\delta}, \ubar y_0 \right) = \ubar y_0 + \min_{\bm{\delta} \in \mathcal{D}^+_\mathcal{K}} \Delta t \sum_{l=1}^{k} \eta^+ x^b_l - m_l\delta_l - d_l,
\end{equation*}
where $m_l = \max\{ \eta^+ x^r_l, \frac{1}{\eta^-} x^r_l - \Delta \eta \, x^b_l \}$ constitutes an implicit function of the market decisions~$x^b_l$ and~$x^r_l$. As~(\ref{pb:R'}d) imposes a~\emph{lower} bound on the minimum state-of-charge, $m_l$ may be reinterpreted as an auxiliary epigraphical variable that satisfies~$m_l \geq \eta^+ x^r_l$ and~$m_l \geq \frac{1}{\eta^-} x^r_l - \Delta \eta \, x^b_l$.
\rev{Finally, the maximum cost-to-go in~(\ref{pb:R'}e) can be reformulated as
\begin{align*}
& \max_{y_0 \in \hat{\set{Y}}_0} \max_{\bm{\delta} \in \set{\hat{D}}_\mathcal{K}} \varphi\big(y_K(\bm{x^b},\bm{x^r},\bm{\delta},y_0)\big)
= \max_{y_0 \in \hat{\set{Y}}_0}\max_{\bm{\delta} \in \set{\hat{D}}_\mathcal{K}} \max_{n \in \set{N}} \, a_n y_K(\bm{x^b},\bm{x^r},\bm{\delta},y_0) + b_n \\
& \quad = \max_{\bm \delta \in \set{\hat{D}}_\mathcal{K}} \max\left\{ \max_{n \in \set{N}_+} b_n + a_n y_K(\bm{x^b},\bm{x^r},\bm{\delta},\hat{\bar y}_0), \,
\max_{n \in \set{N}_-} b_n + a_n y_K(\bm{x^b},\bm{x^r},\bm{\delta},\ubar{\hat y}_0),\max_{n\in\mathcal{N}_0} b_n
\right\},
\end{align*}
where the first equality follows from the definition of the convex piecewise affine function~$\varphi$. The second equality holds because the order of maximization is immaterial, because~$a_n > 0$ for all~$n \in \set{N}_+$, $a_n < 0$ for all~$n \in \set{N}_-$ and~$a_n=0$ for all~$n\in\mathcal{N}_0$ and because the state-of-charge $y_K(\bm{x^b},\bm{x^r},\bm{\delta},y_0)$ increases with~$y_0$. Requiring the last expression to be smaller than or equal to~$z$ is equivalent to
\begin{align*}
\max_{y_0 \in \hat{\set{Y}}_0} \max_{\bm{\delta} \in \set{\hat{D}}_\mathcal{K}} \varphi\big(y_K(\bm{x^b},\bm{x^r},\bm{\delta},y_0)\big) \leq z
\iff
\begin{cases}
\displaystyle \max_{\bm{\delta} \in \set{\hat{D}}_\mathcal{K}} y_K(\bm{x^b},\bm{x^r},\bm{\delta},\hat{\bar y}_0) \leq (z-b_n)/a_n & \forall n \in \set{N}_+ \\
\displaystyle \min_{\bm{\delta} \in \set{\hat{D}}_\mathcal{K}} y_K(\bm{x^b},\bm{x^r},\bm{\delta},\ubar{\hat y}_0) \geq (z-b_n)/a_n & \forall n \in \set{N}_- \\
b_n\leq z & \forall n\in\mathcal{N}_0.
\end{cases}
\end{align*}
As~$\mathcal{D}_{\mathcal{K}}$ and~$\hat{\mathcal{D}}_{\mathcal{K}}$ have the same structure, the embedded optimization problems over $\bm \delta \in \hat{\set{D}}_\mathcal{K}$ admit similar linear reformulations as the embedded optimization problems over $\bm \delta \in \set{D}_\mathcal{K}$ in~(\ref{pb:R'}c) and~(\ref{pb:R'}d). Substituting all obtained reformulations into~\eqref{pb:R'} yields~\eqref{pb:LR}.
}
\end{proof}
\begin{proof}[Proof of Theorem~\ref{th:lp}]
Problem~\eqref{pb:LR} can be reformulated as a linear program by using the standard machinery of robust optimization~\citep{DB04,AB09}. For example, the robust upper bound on the state-of-charge for a fixed~$k \in \mathcal{K} \cup \{0\}$ is equivalent to
\begin{equation}
\label{eq:max_primal}
\bar y_0 + \Delta t \sum_{l = 1}^k \eta^+ \left( x^b_l + \delta_l x^r_l \right) - d_l \leq \bar y \quad \forall \bm \delta \in \mathcal{D}^+_\mathcal{K}
\iff
\max_{\bm \delta \in \mathcal{D}^+_\mathcal{K}} \Delta t \sum_{l = 1}^k \eta^+ \left( x^b_l + \delta_l x^r_l \right) - d_l \leq \bar y - \bar y_0.
\end{equation}
By strong linear programming duality, the maximization problem in~\eqref{eq:max_primal} is equivalent to
\begin{equation}
\label{eq:min_dual}
\begin{array}{*1{>{\displaystyle}c}*2{>{\displaystyle}l}}
\min_{\bm \Lambda^+, \bm \Theta^+ \in \mathbb{R}^{K \times K}_+} & \multicolumn{2}{>{\displaystyle}l}{\sum_{l = 1}^k \Delta t \left( \eta^+ x^b_l + \Lambda^+_{k,l} - d_l \right) + \gamma \Theta^+_{k,l} } \\
\rm{s.t.} &
\Lambda^+_{k,l} + \sum\limits_{i = l}^{j(k,l)} \Theta^+_{k,i} \geq \eta^+ x^r_l & \forall l \in \mathcal{K}: \; l \leq k, \\
\end{array}
\end{equation}
and the minimum of~\eqref{eq:min_dual} is smaller or equal to~$\bar y - \bar y_0$ if and only if problem~\eqref{eq:min_dual} has a feasible solution whose objective value is smaller or equal to~$\bar y - \bar y_0$. Therefore, the robust constraint~\eqref{eq:max_primal} is equivalent to the following system of ordinary linear constraints.
\begin{equation*}
\label{eq:min_dual_const}
\begin{array}{*3{>{\displaystyle}l}}
\sum_{l = 1}^k \Delta t \left( \eta^+ x^b_l + \Lambda^+_{k,l} - d_l \right) + \gamma \Theta^+_{k,l}
& \leq \bar y - \bar y_0 \\
\Lambda^+_{k,l} + \sum\limits_{i = l}^{j(k,l)} \Theta^+_{k,i} \geq \eta^+ x^r_l \quad \forall l \in \mathcal{K}: \; l \leq k \\
\end{array}
\end{equation*}
The remaining robust constraints in~\eqref{pb:LR} can be simplified in a similar manner.
\end{proof}
\rev{
\begin{proof}[Proof of Proposition~\ref{Prop:phi}]
We prove the claim by backward induction. Note first that $\varphi_{H}$ is convex and piecewise affine by definition. Next, fix any~$h\in \set H$, and assume that $\varphi_{h+1}$ is convex and piecewise affine, which implies that problem~\eqref{eq:dynamic-problem} is structurally equivalent to problem~\eqref{pb:Rc}. By Theorems~\ref{th:time}, \ref{th:lr} and~\ref{th:lp}, problem~\eqref{eq:dynamic-problem} can therefore be reformulated as a linear program whose right hand side coefficients depend affinely on~$y_h$. Global sensitivity analysis of linear programming~\citep[p.~214]{DB97} then ensures that $\varphi_h(y_h)$ is convex and piecewise affine in~$y_h$.
\end{proof}
}
\end{document} |
\begin{document}
\title{\revision{blue}{Design and implementation of Tulip: an open-source interior-point linear optimization solver with abstract linear algebra}}
\begin{abstract}
This paper introduces the algorithmic design and implementation of Tulip, an open-source interior-point solver for linear optimization.
It implements \revision{red}{a regularized} homogeneous interior-point algorithm with multiple centrality corrections, and therefore handles unbounded and infeasible problems.
\revision{blue}{The solver is written in Julia, thus allowing for a flexible and efficient implementation:} Tulip's algorithmic framework is fully disentangled from linear algebra implementations \revision{blue}{and from a model's arithmetic}.
\revision{blue}{In particular}, this allows to seamlessly integrate specialized routines for structured problems.
Extensive computational results are reported.
We find that Tulip is competitive with open-source interior-point solvers on \revision{blue}{H. Mittelmann's benchmark of barrier linear programming solvers}.
Furthermore, \revision{blue}{we design specialized linear algebra routines for structured master problems in the context of Dantzig-Wolfe decomposition.
These routines yield a tenfold speedup on large and dense instances that arise in power systems operation and two-stage stochastic programming, thereby outperforming state-of-the-art commercial interior point method solvers.
Finally, we illustrate Tulip's ability to use different levels of arithmetic precision by solving problems in extended precision.}
\end{abstract}
\section{Introduction}
\label{sec:intro}
Linear programming (LP) algorithms have been around for over 70 years, and LP remains a fundamental paradigm in optimization.
Indeed, although nowadays most real-life applications involve discrete decisions or non-linearities, the methods employed to solve them often rely on LP as their workhorse.
Besides algorithms for mixed-integer linear programming (MILP), these include cutting-plane and outer-approximation algorithms that substitute a non-linear problem with a sequence of iteratively refined LPs \cite{Kelley1960, Westerlund1995, Mitchell2009}.
Furthermore, LP is at the heart of classical decomposition methods such as Dantzig-Wolfe and Benders decompositions \cite{Dantzig1960, Benders1962}.
Therefore, efficient and robust LP technology is instrumental to our ability to solve more involved optimization problems.
Over the past few decades, interior-point methods (IPMs) have become a standard and efficient tool for solving LPs \cite{Wright1997, Gondzio2012_IPMreview}.
While IPMs tend to outperform Dantzig's simplex algorithm on large-scale problems, the latter is well-suited for solving sequences of closely related LPs, by taking advantage of an advanced basis.
Nevertheless, beyond sheer performance, it is now well recognized that a number of LP-based algorithms can further benefit from IPMs, despite their limited ability to warm start.
In cutting plane algorithms, stronger cuts are often obtained by cutting off an interior point rather than an extreme vertex \cite{Bixby1992, Mitchell2000, Mitchell2009}.
Similarly,
\revision{blue}{IPMs have been successfully employed in the context of decomposition methods \cite{Elhedhli2004,Babonneau2009,NaoumSawaya2013,Munari2013,Gondzio2016_PDCGMlarge}, wherein}
well-centered interior solutions typically provide a stabilization effect \cite{Gondzio1996_PDCGM, Rousseau2007,Gondzio2013_PDCGMnew}, thus reducing tailing-off and improving convergence.
\revision{red}{
\subsection{Exploiting structure in IPMs}
}
The remarkable performance of IPMs stems from both strong algorithmic foundations and efficient linear algebra.
Indeed, the main computational effort of IPMs resides in the resolution, at each iteration, of a system of linear equations.
Therefore, the efficiency of the underlying linear algebra has a direct impact on the method's overall performance.
\revision{red}{Remarkably}, while most IPM solvers employ general-purpose sparse linear algebra routines, substantial speedups can be obtained by exploiting a problem's specific structure.
\revision{red}{Nevertheless, successfully doing so requires (i) identifying a problem's structure and associated specialized linear algebra, (ii) integrating these custom routines within an IPM solver, and (iii) having a convenient and flexible way for the user to convey structural information to the solver.
The main contribution of our work is to simplify the latter two points.
}
\revision{red}{Numerous works have studied structure-exploiting IPMs, e.g., \cite{Birge1988,Hurd1988,Schultz1991,Choi1993,Jessup1994,Gondzio1997_GUB,Gondzio2003_IPMparLP,Gondzio2007_IPMparQP,Castro2017}.}
For instance, block-angular matrices typically arise in stochastic programming when using scenario decomposition.
In \cite{Birge1988} and later in \cite{Jessup1994}, the authors thus design specialized factorization techniques that outperform generic implementations.
\revision{red}{Schultz et al. \cite{Schultz1991} design a specialized IPM for block-angular problems; therein, linking constraints are handled separately, thus allowing to decompose the rest of the problem.}
Gondzio \cite{Gondzio1997_GUB} observed that the master problem in Dantzig-Wolfe decomposition possesses a block-angular structure.
Similar approaches have been explored for network flow problems \cite{Choi1993}, \revision{red}{multi-commodity flow problems \cite{Gondzio2003_IPMparLP}, asset management problems \cite{Gondzio2007_IPMparQP}}, and for solving facility location problems \cite{Hurd1988,Castro2017}.
The aforementioned works focus on devising specialized linear algebra for a particular structure or application.
On the other hand, a handful of IPM codes that accommodate various linear algebra implementations have been developed.
The OOQP software, developed by Gertz and Wright \cite{Gertz2003}, uses object-oriented design so that data structures and linear algebra routines can be tailored to specific applications.
Motivated by large-scale stochastic programming, PIPS \cite{Lubin2011} incorporates a large share of OOQP's codebase, alongside specialized linear solvers for block-angular matrices.
Nevertheless, to the best of the authors' knowledge, OOQP is no longer actively maintained, while current development on PIPS focuses on non-linear programming.\footnote{Personal communication with PIPS developers.}
In a similar fashion, OOPS \cite{Gondzio2003_IPMparLP,Gondzio2007_IPMparQP,Gondzio2009_OOPS} implements custom linear algebra that can exploit arbitrary block matrix structures.
We also note that both PIPS and OOPS are primarily intended for massive parallelism on high-performance computing infrastructure.
\revision{red}{Furthermore, the BlockIP software \cite{Castro2016} is designed for block-angular convex optimization problems, and solves linear systems with a combination of Cholesky factorization and preconditioned conjugate gradient.
Both OOPS and BlockIP can be accessed through SML \cite{Grothey2009} --which requires AMPL,}
and are distributed under a closed-source proprietary license.
\revision{blue}{
Finally, while nowadays most optimization solvers are written in C or C++, users are increasingly turning to higher-level programming languages such as Python, Matlab or Julia, alongside a variety of modeling tools, e.g, Pyomo \cite{hart2011pyomo}, CVXPY \cite{diamond2016cvxpy}, YALMIP \cite{Lofberg2004}, JuMP \cite{Dunning2017_JuMP}, to mention a few.
Thus, users of high-level languages often have to switch to a low-level language in order to implement performance-critical tasks such as linear algebra.
This situation, commonly referred to as the ``two-language problem'', hinders code development, maintenance, and usability.
}
\subsection{Contributions and outline}
\label{sec:intro:subsec:outline}
In this paper, we describe \revision{blue}{the design and implementation of a modular interior-point solver, Tulip.
The solver is written in Julia \cite{Bezanson2017}, which offers several advantages.
First, Julia combines both high-level syntax and fast performance, thus addressing the two-language problem.
In particular, it offers built-in support for linear algebra, with direct access to dense and sparse linear algebra libraries such as BLAS, LAPACK and SuiteSparse \cite{SuiteSparse}.
Second, the Julia ecosystem for optimization comprises a broad range of tools, from solvers' wrappers to modeling languages, alongside a growing and dynamic community of users.
Finally, Julia's multiple dispatch feature renders Tulip's design fully flexible,}
thus allowing to disentangle the IPM algorithmic framework from linear algebra implementations, \revision{blue}{and to solve problems in arbitrary precision arithmetic.}
The remainder of the paper is structured as follows.
In Section \ref{sec:notations}, we introduce some notations and relevant definitions.
In Section \ref{sec:ipm}, we describe the homogeneous self-dual embedding, and Tulip's \revision{red}{regularized} homogeneous interior-point algorithm.
This feature contrasts with most IPM LP codes, namely, those that implement the almost-ubiquitous infeasible primal-dual interior-point algorithm \cite{Mehrotra1992}.
The main advantage of the homogeneous algorithm is its ability to return certificates of primal or dual infeasibility.
It is therefore better suited for use within cutting-plane algorithms or decomposition methods, wherein one may encounter infeasible or unbounded LPs.
\revision{red}{In Section \ref{sec:KKT}, we highlight the resolution of linear systems within Tulip, which builds on black-box linear solvers.
This modular design leverages Julia's multiple dispatch, thereby facilitating the integration of custom linear algebra with no performance loss due to using external routines.}
\revision{blue}{The presolve procedure is described in Section \ref{sec:presolve} and,}
in Section \ref{sec:implementation}, we provide further implementation details of Tulip, such as the treatment of variable bounds, default values of parameters, and \revision{red}{default linear solvers}.
Tulip is publicly available \cite{Tulip} under an open-source license.
It can be used as a stand-alone package in Julia, and through the solver-independent \revision{blue}{interface \texttt{MathOptInterface} \cite{Legat2020_MOI}}.
In Section \ref{sec:res}, we report on \revision{blue}{three} sets of computational experiments.
First, we compare Tulip to several open-source and commercial IPM solvers on \revision{blue}{a benchmark set of unstructured LP instances}.
We observe that, using generic sparse linear algebra, Tulip is competitive with open-source IPM solvers.
Second, we \revision{blue}{demonstrate Tulip's flexible design.
We consider block-angular problems with dense linking constraints from two column-generation applications, for which we design specialized linear algebra routines.
This implementation yields a tenfold speedup, thereby outperforming commercial solvers on large-scale instances.}
\revision{blue}{Third, we show how extended precision can alleviate numerical difficulties, thus illustrating Tulip's ability to work in arbitrary precision arithmetic.}
Finally, Section \ref{sec:conclusion} concludes the paper and highlights future research directions.
\section{Notations}
\label{sec:notations}
We consider LPs in primal-dual standard form
\begin{align}
\label{eq:standard_LP}
\begin{array}{rrl}
(P) \ \ \ \displaystyle \min_{x} \ \ \
& c^{T} x\\
s.t. \ \ \
& A x &= b,\\
& x & \geq 0,
\end{array}
&
\hspace*{1cm}
\begin{array}{rrl}
(D) \ \ \ \displaystyle \max_{y, s} \ \ \
& b^{T} y \ \ \ \ \ \ \\
s.t. \ \
& A^{T}y + s &= c,\\
& s & \geq 0,
\end{array}
\end{align}
where $c, x, s \in \mathbb{R}^{n}$, $b, y \in \mathbb{R}^{m}$, and $A \in \mathbb{R}^{m \times n}$ is assumed to have full row rank.
We follow the usual notations from interior-point literature, and write $X$ (resp.\ $S$) for the diagonal matrix whose diagonal is given by $x$ (resp.\ $s$), i.e., $X := Diag(x)$ and $S := Diag(s)$.
We denote by $I$ the identity matrix and by $e$ the vector with all coordinates equal to one; their respective dimensions are always obvious from context.
The norm of a vector is written $\norm{\cdot}$ and, unless specified otherwise, it denotes the $\ell_{2}$ norm.
A primal solution $x$ is feasible if $Ax = b$ and $x \geq 0$.
A strictly feasible (or interior) solution is a primal feasible solution with $x > 0$.
Similarly, a dual solution $(y, s)$ is feasible if $A^{T}y + s = c$ and $s \geq 0$, and strictly feasible if, additionally, $s > 0$.
Finally, a primal-dual solution $(x, y, s)$ is optimal for \eqref{eq:standard_LP} if $x$ is primal-feasible, $(y,s)$ is dual-feasible, and their objective values are equal, i.e., $c^{T}x = b^{T}y$.
A solution $(x, y, s)$ with $x, s \geq 0$ is strictly complementary if
\begin{align}
\label{eq:strcict_complementarity}
\forall i \in \{1, \dots, n\}, \big( x_{i} s_{i} = 0 \text{ and } x_{i} + s_{i} > 0 \big).
\end{align}
The \revision{blue}{complementarity gap} is defined as $x^{T}s$.
When $(x, y, s)$ is primal-dual feasible, the \revision{blue}{complementarity gap} equals the classical optimality gap, i.e., we have $x^{T}s = c^{T}x - b^{T}y$.
For ease of reading, we assume, without loss of generality, that all primal variables are required to be non-negative.
The handling of free variables and of variables with finite upper bound will be detailed in Section \ref{sec:implementation}.
\section{\revision{red}{Regularized homogeneous interior-point algorithm}}
\label{sec:ipm}
In this section, we describe the homogeneous self-dual formulation and algorithm.
Our implementation largely follows the algorithmic framework of \cite{Xu1996} and \cite{Andersen2000}, \revision{red}{combined with the primal-dual regularization scheme of \cite{Friedlander2012}}.
Consequently, we focus on the algorithm's main components, and refer to \revision{red}{\cite{Xu1996, Andersen2000,Friedlander2012}} for convergence proofs and theoretical results.
Specific implementation details will be further discussed in Section \ref{sec:implementation}.
\subsection{Homogeneous self-dual embedding}
\label{sec:ipm:hsd_model}
The simplified homogeneous self-dual form was introduced in \cite{Xu1996}.
It consists in reformulating the primal-dual pair \eqref{eq:standard_LP} as a single, self-dual linear program, which writes
\begin{align}
\label{eq:HSD:obj}
(HSD) \ \ \
\min_{x, y, \tau} \ \ \
& 0 \\
s.t. \ \ \
\label{eq:HSD:dual}
& -A^{T}y + c \tau \geq 0,\\
\label{eq:HSD:primal}
& Ax - b \tau = 0,\\
\label{eq:HSD:gap}
& - c^{T}x + b^{T}y \geq 0,\\
\label{eq:HSD:domain}
& x, \tau \geq 0,
\end{align}
where $\tau$ \revision{red}{is a scalar variable.
Let $s$, $\kappa$ be the non-negative slacks associated to \eqref{eq:HSD:dual} and \eqref{eq:HSD:gap}, respectively.}
A solution $(x, y, s, \tau, \kappa)$ is strictly complementary if
\begin{align*}
x_{i}s_{i} = 0, \ x_{i} + s_{i} > 0, \quad i = 1, \dots, n, \quad \text{and} \quad \tau \kappa = 0, \ \tau + \kappa > 0.
\end{align*}
Problem $(HSD)$ is always feasible, has empty interior and, under mild assumptions, possesses a strictly complementary feasible solution \cite{Xu1996}.
Let $(x^{*}, y^{*}, s^{*}, \tau^{*}, \kappa^{*})$ be a strictly complementary feasible solution for $(HSD)$.
If $\tau^{*} >0$, then $(\frac{x^{*}}{\tau^{*}}, \frac{y^{*}}{\tau^{*}}, \frac{s^{*}}{\tau^{*}})$ is an optimal solution for the original problem \eqref{eq:standard_LP}.
Otherwise, we have $\kappa^{*} >0$ and thus $c^{T}x^{*} - b^{T}y^{*} < 0$.
In that case, the original problem (P) is infeasible or unbounded.
If $c^{T}x^{*} < 0$, then (P) is unbounded and $x^{*}$ is an unbounded ray.
If $-b^{T}y^{*} < 0$, then (P) is infeasible and $y^{*}$ is an unbounded dual ray.
The latter is also referred to as a Farkas proof of infeasibility.
Finally, if both $c^{T}x^{*} < 0$ and $-b^{T}y^{*} < 0$, then both (P) and (D) are infeasible.
\revision{red}{
\subsection{Regularized formulation}
Friedlander and Orban \cite{Friedlander2012} introduce an exact primal-dual regularization scheme for convex quadratic programs, which we extend to the HSD form.
The benefits of regularizations will be further detailed in Section \ref{sec:KKT}.
Importantly, rather than viewing $(HSD)$ as a generic LP to which the regularization procedure of \cite{Friedlander2012} is applied, we exploit the fact that $(HSD)$ is a self-dual embedding of $(P){-}(D)$, and formulate the regularization in the original primal-dual space.
Thus, we consider a \emph{single}, regularized, self-dual problem
\begin{align}
\label{eq:rHSD:obj}
(rHSD) \ \ \
\min_{x, y, \tau} \ \ \
& \rho_{p} (x - \bar{x})^{T}x + \rho_{d} (y - \bar{y})^{T}y + \rho_{g} (\tau - \bar{\tau}) \tau \\
s.t. \ \ \
\label{eq:rHSD:dual}
& -A^{T}y + c \tau + \rho_{p} (x - \bar{x}) \geq 0,\\
\label{eq:rHSD:primal}
& Ax - b \tau + \rho_{d} (y - \bar{y}) = 0,\\
\label{eq:rHSD:gap}
& - c^{T}x + b^{T}y + \rho_{g} (\tau - \bar{\tau}) \geq 0,\\
\label{eq:rHSD:domain}
& x, \tau \geq 0,
\end{align}
where $\rho_{p}, \rho_{d}, \rho_{g}$ are positive scalars, and $\bar{x} \in \mathbb{R}^{n}, \bar{y} \in \mathbb{R}^{m}, \bar{\tau} \in \mathbb{R}$ are given estimates of an optimal solution of $(HSD)$.
We denote by $s, \kappa$ the non-negative slack variables of constraints \eqref{eq:rHSD:dual} and \eqref{eq:rHSD:gap}, respectively.
The first-order Karush-Kuhn-Tucker (KKT) conditions for $(rHSD)$ can then be expressed in the following form:
\begin{align}
\label{eq:KKT:rHSD:dual}
\rho_{p} x - A^{T}y - s + c \tau &= \rho_{p} \bar{x},\\
\label{eq:KKT:rHSD:primal}
A x + \rho_{d} y - b \tau &= \rho_{d} \bar{y},\\
\label{eq:KKT:rHSD:gap}
-c^{T}x + b^{T}y + \rho_{g} \tau - \kappa &= \rho_{g} \bar{\tau},\\
\label{eq:KKT:rHSD:xs}
x_{j} s_{j} &= 0, & j = 1, \dots, n\\
\label{eq:KKT:rHSD:tk}
\tau \kappa &= 0,\\
\label{eq:KKT:rHSD:domain}
x, s, \tau, \kappa &\geq 0.
\end{align}
The correspondence between $(rHSD)$ and \cite{Friedlander2012} follows from the fact that, up to a constant term, the objective function \eqref{eq:rHSD:obj} equals
\begin{align*}
\frac{1}{2}
\left(
\rho_{p} \sqnorm{x - \bar{x}} + \rho_{d} \sqnorm{y - \bar{y}} + \rho_{g} \sqnorm{\tau - \bar{\tau}}
+ \rho_{p} \sqnorm{x} + \rho_{d} \sqnorm{y} + \rho_{g} \sqnorm{\tau}
\right).
\end{align*}
Note that, for $\rho_{p} = \rho_{d} = \rho_{g} = 0$, the regularized problem $(rHSD)$ reduces to $(HSD)$.
Furthermore, Theorem \ref{thm:exact_reg} shows that, for positive $\rho_{p}, \rho_{d}, \rho_{g}$, the regularization is exact.
\begin{theorem}
\label{thm:exact_reg}
Assume $\rho_{p}, \rho_{d}, \rho_{g} > 0$.
Let $(x^{*}, y^{*}, \tau^{*})$ be a complementary optimal solution of $(HSD)$, and let $(\bar{x}, \bar{y}, \bar{\tau}) = (x^{*}, y^{*}, \tau^{*})$ in the definition of $(rHSD)$.
Then, $(x^{*}, y^{*}, \tau^{*})$ is the unique optimal solution of $(rHSD)$.
\end{theorem}
\begin{proof}The uniqueness of the optimum is a direct consequence of $(rHSD)$ being a convex problem with strictly convex objective.
Next, we show that any feasible solution of $(rHSD)$ has non-negative objective.
Let $(x, y, s, \tau, \kappa)$ be a feasible solution of $(rHSD)$.
Substituting Eq. \eqref{eq:rHSD:dual}-\eqref{eq:rHSD:gap} into the objective \eqref{eq:rHSD:obj}, one obtains
\begin{align*}
Z &= \rho_{p} (x - \bar{x})^{T}x + \rho_{d} (y - \bar{y})^{T}y + \rho_{g} (\tau - \bar{\tau}) \tau\\
& = (A^{T}y + s - c\tau)^{T}x + (b \tau - Ax)^{T}y + (c^{T}x - b^{T}y + \kappa) \tau\\
& = x^{T}s + \tau \kappa \geq 0.
\end{align*}
Then, $(x^{*}, y^{*}, \tau^{*})$ is trivially feasible for $(rHSD)$, and its objective value is $(x^{*})^{T}s^{*} + \tau^{*} \kappa^{*} = 0$.
Thus, it is optimal for $(rHSD)$, which concludes the proof.
\qed
\end{proof}
}
\subsection{\revision{red}{Regularized homogeneous algorithm}}
\label{sec:ipm:hsd_algo}
We now describe the \revision{red}{regularized} homogeneous interior-point algorithm.
\revision{red}{
Similar to \cite{Friedlander2012}, we apply a single Newton iteration to a sequence of problems of the form $(rHSD)$ where, at each iteration, $\bar{x}, \bar{y}, \bar{\tau}$ are chosen to be the current primal-dual iterate.
}
Let $(x, y, s, \tau, \kappa)$ denote the current primal-dual iterate, with $(x, s, \tau, \kappa) >0$, and define the residuals
\begin{align}
r_{p} &= b \tau - Ax,\\
r_{d} &= c \tau - A^{T}y - s,\\
r_{g} &= c^{T}x - b^{T}y + \kappa,
\end{align}
and the barrier parameter
\[
\mu = \dfrac{x^{T}s + \tau \kappa}{n+1}.
\]
\revision{red}{For given $\bar{x}, \bar{y}, \bar{\tau}$,}
a search direction $(\delta_{x}, \delta_{y}, \delta_{s}, \delta_{\tau}, \delta_{\kappa})$ is computed by solving a Newton system of the form
\revision{red}{
\begin{align}
\label{eq:Newton:dual}
-\rho_{p} \delta_{x} + A^{T} \delta_{y} + \delta_{s} - c \delta_{\tau}
&= \eta \left( c \tau - A^{T}y - s + \rho_{p} (\bar{x} - x) \right),\\
\label{eq:Newton:primal}
A \delta_{x} + \rho_{d} \delta_{y} - b \delta_{\tau}
&= \eta \left( b \tau - Ax - \rho_{d}(y - \bar{y}) \right),\\
\label{eq:Newton:gap}
-c^{T} \delta_{x} + b^{T} \delta_{y} + \rho_{g} \delta_{\tau} - \delta_{\kappa}
&= \eta \left( c^{T}x - b^{T}y + \kappa - \rho_{g} (\tau - \bar{\tau}) \right),\\
\label{eq:Newton:xs}
S \delta_{x} + X \delta_{s}
&= -XSe + \gamma \mu e,\\
\label{eq:Newton:tk}
\kappa \delta_{\tau} + \tau \delta_{\kappa}
&= -\tau \kappa + \gamma \mu,
\end{align}}
where $\gamma$ and $\eta$ are non-negative scalars \revision{red}{whose values will be specified in Section \ref{sec:ipm:algo:direction}}.
\revision{red}{
We evaluate the Newton system at $(\bar{x}, \bar{y}, \bar{\tau}) = (x, y, \tau)$, which yields
\begin{align}
\label{eq:Newton:reg}
\begin{bmatrix}
-\rho_{p} I && A^{T} && I && -c && 0\\
A && \rho_{d} I && 0 && -b && 0\\
-c^{T} && b^{T} && 0 && \rho_{g} && -1\\
S && 0 && X && 0 && 0\\
0 && 0 && 0 && \kappa && \tau
\end{bmatrix}
\begin{bmatrix}
\delta_{x}\\
\delta_{y}\\
\delta_{s}\\
\delta_{\tau}\\
\delta_{\kappa}
\end{bmatrix}
=
\begin{bmatrix}
\eta r_{d}\\
\eta r_{p}\\
\eta r_{g}\\
-XSe + \gamma \mu e\\
-\tau \kappa + \gamma \mu
\end{bmatrix}
.
\end{align}
System \eqref{eq:Newton:reg} is identical to the Newton system obtained when solving $(HSD)$ (see, e.g., \cite{Andersen2000}), except for the regularization terms that appear in the left-hand side.
In particular, the right-hand side remains unchanged.
}
\iffalse
It then follows \cite{Andersen2000} that
\begin{align}
\label{eq:residuals_update}
(r_{p}^{+}, r_{d}^{+}, r_{g}^{+}) &= (1 - \alpha \eta) (r_{p}, r_{d}, r_{g}),
\end{align}
and
\begin{align}
\label{eq:complementary_update}
(x^{+})^{T}s^{+} + \tau^{+}\kappa^{+} &= \left( 1 - \alpha (1-\gamma) + \alpha^{2} \eta (1 - \gamma - \eta) \right) (x^{T} s + \tau \kappa).
\end{align}
Consequently, if $\eta = 1 - \gamma$, then infeasibility and optimality gap are decreased by the same factor $(1-\alpha \eta)$.
This remarkable property contrasts with classical infeasible primal-dual algorithms such as \cite{Mehrotra1992}, in which feasibility is often reached earlier than optimality.
\fi
\subsubsection{Starting point}
We choose the following default starting point
\[
(x^{0}, y^{0}, s^{0}, \tau^{0}, \kappa^{0}) = (e, 0, e, 1, 1).
\]
This initial point was proposed in \cite{Xu1996}.
Besides its simplicity, it has well-balanced \revision{blue}{complementarity} products, which are all equal to one.
\subsubsection{Search direction}
\label{sec:ipm:algo:direction}
At each iteration, a search direction is computed using Mehrotra's predictor-corrector technique \cite{Mehrotra1992}, combined with Gondzio's multiple centrality corrections \cite{Gondzio1996_correction}.
Following \cite{Andersen2000}, we adapt the original formulas of \cite{Mehrotra1992, Gondzio1996_correction} to account for the homogeneous embedding.
First, the affine-scaling direction $(\delta^{\text{aff}}_{x}, \delta^{\text{aff}}_{y}, \delta^{\text{aff}}_{s}, \delta^{\text{aff}}_{\tau}, \delta^{\text{aff}}_{\kappa})$ is obtained by solving the Newton system
\begin{align}
\label{eq:NS_aff_dual}
\revision{red}{-\rho_{p} \delta^{\text{aff}}_{x}} + A^{T} \delta^{\text{aff}}_{y} + \delta^{\text{aff}}_{s} - c \delta^{\text{aff}}_{\tau} &= r_{d},\\
\label{eq:NS_aff_primal}
A \delta^{\text{aff}}_{x} \revision{red}{+ \rho_{d} \delta^{\text{aff}}_{y}} -b \delta^{\text{aff}}_{\tau} &= r_{p},\\
\label{eq:NS_aff_opt}
-c^{T} \delta^{\text{aff}}_{x} + b^{T} \delta^{\text{aff}}_{y} \revision{red}{+ \rho_{g} \delta^{\text{aff}}_{\tau}} -\delta^{\text{aff}}_{\kappa} &= r_{g},\\
\label{eq:NS_aff_comp_xs}
S \delta^{\text{aff}}_{x} + X \delta^{\text{aff}}_{s} &= -XSe,\\
\label{eq:NS_aff_comp_tk}
\kappa \delta^{\text{aff}}_{\tau} + \tau \delta^{\text{aff}}_{\kappa} &= -\tau \kappa,
\end{align}
which corresponds to \revision{red}{\eqref{eq:Newton:reg} with} $\eta=1$ and $\gamma=0$.
Taking a full step ($\alpha=1$) would thus reduce both infeasibility and \revision{blue}{complementarity} gap to zero.
However, doing so is generally not possible, due to the non-negativity requirement on $(x, s, \tau, \kappa)$.
Consequently, a corrected search direction is computed, as proposed in \cite{Mehrotra1992}.
The corrected direction hopefully enables one to make longer steps, thus reducing the total number of IPM iterations.
Let $\eta = 1 - \gamma$, where
\begin{align}
\gamma = (1 - \alpha^{\text{aff}})^{2} \min\left( \revision{blue}{\gamma_{\min}}, (1 - \alpha^{\text{aff}}) \right)
\end{align}
for some $\revision{blue}{\gamma_{\min}} > 0$, and
\begin{align}
\alpha^{\text{aff}} = \max \left\{ 0 \leq \alpha \leq 1 \mid (x, s, \tau, \kappa) + \alpha (\delta^{\text{aff}}_{x}, \delta^{\text{aff}}_{s}, \delta^{\text{aff}}_{\tau}, \delta^{\text{aff}}_{\kappa}) \geq 0 \right\} .
\end{align}
The corrected search direction is then given by
\begin{align}
\label{eq:NS_cor_dual}
\revision{red}{-\rho_{p} \delta_{x}} + A^{T} \delta_{y} + \delta_{s} - c \delta_{\tau} &= \eta r_{d},\\
\label{eq:NS_cor_primal}
A \delta_{x} \revision{red}{+ \rho_{d} \delta_{y}} -b \delta_{\tau} &= \eta r_{p},\\
\label{eq:NS_cor_opt}
-c^{T} \delta_{x} + b^{T} \delta_{y} \revision{red}{+ \rho_{g} \delta_{\tau}} -\delta_{\kappa} &= \eta r_{g},\\
\label{eq:NS_cor_comp_xs}
S \delta_{x} + X \delta_{s} &= -XSe + \gamma \mu e - \Delta_{x}^{\text{aff}} \Delta_{s}^{\text{aff}} e,\\
\label{eq:NS_cor_comp_tk}
\kappa \delta_{\tau} + \tau \delta_{\kappa} &= -\tau \kappa + \gamma \mu - \delta_{\tau}^{\text{aff}} \delta_{\kappa}^{\text{aff}},
\end{align}
where $\Delta^{\text{aff}}_{x} = Diag(\delta^{\text{aff}}_{x})$ and $\Delta^{\text{aff}}_{s}=Diag(\delta^{\text{aff}}_{s})$.
\subsubsection{\revision{red}{Additional centrality corrections}}
\revision{red}{Additional centrality corrections}
aim at improving the centrality of the new iterate, i.e., at keeping the complementarity products well balanced.
Doing so generally allows one to take longer steps, thus reducing the total number of IPM iterations.
We implement Gondzio's original technique \cite{Gondzio1996_correction}, with some modifications introduced in \cite{Andersen2000}.
Let $\delta = (\delta_{x}, \delta_{y}, \delta_{s}, \delta_{\tau}, \delta_{\kappa})$ be the current search direction, $\alpha^{\text{max}}$ the corresponding maximum step size, and define
\begin{align}
(\bar{x}, \bar{y}, \bar{s}, \bar{\tau}, \bar{\kappa}) := (x, y, s, \tau, \kappa) + \bar{\alpha} (\delta_{x}, \delta_{y}, \delta_{s}, \delta_{\tau}, \delta_{\kappa}),
\end{align}
where $\bar{\alpha} := \min(1, 2 \alpha^{\text{max}})$ is a tentative step size.
First, a soft target in the space of \revision{blue}{complementarity} products is computed as
\begin{align}
t_{j} &=
\left\{
\begin{array}{cl}
\mu_{l} - \bar{x}_{j} \bar{s}_{j} & \text{ if } \bar{x}_{j} \bar{s}_{j} < \mu_{l}\\
0 & \text{ if } \bar{x}_{j} \bar{s}_{j} \in [\mu_{l}, \mu_{u}]\\
\mu_{u} - \bar{x}_{j} \bar{s}_{j} & \text{ if } \bar{x}_{j} \bar{s}_{j} > \mu_{u}
\end{array}
\right.
, \ \
j = 1, \dots, n,\\
t_{0} &=
\left\{
\begin{array}{cl}
\mu_{l} - \bar{\tau} \bar{\kappa} & \text{ if } \bar{\tau} \bar{\kappa} < \mu_{l}\\
0 & \text{ if } \bar{\tau} \bar{\kappa} \in [\mu_{l}, \mu_{u}]\\
\mu_{u} - \bar{\tau} \bar{\kappa} & \text{ if } \bar{\tau} \bar{\kappa} > \mu_{u}
\end{array}
\right.
,
\end{align}
where $\mu_{l} = \gamma \mu \beta$ and $\mu_{u} = \gamma \mu \beta^{-1}$, for a fixed $0 < \beta \leq 1$.
Then, define
\begin{align}
v &= t - \dfrac{e^{T}t + t_{0}}{n+1} e,\\
v_{0} &= t_{0} - \dfrac{e^{T}t + t_{0}}{n+1}.
\end{align}
A correction is obtained by solving the linear system
\begin{align}
\label{eq:NS_cor2_dual}
\revision{red}{-\rho_{p} \delta^{c}_{x}} + A^{T} \delta^{c}_{y} + \delta^{c}_{s} - c \delta^{c}_{\tau} &= 0,\\
\label{eq:NS_cor2_primal}
A \delta^{c}_{x} \revision{red}{+ \rho_{d} \delta^{c}_{y}} -b \delta^{c}_{\tau} &= 0,\\
\label{eq:NS_cor2_opt}
-c^{T} \delta^{c}_{x} + b^{T} \delta^{c}_{y} \revision{red}{+ \rho_{g} \delta^{c}_{\tau}} -\delta^{c}_{\kappa} &= 0,\\
\label{eq:NS_cor2_comp_xs}
S \delta^{c}_{x} + X \delta^{c}_{s} &= v,\\
\label{eq:NS_cor2_comp_tk}
\kappa \delta^{c}_{\tau} + \tau \delta^{c}_{\kappa} &= v_{0},
\end{align}
which yields a corrected search direction
\begin{align*}
(
\delta_{x},
\delta_{y},
\delta_{s},
\delta_{\tau},
\delta_{\kappa}
) +
(
\delta^{c}_{x},
\delta^{c}_{y},
\delta^{c}_{s},
\delta^{c}_{\tau},
\delta^{c}_{\kappa}
).
\end{align*}
The corrected direction is accepted if it results in an increased step size.
Finally, additional centrality corrections are computed only if a sufficient increase in the step size is observed.
Specifically, as suggested in \cite{Andersen2000}, an additional correction is computed only if the new step size $\alpha$ satisfies
\begin{align}
\label{eq:test_keep_correcting}
\alpha \geq 1.10 \times \alpha^{\text{max}}.
\end{align}
\revision{red}{
\subsubsection{Regularizations}
Following \cite{Friedlander2012}, the regularizations are updated as follows.
Let $\rho_{p}^{k}, \rho_{d}^{k}, \rho_{g}^{k}$ denote the regularization terms at iteration $k$.
We set $\rho_{p}^{0} = \rho_{d}^{0} = \rho_{g}^{0} = 1$, and use the update rule
\begin{align}
\rho_{p}^{k+1} = \max \left( \sqrt{\epsilon}, \frac{\rho_{p}^{k}}{10} \right),\\
\rho_{d}^{k+1} = \max \left( \sqrt{\epsilon}, \frac{\rho_{d}^{k}}{10} \right),\\
\rho_{g}^{k+1} = \max \left( \sqrt{\epsilon}, \frac{\rho_{g}^{k}}{10} \right),
\end{align}
where $\epsilon$ denotes the machine precision, e.g., $\epsilon \simeq 10^{-16}$ for double-precision floating point arithmetic.
Further details on the role of regularizations in the resolution of the Newton system are given in Section \ref{sec:KKT}.
Let us only mention here that $\rho_{p}, \rho_{d}, \rho_{g}$ may become too small to ensure that the Newton system is properly regularized, e.g., for badly scaled problems.
When this is the case, we increase the regularizations by a factor of $100$, and terminate the algorithm if three consecutive increases fail to resolve the numerical issues.
}
\subsubsection{Step size}
Once the final search direction has been computed, the step size $\alpha$ is given by
\begin{align}
\alpha = 0.9995 \times \alpha^{\text{max}},
\end{align}
where
\begin{align*}
\alpha^{\text{max}} = \max \left\{ 0 \leq \alpha \leq 1 \mid (x, s, \tau, \kappa) + \alpha (\delta_{x}, \delta_{s}, \delta_{\tau}, \delta_{\kappa}) \geq 0 \right\}.
\end{align*}
\subsubsection{Stopping criteria}
\label{sec:ipm:stoppingCriteria}
The algorithm stops when, up to numerical tolerances, one of the following three cases holds: the current iterate is optimal, the primal problem is proven infeasible, the dual problem is proven infeasible (unbounded primal).
The problem is declared solved to optimality if
\begin{align}
\label{eq:stop_primalfeas}
\dfrac{\normInf{r_{p}}}{\tau (1 + \normInf{b})} &< \varepsilon_{p},\\
\label{eq:stop_dualfeas}
\dfrac{\normInf{r_{d}}}{\tau (1 + \normInf{c})} &< \varepsilon_{d},\\
\label{eq:stop_opt}
\dfrac{|c^{T}x - b^{T}y|}{\tau + |b^{T}y|} &< \varepsilon_{g},
\end{align}
where $\varepsilon_{p}, \varepsilon_{d}, \varepsilon_{g}$ are positive parameters.
The above criteria are independent of the magnitude of $\tau$, and correspond to primal feasibility, dual feasibility and optimality, respectively.
Primal or dual infeasibility is detected if
\begin{align}
\label{eq:stop_inf_mu}
\mu &< \varepsilon_{i},\\
\label{eq:stop_inf_tau}
\frac{\tau}{\kappa} &< \varepsilon_{i},
\end{align}
where $\varepsilon_{i}$ is a positive parameter.
When this is the case, a \revision{blue}{complementary} solution with small $\tau$ has been found.
If $c^{T}x < - \varepsilon_{i}$, the problem is declared dual infeasible (primal unbounded), and $x$ is an unbounded ray.
If $-b^{T}y < - \varepsilon_{i}$, the problem is declared primal infeasible (dual unbounded), and $y$ is a Farkas dual ray.
Finally, premature termination criteria such as numerical instability, time limit or iteration limit are discussed in Section \ref{sec:implementation}.
\revision{red}{
\section{Solving linear systems}
\label{sec:KKT}
}
Search directions and centrality corrections are obtained by solving several Newton systems such as \eqref{eq:NS_aff_dual}-\eqref{eq:NS_aff_comp_tk}, all with identical left-hand side matrix but different right-hand side.
\revision{red}{Specifically,} each Newton system has the form
\begin{align}
\label{eq:NewtonSystem}
\left[
\begin{array}{ccccccccccc}
\revision{red}{-\rho_{p} I} && A^{T} && I && -c && \\
A && \revision{red}{\rho_{d} I} && && -b && \\
-c^{T} && b^{T} && && \revision{red}{\rho_{g}} && -1 \\
S && && X && && \\
&& && && \kappa && \tau
\end{array}
\right]
\left[
\begin{array}{l}
\delta_{x}\\
\delta_{y}\\
\delta_{s}\\
\delta_{\tau}\\
\delta_{\kappa}
\end{array}
\right]
=
\left[
\begin{array}{l}
\xi_{d}\\
\xi_{p}\\
\xi_{g}\\
\xi_{xs}\\
\xi_{\tau \kappa}
\end{array}
\right]
,
\end{align}
where $\xi_{p}, \xi_{d}, \xi_{g}, \xi_{xs}, \xi_{\tau \kappa}$ are \revision{red}{appropriate} right-hand side vectors.
\revision{red}{The purpose of this section is to provide further details on the techniques used for the resolution of \eqref{eq:NewtonSystem}, and their implementation in Tulip.
}
\revision{red}{
\subsection{Augmented system}
}
First, we eliminate $\delta_{s}$ and $\delta_{\kappa}$ as follows:
\begin{align}
\delta_{s} &= X^{-1}(\xi_{xs} - S\delta_{x}),\\
\delta_{\kappa} &= \tau^{-1}(\xi_{\tau \kappa} - \kappa \delta_{\tau}),
\end{align}
which yields
\begin{align}
\label{eq:NewtonSystem_reduced}
\left[
\begin{array}{ccccccccccc}
\revision{red}{-(\Theta^{-1} + \rho_{p} I)} && A^{T} && -c \\
A && \revision{red}{\rho_{d} I} && -b \\
-c^{T} && b^{T} && \tau^{-1} \kappa \revision{red}{ + \rho_{g}}
\end{array}
\right]
\left[
\begin{array}{l}
\delta_{x}\\
\delta_{y}\\
\delta_{\tau}
\end{array}
\right]
=
\left[
\begin{array}{l}
\xi_{d} - X^{-1} \xi_{xs}\\
\xi_{p}\\
\xi_{g} +\tau^{-1} \xi_{\tau \kappa}
\end{array}
\right]
,
\end{align}
where $\Theta = XS^{-1}$.
\revision{red}{As outlined in \cite{Andersen2000,Wright1997}}, a solution to \eqref{eq:NewtonSystem_reduced} is obtained by first solving
\begin{align}
\label{eq:augsys_hsd}
\left[
\begin{array}{ccccccccccc}
\revision{red}{-(\Theta^{-1} + \rho_{p} I)} && A^{T} \\
A && \revision{red}{\rho_{d} I}
\end{array}
\right]
\left[
\begin{array}{l}
p\\
q
\end{array}
\right]
&=
\left[
\begin{array}{l}
c\\
b
\end{array}
\right]
,
\end{align}
and
\begin{align}
\label{eq:augsys_hsd_bis}
\left[
\begin{array}{ccccccccccc}
\revision{red}{-(\Theta^{-1} + \rho_{p} I)} && A^{T} \\
A && \revision{red}{\rho_{d} I}
\end{array}
\right]
\left[
\begin{array}{l}
u\\
v
\end{array}
\right]
&=
\left[
\begin{array}{l}
\xi_{d} - X^{-1} \xi_{xs}\\
\xi_{p}
\end{array}
\right]
.
\end{align}
\revision{red}{Linear systems of the form \eqref{eq:augsys_hsd} and \eqref{eq:augsys_hsd_bis} are referred to as \emph{augmented systems}.}
Then, $\delta_{x}, \delta_{y}, \delta_{\tau}$ are computed as follows:
\begin{align}
\delta_{\tau} &= \frac{\xi_{g} + \tau^{-1}\xi_{\tau \kappa} + c^{T}u - b^{T}v}{\tau^{-1} \kappa \revision{red}{+ \rho_{g}} - c^{T}p + b^{T}q},\\
\delta_{x} &= u + \delta_{\tau} p,\\
\delta_{y} &= v + \delta_{\tau} q.
\end{align}
\revision{red}{Note that \eqref{eq:augsys_hsd} does not depend on the right-hand side $\xi$.
Thus, it is only solved once per IPM iteration, and its solution is reused when solving subsequent Newton systems.
Finally, as pointed in \cite{Friedlander2012}, the augmented system's structure motivates the following observations.
First, the use of primal-dual regularizations controls the effective condition number of the augmented system, which, in turn, improves the algorithm's numerical behavior.
Second, the augmented system's matrix is symmetric quasi-definite.
This allows the use of efficient symmetric indefinite factorization techniques, which only require one symbolic analysis at the beginning of the optimization.
In particular, dual regularizations ensure that this quasi-definite property is retained even when $A$ does not have full rank.
Third, directly solving the augmented system implicitly handles dense columns in $A$, which make the system of normal equations dense \cite{Wright1997}.
We have also found this approach to be more numerically stable than a normal equations system-based approach.
}
\revision{red}{
\subsection{Black-box linear solvers}
The augmented system may be solved using a number of techniques, with direct methods---namely, symmetric factorization techniques---being the most popular choice.
Importantly, the algorithm itself is unaffected by \emph{how} the augmented system is solved, provided that it is solved accurately.
}
Our implementation leverages Julia's multiple dispatch feature and built-in support for linear algebra,
thus allowing us to disentangle the algorithmic framework from the linear algebra implementation.
First, the interior-point algorithm is defined over abstract linear algebra structures.
Namely, the constraint matrix $A$ is \revision{red}{treated} as an \texttt{AbstractMatrix}, whose concrete type is only known once the model is instantiated.
Julia's standard library includes extensive support for linear algebra, thus removing the need for a custom abstract linear algebra layer.
\revision{red}{
Second, while the reduction from the Newton system to the augmented system is performed explicitly, the latter is solved by a black-box linear solver.
Specifically, we design an \texttt{AbstractKKTSolver} type, from which concrete linear solver implementations inherit.
The \texttt{AbstractKKTSolver} interface is deliberately minimal, and consists of three functions:\footnote{In Julia, a \texttt{!} is appended to functions that mutate their arguments.} \texttt{setup}, \texttt{update!}, and \texttt{solve!}.
A linear solver is instantiated at the beginning of the optimization using the \texttt{setup} function.
Custom options can be passed to \texttt{setup} so that the user can select a linear solver of their choice.
At the beginning of each IPM iteration, the linear solver's state is updated by calling the \texttt{update!} function.
For instance, if a direct method is used, this step corresponds to updating the factorization.
Following the call to \texttt{update!}, augmented systems can be solved through the \texttt{solve!} function.
Default, generic, linear solvers are described in Section \ref{sec:implementation:linalg}, and an example of specialized linear solver is given in Section \ref{sec:res:colgen}.
Specific details are provided in Tulip's online documentation.\footnote{\url{https://ds4dm.github.io/Tulip.jl/dev/}}
}
\revision{red}{Finally}, specialized methods are automatically dispatched based on the (dynamic) type of $A$.
These include matrix-vector and matrix-matrix product, as well as matrix factorization routines.
We emphasize that the dispatch feature is a core component of the Julia programming language, and is therefore entirely transparent to the user.
Consequently, one can easily define custom routines that exploit certain properties of $A$, so as to speed-up computation or reduce memory overheads.
Furthermore, this customization is entirely independent of the interior-point algorithm, thus allowing one to properly assess the impact of different linear algebra implementations.
\iffalse
Finally, the augmented systems \eqref{eq:augsys_hsd} and \eqref{eq:augsys_hsd_bis} are first reduced to the normal equations system, by pivoting out the diagonal block $-\Theta^{-1}$.
In the case of \eqref{eq:augsys_hsd_bis}, the normal equations write
\begin{align}
\label{eq:normaleq}
(A \Theta A^{T}) v &= \xi_{p} + A \Theta^{-1}(\xi_{d} - X^{-1} \xi_{xs})
.
\end{align}
One then recovers $u = \Theta^{-1}(A^{T}v - \xi_{d} + X^{-1} \xi_{xs})$.
The normal equations are typically solved by a direct method, namely by computing a Cholesky factorization of the positive definite matrix $A \Theta A^{T}$.
This factorization is computed only once per iteration, and is re-used in subsequent solves.
Further details on the practical computation of the Cholesky factors will be given in Section \ref{sec:implementation} and Section \ref{sec:linalg}.
\fi
\revision{blue}{
\section{Presolve}
\label{sec:presolve}
Tulip's presolve module performs elementary reductions, all of which are described in \cite{Andersen1995} and \cite{Gondzio1997_presolve}.
Therefore, in this section, we only outline the presolve procedure; further implementation details are given in Section \ref{sec:implementation}.
\subsection{Presolve}
We only perform reductions that do not introduce any additional non-zero coefficients, i.e., fill-in, to the problem.
The presolve procedure is outlined in Algorithm \ref{alg:presolve}, and proceeds as follows.
First, we ensure all bounds are consistent, remove all empty rows and columns, and identify all row singletons, i.e., rows that contain a single non-zero coefficient.
Then, a series of passes is performed until no further reduction is possible.
At each pass, the following reductions are applied: empty rows and columns, fixed variables, row singletons, free and implied free column singletons, forcing and dominated rows, and dominated columns.
The presolve terminates if infeasibility or unboundedness is detected, in which case an appropriate primal or dual ray is constructed.
If all rows and columns are eliminated, the problem is declared solved, and a primal-dual optimal solution is constructed.
Finally, to improve the numerical properties of the problem, rows and columns are re-scaled as follows:
\begin{align}
\label{eq:presolve:scaling}
\tilde{A} = D^{(r)} \times A \times D^{(c)},
\end{align}
where $\tilde{A}$ is the scaled matrix, $A$ is the constraint matrix of the reduced problem, and $D^{(r)}$, $D^{(c)}$ are diagonal matrices with coefficients
\begin{align}
D^{(r)}_{i} &= \frac{1}{\sqrt{\norm{A_{i, \cdot}}}}, \ \ \ \forall i,\\
D^{(c)}_{j} &= \frac{1}{\sqrt{\norm{A_{\cdot, j}}}}, \ \ \ \forall j.
\end{align}
Column and row bounds, as well as the objective, are scaled appropriately.
\begin{algorithm}
\begin{algorithmic}
\REQUIRE Initial LP
\STATE Remove empty rows
\STATE Remove empty columns
\REPEAT
\STATE Check for bounds inconsistencies
\STATE Remove empty columns
\STATE Remove row singletons
\STATE Remove fixed variables
\STATE Remove row singletons
\STATE Remove forcing/dominated rows
\STATE Remove row singletons
\STATE Remove free column singletons
\STATE Remove row singletons
\STATE Remove dominated columns
\UNTIL{No reduction is found}
\STATE Scale rows and columns
\end{algorithmic}
\caption{Presolve procedure}
\label{alg:presolve}
\end{algorithm}
\subsection{Postsolve}
A primal-dual solution to the presolved problem is computed using the interior-point algorithm described in Section \ref{sec:ipm}.
A solution to the original problem is then constructed in a postsolve phase, whose algorithmic details are detailed in \cite{Andersen1995,Gondzio1997_presolve}.
Note that, in general, the postsolve solution is \emph{not} an interior point with respect to the original problem, e.g., some variables may be at their upper or lower bound.
}
\section{Implementation details}
\label{sec:implementation}
\revision{blue}{Tulip is an officially registered Julia package}, and is publicly available\footnote{Source code is available at \url{https://github.com/ds4dm/Tulip.jl}, and online documentation at \url{https://ds4dm.github.io/Tulip.jl/dev/}} under an open-source license.
\revision{blue}{The entire source code comprises just over $4,000$ lines of Julia code, which makes it easy to read and to modify.
}
The code is single-threaded, however external linear algebra libraries may exploit multiple threads.
\revision{blue}{
We provide an interface to \texttt{MathOptInterface} \cite{Legat2020_MOI}, a solver-agnostic abstraction layer for optimization.
Thus, Tulip is readily available through both \texttt{JuMP} \cite{Dunning2017_JuMP}, an open-source algebraic modeling language embedded in Julia, and the convex optimization modeling framework \texttt{Convex} \cite{Convex.jl-2014}.
Finally, Tulip supports arbitrary precision arithmetic, thus allowing, for instance, to solve problems in quadruple (128 bits) precision.
This functionality is available from Tulip's direct API and through the \texttt{MathOptInterface} API; it is illustrated in Section \ref{sec:res:precision}.
}
\subsection{Bounds on variables}
\label{sec:implementation:bounds}
\revision{blue}{
Tulip stores LP problems in the form
\begin{align}
\label{eq:LP_gen}
\begin{array}{rrcll}
(LP) \ \ \
\displaystyle \min_{x} \ \ \
&& c^{T}x & + \ c_{0}\\
s.t. \ \ \
& l^{b}_{i} \leq & \sum_{j} a_{i, j} x_{j} & \leq u^{b}_{i}, & \ \ \ \forall i = 1, ..., m,\\
& l^{x}_{j} \leq & x_{j} & \leq u^{x}_{j}, & \ \ \ \forall j = 1, ..., n,\\
\end{array}
\end{align}
where $l^{b,x}_{i, j}, u^{b, x}_{i, j} \in \mathbb{R} \cup \{ - \infty, + \infty \}$, i.e., some bounds may be infinite.
Before being passed to the interior-point optimizer, the problem is transformed into standard form.
This transformation occurs after the presolve phase, and is transparent to the user.
In particular, primal-dual solutions are returned with respect to formulation \eqref{eq:LP_gen}.
}
Free variables are an outstanding issue for interior-point methods, see, e.g., \cite{Wright1997, Anjos2008}, and are not supported explicitly in Tulip.
Instead, free variables are automatically split into the difference of two non-negative variables, with the knowledge that this reformulation may introduce some numerical instability.
Although finite upper bounds may be treated as arbitrary constraints, it is more efficient to handle them separately.
Let $\mathcal{I}$ denote the set of indices of upper-bounded variables.
Upper-bound constraints then write
\begin{align}
x_{i} \leq u_{i}, \ \ \forall i \in \mathcal{I},
\end{align}
which we write in compact form $Ux \leq u$, where $U \in \mathbb{R}^{|\mathcal{I}| \times n}$ and
\begin{align*}
U_{i, j} =
\left\{
\begin{array}{ll}
1 & \text{if } i = j \in \mathcal{I}\\
0 & \text{otherwise}
\end{array}
\right.
.
\end{align*}
Therefore, \revision{blue}{internally}, Tulip \revision{blue}{solves} linear programs of the form
\begin{align}
\label{eq:standard_LP_bounds}
\begin{array}{rll}
(P) \ \ \ \displaystyle \min_{x, w} \ \ \
& c^{T} x\\
s.t. \ \ \
& A x = b,\\
& U x + w = u,\\
& x, w \geq 0,
\end{array}
&
\hspace*{1cm}
\begin{array}{rll}
(D) \ \ \ \displaystyle \max_{y, s, z} \ \ \
& b^{T} y - u^{T}z \ \ \ \ \ \ \\
s.t. \ \
& A^{T}y + s - U^{T}z= c,\\
& s, z \geq 0.\\
\
\end{array}
\end{align}
Let us emphasize that handling upper bounds separately only affects the underlying linear algebra operations, not the interior-point algorithm.
The Newton system \eqref{eq:NewtonSystem} then writes
\begin{align}
\label{eq:NS_bounds}
\left[
\begin{array}{ccccccccccccccc}
\revision{red}{-\rho_{p} I} && && A^{T} && I && -U^{T} && -c && \\
A && && \revision{red}{\rho_{d} I} && && && -b && \\
U && I && && && && -u && \\
-c^{T} && && b^{T} && && -u^{T} && \revision{red}{\rho_{g}} && -1 \\
S && && && X && && && \\
&& Z && && && W && && \\
&& && && && && \kappa && \tau
\end{array}
\right]
\left[
\begin{array}{l}
\delta_{x}\\
\delta_{w}\\
\delta_{y}\\
\delta_{s}\\
\delta_{z}\\
\delta_{\tau}\\
\delta_{\kappa}
\end{array}
\right]
=
\left[
\begin{array}{l}
\xi_{d}\\
\xi_{p}\\
\xi_{u}\\
\xi_{g}\\
\xi_{xs}\\
\xi_{wz}\\
\xi_{\tau \kappa}
\end{array}
\right]
,
\end{align}
and it reduces, after performing diagonal substitutions, to solving two augmented systems of the form
\begin{align}
\label{eq:augsys_hsd_bounds_reduced}
\left[
\begin{array}{cccccc}
\revision{red}{-(\tilde{\Theta}^{-1} + \rho_{p} I)} && A^{T} \\
A && \revision{red}{\rho_{d} I}
\end{array}
\right]
\left[
\begin{array}{l}
p\\
q
\end{array}
\right]
&=
\left[
\begin{array}{l}
\tilde{\xi}_{d}\\
\tilde{\xi}_{p}
\end{array}
\right]
,
\end{align}
where $\tilde{\Theta} = \left( X^{-1}S + U^{T}(W^{-1}Z)U \right)^{-1}$.
Note that $\tilde{\Theta}$ is a diagonal matrix with positive diagonal.
Therefore, system \eqref{eq:augsys_hsd_bounds_reduced} has the same size and structure as \eqref{eq:augsys_hsd}.
Furthermore, $\tilde{\Theta}$ can be computed efficiently using only vector operations, i.e., without any matrix-matrix nor matrix-vector product.
\subsection{Solver parameters}
\label{sec:implementation:params}
The default values for numerical tolerances of Section \ref{sec:ipm:stoppingCriteria} are
\begin{align*}
\varepsilon_{p} = \revision{blue}{\sqrt{\epsilon}},\\
\varepsilon_{d} = \revision{blue}{\sqrt{\epsilon}},\\
\varepsilon_{g} = \revision{blue}{\sqrt{\epsilon}},\\
\varepsilon_{i} = \revision{blue}{\sqrt{\epsilon}},
\end{align*}
\revision{blue}{where $\epsilon$ is the machine precision, which depends on the arithmetic.
For instance, double precision (64 bits) floating point arithmetic corresponds to $\epsilon_{64} \simeq 10^{-16}$, while quadruple precision (128 bits) corresponds to $\epsilon_{128} \simeq 10^{-34}$.}
When computing additional centrality corrections, we use the following default values:
\begin{align*}
\revision{blue}{\gamma_{min}} = 10^{-1},\\
\beta = 10^{-1}.
\end{align*}
The default maximum number of centrality corrections is set to $5$.
Finally, the maximum number of IPM iterations is set to a default of $100$.
A time limit may be imposed by the user, in which case it is checked at the beginning of each IPM iteration.
\revision{red}{
\subsection{Default linear solvers}
\label{sec:implementation:linalg}
}
\revision{red}{Several generic linear algebra implementations are readily available in Tulip, and can be selected without requiring any additional implementation.
The default settings are as follows.
First, $A$ is stored in a \texttt{SparseMatrixCSC} struct, i.e., in compressed sparse column format.
Elementary linear algebra operations, e.g., matrix-vector products, employ Julia's standard library \texttt{SparseArrays}.
Augmented systems are then solved by a direct method, namely, an $LDL^{T}$ factorization of the quasi-definite augmented system.
Sparse factorizations use either the CHOLMOD module of SuiteSparse \cite{SuiteSparse}, or the \texttt{LDLFactorizations} package \cite{LDLFactorizations}, a Julia translation of SuiteSparse's $LDL^{T}$ factorization code that supports arbitrary arithmetic.
Tulip uses the former for double precision floating point arithmetic, and the latter otherwise.
Finally, the solver's log indicates: the model's arithmetic, the linear solver's backend, e.g., CHOLMOD, and the linear system being solved, i.e., either the augmented system or the normal equations system.
As mentioned in Section \ref{sec:KKT}, custom options for linear algebra can be passed to the solver.
Specifically, the \texttt{MatrixOptions} parameter lets the user select a matrix implementation of their choice, and the \texttt{KKTOptions} parameter is used to specify a choice of linear solver.
Their usage is depicted in Figure \ref{fig:KKTOptions}.
In Figure \ref{fig:KKTOptions:default}, the default settings are used.
The model is instantiated at line 3; the \texttt{Model\{Float64\}} syntax indicates that \texttt{Float64} arithmetic is used.
Then, the problem is read from the \texttt{problem.mps} file at line 4, and the model is solved at line 6.
Figures \ref{fig:KKTOptions:Dense}, \ref{fig:KKTOptions:CholmodPD}, \ref{fig:KKTOptions:LDLFact} are identical, but select different linear algebra implementations by setting the appropriate \texttt{MatrixOptions} and \texttt{KKTOptions} parameters.
Figure \ref{fig:KKTOptions:Dense} illustrates the use of dense linear algebra.
Line 7 indicates that $A$ should be stored as a dense matrix.
Then, at line 8, a dense linear solver is selected through the \texttt{SolverOptions(Dense\_SymPosDef)} setting.
In this case, the augmented system is reduced to the (dense) normal equations systems, and a dense Cholesky factorization is applied; BLAS/LAPACK routines are automatically called when using single and double precision floating point arithmetic, otherwise Julia's generic routines are called.
In the example of Figure \ref{fig:KKTOptions:CholmodPD}, linear systems are reduced to the normal equations system, and CHOLMOD's sparse Cholesky factorization is applied.
Note that a single dense column in $A$ results in a fully dense normal equations systems.
Thus, in the absence of a mechanism for handling dense columns, this approach may be impractical for some large problems.
Finally, in Figure \ref{fig:KKTOptions:LDLFact}, the augmented system is solved using an $LDL^{T}$ factorization, computed by \texttt{LDLFactorizations}.
}
\begin{figure}
\caption{Sample code using default linear algebra settings}
\label{fig:KKTOptions:default}
\caption{Sample code using dense linear algebra}
\label{fig:KKTOptions:Dense}
\caption{Sample code using CHOLMOD to solve the normal equations system}
\label{fig:KKTOptions:CholmodPD}
\caption{Sample code using \texttt{LDLFactorizations}}
\label{fig:KKTOptions:LDLFact}
\caption{Code examples for reading and solving a problem with various linear algebra implementations.}
\label{fig:KKTOptions}
\end{figure}
\section{Computational results}
\label{sec:res}
\revision{blue}{
In this section, we compare Tulip to several open-source and commercial solvers, focusing on those that are available to Julia users.
Let us emphasize that our goal is \emph{not} to perform a comprehensive benchmark of interior-point LP solvers.
We evaluate Tulip's performance and robustness in the following three settings.
First, in Section \ref{sec:res:plato}, we consider general LP instances from H. Mittelmann's benchmark,\footnote{\url{http://plato.asu.edu/ftp/lpbar.html}} which are solved using generic sparse linear algebra.
Then, in Section \ref{sec:res:colgen}, we consider structured instances that arise in decomposition methods, for which we develop specialized linear algebra.
Finally, in Section \ref{sec:res:precision}, we illustrate Tulip's ability to use different levels of arithmetic precision by solving problems in higher precision.
}
\subsection{\revision{blue}{ Results on general LP instances}}
\label{sec:res:plato}
\revision{blue}{
We select all instances from H. Mittelmann's benchmark of barrier LP solvers, except \texttt{qap15} and \texttt{L1\_sixm1000obs}.
The former is identical to \texttt{nug15}, and the latter could not be solved by any solvers in the prescribed time limit.
This yields a testset of 43 medium to large-scale instances.}
We compare the following open-source and commercial solvers: Clp $1.17$ \cite{CLP}, GLPK 4.64 \cite{GLPK}, ECOS 2.0\cite{Domahidi2013}, Tulip 0.5.0, CPLEX 12.10 \cite{CPLEX}, Gurobi 9.0 \cite{Gurobi} and Mosek 9.2 \cite{Mosek}.
All are accessed through their respective Julia interface.
We run the interior-point algorithm of each solver with a single thread, no crossover, and a \revision{blue}{$10,000$s} time limit.
\revision{blue}{For Tulip, the maximum number of IPM iterations is increased from the default 100 to 500.}
All other parameters are left to their default values.
Experiments are carried out on \revision{blue}{a cluster of machines equipped with dual Intel Xeon 6148-2.4GHz CPUs, and varying amounts of RAM. Each job is run with a single thread and 16GB of memory.}
Scripts for running these experiments are available online,\footnote{\revision{blue}{\url{https://github.com/mtanneau/LPBenchmarks}}}
\revision{blue}{together with the logfiles of each solver.}
Computational results are displayed in Table \ref{tab:res:plato}.
\revision{blue}{
For each solver, we report the total number of instances solved, the mean runtime, and individual runtimes for each instance.
Segmentation faults are indicated by \texttt{seg}, timeouts by \texttt{t}, other failures by \texttt{f}, and reduced accuracy solutions by \texttt{r}.
The time to read in the data is not included.
}
\revision{red}{
Mean runtimes are shifted geometric means
\begin{align*}
\mu_{\delta}(t_{1}, ..., t_{N})
= \left( \prod_{i=1}^{N} (t_{i} + \delta) \right)^{\frac{1}{N}} - \delta
= \exp \left[ \frac{1}{N} \sum_{i=1}^{N} \log (t_{i} + \delta) \right] - \delta,
\end{align*}
with $\delta=10$ seconds.
}
\input{tex/tables/table_plato}
\revision{blue}{
First, the three commercial solvers CPLEX, Gurobi and Mosek display similar performance and robustness, and outperform open-source alternatives by one to two orders of magnitude.
While CPLEX and Gurobi encountered numerical issues on a few instances, we found that these were resolved by activating crossover.
Second, Clp displays a worse performance than expected, solving only $25$ problems with an average runtime about two times larger than Tulip's.
In fact, out of $43$ instances, we recorded $5$ segmentation faults, $8$ unidentified errors, with the $10,000$s time limit being reached on the remaining $10$ unsolved instances.
A more detailed analysis of the log suggests that segmentation faults and some unknown errors are caused by memory-related issues, i.e., large Cholesky factors that do not fit in memory.
We note that those errors do not occur when running Clp through its command-line executable: the executable performs additional checks to decide whether the model should be dualized; this can yield smaller linear systems and thus avoid memory issues.
Nevertheless, given that the \texttt{dualize} option is not available in Clp's C interface\footnote{See discussion in \url{https://github.com/coin-or/Clp/issues/151}}, on which Clp's Julia wrapper is built, the present results best represent the behavior that Julia users would encounter.
Third, among open-source solvers, Tulip is the top performer with $33$ instances solved and a mean runtime of $604.6$s, while GLPK has the worst performance with only $6$ instances reportedly solved.
Tulip's $5$ failures include $3$ instances that ran out of memory; for the remaining $2$, i.e., \texttt{ns1688926} and \texttt{watson\_2}, Tulip fails to reach the prescribed accuracy due to numerical issues.
A possible remedy to the latter will be discussed in Section \ref{sec:res:precision}.
Finally, out of the $26$ instances reported as solved by ECOS, $6$ were solved to reduced accuracy.
This situation typically corresponds to ECOS encountering numerical issues close to optimality, but a feasible or close-to-feasible solution is still available.
}
\subsection{\revision{blue}{Results on structured LP instances}}
\label{sec:res:colgen}
We now compare Tulip to state-of-the-art commercial solvers on a
\revision{blue}{
collection of structured problems, for which we design specialized linear algebra routines.
Specifically, we consider the context of Dantzig-Wolfe (DW) decomposition \cite{Dantzig1960} in conjunction with a column-generation (CG) algorithm; we refer to \cite{Desaulniers2006column} for a thorough overview of DW decomposition and CG algorithms.
Here, we focus on the resolution of the master problem, i.e., we consider problems of the form
\begin{align}
\label{eq:MP:objective}
(MP) \ \ \ \min_{\lambda} \ \ \ & \sum_{r=1}^{R} \sum_{j=1}^{n_{r}} c_{r, j} \lambda_{r, j} + c_{0}^{T} \lambda_{0}\\
\label{eq:MP:convexity}
s.t. \ \ \
& \sum_{j=1}^{n_{r}} \lambda_{r, j} =1, \ \ \ r=1, ..., R,\\
\label{eq:MP:linking}
& \sum_{r=1}^{R} \sum_{j=1}^{n_{r}} a_{r, j} \lambda_{r, j} + A_{0} \lambda_{0} = b_{0},\\
\label{eq:MP:domain}
& \lambda \geq 0,
\end{align}
where $R$ is the number of sub-problems, $m_{0}$ is the number of linking constraints, $n_{r}$ is the number of columns from sub-problem $r$, $A_{0} \in \mathbb{R}^{m_{0} \times n_{0}}$, and $\forall (r, j), a_{r, j} \in \mathbb{R}^{m_{0}}$.
Let $M = R + m_{0}$ and $N = n_{0} + n_{1} + \dots + n_{R}$ be the number of constraints and variables in $(MP)$, respectively.
In what follows, we focus on the case where (i) $R$ is large, typically in the thousands or tens of thousands, (ii) $m_{0}$ is not too large, typically in the hundreds, and (iii) the vectors $a_{r, j} \in \mathbb{R}^{m_{0}}$ and $A_{0}$ are dense.
}
\revision{blue}{
\subsubsection{Instance collection}
\label{sec:res:colgen:instances}
We build a collection of master problems from two sources.
First, we generate instances of Distributed Energy Resources (DER) coordination from \cite{Anjos2019}.
We select a renewable penetration rate $\xi =0.33$, a time horizon $T= \{24, 48, 96\}$, and a number of resources $R=\{1024, 2048, 4096, 8192, 16384, 32768\}$.
Second, we select all two-stage stochastic programming (TSSP) problems from \cite{Gondzio2016_PDCGMlarge} that have at least $1,000$ scenarios.
This yields $18$ DER instances, and $27$ TSSP instances.
Then, each instance is solved by column generation; master problems are solved with Gurobi's barrier (with crossover) and sub-problems are solved with Gurobi's default settings.
In the case of DER instances, which contain mixed-integer variables, only the root node of a branch-and-price tree is solved.
Finally, at every tenth CG iteration and the last, the current master problem is saved.
Thus, we obtain a dataset of $153$ master problems of varying sizes.
CG algorithms benefit from sub-optimal, well-centered interior solutions from the master problem \cite{Gondzio1996_PDCGM}, which are typically obtained by simply relaxing an IPM solver's optimality tolerance.
These provide the double benefit of stabilizing the CG procedure, thus reducing the number of CG iterations, and speeding-up the resolution of the master problem by stopping the IPM early.
Importantly, this approach requires \emph{feasible}, but sub-optimal, dual solutions from the master problem.
While in classical primal-dual IPMs, feasibility is generally reached earlier than optimality, in the homogeneous algorithm, infeasibilities and complementarity are reduced at the same rate \cite{Andersen2000}.
As a consequence, for IPM solvers that implement the homogeneous algorithm, such as Mosek, ECOS and Tulip, relaxing optimality tolerances yields no computational gain.
Nevertheless, let us formally restate that our present goal is \emph{not} to implement a state-of-the-art column-generation solver, but to quantify the benefits of specialized linear algebra in that context; in particular, specialized linear algebra would equally benefit classical primal-dual IPMs, since the approach of \cite{Gondzio1996_PDCGM} does not affect the master problem's structure.
Therefore, we only implement a vanilla CG procedure, which is described in Appendix \ref{sec:colgen}.
In particular, we do not make use of any acceleration technique beyond the use of partial pricing.
Table \ref{tab:der:stats} and Table \ref{tab:tssp:stats} display some statistics for DER and TSSP instances, respectively.
For each instance, we report: the number of sub-problems $R$, the number of CG iterations (Iter), total time spent solving the master problem (Master) and pricing sub-problems (Pricing) during the CG procedure and, for the final $(MP)$: the number of linking constraints ($m_{0}$), the number of variables ($N$), and the proportion of non-zero coefficients in the linking constraints ($\%$nz).
From the two tables, we see that \texttt{DER}, \texttt{4node} and \texttt{4node-base} instances display relatively dense linking rows, with $35$ to $90\%$ coefficients being non-zeros, and a modest number of linking constraints.
Other instances are either sparser, e.g., the \texttt{env} and \texttt{env-diss} instances whose linking rows are only $13\%$ dense, or have few linking constraints, e.g., \texttt{phone}.
Therefore, we expect that our specialized implementation will yield larger gains for the former instances.
\input{tex/tables/der_stats}
\input{tex/tables/tssp_stats}
}
\revision{red}{
\subsubsection{Specialized linear algebra}
\label{sec:res:colgen:linalg}
}
We now describe a specialized Cholesky factorization that exploits the block structure of the master problem.
First, the constraint matrix of $(MP)$ is unit block-angular, i.e., it has the form
\begin{align}
\label{eq:struct_A_unitblockangular}
A & =
\begin{bmatrix}
e^{T} & &&0\\
& \ddots & & \vdots\\
& & e^{T} & 0\\
A_{1} & \cdots & A_{R} & A_{0}
\end{bmatrix},
\end{align}
where
\begin{align}
A_{r} &=
\begin{pmatrix}
| & & |\\
a_{r, 1} & \dots & a_{r, n_{r}}\\
| & & |
\end{pmatrix}
\in \mathbb{R}^{m_{0} \times n_{r}}
.
\end{align}
Let us recall that the normal equations system writes
\begin{align}
\label{eq:normaleq}
\revision{red}{\left(A(\Theta^{-1}+\rho_{p} I)^{-1} A^{T} + \rho_{d} I \right) \delta_{y}} = \xi,
\end{align}
where \revision{red}{$\delta_{y} \in \mathbb{R}^{M}$}, and $\Theta \in \mathbb{R}^{N \times N}$ is a diagonal matrix with positive diagonal.
\revision{red}{Let $S$ denote the left-hand matrix of \eqref{eq:normaleq}, and define
\begin{align}
\label{eq:struct_D_blockdiagonal}
\tilde{\Theta} = (\Theta^{-1}+\rho_{p} I)^{-1} =
\begin{pmatrix}
\tilde{\Theta}_{1} & & \\
& \ddots & \\
& & \tilde{\Theta}_{R}\\
&&& \tilde{\Theta}_{0}
\end{pmatrix}
,
\end{align}
and $\tilde{\theta}_{r} = \tilde{\Theta}_{r}e \in \mathbb{R}^{n_{r}}$, for $r=0, ..., R$.}
Consequently, the normal equations system has the form
\revision{red}{
\begin{align}
\label{eq:normalEq_blocks}
\begin{bmatrix}
d_{1} && && && (A_{1}\tilde{\theta}_{1})^{T}\\
&& \ddots && && \vdots\\
&& && d_{R} && (A_{R}\tilde{\theta}_{R})^{T}\\
A_{1}\tilde{\theta}_{1} && \cdots && A_{R} \tilde{\theta}_{R} && \Phi
\end{bmatrix}
\begin{bmatrix}
(\delta_{y})_{1}\\
\vdots\\
(\delta_{y})_{R}\\
(\delta_{y})_{0}
\end{bmatrix}
&=
\begin{bmatrix}
\xi_{1}\\
\vdots\\
\xi_{R}\\
\xi_{0}
\end{bmatrix},
\end{align}}
where \revision{red}{
\begin{align}
d_{r} &= e^{T}\tilde{\theta}_{r} + \rho_{d}, \ \ \ r=1, ..., R,\\
\Phi &= \sum_{r=0}^{R} A_{r} \tilde{\Theta}_{r} A_{r}^{T} + \rho_{d} I.
\end{align}
}
Then, define
\begin{align}
l_{r} & = \frac{1}{\revision{red}{d_{r}}} A_{r} \tilde{\theta}_{r} \in \mathbb{R}^{m_{0}}, \ \ \ r=1, ..., R,\\
\label{eq:schur_complement}
C &= \Phi - \sum_{r=1}^{R} \frac{1}{\revision{red}{d_{r}}} (A_{r} \tilde{\theta}_{r}) (A_{r} \tilde{\theta}_{r})^{T} \in \mathbb{R}^{m_{0} \times m_{0}}.
\end{align}
Given that both \revision{red}{$S$} and its upper-left block are positive definite, so is the Schur complement $C$.
Therefore, its Cholesky factorization exists, which we denote $C = L_{C} D_{C} L_{C}^{T}$.
It then follows that a Cholesky factorization of \revision{red}{$S$} is given by
\begin{align}
\revision{red}{S} & =
\underbrace{
\begin{bmatrix}
1 & \\
& \ddots \\
& & 1\\
l_{1} & \cdots & l_{R} & L_{C}
\end{bmatrix}
}_{L}
\times
\underbrace{
\begin{bmatrix}
\revision{red}{d_{1}} & & \\
& \ddots & \\
& & \revision{red}{d_{R}} \\
& & & D_{C}
\end{bmatrix}
}_{D}
\times
\underbrace{
\begin{bmatrix}
1 & \\
& \ddots \\
& & 1\\
l_{1} & \cdots & l_{R} & L_{C}
\end{bmatrix}^{T}
}_{L^{T}}
.
\end{align}
Finally, once the Cholesky factors $L$ and $D$ are computed, the normal equations \eqref{eq:normalEq_blocks} are solved as follows:
\begin{align}
\label{eq:normalEq_globalSolve}
\revision{red}{(\delta_{y})_{0}} &= (L_{C} D_{C} L_{C}^{T})^{-1} \left( \xi_{0} - \sum_{r=1}^{R} \xi_{r} l_{r} \right), \\
\label{eq:normalEq_localSolve}
\revision{red}{(\delta_{y})_{r}} &= \dfrac{1}{\revision{red}{d_{r}}} \xi_{r} - l_{r}^{T} \revision{red}{(\delta_{y})_{0}}, \ \ \ r=1, ..., R.
\end{align}
Exploiting the structure of $A$ yields several computational advantages.
First, the factors $L$ and $D$ can be computed directly from $A$ and $\Theta$, i.e., the matrix \revision{red}{$S$} does not need to be explicitly formed nor stored, thus saving both time and memory.
Second, the sparsity structure of $L$ is known beforehand.
Specifically, the lower blocks $l_{1}, \dots, l_{R}$ are all dense column vectors, and the Schur complement $C$ is a dense $m_{0}\times m_{0}$ matrix.
Therefore, one does not need a preprocessing phase wherein a sparsity-preserving ordering is computed, thus saving time and making memory allocation fully known in advance.
Third, since most heavy operations are performed on dense matrices, efficient cache-exploiting kernels for dense linear algebra can be used, further speeding-up the computations.
Finally, note that most operations such as forming the Cholesky factors and performing the backward substitutions, are amenable to parallelization.
\revision{blue}{
\subsubsection{Experimental setup}
\label{sec:res:colgen:setup}
We implement the specialized routines described above in Julia.\footnote{\url{https://github.com/mtanneau/UnitBlockAngular.jl}}
Specifically, we define a \texttt{UnitBlockAngularMatrix} type, together with specialized matrix-vector product methods, and a \texttt{UnitBlockAngularFactor} type for computing factorizations and solving linear systems.
Dense linear algebra operations are performed by BLAS/LAPACK routines directly, and the entire implementation is less than 250 lines of code.
This specialized implementation is passed to the solver by setting the \texttt{MatrixOptions} and \texttt{KKTOptions} parameters accordingly, as illustrated in Figure \ref{code:KKT:unitblockangular}.
A \texttt{Model} object is first created at line 4, and the problem data is imported at line 5.
At line 11, we set the \texttt{MatrixOptions} parameter to specify that the constraint matrix is of the \texttt{UnitBlockAngularMatrix} type with $m_{0}=24$ linking constraints, $n_{0}=72$ linking variables, $n=6421$ non-linking variables, and $R=1024$ unit blocks.
Then, at line 16, we select the \texttt{UnitBlockAngularFactor} type as a linear solver.
Finally, the correct matrix and linear solver are instantiated within the \texttt{optimize!} call at line 20.
Importantly, let us emphasize that no modification was made to Tulip's source code: the correct methods are automatically selected by Julia's multiple dispatch feature, with no performance loss for calling an external function.
Experiments are carried out on an Intel Xeon E5-2637@3.50GHz CPU, 128GB RAM machine running Linux; scripts and data for running these experiments are available online.\footnote{Code for generating DER instances is available at \url{https://github.com/mtanneau/DER_experiments} and for TSPP instances at \url{https://github.com/mtanneau/TSSP}}
We compare the following IPM solvers: CPLEX 12.10 \cite{CPLEX}, Gurobi 9.0 \cite{Gurobi}, Mosek 9.2.5 \cite{Mosek}, Tulip 0.5.0 with generic linear algebra, and Tulip 0.5.0 with specialized linear algebra; the latter is denoted Tulip*.
We run each solver on a single thread, and no crossover.
Presolve may alter the structure of $A$ in several ways by, e.g., reducing the number of linking constraints, eliminating variables --possibly some entire blocks-- or modifying the unit blocks during scaling.
Therefore, since we are interested in comparing the per-iteration cost among solvers, we also deactivate presolve.
Finally, none of the selected IPM solvers have any warm-start capability, i.e., in a CG algorithm, master problems would effectively be solved from scratch at each CG iteration.
Thus, solving master problems independently of one another, as is done here, does not invalidate our analysis.
}
\begin{figure}
\caption{Sample Julia code illustrating the use of a custom \texttt{UnitBlockAngularMatrix}}
\label{code:KKT:unitblockangular}
\end{figure}
\revision{blue}{
\subsubsection{Results}
\label{src:res:colgen:res}
Results are reported in Table \ref{tab:res:rmp:short}; for conciseness, only the final master problem of each CG instance is included here.
Results for the entire collection can be found in Table \ref{tab:res:rmp:full}, Appendix \ref{app:res}.
For each instance and solver, we report total CPU time (T), in seconds, and the number of IPM iterations (Iter).
In Table \ref{tab:res:rmp:full}, the number of CG iterations (at which the instance was obtained) is also displayed.
\input{tex/tables/res_rmp_short}
We begin by comparing Tulip with and without specialized linear algebra.
First, the number of IPM iterations is almost identical between the two, with differences never exceeding 6 IPM iterations.
The differences are caused by small numerical discrepancies between the linear algebra implementations, which remain negligible until close to the optimum.
Second, using specialized linear algebra results in a significant speedup, especially on larger and denser instances.
Indeed, on large \texttt{DER} and \texttt{4node} instances, we typically observe a tenfold speedup.
For smaller and sparser instances, e.g., the \texttt{env} instances, or with very few linking constraints such as \texttt{phone}, using specialized linear algebra still brings a moderate performance improvement.
Next, we compare Tulip with specialized linear algebra, Tulip*, against state-of-the-art commercial solvers.
Given CPLEX's poorer relative performance on this test set, in the following we mainly discuss the results of Tulip* in comparison with Mosek and Gurobi.
First, our specialized implementation is able to outperform commercial codes on the larger and denser instances, while remaining within a reasonable factor on smaller and sparse instances.
The largest performance improvement is observed on the \texttt{DER-48} instance with $R=32,768$, for which Tulip* achieves a $30 \%$ speedup over the fastest commercial alternative.
This demonstrates that, when exploiting structure, open-source solvers can compete with state-of-the-art commercial codes.
Second, Tulip's iteration count is typically $50$ to $100 \%$ larger than that of Mosek and Gurobi.
When comparing average per-iteration times on the denser instances, we observe that Tulip is generally $1.5$ to $3$ times faster than Gurobi and Mosek.
}
Recall that the cost of an individual IPM iteration depends not only on problem size and the efficiency of the underlying linear algebra, but also on algorithmic features such as the number of corrections, which we cannot \revision{blue}{measure} directly.
\revision{blue}{Nevertheless, the performance difference is significant enough to suggest that algorithmic improvements aimed at reducing the number of IPM iterations would substantially improve Tulip's performance.}
{
\subsection{Solving problems in extended precision}
\label{sec:res:precision}
Almost all optimization solvers perform computations in double precision (64 bits) floating-point arithmetic, denoted by \texttt{double} and \texttt{Float64} in C and Julia, respectively.
Julia's parametric type system and multiple dispatch allow to write generic code: in the present case, this means that Tulip's code can be used with \emph{arbitrary} arithmetic.
We now illustrate this functionality for solving problems in higher precision.
The ability to use extended precision is useful in various contexts.
First, while typical numerical tolerances for most LP solvers range from $10^{-6}$ to $10^{-8}$, one may \emph{require} levels of precision that exceed what double-precision arithmetic can achieve.
For instance, in \cite{Ma2015}, the authors consider problems where variations of order $10^{-6}$ to $10^{-10}$ are meaningful.
One remedy to this issue is to use, e.g., quadruple-precision arithmetic.
Second, even with ``standard" tolerances, solvers may encounter numerical issues for badly scaled problems, sometimes resulting in the optimization being aborted.
These issues may be alleviated by using higher precision, thereby allowing one to solve a given challenging instance, albeit at a performance cost.
Finally, in the course of developing a new optimization software or algorithmic technique, identifying whether inconsistencies are due to numerical issues, mathematical errors, or software bugs, can be a daunting and time-consuming task.
In that context, the ability to easily switch between different arithmetics enables one to factor out rounding errors and related issues, thereby identifying --or ruling out-- other sources of errors.
Let us note that a handful of simplex-based solvers have the capability to compute extended-precision or exact solutions to LP problems, either by performing computations in exact arithmetic, solving a sequence of LPs with increasing precision, or using iterative refinement techniques; the reader is referred to \cite{Gleixner2016} for an overview of such approaches and available software.
We are not aware of any existing interior-point solver with this capability.
As pointed out in \cite{Gleixner2016}, performing all computations in the prescribed arithmetic, as is the case in Tulip, is intractable for large problems.
Consequently, Tulip should not be viewed as a competitive tool for solving LPs in extended precision.
Rather, the main advantage of our implementation is its simplicity and flexibility: it required no modification of the source code, runs the same algorithm regardless of the arithmetic, and its use is straightforward.
Indeed, as Figure \ref{fig:arithmetic} illustrates, besides loading the appropriate packages, the user only needs to specify the arithmetic when creating a model; the rest of the code is identical.
Therefore, using Tulip with higher-precision arithmetic is best envisioned as a prototyping tool, or to occasionally solve a numerically challenging problem.
\begin{figure}
\caption{Using \texttt{Float64}
\label{fig:arithmetic:F64}
\caption{Using \texttt{Double64}
\label{fig:arithmetic:D64}
\caption{Sample Julia code illustrating the use of different arithmetics.}
\label{fig:arithmetic}
\end{figure}
As an example of this use case, we consider the $6$ instances from Section \ref{sec:res:plato} that required more than $100$ IPM iterations; this generally indicates numerical issues.
Each instance is solved with Tulip in quadruple-precision arithmetic.
We use the \texttt{Double64} type from the \texttt{DoubleFloats} Julia package, which implements the so-called ``double-double" arithmetic, wherein a pair of double-precision numbers is used to approximate one quadruple-precision number.
This implementation allows one to exploit fast, hardware-implemented, double-precision arithmetic, while achieving a precision similar to that of 128-bit floating-point arithmetic.
Experiments were carried out on the same cluster of machines as in Section \ref{sec:res:plato}.
Besides the different arithmetic, we increase the time limit to $40,000$s and set tolerances to $10^{-8}$, that is, the problems are solved up to usual double-precision tolerances.
All other settings are left identical to those of Section \ref{sec:res:plato}.
Results are displayed in Table \ref{tab:res:plato:Double64}.
For each instance and arithmetic, we report the total solution time (CPU) in seconds, the number of IPM iterations (Iter), and the solver's result status (Status).
We first note that, when using \texttt{Double64} arithmetic, all instances are solved to optimality.
This validates the earlier finding that instances \texttt{ns1688926} and \texttt{watson\_2} did encounter numerical issues.
Second, we observe a drastic reduction in the number of IPM iterations from \texttt{Float64} to \texttt{Double64}, with decreases in iteration counts ranging from $40\%$ to over $90\%$ in the case of \texttt{neos2} and \texttt{ns1688926}.
Third, while the per-iteration cost of \texttt{Double64} is typically 8x larger than that of \texttt{Float64}, overall computing times do not increase as much due to the reduction in IPM iterations.
In fact, in the extreme case of \texttt{ns1688926}, solving the problem in \texttt{Double64} is significantly faster than solving it in \texttt{Float64}.
Finally, the results of Table \ref{tab:res:plato:Double64} suggest that Tulip would most benefit from greater numerical stability on instances such as \texttt{neos2}, \texttt{ns1688926}, \texttt{stat96v1} and \texttt{watson\_2}.
This may include, for instance, the use of iterative refinement when solving Newton systems.
On the other hand, similar iteration counts for both arithmetics would have suggested algorithmic issues, e.g., short steps being taken due to the iterates being far from the central path.
\begin{table}
\centering
\caption{Problematic instances from the Mittelmann benchmark}
\label{tab:res:plato:Double64}
\begin{tabular}{lcrrrcrrr}
\toprule
&& \multicolumn{3}{c}{\texttt{Float64}} && \multicolumn{3}{c}{\texttt{Double64}}\\
\cmidrule(rl){3-5} \cmidrule(rl){7-9}
Instance && CPU (s) & Iter & Status && CPU(s) & Iter & Status\\
\midrule
\texttt{neos2} && 462.1 & 460 & Optimal && 265.1 & 37 & Optimal\\
\texttt{ns1688926} && 1007.7 & 500 & Iterations && 142.8 & 18 & Optimal\\
\texttt{s250r10} && 257.2 & 169 & Optimal && 1385.0 & 93 & Optimal\\
\texttt{shs1023} && 371.6 & 266 & Optimal && 968.8 & 105 & Optimal\\
\texttt{stat96v1} && 41.3 & 275 & Optimal && 30.4 & 42 & Optimal\\
\texttt{watson\_2} && 295.7 & 500 & Iterations && 243.4 & 67 & Optimal\\
\bottomrule
\end{tabular}
\end{table}
}
\section{Conclusion}
\label{sec:conclusion}
In this paper, we have described a \revision{red}{regularized} homogeneous interior-point algorithm and its implementation in Tulip, an open-source linear optimization solver.
\revision{blue}{Our solver is written in Julia, and leverages some of the language's features to propose a flexible and easily-customized implementation.}
Most notably, \revision{blue}{Tulip's} algorithmic framework is fully disentangled from linear algebra implementations \revision{blue}{and the choice of arithmetic}.
The performance of the code has been evaluated on \revision{blue}{generic instances from H. Mittelmann's benchmark} testset, on \revision{blue}{two sets} of structured instances for which we developed specialized linear algebra routines, \revision{blue}{and on numerically problematic instances using higher-precision arithmetic.
The computational evaluation has shown three main results.
First, when solving generic LP instances, Tulip is competitive with open-source IPM solvers that have a Julia interface.
Second, when solving structured problems, the use of custom linear algebra routines yields a tenfold speedup over generic ones, thereby outperforming state-of-the-art commercial IPM solvers on larger and denser instances.
}
These results demonstrate the benefits of being able to seamlessly integrate specialized linear algebra within an interior-point algorithm.
\revision{blue}{Third, in a development context, Tulip can be conveniently used in conjunction with higher-precision arithmetic, so as to alleviate numerical issues.}
\revision{blue}{
Finally, future developments will consider the use of iterative methods for solving linear systems, the development of more general structured linear algebra routines and their multi-threaded implementation, and more efficient algorithmic techniques for solving problems in extended precision.
Because of the way in which Tulip has been designed, all those developments do not require any significant rework of the code structure.}
\appendix
\section{Dantzig-Wolfe decomposition and column generation}
\label{sec:colgen}
In this section, we present the Dantzig-Wolfe decomposition principle \cite{Dantzig1960} and the basic column-generation framework.
We refer to \cite{Desaulniers2006column} for a thorough overview of column generation, and the relation between Dantzig-Wolfe decomposition and Lagrangian decomposition.
\subsection{Dantzig-Wolfe decomposition}
\label{sec:colgen:subsec:dw}
\revision{blue}{Consider} the problem
\begin{align*}
(P) \ \ \ \min_{x} \ \ \ & \sum_{r=0}^{R} c_{r}^{T} x_{r}\\
s.t. \ \ \ & \sum_{r=0}^{R} A_{r} x_{r} = b_{0},\\
& \revision{blue}{x_{0} \geq 0,}\\
& x_{r} \in \mathcal{X}_{r}, \ \ \ \revision{blue}{r=1, ..., R},
\end{align*}
\revision{blue}{where}, for each $r\revision{blue}{=1, ..., R}$, $\mathcal{X}_{r}$ is defined by a finite number of linear inequalities, plus integrality restrictions on some of the coordinates of $x_{r}$.
Therefore, \revision{blue}{the convex hull of $\mathcal{X}_{r}$, denoted by $conv(\mathcal{X}_{r})$, is a polyhedron whose set of extreme points (resp. extreme rays) is denoted by $\Omega_{r}$ (resp. $\Gamma_{r}$).
Any element of $conv(\mathcal{X}_{r})$ can thus be written as a convex combination of extreme points $\{ \omega \}_{\omega \in \Omega_{r}}$, plus a non-negative combination of extreme rays $\{ \rho \}_{\rho \in \Gamma_{r}}$ i.e.,}
\begin{align}
conv(\mathcal{X}_{r}) =
\left\{
\sum_{\omega \in \Omega_{r}} \lambda_{\omega} \omega
\revision{blue}{+ \sum_{\rho \in \Gamma_{r}} \lambda_{\rho} \rho}
\ \middle| \
\lambda \geq 0,
\sum_{\omega}\lambda_{\omega} = 1
\right\}.
\end{align}
The Dantzig-Wolfe decomposition principle \cite{Dantzig1960} then consists in \revision{blue}{substituting $x_{r}$ with such a combination of extreme points and extreme rays.}
This change of variable yields the \revision{blue}{so-called} \emph{Master Problem}
\begin{align}
\label{eq:MP_objective}
(MP) \ \ \ \min_{x, \lambda} \ \ \
& \revision{blue}{c_{0}^{T}x_{0} +} \sum_{r=1}^{R} \sum_{\omega \in \Omega_{r}} c_{r, \omega} \lambda_{r, \omega}
+ \sum_{r=1}^{R} \sum_{\rho \in \Gamma_{r}} c_{r, \rho} \lambda_{r, \rho} \\
\label{eq:MP_convexity}
s.t. \ \ \
& \sum_{\omega \in \Omega_{r}} \lambda_{r, \omega} =1, \ \ \ \revision{blue}{r=1, ..., R}\\
\label{eq:MP_linking}
& \revision{blue}{A_{0}x_{0}} + \sum_{r=1}^{R} \sum_{\omega \in \Omega_{r}} a_{r, \omega} \lambda_{r, \omega} + \sum_{r=1}^{R} \sum_{\rho \in \Gamma_{r}} a_{r, \rho} \lambda_{r, \rho} = b_{0},\\
\label{eq:MP_positive}
& \revision{blue}{x_{0}}, \lambda \geq 0,\\
\label{eq:MP:integer}
& \revision{blue}{\sum_{\omega \in \Omega_{r}} \lambda_{r, \omega} \omega
+ \sum_{\rho \in \Gamma_{r}} \lambda_{r, \rho} \rho \in \mathcal{X}_{r}, \ \ \ r=1, ..., R}
\end{align}
where $c_{r, \omega} = c_{r}^{T} \omega$, \revision{blue}{$c_{r, \rho} = c_{r}^{T} \rho$}, and $a_{r, \omega} = A_{r} \omega$, \revision{blue}{$a_{r, \rho} = A_{r} \rho$}.
\revision{blue}{Constraints \eqref{eq:MP_convexity} and \eqref{eq:MP_linking} are referred to as \emph{convexity} and \emph{linking} constraints, respectively.
The linear relaxation of $(MP)$ is given by \eqref{eq:MP_objective}-\eqref{eq:MP_positive}; its objective value is greater or equal to that of the linear relaxation of $(P)$ \cite{Desaulniers2006column}.
Note that if $(P)$ is a linear program, i.e., all variables are continuous, then constraints \eqref{eq:MP:integer} are redundant, and \eqref{eq:MP_objective}-\eqref{eq:MP_positive} is equivalent to $(P)$.
In the mixed-integer case, problem \eqref{eq:MP_objective}-\eqref{eq:MP_positive} is the root node in a branch-and-price tree.
In this work, we focus on solving this linear relaxation.
Thus, in what follows, we make a slight abuse of notation and use the term ``Master Problem" to refer to \eqref{eq:MP_objective}-\eqref{eq:MP_positive} instead.
}
\subsection{Column generation}
\label{sec:colgen:subsec:colgen}
The Master Problem has exponentially many variables.
Therefore, it is typically solved by column generation, wherein only a small subset of the variables are considered.
Additional variables are generated iteratively by solving an auxiliary sub-problem.
Let $\bar{\Omega}_{r}$ \revision{blue}{(resp. $\bar{\Gamma}_{r}$)} be a small subset of $\Omega_{r}$ \revision{blue}{(resp. of $\Gamma_{r}$)}, and define the \emph{Restricted Master Problem} (RMP)
\begin{align}
\label{eq:RMP_objective}
(RMP) \ \ \ \min_{x, \lambda} \ \ \
& \revision{blue}{c_{0}^{T}x_{0} +}
\sum_{r=1}^{R} \sum_{\omega \in \bar{\Omega}_{r}} c_{r, \omega} \lambda_{r, \omega}
\revision{blue}{+ \sum_{r=1}^{R} \sum_{\rho \in \bar{\Gamma}_{r}} c_{r, \rho} \lambda_{r, \rho}} \\
\label{eq:RMP_convexity}
s.t. \ \ \
& \sum_{\omega \in \bar{\Omega}_{r}} \lambda_{r, \omega} =1, \ \ \ \revision{blue}{r=1, ..., R}\\
\label{eq:RMP_linking}
& \sum_{r=1}^{R} \sum_{\omega \in \bar{\Omega}_{r}} a_{r, \omega} \lambda_{r, \omega}
\revision{blue}{+ \sum_{r=1}^{R} \sum_{\rho \in \bar{\Gamma}_{r}} a_{r, \rho} \lambda_{r, \rho}} = b_{0},\\
\label{eq:RMP_positive}
& \revision{blue}{x_{0}}, \lambda \geq 0.
\end{align}
In all that follows, we assume that $(RMP)$ is feasible and bounded.
Note that feasibility can be obtained by adding artificial slacks and surplus variables with sufficiently large cost, effectively implementing an $l_{1}$ penalty.
If the RMP is unbounded, then so is the MP.
\revision{blue}{Let $\sigma \in \mathbb{R}^{R}$ and $\pi \in \mathbb{R}^{m_{0}}$ denote the vectors of dual variables associated to convexity constraints \eqref{eq:RMP_convexity} and linking constraints \eqref{eq:RMP_linking}, respectively.
Here, we assume that $(\sigma, \pi)$ is dual-optimal for $(RMP)$; the use of interior, sub-optimal dual solutions is explored in \cite{Gondzio1996_PDCGM}.
Then, for given $r$, $\omega \in \Omega_{r}$ and $\rho \in \Gamma_{r}$, the reduced cost of variable $\lambda_{r, \omega}$ is
\[
\bar{c}_{r, \omega} = c_{r, \omega} - \pi^{T} a_{r, \omega} - \sigma_{r} = (c_{r}^{T} - \pi^{T} A_{r}) \omega - \sigma_{r},
\]
while the reduced cost of variable $\lambda_{r, \rho}$ is
\[
\bar{c}_{r, \rho} = c_{r, \rho} - \pi^{T} a_{r, \rho} = (c_{r}^{T} - \pi^{T} A_{r}) \rho.
\]
If $\bar{c}_{r, \omega} \geq 0$ for all $r$, $\omega \in \Omega_{r}$ and $\bar{c}_{r, \rho} \geq 0$ for all $r$, $\rho \in \Gamma_{r}$, then the current solution is optimal for the MP.
Otherwise, a variable with negative reduced cost is added to the RMP.
Finding such a variable, or proving that none exists, is called the \emph{pricing step}.
Explicitly iterating through the exponentially large sets $\Omega_{r}$ and $\Gamma_{r}$ is prohibitively expensive.
Nevertheless, the pricing step can be written as the following MILP:
\begin{align}
(SP_{r}) \ \ \ \min_{x_{r}} \ \ \ & (c_{r}^{T} - \pi^{T}A_{r})x_{r} - \sigma_{r}\\
s.t. \ \ \ & x_{r} \in \mathcal{X}_{r},
\end{align}
which we refer to as the $r^{th}$ \emph{sub-problem}.
If $SP_{r}$ is infeasible, then $\mathcal{X}_{r}$ is empty, and the original problem $P$ is infeasible. This case is ruled out in all that follows.
Then, since the objective of $SP_{r}$ is linear, any optimal solution is either an extreme \revision{blue}{point} $\omega \in \Omega_{r}$ (bounded case), or an extreme ray $\rho \in \Gamma_{r}$ (unbounded case).
The corresponding variable $\lambda_{r, \omega}$ or $\lambda_{r, \rho}$ is identified by retrieving an optimal \revision{blue}{point} or unbounded ray.
Finally, note that all $R$ sub-problems $SP_{1}, \dots, SP_{R}$ can be solved independently from one another.
Optimality in the Master Problem is attained when no variable with negative reduced cost can be identified from all $R$ sub-problems.
We now describe a basic column-generation procedure, which is formally stated in Algorithm \ref{alg:colgen}.
The algorithm starts with an initial RMP that contains a small subset of columns, some of which may be artificial to ensure feasibility.
At the beginning of each iteration, the RMP is solved to optimality, and a dual solution $(\pi, \sigma)$ is obtained which is used to perform the pricing step.
Each sub-problem is solved to identify a variable with most negative reduced cost.
If a variable with negative reduced cost is found, it is added to the RMP; if not, the column-generation procedure stops.
}
\begin{algorithm}[H]
\small
\begin{algorithmic}[1]
\REQUIRE Initial RMP
\WHILE{stopping criterion not met}
\STATE Solve RMP and obtain optimal dual variables $(\pi, \sigma)$
\STATE \COMMENT{\emph{Pricing step}}
\FORALL{$r \in \mathcal{R}$}
\STATE Solve $SP_{r}$ with the query point $(\pi, \sigma_{r})$; obtain $\omega^{*}$ or $\rho^{*}$
\IF{$\bar{c}_{r,\omega^{*}} < 0$ or $\bar{c}_{r,\rho^{*}} < 0$}
\STATE Add corresponding column to the RMP
\ENDIF
\ENDFOR
\STATE \COMMENT{\emph{Stopping criterion}}
\IF{no column added to RMP}
\STATE STOP
\ENDIF
\ENDWHILE
\end{algorithmic}
\caption{Column-generation procedure}
\label{alg:colgen}
\end{algorithm}
For large instances with numerous subproblems, full pricing, wherein all subproblems are solved at each iteration, is often not the most efficient approach.
Therefore, we implemented a partial pricing strategy, in which subproblems are solved in a random order until either all subproblems have been solved, or a user-specified number of columns with negative reduced cost have been generated.
\revision{blue}{
\section{Detailed results on structured LP instances}
\label{app:res}
\revision{blue}{
\begin{longtable}{lrrrrrrrrrrrr}
\caption{Structured instances: performance comparison of IPM solvers \label{tab:res:rmp:full}}\\
\toprule
& & & \multicolumn{2}{c}{CPLEX} & \multicolumn{2}{c}{Gurobi} & \multicolumn{2}{c}{Mosek} & \multicolumn{2}{c}{Tulip} & \multicolumn{2}{c}{Tulip*}\\
\cmidrule(rl){4-5} \cmidrule(rl){6-7} \cmidrule(rl){8-9} \cmidrule(rl){10-11} \cmidrule(rl){12-13}
Instance & $R$ & CG & T(s) & Iter & T(s) & Iter & T(s) & Iter & T(s) & Iter & T(s) & Iter\\
\midrule
\endfirsthead
\caption{(continued)}\\
\toprule
& & & \multicolumn{2}{c}{CPLEX} & \multicolumn{2}{c}{Gurobi} & \multicolumn{2}{c}{Mosek} & \multicolumn{2}{c}{Tulip} & \multicolumn{2}{c}{Tulip*}\\
\cmidrule(rl){4-5} \cmidrule(rl){6-7} \cmidrule(rl){8-9} \cmidrule(rl){10-11} \cmidrule(rl){12-13}
Instance & $R$ & CG & T(s) & Iter & T(s) & Iter & T(s) & Iter & T(s) & Iter & T(s) & Iter\\
\midrule
\endhead
\midrule
\endfoot
\bottomrule
\endlastfoot
DER-24 & 1024 & 10 & \textbf{ 0.0} & 19 & 0.0 & 15 & 0.1 & 19 & 0.4 & 19 & 0.3 & 19\\
DER-24 & 1024 & 20 & 0.1 & 27 & \textbf{ 0.1} & 23 & 0.1 & 21 & 0.5 & 23 & 0.3 & 23\\
DER-24 & 1024 & 30 & 0.1 & 28 & \textbf{ 0.1} & 22 & 0.1 & 24 & 0.8 & 26 & 0.3 & 26\\
DER-24 & 1024 & 40 & 0.2 & 44 & \textbf{ 0.2} & 27 & 0.2 & 28 & 1.2 & 37 & 0.4 & 39\\
DER-24 & 1024 & 43 & 0.2 & 33 & 0.2 & 27 & \textbf{ 0.2} & 21 & 1.1 & 33 & 0.5 & 33\\
DER-24 & 2048 & 10 & 0.2 & 31 & \textbf{ 0.1} & 21 & 0.1 & 20 & 0.8 & 23 & 0.3 & 23\\
DER-24 & 2048 & 20 & 0.3 & 30 & \textbf{ 0.1} & 19 & 0.2 & 18 & 1.0 & 22 & 0.3 & 22\\
DER-24 & 2048 & 30 & \textbf{ 0.3} & 29 & 0.3 & 28 & 0.3 & 20 & 1.4 & 30 & 0.5 & 30\\
DER-24 & 2048 & 40 & 0.4 & 48 & \textbf{ 0.4} & 36 & 0.4 & 27 & 2.5 & 47 & 0.6 & 47\\
DER-24 & 4096 & 10 & 0.4 & 35 & \textbf{ 0.2} & 20 & 0.3 & 19 & 1.3 & 28 & 0.5 & 28\\
DER-24 & 4096 & 20 & 0.8 & 39 & 0.4 & 23 & \textbf{ 0.4} & 22 & 2.1 & 29 & 0.5 & 29\\
DER-24 & 4096 & 30 & 1.3 & 65 & 1.0 & 42 & \textbf{ 0.7} & 29 & 5.4 & 58 & 0.9 & 56\\
DER-24 & 4096 & 40 & 1.1 & 38 & 1.1 & 32 & \textbf{ 0.9} & 26 & 5.0 & 38 & 0.9 & 38\\
DER-24 & 4096 & 41 & 1.0 & 40 & 1.0 & 32 & \textbf{ 0.8} & 26 & 4.7 & 38 & 1.0 & 38\\
DER-24 & 8192 & 10 & 0.9 & 32 & \textbf{ 0.5} & 18 & 0.6 & 21 & 2.6 & 25 & 0.7 & 25\\
DER-24 & 8192 & 20 & 2.0 & 39 & 1.1 & 26 & \textbf{ 1.1} & 21 & 5.9 & 34 & 1.1 & 34\\
DER-24 & 8192 & 30 & 2.9 & 62 & \textbf{ 1.8} & 36 & 2.1 & 40 & 12.5 & 55 & 1.9 & 55\\
DER-24 & 8192 & 40 & 4.3 & 79 & 2.7 & 46 & \textbf{ 2.4} & 38 & 19.0 & 67 & 2.6 & 68\\
DER-24 & 16384 & 10 & 2.5 & 47 & 1.3 & 26 & 1.7 & 26 & 9.0 & 39 & \textbf{ 1.2} & 36\\
DER-24 & 16384 & 20 & 4.1 & 42 & 2.1 & 29 & 2.5 & 22 & 13.8 & 37 & \textbf{ 2.0} & 37\\
DER-24 & 16384 & 30 & 5.4 & 55 & 3.4 & 36 & 3.6 & 26 & 23.1 & 48 & \textbf{ 3.0} & 48\\
DER-24 & 16384 & 40 & 12.4 & 110 & 10.2 & 88 & \textbf{ 6.0} & 51 & 57.6 & 100 & 6.7 & 100\\
DER-24 & 16384 & 42 & 10.8 & 93 & 5.3 & 48 & \textbf{ 5.3} & 42 & 49.8 & 86 & 5.3 & 83\\
DER-24 & 32768 & 10 & 4.6 & 39 & 3.3 & 34 & 3.5 & 23 & 17.9 & 36 & \textbf{ 2.2} & 34\\
DER-24 & 32768 & 20 & 11.0 & 53 & 8.8 & 52 & 8.0 & 39 & 47.4 & 66 & \textbf{ 5.5} & 65\\
DER-24 & 32768 & 30 & 14.5 & 68 & 12.3 & 56 & \textbf{ 8.2} & 31 & 96.1 & 100 & 11.2 & 100\\
DER-24 & 32768 & 40 & 33.9 & 148 & 19.4 & 85 & 12.3 & 43 & 103.6 & 91 & \textbf{ 11.4} & 86\\
DER-48 & 1024 & 10 & 0.1 & 24 & \textbf{ 0.1} & 13 & 0.1 & 21 & 0.8 & 24 & 0.3 & 24\\
DER-48 & 1024 & 20 & 0.2 & 26 & \textbf{ 0.2} & 20 & 0.2 & 22 & 1.1 & 26 & 0.3 & 26\\
DER-48 & 1024 & 30 & 0.3 & 31 & \textbf{ 0.2} & 16 & 0.2 & 22 & 1.5 & 32 & 0.5 & 32\\
DER-48 & 1024 & 40 & 0.4 & 32 & 0.4 & 21 & \textbf{ 0.3} & 22 & 1.6 & 30 & 0.4 & 30\\
DER-48 & 1024 & 49 & 0.5 & 37 & 0.4 & 19 & \textbf{ 0.3} & 21 & 1.8 & 28 & 0.4 & 28\\
DER-48 & 2048 & 10 & 0.3 & 26 & 0.3 & 19 & \textbf{ 0.3} & 20 & 1.3 & 24 & 0.4 & 25\\
DER-48 & 2048 & 20 & 0.7 & 37 & 0.5 & 21 & 0.5 & 22 & 2.2 & 31 & \textbf{ 0.4} & 31\\
DER-48 & 2048 & 30 & 0.8 & 37 & 0.6 & 19 & 0.5 & 21 & 2.6 & 27 & \textbf{ 0.5} & 27\\
DER-48 & 2048 & 40 & 1.2 & 38 & 0.9 & 24 & 0.7 & 21 & 4.1 & 33 & \textbf{ 0.7} & 33\\
DER-48 & 2048 & 49 & 1.6 & 40 & 1.0 & 21 & \textbf{ 0.8} & 25 & 5.7 & 37 & 0.9 & 37\\
DER-48 & 4096 & 10 & 0.8 & 34 & 0.6 & 19 & 0.6 & 21 & 3.2 & 28 & \textbf{ 0.4} & 28\\
DER-48 & 4096 & 20 & 1.5 & 41 & 1.1 & 24 & 1.0 & 24 & 5.8 & 34 & \textbf{ 0.8} & 34\\
DER-48 & 4096 & 30 & 1.7 & 38 & 1.4 & 23 & 1.4 & 23 & 8.2 & 35 & \textbf{ 1.0} & 35\\
DER-48 & 4096 & 40 & 3.0 & 40 & 2.2 & 30 & 1.9 & 25 & 9.9 & 33 & \textbf{ 1.4} & 33\\
DER-48 & 4096 & 49 & 4.1 & 44 & 2.0 & 25 & 2.0 & 27 & 14.1 & 39 & \textbf{ 1.7} & 39\\
DER-48 & 8192 & 10 & 2.1 & 39 & 1.5 & 26 & 1.5 & 25 & 8.1 & 32 & \textbf{ 1.1} & 32\\
DER-48 & 8192 & 20 & 3.9 & 44 & 1.9 & 18 & 2.0 & 23 & 12.7 & 31 & \textbf{ 1.5} & 31\\
DER-48 & 8192 & 30 & 7.2 & 55 & 2.9 & 26 & 2.9 & 26 & 20.4 & 39 & \textbf{ 2.2} & 39\\
DER-48 & 8192 & 40 & 7.3 & 45 & 4.0 & 24 & 3.8 & 22 & 25.4 & 38 & \textbf{ 2.4} & 38\\
DER-48 & 8192 & 50 & 9.7 & 51 & 4.2 & 20 & 4.5 & 24 & 37.0 & 46 & \textbf{ 3.4} & 47\\
DER-48 & 16384 & 10 & 5.0 & 49 & 2.9 & 25 & 3.8 & 35 & 22.4 & 41 & \textbf{ 2.3} & 41\\
DER-48 & 16384 & 20 & 7.8 & 45 & 5.5 & 28 & 5.2 & 26 & 31.5 & 37 & \textbf{ 2.8} & 37\\
DER-48 & 16384 & 30 & 14.6 & 59 & 7.3 & 29 & 6.3 & 25 & 53.6 & 50 & \textbf{ 5.0} & 48\\
DER-48 & 16384 & 40 & 16.3 & 53 & 9.3 & 27 & 8.5 & 27 & 64.5 & 50 & \textbf{ 5.2} & 45\\
DER-48 & 16384 & 48 & 22.3 & 64 & 9.9 & 29 & 9.3 & 28 & 89.7 & 60 & \textbf{ 7.4} & 57\\
DER-48 & 32768 & 10 & 10.8 & 49 & 7.4 & 27 & 8.1 & 29 & 46.9 & 41 & \textbf{ 4.1} & 41\\
DER-48 & 32768 & 20 & 16.8 & 47 & 8.6 & 24 & 11.2 & 32 & 69.5 & 42 & \textbf{ 5.7} & 41\\
DER-48 & 32768 & 30 & 30.5 & 61 & 14.9 & 26 & 13.4 & 26 & 107.5 & 51 & \textbf{ 10.0} & 51\\
DER-48 & 32768 & 40 & 36.2 & 57 & 21.1 & 31 & 16.9 & 28 & 133.8 & 51 & \textbf{ 10.4} & 46\\
DER-48 & 32768 & 47 & 57.1 & 85 & 21.6 & 32 & 21.1 & 33 & 178.8 & 59 & \textbf{ 14.2} & 54\\
DER-96 & 1024 & 10 & 0.5 & 27 & 0.3 & 18 & \textbf{ 0.3} & 20 & 1.4 & 23 & 0.5 & 23\\
DER-96 & 1024 & 20 & 0.8 & 29 & 0.5 & 18 & 0.5 & 25 & 2.4 & 27 & \textbf{ 0.4} & 27\\
DER-96 & 1024 & 30 & 1.2 & 32 & 0.6 & 17 & 0.6 & 23 & 3.5 & 30 & \textbf{ 0.5} & 30\\
DER-96 & 1024 & 40 & 1.6 & 34 & 1.0 & 19 & 0.7 & 22 & 4.1 & 31 & \textbf{ 0.6} & 32\\
DER-96 & 1024 & 50 & 2.2 & 34 & 1.2 & 19 & 0.8 & 22 & 5.8 & 30 & \textbf{ 0.7} & 30\\
DER-96 & 1024 & 60 & 2.6 & 34 & 1.4 & 19 & 1.0 & 23 & 6.9 & 31 & \textbf{ 0.8} & 31\\
DER-96 & 1024 & 64 & 3.3 & 38 & 1.2 & 19 & 0.9 & 22 & 6.6 & 31 & \textbf{ 0.9} & 31\\
DER-96 & 2048 & 10 & 1.2 & 37 & 0.8 & 21 & 0.7 & 29 & 4.0 & 29 & \textbf{ 0.5} & 29\\
DER-96 & 2048 & 20 & 2.2 & 33 & 1.1 & 19 & 0.9 & 23 & 5.9 & 26 & \textbf{ 0.8} & 26\\
DER-96 & 2048 & 30 & 2.5 & 44 & 1.6 & 22 & 1.3 & 25 & 9.9 & 35 & \textbf{ 1.1} & 35\\
DER-96 & 2048 & 40 & 4.8 & 38 & 2.3 & 26 & 1.5 & 23 & 12.4 & 33 & \textbf{ 1.2} & 33\\
DER-96 & 2048 & 50 & 6.7 & 41 & 2.4 & 23 & 1.8 & 25 & 15.2 & 36 & \textbf{ 1.6} & 36\\
DER-96 & 2048 & 56 & 7.9 & 45 & 2.4 & 20 & 1.7 & 21 & 18.2 & 38 & \textbf{ 1.7} & 37\\
DER-96 & 4096 & 10 & 3.0 & 41 & 1.7 & 24 & 1.5 & 28 & 9.5 & 32 & \textbf{ 1.0} & 32\\
DER-96 & 4096 & 20 & 4.4 & 51 & 2.9 & 27 & 1.9 & 26 & 18.2 & 39 & \textbf{ 1.6} & 40\\
DER-96 & 4096 & 30 & 6.0 & 53 & 3.6 & 24 & 2.7 & 28 & 21.1 & 35 & \textbf{ 2.0} & 36\\
DER-96 & 4096 & 40 & 13.6 & 53 & 4.4 & 24 & 3.3 & 27 & 31.2 & 39 & \textbf{ 2.4} & 39\\
DER-96 & 4096 & 50 & 14.2 & 45 & 5.7 & 25 & 4.2 & 25 & 36.1 & 37 & \textbf{ 2.7} & 37\\
DER-96 & 4096 & 53 & 16.3 & 51 & 5.5 & 24 & 5.2 & 28 & 42.6 & 40 & \textbf{ 3.2} & 40\\
DER-96 & 8192 & 10 & 5.6 & 53 & 4.3 & 27 & 3.0 & 28 & 23.0 & 35 & \textbf{ 2.6} & 35\\
DER-96 & 8192 & 20 & 11.1 & 62 & 7.2 & 33 & 4.9 & 32 & 43.4 & 40 & \textbf{ 3.4} & 40\\
DER-96 & 8192 & 30 & 13.5 & 59 & 8.8 & 31 & 5.9 & 26 & 54.4 & 40 & \textbf{ 3.8} & 40\\
DER-96 & 8192 & 40 & 32.7 & 63 & 12.6 & 35 & 7.4 & 25 & 77.6 & 45 & \textbf{ 5.0} & 45\\
DER-96 & 8192 & 50 & 39.3 & 65 & 11.5 & 25 & 10.1 & 33 & 89.1 & 44 & \textbf{ 6.6} & 45\\
DER-96 & 8192 & 60 & 51.7 & 75 & 15.5 & 29 & 11.1 & 31 & 137.6 & 60 & \textbf{ 8.8} & 57\\
DER-96 & 16384 & 10 & 12.6 & 61 & 11.0 & 37 & 6.9 & 34 & 55.0 & 41 & \textbf{ 4.4} & 41\\
DER-96 & 16384 & 20 & 21.2 & 62 & 14.5 & 31 & 10.9 & 31 & 92.3 & 42 & \textbf{ 6.1} & 42\\
DER-96 & 16384 & 30 & 30.1 & 68 & 18.5 & 32 & 14.2 & 34 & 147.9 & 50 & \textbf{ 10.1} & 52\\
DER-96 & 16384 & 40 & 70.0 & 69 & 21.8 & 28 & 16.1 & 30 & 196.5 & 54 & \textbf{ 11.5} & 52\\
DER-96 & 16384 & 50 & 85.5 & 73 & 27.7 & 29 & 18.6 & 32 & 231.8 & 57 & \textbf{ 14.5} & 54\\
DER-96 & 16384 & 57 & 107.8 & 86 & 31.9 & 31 & 24.4 & 39 & 260.0 & 55 & \textbf{ 17.3} & 59\\
DER-96 & 32768 & 10 & 28.1 & 70 & 25.4 & 45 & 18.0 & 39 & 152.5 & 52 & \textbf{ 11.8} & 49\\
DER-96 & 32768 & 20 & 39.9 & 57 & 33.8 & 36 & 18.9 & 28 & 180.4 & 37 & \textbf{ 10.7} & 39\\
DER-96 & 32768 & 30 & 61.9 & 72 & 46.6 & 34 & 27.9 & 31 & 337.6 & 58 & \textbf{ 21.3} & 58\\
DER-96 & 32768 & 40 & 174.6 & 88 & 70.8 & 42 & 40.4 & 39 & 483.2 & 69 & \textbf{ 30.0} & 66\\
DER-96 & 32768 & 50 & 233.6 & 102 & 58.8 & 32 & 46.8 & 36 & 609.0 & 74 & \textbf{ 43.1} & 72\\
DER-96 & 32768 & 54 & 291.9 & 119 & 102.9 & 54 & \textbf{ 55.5} & 47 & 753.7 & 89 & 65.4 & 86\\
4node & 1024 & 10 & 0.1 & 28 & 0.3 & 53 & \textbf{ 0.1} & 28 & 0.9 & 31 & 0.5 & 30\\
4node & 1024 & 20 & 0.2 & 27 & 0.2 & 22 & \textbf{ 0.2} & 26 & 1.0 & 27 & 0.4 & 27\\
4node & 1024 & 24 & 15.7 & 21 & 0.4 & 43 & \textbf{ 0.2} & 25 & 1.3 & 30 & 0.5 & 32\\
4node & 2048 & 10 & 19.6 & 24 & 0.7 & 51 & \textbf{ 0.4} & 32 & 1.9 & 44 & 0.9 & 37\\
4node & 2048 & 20 & 0.7 & 38 & 0.8 & 42 & \textbf{ 0.5} & 37 & 1.9 & 33 & 0.9 & 32\\
4node & 2048 & 24 & 0.7 & 38 & 0.6 & 27 & \textbf{ 0.6} & 25 & 2.1 & 36 & 0.9 & 36\\
4node & 4096 & 10 & 0.9 & 36 & 2.1 & 63 & \textbf{ 0.7} & 28 & 3.6 & 40 & 1.3 & 40\\
4node & 4096 & 20 & 0.9 & 23 & 1.0 & 27 & \textbf{ 0.6} & 19 & 3.7 & 26 & 1.3 & 26\\
4node & 4096 & 22 & 1.1 & 27 & 1.7 & 37 & \textbf{ 0.7} & 17 & 4.5 & 28 & 1.2 & 28\\
4node & 8192 & 10 & \textbf{ 1.8} & 33 & 3.4 & 62 & 1.8 & 33 & 10.2 & 44 & 2.1 & 43\\
4node & 8192 & 20 & 3.2 & 42 & 4.3 & 51 & \textbf{ 2.3} & 36 & 14.2 & 43 & 3.2 & 44\\
4node & 8192 & 23 & 2.7 & 30 & 2.3 & 29 & \textbf{ 1.8} & 24 & 12.3 & 35 & 2.7 & 33\\
4node & 16384 & 10 & 6.8 & 61 & 11.4 & 85 & \textbf{ 5.7} & 53 & 34.4 & 62 & 5.7 & 67\\
4node & 16384 & 20 & 7.0 & 42 & 20.8 & 108 & 6.4 & 44 & 31.6 & 45 & \textbf{ 5.5} & 44\\
4node & 16384 & 23 & 5.8 & 29 & 10.7 & 53 & \textbf{ 4.0} & 22 & 26.4 & 33 & 4.6 & 33\\
4node & 32768 & 10 & 9.8 & 42 & 11.8 & 42 & 9.1 & 40 & 56.4 & 52 & \textbf{ 8.3} & 53\\
4node & 32768 & 20 & 17.0 & 58 & 35.0 & 95 & \textbf{ 13.5} & 45 & 81.9 & 60 & 15.7 & 65\\
4node & 32768 & 21 & 17.0 & 57 & 18.7 & 55 & 14.6 & 41 & 74.8 & 56 & \textbf{ 14.2} & 59\\
4node-base & 1024 & 10 & \textbf{ 0.1} & 24 & 0.2 & 18 & 0.3 & 21 & 0.8 & 24 & 0.3 & 24\\
4node-base & 1024 & 20 & 0.3 & 25 & 0.3 & 23 & \textbf{ 0.2} & 28 & 1.3 & 28 & 0.5 & 28\\
4node-base & 1024 & 26 & 17.0 & 17 & 1.0 & 60 & \textbf{ 0.3} & 27 & 1.4 & 28 & 0.6 & 27\\
4node-base & 2048 & 10 & 15.0 & 15 & 0.8 & 36 & \textbf{ 0.3} & 23 & 1.4 & 29 & 0.5 & 30\\
4node-base & 2048 & 20 & 0.8 & 37 & 1.0 & 31 & \textbf{ 0.7} & 35 & 3.1 & 36 & 0.8 & 36\\
4node-base & 2048 & 27 & 1.0 & 35 & 2.6 & 72 & \textbf{ 0.8} & 33 & 3.7 & 32 & 0.9 & 33\\
4node-base & 4096 & 10 & 0.9 & 35 & 3.3 & 92 & \textbf{ 0.7} & 24 & 4.4 & 38 & 0.9 & 42\\
4node-base & 4096 & 20 & 1.8 & 38 & 4.4 & 76 & \textbf{ 1.1} & 30 & 8.8 & 42 & 1.6 & 42\\
4node-base & 4096 & 25 & 2.3 & 38 & 5.3 & 72 & \textbf{ 1.5} & 34 & 9.1 & 34 & 1.8 & 34\\
4node-base & 8192 & 10 & 1.8 & 33 & 2.0 & 21 & 1.5 & 26 & 7.6 & 29 & \textbf{ 1.5} & 29\\
4node-base & 8192 & 20 & 4.4 & 39 & 16.3 & 133 & 3.9 & 40 & 18.1 & 39 & \textbf{ 2.7} & 39\\
4node-base & 8192 & 22 & 3.8 & 29 & 3.7 & 27 & \textbf{ 2.6} & 25 & 19.7 & 36 & 2.8 & 36\\
4node-base & 16384 & 10 & 4.4 & 38 & 10.1 & 57 & 3.6 & 30 & 19.3 & 39 & \textbf{ 3.4} & 39\\
4node-base & 16384 & 20 & 10.6 & 49 & 17.1 & 51 & 6.9 & 36 & 46.2 & 46 & \textbf{ 5.6} & 45\\
4node-base & 16384 & 25 & 13.5 & 53 & 26.2 & 74 & 8.0 & 37 & 63.4 & 53 & \textbf{ 7.0} & 47\\
4node-base & 32768 & 10 & 10.9 & 45 & 76.1 & 214 & 10.3 & 40 & 44.3 & 36 & \textbf{ 6.0} & 36\\
4node-base & 32768 & 20 & 27.8 & 68 & 80.1 & 125 & 25.9 & 72 & 119.3 & 59 & \textbf{ 15.6} & 63\\
4node-base & 32768 & 23 & 20.3 & 37 & 29.0 & 43 & 14.9 & 30 & 107.7 & 48 & \textbf{ 12.9} & 50\\
assets & 37500 & 6 & 1.6 & 21 & \textbf{ 0.6} & 12 & 1.1 & 20 & 2.0 & 13 & 1.0 & 13\\
env & 1200 & 6 & 0.0 & 21 & \textbf{ 0.0} & 12 & 0.1 & 16 & 0.3 & 16 & 0.3 & 16\\
env & 1875 & 6 & 0.1 & 22 & \textbf{ 0.0} & 12 & 0.1 & 13 & 0.4 & 16 & 0.4 & 16\\
env & 3780 & 6 & 0.1 & 25 & \textbf{ 0.1} & 12 & 0.1 & 14 & 0.7 & 17 & 0.5 & 17\\
env & 5292 & 6 & 0.2 & 27 & \textbf{ 0.1} & 13 & 0.1 & 13 & 0.7 & 17 & 0.7 & 17\\
env & 8232 & 6 & 0.3 & 26 & \textbf{ 0.2} & 13 & 0.3 & 14 & 1.1 & 18 & 1.2 & 18\\
env & 32928 & 6 & 1.7 & 26 & \textbf{ 0.9} & 13 & 1.3 & 17 & 5.1 & 21 & 4.4 & 21\\
env-diss & 1200 & 10 & 0.0 & 17 & \textbf{ 0.0} & 19 & 0.0 & 16 & 0.4 & 22 & 0.4 & 22\\
env-diss & 1200 & 13 & 0.1 & 15 & \textbf{ 0.0} & 15 & 0.1 & 17 & 0.4 & 23 & 0.4 & 23\\
env-diss & 1875 & 10 & 0.1 & 27 & \textbf{ 0.1} & 17 & 0.1 & 20 & 0.6 & 22 & 0.5 & 22\\
env-diss & 1875 & 15 & 0.1 & 17 & \textbf{ 0.1} & 18 & 0.1 & 18 & 0.6 & 22 & 0.5 & 22\\
env-diss & 3780 & 10 & 0.2 & 23 & \textbf{ 0.1} & 16 & 0.1 & 17 & 0.8 & 21 & 0.7 & 21\\
env-diss & 3780 & 15 & 0.2 & 20 & \textbf{ 0.1} & 18 & 0.2 & 18 & 1.0 & 22 & 1.0 & 22\\
env-diss & 5292 & 10 & 0.4 & 31 & \textbf{ 0.2} & 25 & 0.2 & 21 & 1.0 & 25 & 1.3 & 26\\
env-diss & 5292 & 15 & 0.3 & 22 & \textbf{ 0.3} & 23 & 0.3 & 22 & 1.3 & 25 & 1.5 & 25\\
env-diss & 8232 & 10 & 0.6 & 26 & 0.4 & 22 & \textbf{ 0.4} & 18 & 1.7 & 22 & 1.8 & 22\\
env-diss & 8232 & 15 & 1.0 & 31 & 0.6 & 29 & \textbf{ 0.5} & 23 & 3.2 & 35 & 2.6 & 35\\
env-diss & 32928 & 10 & 4.6 & 37 & 2.8 & 36 & \textbf{ 1.9} & 17 & 8.0 & 27 & 7.2 & 27\\
env-diss & 32928 & 14 & 4.8 & 28 & \textbf{ 2.1} & 22 & 2.5 & 19 & 10.0 & 27 & 7.7 & 27\\
phone & 32768 & 5 & 0.5 & 15 & \textbf{ 0.4} & 8 & 0.6 & 8 & 1.9 & 10 & 0.7 & 10\\
stormG2 & 1000 & 10 & 0.7 & 35 & 0.5 & 21 & \textbf{ 0.3} & 21 & 2.0 & 32 & 1.5 & 31\\
stormG2 & 1000 & 20 & 1.4 & 33 & 0.8 & 18 & \textbf{ 0.5} & 19 & 4.5 & 29 & 1.7 & 29\\
stormG2 & 1000 & 21 & 1.6 & 37 & 0.8 & 18 & \textbf{ 0.5} & 22 & 4.0 & 29 & 1.7 & 28\\
\end{longtable}
}
}
\end{document} |
\begin{document}
\title{Constructive nonlocal games with very small classical values}
\author{
M. Rosicka${}^{1,2}$,
S. Szarek${}^{3,4}$, A. Rutkowski${}^1$, P. Gnaci\'nski${}^1$, M. Horodecki${}^{1,5}$}
\affiliation{
${}^1$Institute of Theoretical Physics and Astrophysics \\ Faculty of Mathematics, Physics and Informatics, University of Gda\'{n}sk, 80-952 Gda\'{n}sk, \\ National Quantum Information Centre in Gda\'{n}sk, 81-824 Sopot, Poland}
\affiliation{
${}^2$Institute of Informatics,
University of Gda\'{n}sk,
80-952 Gda\'{n}sk
}
\affiliation{ ${}^3$Case Western Reserve University, Department of Mathematics, Applied Mathematics and Statistics, 10900 Euclid Avenue, Cleveland, Ohio 44106, USA }
\affiliation{
${}^4$ Sorbonne Universit\'e, Institut de Math\'ematiques de Jussieu-PRG, 4 place Jussieu, 75005 Paris, France}
\affiliation{
${}^5$ International Centre for Theory of Quantum Information, University of Gda\'nsk, 80-308 Gda\'nsk, Poland
}
\begin{abstract}
There are few explicit examples of bipartite nonlocal games with a large gap between classical and quantum value. One of the reasons is that estimating the classical value is usually a hard computational task.
This paper is devoted to analyzing classical values of the so-called linear games (generalization of XOR games to a larger number of outputs).
We employ nontrivial results from graph theory and combine them with number theoretic results used earlier in the context of harmonic analysis to obtain a novel tool -- {\it the girth method} -- allowing to provide explicit examples of linear games with prescribed low classical value. In particular, we provide games with
minimal possible classical value. We then speculate on the potential unbounded violation, by comparing the obtained classical values with a known upper bound for the quantum value. If this bound can be even asymptotically saturated, our games would
have the best ratio of quantum to classical value as a function of the product of the number of inputs and outputs when compared to other explicit (i.e. non-random) constructions.
\end{abstract}
\keywords{classical bound, quantum bound, graph, nonlocal correlations, contextual game, Bell inequalities}
\maketitle
\section{Introduction}
Nonlocal games can be regarded as a particular type of Bell-type inequalities and it is well known that Bell-type inequalities \cite{Scarani2009} are at the heart of quantum information science. While being initially only of philosophical interest, they now form the basis for device independent quantum cryptography \cite{Ekert2014,Scarani-DI} as well as for quantum advantage \cite{Koenig}.
By a nonlocal game we mean a scenario in which two cooperating players are asked to assign values $a$ and $b$ to certain randomly chosen variables $x\in X$ and $y\in Y$, respectively. Neither of the players knows which variable was chosen for the other one and they are not allowed to communicate with each other. For each pair $x, y$ of variables which may be chosen, we want the~values $a$ and $b$ to satisfy certain constraints. If $a$ and $b$ satisfy the constraints, the players win, otherwise they lose.
We can distinguish two types of strategies to win a nonlocal game, namely: the quantum strategy, where the players have access to a shared entangled state and can perform local measurements of observables corresponding to the questions, and the classical one, where only classical shared randomness is permitted. The maximum probability of winning a given game with a classical strategy is called the \textit{classical value} or \textit{classical winning probability} $p_{Cl}$ of the game. Similarly, the \textit{quantum value} (or \textit{quantum winning probability}, $p_Q$) of the game is the highest probability of winning the game using a quantum strategy.
If $p_{Cl}< p_Q$, we say that the game exhibits a {\em quantum violation}.
It is known that in order to get an arbitrarily large ratio between quantum and classical values of winning probability of a bipartite nonlocal game (an {\em unbounded quantum violation}), one has to grow the number of inputs as well as outputs \cite{Tsirelson1993,Acin2006}. Moreover, it is well known that calculating -- or even estimating -- classical values of nonlocal games is a very hard computational task \cite{Kempe2008}. (In general, it is more appropriate to compare {\em biases} rather than values, but in our setting the two quantities nearly coincide, see Section \ref{subsec:biases}.)
At the moment, there are very few explicit examples of games that exhibit unbounded quantum violation (for example, Khot-Vishnoi game \cite{Khot2005, Kempe2008a, Regev} or Hidden Matching game \cite{Buhrman}).
Others, such as \cite{Junge2010}, are random constructions.
Such random constructions -- even if they, as is frequently the case, lead to stronger results -- are not fully satisfactory: for actual implementation one needs explicit objects, and even if a randomly obtained game has the desired properties, certifying that fact may be computationally infeasible.
In this paper we make a step towards such new explicit examples by analyzing classical values of so called linear games \cite{Zukowski1997}. In particular, we describe methods of obtaining constructive examples of games with extremely low classical values. This paves the way towards a large gap between the classical and quantum values of a game, namely: we provide explicit and transparent methods of constructing games with low classical values with a relatively small number of outputs. Additionally, we have shown that these games have an unbounded ratio between the classical value and the upper bound on the quantum value found in \cite{RAM}.
By some measures this ratio scales better than those exhibited by any explicit (i.e., non-random) games known so far.
We also address the problem of calculating classical values of games and provide exact solutions for certain classes of games.
To achieve the above results we introduce a novel tool -- {\it the girth method} -- resulting from combining mathematical results from two distant domains of mathematics, namely (i) {\it Number theory related to harmonic analysis:} We employ construction of set of natural numbers, introduced by Rudin \cite{Rudin} as a tool for harmonic analysis, which have the sum property, i.e., for any pair of subsets with number of elements smaller than some fixed $s$ and equal, their sums are never the same;
(ii) {\it Graph theory:} We use the result saying (quantitatively) that a graph with low girth cannot have too many edges - the first and, up to a constant, optimal result was conjectured by Erd\H os in 1964 and proved in 1974 in \cite{Bondy-girth}.
In Section \ref{sec:est} we introduce the main framework used in this paper. We connect the classical value of a game to the rigidity of a matrix which represents the game. We provide ways of finding classical values in general, as well as easier methods for a few specific classes of games.
In Section \ref{sec:girth-method} we introduce graphs $H$ and $H_{opt}$ associated with games and study the classical value of games in terms of the girths of the corresponding graphs.
In Section \ref{sec:construction} we provide two different explicit methods for constructing low-dimensional non-local games with low classical values, as well as attempt to bound the number of inputs necessary to minimize the classical value of a game with a given number of inputs.
In Section \ref{sec:graphs} we combine our present approach, in which games are represented as matrices, with a framework for studying non-local games based on labeled graphs of \cite{RS} and \cite{RRGHS}.
\section{Linear games and preliminaries on their classical values}
\label{sec:est}
In this section we apply the matrix framework to study properties of nonlocal games where a particular nonlocal game is described by means of a matrix. In particular, we ask how the properties of the matrix affect the classical value of the corresponding game. We provide a method of finding the classical value of any matrix in terms of minors of size 2, as well as examples of more efficient methods for specific types of matrices.
Let us start by introducing the basic concepts and notation used in this article.
\subsection{Nonlocal games: }
Let $X,Y,A,B$ be nonempty finite sets. In the spirit of \cite{Cleve2004}, a nonlocal
game is determined by $\pi$, a probability distribution
on the Cartesian product $X\times Y$, and $\text{Pred}:X\times Y\times A\times B\rightarrow\left\{ 0,1\right\}$, the
so called predicate function.
The rules of the game are the following: the referee
will randomly choose questions $\left(x,y\right)\in X\times Y$ according
to probability distribution $\pi$ and send them to two players,
Alice and Bob. In what follows, the questions will be called {\it inputs}.
The players will answer the questions with {\it outputs} $\left(a,b\right)\in A\times B$
without communication. They win the game iff $\text{Pred}\left(a,b|x,y\right)=1$.
The classical value of a game is the maximal winning probability
while using only classical strategies. We then have
\begin{equation} \label{classical_value}
p_{Cl}=\max_{a,b}\sum_{x,y}\pi\left(x,y\right)\text{Pred}\left(a\left(x\right),b\left(y\right)|x,y\right),
\end{equation}
where the maximum is taken over all functions $a:X\rightarrow A$
and $b: Y\rightarrow B.$ (At face value, the formula \eqref{classical_value}
accounts only for {\em deterministic}
strategies, but the distinction between that setting and the one involving shared randomness is well understood and largely immaterial to our discussion.)
A quantum strategy for the players involves using a quantum state $\rho$
(living on the Hilbert space $H_{A}\otimes H_{B}$)
shared by Alice and Bob, and a quantum measurement for Alice
(respectively for Bob) for each $x\in X$ $\left(y\in Y\right)$.
For every question $\left(x,y\right),$ the probability of the output
$\left(a,b\right)$ is given by:
\begin{equation}
P\left(a,b|x,y\right)=\text{Tr}\left((E_{x}^{a}\otimes E_{y}^{b})\rho\right),
\end{equation}
where $\bigl(E_{x}^{a}\bigr)_{a\in A}$ and $\bigl(E_{y}^{b}\bigr)_{b\in B}$ are POVMs living on
$H_{A}$ and $H_{B}$ respectively.
The quantum value of the game is given by
\begin{equation}
p_{Q}=\sup\sum_{x,y}\sum_{a,b}\pi\left(x,y\right)\text{Pred}\left(a,b|x,y\right)\text{Tr}\left((E_{x}^{a}\otimes E_{y}^{b})\rho\right),
\end{equation}
where the $\sup$ is taken over all possible quantum strategies.
\begin{example}
The CHSH game (named after Clauser, Horn, Shimony and Holt) is the
nonlocal game for which we have: $X=Y=A=B=\left\{ 0,1\right\} $,
the probability distribution $\pi$ is given by
\begin{equation}
\pi\left(0,0\right)=\pi\left(0,1\right)=\pi\left(1,0\right)=\pi\left(1,1\right)=\frac{1}{4},
\end{equation}
and the predicate function is
\begin{equation}
{\rm Pred}\left(a,b|x,y\right)=\begin{cases}
1 & \text{if}\quad a+ b=x y \mod 2\\
0 & \text{if}\quad a+ b\neq x y \mod 2
\end{cases},
\end{equation}
The classical value of the CHSH game is $p_{Cl}=\frac{3}{4}$, while
its quantum value is $p_{Q}= \cos^2(\pi/8) \approx 0.85$.
\end{example}
\subsection{Linear games and their matrix representation:}
We have already provided the definition of a nonlocal game as well as its \textit{classical value} $p_{Cl}$ and its \textit{quantum value} $p_{Q}$. In this paper we focus on nonlocal games called \textit{linear games} \cite{Zukowski1997}, for which the predicate $\text{Pred}$ is given by
\begin{equation}
\label{eq:V}
\text{Pred}\left(a,b|x,y\right)=\begin{cases}
1 & \text{if}\quad b=a + k_{xy} \mod d \\
0 & \text{else}
\end{cases}.
\end{equation}
where $k_{xy}\equiv k(x,y)\in\{1,\ldots,d\}$, and $d$ is the number of outputs (equal for both parties). We shall also consider only
uniform probability distribution over inputs
\begin{equation}
\pi(x,y)=\frac{1}{n_A n_B},
\end{equation}
where $n_A=|X|$ and
$n_B=|Y|$ are the numbers of inputs for Alice and Bob respectively.
(In this paper, most of the time we will have $n_A=n_B=n$. To avoid trivialities, we will {\em always} assume that $n_A, n_B \geq 2$.)
This type of game can be represented as an $n_A\times n_B$ matrix $M$.
The elements of the matrix $M$ will be complex roots of $1$ of order $d$:
\begin{equation}
M=\left(\begin{array}{cccccc}
m_{11} & m_{12} & . & . & . & m_{1 n_B}\\
m_{21} & m_{22} & & & & m_{2n_B}\\
. & & . & & & .\\
. & & & . & & .\\
. & & & & . & .\\
m_{n_A 1} & . & . & . & . & m_{n_A n_B}
\end{array}\right),
\end{equation}
where
\begin{align}
\label{eq:def-m-omega}
m_{xy}= \omega^{k_{xy}},\quad
\text{with} \quad \omega=e^{i 2 \pi/d},
\end{align}
where $k_{xy}$'s are the same as in \eqref{eq:V}.
For instance, the CHSH game is a linear game, and it has the following matrix representation:
\begin{equation}
M_{CHSH}=\left(\begin{array}{cc}
1 & 1\\
1 & -1
\end{array}\right)
\end{equation}
We shall later use the notions of game and matrix interchangeably.
\subsection{Equivalence of games (matrices).}
Here we introduce an important definition of equivalence of games in matrix representation.
\begin{definition}
\label{def:equiv}
We consider two matrices to be {\it equivalent} if one can be obtained from the other by a sequence of the following operations:
\begin{enumerate}
\item[(i)] multiplying a row or column by a root of unity,
\item[(ii)] swapping two rows or two columns,
\item[(iii)] transposition.
\end{enumerate}
\end{definition}
The transformations (ii) and (iii) can be interpreted as relabeling the inputs or outputs of the game.
The transformation (i) is
relabeling outputs of each observable separately.
Thus the above operations do not change classical or quantum values of the game. In other words, equivalent games have the same classical (quantum) value.
\begin{remark} \label{order}
Note that given two equivalent matrices,
a transformation between them can be always achieved by first applying operations of type (i), then operations of type (ii), and then (iii), if applicable
(or any other order of (i), (ii) and (iii)).
\end{remark}
Let us notice that if we use the above operations we can bring the matrix of any game
to a ``standard form'' via the following proposition.
\begin{proposition}
\label{prop1}
Every matrix is equivalent to a matrix in which all elements in the first row and column are equal to $1$.
\end{proposition}
This can be easily shown for any matrix via a series of transformations, in which rows and columns are multiplied by appropriate factors (see Appendix).
\subsection{Classical value of linear games and contradiction number.}
Let us start with several basic definitions and a few elementary observations.
\begin{definition}
\label{contradiction}
By a contradiction for a given game and a strategy $a(x)$ and $b(y)$ we mean a pair $(x,y)$ such that $b(y) \not = a(x) + k_{xy} \mod d.$
\end{definition}
\begin{definition}
\label{contradiciotn_numb}
The contradiction number is the minimum number of contradictions over all classical strategies; it is denoted by $\beta_C(M)$.
\end{definition}
\begin{fact}
\label{classical_val}
The classical value of a game defined by an $n_A\times n_B$ matrix $M$ can be expressed as
follows
\begin{equation}
p_{Cl}(M)=1 - \frac{\beta_C(M)}{n_An_B} .
\end{equation}
\end{fact}
\begin{fact}
Games whose matrices are equivalent have the same contradiction number.
\end{fact}
A matrix with no contradictions has rank $1$ and is equivalent to one in which all elements are equal to $1$. Thus, the contradiction number $\beta_C(M)$ is equal to the rigidity ${\rm Rig}(M,1)$ of the corresponding matrix, which in turn, is defined as follows:
\begin{definition}
\label{rigidity}
For a matrix $M$, let the weight ${\rm wt}(M)$ be the number of nonzero elements within the matrix. The rigidity ${\rm Rig}(M,k)$ is defined by
\begin{center}
\begin{equation}
{\rm Rig}(M,k)=\min_{M'}\{{\rm wt}(M-M')\left| {\rm rk}(M')\leq k\right.\}
\end{equation}
\end{center}
\noindent where $M'$ ranges over all matrices of the same size as $M$.
\end{definition}
In other words, the rigidity ${\rm Rig}(M,k)$ of a matrix is the number of elements which need to be changed in order to obtain a matrix of rank at most $k$.
The concept of rigidity was introduced in \cite{Valiant77}. The problem of calculating rigidity has been studied in papers such as
\cite{Friedman93} \cite{Kashin98} and \cite{Lokam01}, largely in terms of providing lower and upper bounds for certain types of matrices. In our approach we have not used rigidity, but in our opinion it should be explored more in the context of linear games.
It follows from Proposition \ref{prop1} that the contradiction number of a $k\times l$ game cannot be greater than $(k-1)(l-1)$ so that we have
\begin{fact}
\label{fact:min-clas-value}
For any linear game with $n_A\times n_B$ outputs the classical value satisfies
\begin{eqnarray}
p_{Cl}\geq \frac{n_A+n_B-1}{n_A n_B}.
\end{eqnarray}
Equivalently, the contradiction number satisfies
\begin{eqnarray} \label{beta_max}
\beta_C \leq (n_A -1)(n_B-1).
\end{eqnarray}
\end{fact}
An interesting question is under what conditions the inequality in \eqref{beta_max} is saturated. This is studied more in-depth in Section \ref{sec:girth-method}, cf. in particular Lemma \ref{lem:max-contr-distinct}.
\subsection{Contradiction number via minors}
As mentioned before, the classical value of a game is closely tied to the contradiction number of the matrix and $\beta_C(M)={\rm Rig}(M,1)$. We will now show how this can be used to calculate the classical value of a game defined by a matrix.
The rank of a matrix is the size of its largest nonzero minor. If all minors of size $2$ are equal to $0$, there can be no nonzero minor of size $3$ or more and the rank of the matrix is $1$. Thus, we have the following
\begin{remark}
A matrix contains no contradiction iff all minors of size 2 are equal to $0.$
\end{remark}
It is also immediate to see that
\begin{remark} \label{minor2minor}
The equivalence operations of Def. \ref{def:equiv} transform nonzero minors into non-zero minors. Consequently, such operations also transform submatrices of $M$ into submatrices of $M'$ of the same rank.
\end{remark}
A minor of size $2$ of $M=\bigl(m_{ij}\bigr)$ can be expressed as
\begin{equation}
m_{ij}m_{st}-m_{it}m_{sj}.
\end{equation}
This means that the minor equals $0$ iff
\begin{equation}
\label{minors}
m_{ij}m_{st}=m_{it}m_{sj}.
\end{equation}
Since the contradiction number of a matrix is the number of elements we must change in order to obtain a matrix without nonzero minors of size $2$, it follows that the contradiction number of a matrix is the number of elements which need to be changed in order to obtain a matrix which satisfies equation (\ref{minors}) for all $i,j,s$ and $t$
or, equivalently,
\begin{equation}
\label{minorsplus}
k_{ij}+k_{st}=k_{it}+k_{sj} \mod d,
\end{equation}
\noindent where as in \eqref{eq:def-m-omega} $m_{ij}=\omega^{k_{ij}}, m_{st}=\omega^{k_{st}},m_{it}=\omega^{k_{it}},m_{sj}=\omega^{k_{sj}}$ and $k_{ij},k_{st},k_{it},k_{sj}\in \textbf{Z}_d$.
Since the number of equations is polynomial in $n$, this gives us a good method
for checking whether $\beta_C(M)=0$. Variations of this trick help when the matrix has a special structure.
Unfortunately, the number of solutions to these equations increases exponentially in $n$, making this method inefficient in the general case.
In some specific cases it is possible to determine the classical value much more easily, using the following results.
\subsection{Game with only diagonal elements in matrix representation}
\label{sec:diagonal}
Notice first that every $n\times n$ matrix with at most one element not equal to 1 in each row and in each column is equivalent in the sense of Definition
\ref{def:equiv} to
\begin{equation} \label{diagonal}
M=\left(\begin{array}{cccccc}
m_{00} & 1 & . & . & . & 1\\
1 & m_{11} & 1 & & & 1\\
. & & . & & & .\\
. & & & . & & .\\
. & & & & . & .\\
1 & . & . & . & 1 & m_{n-1 n-1}
\end{array}\right),
\end{equation}
where $m_{ii}= 1$ for $0\leq i\leq l < n$ and $m_{ii}\neq 1$ for $l< i\leq n $ (for some $l\in \{0,1,2,\ldots,n\})$. Recall that this equivalence preserves both classical and quantum values. Thus, showing the following for the above matrix $M$ also proves it for all matrices equivalent to $M$.
\begin{proposition}
\label{diag}
Let $n \geq 4$ and let $M$ be an $n\times n$ matrix, for which the only elements different from $1$ are on the diagonal. The contradiction number of $M$ is equal to the number of elements different from $1$.
\end{proposition}
For $n=3$ the above assertion does not hold, see the example in Proposition \ref{prop:diag3by3}.
\begin{proof} We can assume that the matrix $M$ is of the form \eqref{diagonal}. Clearly $\beta_C(M) \leq n-l$, the number of entries of $M$ different from $1$.
For the opposite inequality, suppose there is a matrix $M'$ equivalent to $M$ with larger number of $1$'s than $M$. {\it A fortiori}, such $M'$ must have at least $l+1$ ``free'' rows and columns (i.e. rows and columns containing no elements different from $1$).
Moreover, since
swapping rows/columns and a transposition do not change the number of $1$s,
we can assume that $M$ is transformed into $M'$ only by multiplying rows and columns by roots of unity (cf. Remark \ref{order}).
We now consider two cases.
\noindent Case $1^\circ$: \ $l \geq 1$. We have then at least $l+1 \geq 2$ ``free'' columns of $M'$ and exactly $l$ ``free'' columns of $M$.
Consequently, there is a pair of columns, say the $j$th and the $k$th, such that the $n\times 2$ submatrix of $M'$ consisting of the $j$th and the $k$th column
has all entries equal to $1$ while the corresponding $n\times 2$ submatrix of $M$ has one or two entries (namely, $m_{jj}$ or $m_{kk}$, or both) that are different from $1$. This is impossible since every $2\times 2$ minor of the submatrix of $M'$ is equal to zero, while there must be a nonzero $2\times 2$ minor of the submatrix of $M$, contradicting the observation from Remark \ref{minor2minor}. The nonzero minor uses the $j$th row if $m_{jj}\neq 1$ (and the $k$th row otherwise) and any row other than the $j$th and the $k$th.
(This part of the argument requires only $n\geq 3$.) Equivalently, we could argue that the above submatrix of $M'$ is of rank $1$, while the corresponding submatrix of $M$ is of rank $2$.
\noindent Case $2^\circ$ \ $l = 0$. We have then a pair of columns, say the $j$th and the $k$th, such that the $n\times 2$ submatrix of $M'$ formed by them contains at most one entry different from $1$. Consequently, there is a further $(n-1) \times 2$ submatrix that consists exclusively of $1$s and so all its $2\times 2$ minors are equal to zero. At the same time, since $l=0$ and so every column of $M$ contains exactly one element different from $1$, the corresponding $(n-1) \times 2$ submatrix of $M$ contains at least one (and possibly two, in different rows) element different from $1$, which is again impossible by the same argument as in Case $1^\circ$ as long as $n-1\geq 3$, i.e. $n\geq 4$, concluding the argument.
\end{proof}
\subsection{Game with nontrivial elements only in one row (or column) in matrix representation}
\label{sec:row}
Let us now consider an $n_A\times n_B$ matrix in which all elements different from $1$ are in the same row (or in the same column).
\begin{proposition} \label{prop:one_row}
If $M$ is an $n_A\times n_B$ matrix
\begin{equation}\label{last_row}
M = \left(\begin{array}{ccccccc}
1 & . & . & . & 1\\
. & . & . & . & .\\
. & . & . & . & .\\
1 & . & . & . & 1\\
m_1 &.& . & . & m_{n_B}
\end{array}\right)
\end{equation}
\noindent
such that the number of occurrences of the most common element in the last row is $k$,
then $\beta_C(M)=n_B-k.$
\end{proposition}
Note that any matrix in which all elements not equal to 1 are in the same row or in the same column is equivalent to a matrix of form \eqref{last_row}.
\begin{proof} First, if the last row of $M$ contains $k$ elements equal to $z$, then multiplying the last row by $z^{-1}$ we create $k$ entries equal to $1$, which shows that $\beta_C(M)\leq n_B-k.$
For the opposite inequality, suppose $\beta_C(M)\leq n_B-k-1$. As in the proof of Proposition \ref{diag}, this means that there is a matrix $M'$ obtained from $M$ by multiplying rows and columns by roots of unity, which contains at most $n_B-k-1$ elements different from $1$. {\it A fortiori}, there are at least $k+1$ ``free'' columns of $M'$ that consist exclusively of $1$s. Consequently, there are two such columns, say the $j$th and the $l$th, for which $m_j\neq m_l$. (Recall that no more than $k$ of the $m_j$s can take the same value.) But this is impossible: every $2\times 2$ minor of the $n_A\times 2$ matrix consisting of the $j$th and the $l$th column of $M'$ is equal to zero, while obviously there are minors of the corresponding $n_A\times 2$ submatrix of $M$ that are nonzero (namely, minors involving the last row), contradicting the observation from Remark \ref{minor2minor}.
\end{proof}
\section{The girth method for bounding classical value}
\label{sec:girth-method}
In this section we analyze the connection between the classical value of a game and the cycles within certain graphs $H(M)$ and $H_{opt}(M)$ derived from the matrix which defines the game. It turns out the length of cycles permitted in $H_{opt}$ gives us control over the number of contradictions in $M$ and thus the classical value of the game.
We call it {\it the girth method}, as the girth of a graph is the length of its shortest cycle,
and our method will be precisely to construct games with controlled girth.
\begin{definition}[Graph of a game]
For any $n_A\times n_B$ game matrix $M=\{m_{ij}\}$ we define $H(M)$ as a bipartite graph with the vertex set $V(H)=\{1_A,2_A...,n_A,1_B,2_B,...,n_B\}$. Two vertices $i_A,j_B$ in $H$ are adjacent in $H$ iff $m_{i_A j_B}=1$ in $M$.
\end{definition}
\begin{definition}[Optimal graph of the game]
Let $M$ be any game matrix and let $M_{opt}$ be a matrix equivalent to $M$ with the maximum number of elements equal to $1$. We will refer to the graph $H_{opt}(M)=H(M_{opt})$ as an optimal game graph of $M$.
\end{definition}
The optimal game graph is not necessarily unique, but the considerations that follow are not affected by how we make the selection.
\begin{remark}
\label{rem:no-permutations-needed}
Note that permutations of columns and rows, as well as transposition of matrix do not change the number of $1$'s in a matrix.
Hence an optimal matrix can be obtained from the original matrix solely by multiplying rows and columns by roots of $1$.
\end{remark}
By construction, the classical value of the game associated with $M$ is given as follows.
\begin{fact}
\label{fact:cl-H}
For any $n\times n$ game matrix $M$
\begin{equation}
\label{eq:cl-edges}
p_{Cl}=\frac{m}{n^{2}}
\end{equation}
where $m$ is the number of edges in $H_{opt}(M)$.
\end{fact}
Accordingly, to get games with low classical value we need to construct matrices which correspond to graphs $H_{opt}$ with small number of edges. On the other hand, it is well known that a graph which does not have short cycles cannot have too many edges. Hence our aim will be to construct matrices such that the corresponding graphs do not have short cycles.
We shall illustrate this idea by means of the following example.
\begin{center}
\begin{figure*}
\caption{Cycles in graph $H$ and the matrix of the game. There are two cycles, marked in green and red.}
\label{fig:cycle}
\end{figure*}
\end{center}
{\it Example.}
Let us note that every cycle in the graph $H_{opt}$ corresponds to a family of elements in the matrix $M$, to which we will also refer as a cycle.
For instance in Fig. \ref{fig:cycle} the cycle $1_A \to 1_B \to 2_A \to 2_B \to 3_A \to 3_B \to 1_A$ in the graph
(the red cycle) is related to the cycle
$m_{11}\to m^T_{12}\to m_{22}\to m^T_{23}\to m_{33}\to m^T_{31}\to m_{11}$ in the matrix (here $M^T=\bigl(m^T_{ij}\bigr)$ is the transpose of $M=\bigl(m_{ij}\bigr)$).
The green cycle $5_A \to 3_B \to 6_A \to 4_B\to 5_A$
corresponds to $m_{53} \to m^T_{36}\to m_{64} \to m^T_{45} \to m_{53}$.
Since the matrix $M_{opt}$ is equivalent to $M$ and can be obtained by multiplying rows of $M$ by $\eta_i$ and columns by $\xi_j$,
we have $m_{ij}\, \eta_i\, \xi_j=1$ for all pairs $(i,j)$ such that $a_ib_j$ is an edge in $H_{opt}$. Hence in the red cycle we have
\begin{equation}
\begin{aligned}
(\eta_1 m_{11} \xi_1)(\xi_1 m^T_{12} \eta_2)^{-1}
(\eta_2 m_{22} \xi_2)(\xi_2 m^T_{23} \eta_3)^{-1}
\\(\eta_3 m_{33} \xi_3)(\xi_3 m^T_{31} \eta_1)^{-1}= 1
\end{aligned}
\end{equation}
which gives
\begin{equation}
m_{11} (m^T_{12})^{-1} m_{22} (m^T_{23})^{-1} m_{33} (m^T_{31})^{-1} =1
\end{equation}
or equivalently
\begin{equation}
m_{11} (m_{21})^{-1} m_{22} (m_{32})^{-1} m_{33} (m_{13})^{-1} =1
\end{equation}
which, since $m_{ij}=\omega^{k_{ij}}$, means
\begin{equation}
k_{11}+k_{22}+k_{33}=k_{21}+k_{32}+k_{13} \mod d
\end{equation}
Thus the sum of three numbers $k_{ij}$ is equal to the sum of three other numbers.
\noindent {\it The general case.}
In general, we will define a cycle in a matrix $M$ as a set of matrix elements corresponding to a cycle in the complete bipartite graph $K_{n_A,n_B}.$
\begin{definition}[Cycle]
A cycle $C$ in matrix $M$ is a subset of matrix entries determined by ordered sets of rows $S_A=(i_1,\ldots i_l)$ and columns $S_B=(j_1,\ldots j_l)$, $l\geq2$, as follows
\begin{align}
\{ m_{i_1,j_1}, m_{j_1,i_2}^T, m_{i_2,j_2}, m_{j_2,i_3}^T, m_{i_3,j_3} \ldots m_{i_l,j_l}, m_{j_l, i_1}^T \},
\end{align}
\noindent where $m_{ij}^T=m_{ji}.$
\end{definition}
\begin{definition}[Good cycle]
\label{def:good-cycle}
A cycle $C$ in matrix $M$ given by subsets
$S_A=(i_1,\ldots i_l)$ and $S_B=(j_1,\ldots j_l)$ of rows and columns respectively, is referred to as a {\rm good cycle}
if it satisfies
\begin{eqnarray}
\label{eq:good-cycles-matrix}
&&m_{i_1,j_1} (m_{j_1,i_2}^T)^{-1} m_{i_2,j_2} (m_{j_2,i_3}^T)^{-1} m_{i_3,j_3} \times \nonumber \\
&&\times \ldots \times m_{i_l,j_l} ( m_{j_l, i_1}^T)^{-1} =1
\end{eqnarray}
equivalently
\begin{equation}
\label{eq:ks}
\begin{aligned}
k_{i_1, j_1} + k_{i_2,j_2} + k_{i_3,j_3} +\ldots+ k_{i_l,j_l} = \\ k_{j_1,i_2}+ k_{j_2,i_3} + k_{j_3,i_4} +\ldots+ k_{j_l,i_1} \mod d
\end{aligned}
\end{equation}
where $M=(m_{ij})=(\omega^{k_{ij}})$ with $\omega=e^{2 \pi i/d}$.
\end{definition}
Note that a good cycle in a matrix corresponds to a good cycle in the labeled graph framework discussed in Section \ref{sec:graphs}.
In the next proposition we observe that a cycle in $H_{opt}$ corresponds to a good cycle in $M$. This correspondence is direct if the optimal matrix leading to $H_{opt}$ was obtained without permutations and transposition (i.e. solely by multiplying rows and columns with roots of unity). Otherwise,
the two cycles are related by permutations or transposition (the latter acts as reflection).
\begin{proposition}
\label{prop:goodcycle}
Consider a cycle $C$ in matrix $M$ given by subsets
$S_A=(i_1,\ldots i_l)$ and $S_B=(j_1,\ldots j_l)$ of rows and columns respectively.
If the graph $H_{opt}(M)$ contains the corresponding cycle then the cycle $C$ in $M$
is a good cycle.
Conversely, if
matrix $M$ contains a good cycle, then there exists an equivalent matrix $M'$ such that $H(M')$ contains the corresponding cycle.
\end{proposition}
\begin{proof}
Let $M$ be a game matrix and let $C$ be the cycle defined by $S_A=(i_1,\ldots i_l)$ and $S_B=(j_1,\ldots j_l)$.
As was noted in Remark \ref{rem:no-permutations-needed}, an optimal matrix can be obtained
without permutations of rows/columns or transposition.
Thus,
$C$ corresponds to a cycle in so obtained $H_{opt}$ if and only if there exists an optimal matrix $M_{opt}$ in which all elements on the cycle determined by the cycle from $H_{opt}$ are equal to $1$. We can obtain such $M_{opt}$ through multiplying each row of $M$ by some $\eta_i$ and each column by some $\xi_j$, where $i$ and $j$ are the indices of rows and columns, respectively. Thus on the cycle $C$ we have
\begin{align}
\label{eq:proofgood}
&m_{i_1,j_1} (m_{j_1,i_2}^T)^{-1} m_{i_2,j_2} (m_{j_2,i_3}^T)^{-1} m_{i_3,j_3}\times\ldots\times \nonumber\\
&\times m_{i_l,j_l} ( m_{j_l, i_1}^T)^{-1} =
\eta_{i_1}m_{i_1,j_1}\xi_{j_1} \times \nonumber\\
&\times (\eta_{i_2}m_{j_1,i_2}^T\xi_{j_1})^{-1} \eta_{i_2}m_{i_2,j_2}\xi_{j_2} (\eta_{i_3}m_{j_2,i_3}^T\xi_{j_2})^{-1} \eta_{i_3}m_{i_3,j_3}\xi_{j_3}\times \nonumber\\
&\times \ldots \times \eta_{i_l}m_{i_l,j_l}\xi_{j_l} ( \eta_{i_1}m_{j_l, i_1}^T\xi_{j_l})^{-1} = 1 \times\ldots\times 1 = 1.
\end{align}
This means that the cycle $C$ is a good one.
\color{black}
Let us now prove the second assertion.
Suppose that matrix $M$ has a cycle
\begin{align}
\{ m_{i_1,j_1}, m_{j_1,i_2}^T, m_{i_2,j_2}, m_{j_2,i_3}^T, m_{i_3,j_3} \ldots m_{i_l,j_l}, m_{j_l, i_1}^T \}
\end{align}
which is good, i.e.
if we multiply the above numbers they will give $1$, as in Definition \ref{def:good-cycle}.
We shall now consider multiplication of rows and columns with $\xi$'s and $\eta$'s, and will show that
these numbers can be chosen in such a way, that all the matrix elements of the cycle in the resulting matrix $M'$ are equal to 1. To this end, note that
these matrix elements look as follows
\begin{align}
&& m'_{i_1,j_1}= \eta_{i_1}m_{i_1,j_1}\xi_{j_1} \nonumber\\
&& {m'}^T_{j_1,i_2}=\eta_{i_2}m_{j_1,i_2}^T\xi_{j_1}
\nonumber \\
&& m'_{i_2,j_2}=\eta_{i_2}m_{i_2,j_2}\xi_{j_2}
\nonumber \\
&& {m'}^T_{j_2,i_3}=\eta_{i_3}m_{j_2,i_3}^T\xi_{j_2}
\nonumber\\
&& m'_{i_3,j_3}=\eta_{i_3}m_{i_3,j_3}\xi_{j_3}
\nonumber\\
&&\cdots
\nonumber\\
&& m'_{i_l,j_l}=\eta_{i_l}m_{i_l,j_l}\xi_{j_l}
\nonumber \\
&& {m'}^T_{j_l,i_1}=\eta_{i_1}m_{j_l, i_1}^T\xi_{j_l}
\end{align}
We choose $\eta_{i_1}=1$ and
$\xi_{j_1}=m_{i_1,j_1}^{-1}$.
This gives $m'_{i_1,j_1}=1$.
Next, we choose $\eta_{i_2}$ in such a way that ${m'}^T_{j_1,i_2}=1$.
We can continue this way to make all $m'$ elements of the cycle equal to one, apart from the last one. Since all indices $i_1,\ldots i_l$ are distinct (and similarly $j_1, \ldots j_l$) the above procedure ensures that $\eta_i$'s and $\xi_j$'s are well defined. Indeed, we always have the freedom (except for the last element) to choose either some $\xi$ or $\eta$ that was not fixed before.
For the last element we do not have this freedom,
because $\eta_{i_1}$ and $\xi_{j_l}$, which appear there, have already been fixed.
But since the cycle was good
(i.e. elements of cycle in $M$ satisfy
\eqref{eq:good-cycles-matrix}),
then
according to \eqref{eq:proofgood} the
elements in corresponding cycle of $M'$
also satisfy \eqref{eq:good-cycles-matrix}.
Therefore, since all those matrix elements apart from the last one are equal to $1$, then the last one must be also equal to $1$.
Thus for our choice of $\xi$'s and $\eta$'s, the cycle in the matrix $M'$
consists of $1$'s. Then by definition of $H$ the graph $H(M')$ contains the corresponding cycle.
This completes the proof.
The procedure of assigning values to $\eta_i$ and $\xi_j$ can be visualised on the complete bipartite graph. Consider e.g. red cycle in graph from Fig. \ref{fig:cycle}. We make it directed (e.g. first edge let be directed to the right):
$1_A \to 1_B \to 2_A \to 2_B \to 3_A \to 3_B \to 1_A$.
The first assignment ($\eta_{1}=1$) is arbitrary.
The next one, namely $\xi_1=m_{1,1}^{-1}$, corresponds to the head of the next arc $1_A\to 1_B$. The next assignment, i.e. that of $\eta_2$ which assures $m_{21}'=1$, corresponds to the head of the arc $1_B\to 2_A$.
In this way, for each arc except the last one, we can
change the value of the matrix element corresponding to that arc to $1$, by assigning the value of the parameter corresponding to the head of the arc. Only for the last arc we cannot do this, as the parameter $\eta_1$ associated with its head was already set to $1$ at the beginning. However, it is automatically one, as the cycle was good.
\end{proof}
As already said, the optimal graph is non-unique. However we shall later use its property, which does not depend on particular representative. Namely, if for a game matrix $M$
equation \eqref{eq:ks} does not hold for any cycle of length up to, say, $2s$,
then $H_{opt}(M)$
does not have cycles of length $2s$, irrespectively of the choice of $H_{opt}$.
Therefore, in such case
the graph $H_{opt}$
cannot have too many edges, ergo -- due to Fact \ref{fact:cl-H} --
the classical value of the game is small.
We shall actually mostly use the following relaxation of Proposition \ref{prop:goodcycle}, where we will demand that the equality \eqref{eq:ks} is valid for arbitrary equal subsets of entries:
\begin{corollary}
\label{cor:condition-for-s-cycles}
Suppose that for the game matrix $M=(m_{ij})=(\omega^{k_{ij}})$ with $\omega=e^{2 \pi i/d}$,
we have that for any two disjoint subsets $S_1$, $S_2$ of matrix entries with $|S_1|=|S_2|=s$
\begin{eqnarray}
\sum_{m_{ij}\in S_1} k_{ij} \not = \sum_{m_{ij}\in S_2} k_{ij} \mod d
\end{eqnarray}
then the graph $H_{opt}$ does not have cycles of length $2s$.
\end{corollary}
Using this corollary we shall bound classical value from above in subsequent subsections.
Finally we shall provide a result that will later allow to bound the classical value from below.
\begin{proposition}
\label{prop:connected}
For arbitrary game matrix $M$ there is an equivalent matrix $M'$, such that $H(M')$ contains $H(M)$
and is connected. In particular each optimal graph $H_{opt}(M)$ is connected.
\end{proposition}
\begin{proof}
Suppose $H$ can be decomposed into two nonempty disjoint subgraphs $H_1,H_2$.
Of course, each of the subgraphs must be bipartite, i.e., $V(H_i)= A_i\cup B_i$ with
$A_1\cup A_2 = A$ and $B_1\cup B_2 = B$, where $A,B$ are the parts of $H$.
Assume that $i_0\in A_1$ and $j_0\in B_2$.
The decomposition property means in particular that there is no edge between a vertex in $A_1$ and a vertex in $B_2$, so $(i_0,j_0)$ is not an edge of $H$.
Denote by $(\eta_i)$ and $(\xi_j)$ the sequences of multipliers used to obtain the matrix $M'$ from
$M=\big(m_{ij}\big)$. Let $\zeta=\eta_{i_0}m_{i_0j_0}\xi_{j_0}$ and define
\begin{eqnarray}
\eta_i' = \left\{ \begin{array}{l c l} \eta_i & \hbox{ if } & i\in A_1\\
\zeta \eta_i & \hbox{ if } & i\in A_2\end{array} \right. , \ \
\xi_j' = \left\{ \begin{array}{l c l} \xi_j & \hbox{ if } & j\in B_1\\
\zeta^{-1} \xi_j & \hbox{ if } & j\in B_2\end{array} \right.. \nonumber \\
\end{eqnarray}
This construction assures that if $M'=\big(\eta_i' m_{ij}\xi_j' \big)$, then the $(i_0,j_0)$th entry of $M'$ is $1$, while all the entries that were equal to $1$ in $M$
remain equal to $1$ also in $M'$. This means that $H(M)\subsetneq H(M')$ (a strict inclusion). At the same time, the described operation decreases the number of connected components
of the graph by one. Repeating the operation a finite number of times we arrive at a matrix $M'$
such that $H(M')$ has only one connected component, i.e. it is connected.
The construction above assumes tacitly that sets $A_1$ and $B_2$ are both nonempty; if that is not the case, we proceed similarly for $A_2$ with $B_1$.
(If one of the $A_i$'s {\em and } one of the $B_i$'s is empty, then the graph has no edges and any choice $i_0\in A$, $j_0\in B$ will work.)
Finally, we note that since optimal graph $H_{opt}$ cannot be extended by definition, it must be connected.
\end{proof}
\color{black}
\subsection{Games with maximal contradiction number}
Let us now consider in more detail games with maximal number of contradictions, which due to Fact \ref{fact:min-clas-value} is given by $(n_A-1)(n_B-1)$. Such games exist at least for $n_A=n_B$ and examples of various level of sophistication will be presented in Section \ref{sec:construction}.
To begin with, we make the following simple but enlightening observation.
\begin{fact}
\label{fact:max-contradiction-tree}
An $n_A \times n_B$ game matrix $M$ has the maximal number of contradictions, i.e., $\beta_C= (n_A-1)(n_B-1)$, if and only if
the optimal graph $H_{opt}(M)$ is a tree.
\end{fact}
\begin{proof}
If $\beta_C= (n_A-1)(n_B-1)$, then, by definition of the optimal graph, $H_{opt}$ has $n_A+n_B -1 $ edges. By Proposition \ref{prop:connected}, the optimal graph is connected. But arbitrary connected graph of $n$ vertices which has $n-1$ edges must be a tree. The same calculation gives the reverse argument (Proposition \ref{prop:connected} is not even needed).
\end{proof}
\begin{remark}
An example of such an optimal graph for a matrix $M$ with maximal contradictions
is the following. By Proposition \ref{prop1}, for any matrix $M$ we can find a matrix $M'$ equivalent to $M$
which has $1$'s in the first row and in the first column, hence it has at least $n_A +n_B - 1 $ of $1$'s.
If $M$ has maximal possible contradiction number, there does not exist equivalent matrix with more $1$'s. Thus
the $H(M')$
is optimal for $M$, and it is a tree.
\end{remark}
We can now formulate a corollary, which characterizes games possessing the maximal number of contradictions.
This result can be also proven within the approach of labeled graphs of
\cite{RS} and \cite{RRGHS}
(cf. Lemma \ref{lcycles}).
\begin{corollary}
\label{cor:good-cycle}
A game matrix $M$ has the maximal number of contradictions
given by $(n_A-1)(n_B-1)$
if and only if
it does not contain any good cycle.
\end{corollary}
\begin{proof}
If $M$ has a good cycle, then
there is an equivalent matrix $M'$ such that $H(M')$ contains a cycle. By Proposition \ref{prop:connected} there is an equivalent matrix $M''$ such that $H(M'')$ contains that cycle, and is moreover connected.
Thus the
optimal graph has at least $n_A+n_B$ edges, so that the number of contradictions cannot be the
maximal one.
Indeed, connected graphs of $n$ vertices which do not contain any cycles are by definition trees, and a tree with $n$ vertices contains exactly $n-1$ edges.
Suppose now that $M$ does not have any good cycle. Then Proposition \ref{prop:goodcycle} asserts that
$H_{opt}$ does not have any cycle, hence it is a tree and by Fact \ref{fact:max-contradiction-tree} the matrix has the maximal number of contradictions.
\end{proof}
Finally, let us consider matrix $M$ that has $1$'s in the first row and the first column, and
suppose that it has the maximal number of contradictions. We will then argue that all the other matrix elements have to be distinct from each other, and distinct from $1$:
\begin{lem}
\label{lem:max-contr-distinct}
\label{lll}
If an $n_A \times n_B$ game matrix of the form
\begin{equation}
\label{eq:ones}
M = \left(\begin{array}{cccc}
1 & 1 & ... & 1\\
1 & m_{22} & ... & m_{2 n_B}\\
\vdots & \vdots & & \vdots\\
1 &m_{n_A2}& ... & m_{n_An_B}
\end{array}\right)
\end{equation}
has $(n_A-1)(n_B-1)$ contradictions then all elements $m_{ij}$, for $2\leq i\leq n_A$ and $2\leq j\leq n_B$, are distinct and different from $1$.
\end{lem}
\begin{proof}
A direct proof by matrix transformations is provided in the Appendix. Here we provide an argument which utilizes the tools introduced in the present section.
Suppose that there are two elements among $m_{ij}$, equal to $v$.
First, consider the case when they are in the same row (if they are in the same column the
reasoning is analogous). We then look at the submatrix
\begin{eqnarray}
\label{eq:22}
\left(
\begin{array}{ll}
1 & 1\\
v & v \\
\end{array}
\right)
\end{eqnarray}
We see that $ 1 \times v^{-1} \times v \times 1=1$.
Thus the matrix contains a good cycle, and by Corollary \ref{cor:good-cycle} it cannot have the maximal number of contradictions. The same argument applies when the identical entries are in the same column.
Now assume that the two equal elements are in different rows and columns, i.e. $m_{i_1j_1}=m_{i_2j_2}=v.$ We can assume without loss of generality that $i_1<i_2$ and $j_1<j_2.$ In this case $M$ contains the following submatrix
\begin{eqnarray}
\left(
\begin{array}{lll}
1 & \textbf{1} & \textbf{1}\\
\textbf{1} & \textbf{v} & u\\
\textbf{1} & w & \textbf{v}\\
\end{array}
\right)
\end{eqnarray}
Since $ 1 \times v^{-1} \times 1 \times 1 \times v \times 1=1$, the bolded elements of the matrix form a good cycle and thus the contradiction number is again not the maximal one.
To summarize, we have shown that
a matrix of the form \eqref{eq:ones} with the maximal contradiction number
must have all the entries $m_{ij}$
with $2\leq i \leq n_A$
and $2\leq j\leq n_B$
distinct.
Finally we can see
that those entries
cannot be equal to 1. Indeed,
if any such entry is equal to $1$
we have trivially a good cycle formed by
\begin{eqnarray}
\label{eq:22-1s}
\left(
\begin{array}{ll}
1 & 1\\
1 & 1 \\
\end{array}
\right)
\end{eqnarray}
This can also be seen directly from the definition
of the contradiction number (Def. \ref{contradiciotn_numb}) since
taking classical strategy $a(x)=b(y)=1$
leads to the number of contradictions that
is strictly smaller than $(n_A-1)(n_B-1)$.
\end{proof}
\section{Construction of games with low classical values}
\label{sec:construction}
In this section we provide explicit constructions of games with low classical value.
In particular, we show that an $n\times n$ matrix with maximum number of contradictions can be obtained with $d\leq 2^nn^{3n}$ outputs. We also provide a method for constructing such matrices, as well as ones with a large, but not maximum number of contradictions and with the number of outputs that is polynomial in $n$.
\subsection{Warmup: a simple construction of games with minimal possible classical values}
\label{sub:max}
In this section we construct a family of games with the minimum classical values in the form of $n_A\times n_B$ matrices with $d=2^{(n_A-1)(n_B-1)-1}$ outputs. We also show that it is possible to achieve the maximum number of contradictions with an even smaller number $d$ of outputs and attempt to find a lower bound on the necessary number of outputs.
It follows from Lemma \ref{lll} that the maximum number of contradictions in an $n_A\times n_B$ matrix cannot be achieved with fewer than $(n_A-1)(n_B-1)+1$ outputs. However, this is not a sufficient condition and, in general, a larger number of outputs is necessary.
We will now sketch a ``brute force'' way to construct a matrix with maximal number of contradictions, using
Corollary \ref{cor:good-cycle}.
It says
that in order to maximize the number of contradictions one must ensure
the matrix $M$ contains no good cycles.
Let us note that according to Definition \ref{def:good-cycle} a good cycle in
matrix $M$
can be equivalently characterized as follows:
\begin{enumerate}
\item[(i)] $X = X_1 \cup X_2,$ the two subsets are disjoint and $\left|X_1\right| = \left|X_2\right| \geq 2$;
\item[(ii)] Each column contains either no elements from $X$ or exactly one element from $X_1$ and one from $X_2;$
\item[(iii)] Each row contains either no elements from $X$ or exactly one element from $X_1$ and one from $X_2;$
\item[(iv)] $\sum\limits_{m_{ij} \in X_1} k_{ij} \ { =} \sum\limits_{ m_{ij} \in X_2} k_{ij} \mod d$.
\end{enumerate}
(regarding item (iv), recall that $m_{ij}=\omega^{k_{ij}}$).
It follows that if condition (iv) fails for every pair of sets $X_1,X_2$ verifying conditions (i)-(iii) (i.e., every cycle is not a good one), then the contradiction number is maximal.
{\it Explicit construction of the game.}
We now apply this approach to matrices of the form \eqref{eq:ones}. Our strategy will be the following: we shall ensure that condition (iv)
is not satisfied for arbitrary sets $X_1,X_2$ satisfying (i)--(iii).
Note that the condition (iv) fails if we take $k_{ij}$ from a sequence
\begin{equation}
\label{k_i}
k_l=2^{l}, \ l=0,1,2,3,\ldots
\end{equation}
This follows from the uniqueness of a representation of an integer in base $2$ and the fact that
the $0$'s in the sums in the condition (iv) corresponding to the $1$'s from the first row and the first column in \eqref{eq:ones} can be ignored, because by (ii) and (iii), the set $X$ can contain at most $3$ such elements.
Essentially the same argument works for any $(k_l)$ such that $k_0 \geq 1$ and $k_l\geq2k_{l-1}$
for all $l\geq 1$.
The above scheme allows us to construct $n_A\times n_B$ matrices with the maximal number of contradictions with
\begin{equation}
d=2^{(n_A-1)(n_B-1)+1}
\end{equation}
outputs.
{\it Example.}
Here is a game matrix with $n_A=4$, $n_B=3$ inputs
containing $6$ contradictions, based on the above construction.
\begin{equation}
M=\left(\begin{array}{cccc}
1 & 1 & 1 & 1\\
1 & \omega^{k_{1}} & \omega^{k_{2}} & \omega^{k_{3}}\\
1 & \omega^{k_{4}} & \omega^{k_{5}} & \omega^{k_{6}}
\end{array}\right)
\end{equation}
The game has $2^{7}$ outputs.
The classical value of this game is the minimal one over all $4\times3$ games, i.e. $\beta_C(M)=6$ and $p_{Cl}(M)=\frac{1}{2}.$
\subsection{Controlling the contradiction number by the girth of $H_{opt}$}
\label{sub:controlled}
Here we provide a different approach to controlling the number of contradictions within a matrix. In order to ensure a large number of contradictions we will control the length of the cycles permitted in the graph $H_{opt}$. If we make sure that our $k_{ij}$'s are chosen in such a way that any two sums of the same lengths less than or equal to $s$
are not equal to one another, we will guarantee that there are no cycles of length less than or equal to $2s$ in $H_{opt}$, i.e. the girth of $H_{opt}$ is more than $2s$. Since a graph with no short cycles can not have too many edges (a more precise result to that effect is stated later in this section),
and the edges of $H_{opt}$ correspond to $1$'s in an optimal matrix,
we can use this scheme to obtain a bound on the contradiction number of $M$.
We will now show how to find such $k_{ij}$'s.
Using Rudin's method from \cite{Rudin} we will construct a set $A$ of integers which satisfies the so-called {\it $s$-sum property}. Namely, the sum of any $t$ elements of the set cannot be equal to the sum of any other $t$ elements for any $t\leq s$. Then we choose the number $d$ of outputs large enough that none of these sums are greater than $d-1$, thus ensuring that the sums modulo $d$ are also distinct. We can then construct a matrix with a large number of contradictions with elements in the form $\omega^{k_{ij}}$ with $k_{ij}$'s taken from the set $A$.
{\it Construction of the set A.}
Let us recall the method (item 4.7 in \cite{Rudin}, page 219-220) that allows us to construct the set $A$ with number of elements equal to a prime number $p$
which satisfies the {\it $s$-sum property}.
To this end consider finite sets $A=A(s,p)$, with $s=2,3,4,\ldots$
where $s<p$. We now define $\lambda(k),$ for
any positive integer $k$, by
\begin{equation}
\lambda(k)\equiv k\:\left(\text{mod}\,p\right),\quad0\leq\lambda(k)\leq p-1,
\end{equation}
and we let $A(s,p)$ be the set consisting of the $p$ integers
\begin{equation}
x_{k}=\sum_{i=0}^{s-1}\lambda\left(k^{s-i}\right)\left(sp\right)^{i}\quad\left(k=0,\ldots,p-1\right).
\end{equation}
The sum reflects representation of $x_k$
in the number system whose base is $sp$ with the leading digit
being $\lambda(k)=k$.
Hence, we see that $0=x_{0}<x_{1}<\ldots<x_{p-1}$ and
\begin{equation}
x_{p-1}\leq\left(p-1\right)\sum_{i=0}^{s-1}\left(sp\right)^{i}<s^{s-1}p^{s}.
\end{equation}
We now argue that the so constructed
set $A(s,p)$ satisfies the $s$-sum property.
Namely, suppose that
\begin{equation}
y=x_{k_{1}}+x_{k_{2}}+\ldots+x_{k_{s}},\label{eq:sumx_k}
\end{equation}
and express $y$ in base $sp$:
\begin{equation}
y=\sum_{i=0}^{s-1}y_{i}\left(sp\right)^{i}\quad\left(0\leq y_{i}<sp\right).
\end{equation}
Since $\lambda(k)<p$ and hence the sum
$\sum_{j=1}^{s}\lambda\left(k_{j}^{s-i}\right) < sp$, we have
\begin{equation}
y_{i} = \sum_{j=1}^{s}\lambda\left(k_{j}^{s-i}\right)\qquad\left(i=0,\ldots,s-1\right),
\end{equation}
so that
\begin{equation}
\sum_{j=1}^{s}k_{j}^{l}= y_{s-l}\mod p\quad\left(l=1,\ldots,s\right).
\end{equation}
Thus the digits $y_{i}$ of $y$ determine the first $s$ power sums
of $k_{1},\ldots,k_{s}$ in the cyclic field of $p$ elements; hence
they determine the elementary symmetric function of $k_{1},\ldots,k_{s}$
in this field, and this in turn implies that $k_{1},\ldots,k_{s}$ are
determined by (\ref{eq:sumx_k}), up to permutations. This proves that
the representation for any $y$ in the form (\ref{eq:sumx_k}) is unique, up to permutations of the $x_{k_{i}}.$
The same argument works for any lengths $t$ smaller than $s$.
Having constructed the family of sets $A(s,p)$
we are now ready to formulate
the main result of this section, contained in the following proposition.
\begin{proposition}
\label{prop:controlled}
For any $n\geq 2$ and $s\leq n$ there exists a game associated with an $n\times n$ matrix with $d\leq (2sn^2)^s$ outputs such that $p_{Cl}\leq 2n^{-1+\frac{1}{s}}.$
Moreover, for any $n\geq 2$ there exists a game associated with an $n\times n$ matrix with $d\leq 2^n n^{3n}$ outputs such that $p_{Cl}=\frac{2n-1}{n^2}$.
\end{proposition}
\begin{proof}
We aim to construct a class of games with controlled number of contradictions.
Corollary \ref{cor:condition-for-s-cycles} tells us that, to this end, one should find $n^2$ integers $k_{ij}$ satisfying the ``mod-$d$'' $s$-sum property, i.e., such that their sums of lengths $t\leq s$ are to be distinct modulo $d$. Thus
we will need to have
a set $A$ which (i) has at least $n^2$ elements and (ii) satisfies the above mentioned ``mod-$d$'' $s$-sum property.
We will achieve property (i) by choosing a suitable prime $p$ in Rudin's construction described above, while (ii) will assured by choosing $d$ large enough.
Namely, let us begin with a set $A=A(s,p)$, where $s\geq 2$ and $p>s$ is a prime.
By the argument above, the set $A$ consists of $p$ numbers that satisfy the $s$-sum property.
The numbers are not larger than $s^{s-1}p^s-1$.
Now, to have ``mod-$d$'' $s$-sum property, we will choose $d$ to be larger than any of the sums. To this end, we take $d=s \times ( s^{s-1}p^s)=(sp)^s$. So we are done with (ii).
To ensure (i), we note that according to the Bertrand-Chebyshev theorem there is always a prime number between $N$ and $2N$, for $N> 2$.
Let $p$ be a prime between $n^2$ and $2n^2$, then $A(s,p)$ has more than $n^2$ elements and (i) is satisfied.
Note that since $p\leq 2 n^2$, our number of outputs satisfies
\begin{equation}
\label{dbound1}
d\leq (2sn^2)^s .
\end{equation}
Having our set $A(s,p)$ satisfying (i) and (ii)
we now consider a game $M$ consisting of elements in the form $\omega^{k_{ij}}$, where $k_{ij}\in A$ and $\omega=e^{i2\pi/d}$.
We shall now derive a bound on the classical value of the game constructed above using the girth method. To this end, we turn to the graph $H_{opt}$
corresponding to such a game. Due to the properties of the set $A$,
the graph has girth $g>2s$ i.e. it
does not have cycles of length up to $2s$. Now, we need to estimate
the number of edges of the graph $H_{opt}$, which in turn, according to \eqref{eq:cl-edges}, will determine the classical value.
Erd\H os conjectured in 1964, and it was proved in 1974 in \cite{Bondy-girth}, that an $N$-vertex graph which does not have a cycle of length less than or equal to $2s$ must have $m\leq O(N^{1+1/s})$ edges.
Thus, we have a bound on the number $m$ of edges in $H_{opt}$.
\begin{equation}
\label{eq:erdos}
m\lesssim (2n)^{1 + 1/s}
\end{equation}
A more precise result concerning graphs from \cite{Hoory} is the following. For a bipartite graph with $m$ edges, $n_A + n_B$ vertices and girth $g>2s$ (hence $g\geq 2(s+1)$) we have
\begin{equation}
n_A\geq \sum_{i=0}^{s}\left(d_A-1\right)^{\left\lceil i/2\right\rceil}\left(d_B-1\right)^{\left\lfloor i/2\right\rfloor},
\end{equation}
\begin{equation}
n_B\geq \sum_{i=0}^{s}\left(d_B-1\right)^{\left\lceil i/2\right\rceil}\left(d_A-1\right)^{\left\lfloor i/2\right\rfloor},
\end{equation}
where $d_A=\frac{m}{n_A}$ and $d_B=\frac{m}{n_B}$ are the {\sl average degrees} in the respective parts of the graph.
For $n_A=n_B=n$ and $d_A=d_B=\frac{m}{n}$ this gives
\begin{equation} \label{eq:max-girth}
n\geq \sum_{i=0}^{s}\left(\frac{m}{n}-1\right)^i
\end{equation}
In particular, $\left(\frac{m}{n}-1\right)^s \leq n$, and solving this inequality for $m$ we obtain
\begin{equation} \label{eq:max-girth-simple}
m\leq n+ n^{1+\frac{1}{s}} < 2n^{1+\frac{1}{s}},
\end{equation}
\noindent which gives us a bound for classical value
$p_{Cl}\leq 2/n^{1-\frac{1}{s}}$, as needed.
Finally, if we set $s=n$ in equation \eqref{dbound1}, we obtain a bound on the number of outputs needed to maximize the number of contradictions.
\begin{equation}
\label{dbound2}
d\leq 2^n n^{3n}
\end{equation}
In this case, i.e. for $s=n$,
we can actually provide exact expression for the classical value. Indeed,
recall that for our matrix constructed based on set $A$, the graph $H_{opt}$ does not have cycles of length up to $2s$. On the other hand the graph has $2n$ vertices and is connected (by Prop. \ref{prop:connected}).
Thus for $s=n$ the graph has no cycles, and therefore it is a tree, which has $m=2n-1$ edges. That gives us the exact classical value of $p_{Cl}=\frac{2n-1}{n^2}$.
\end{proof}
Again, the smallest values of $m$ that are of interest are $m=2n-1$, when the graph is a tree with $g=\infty$, and $m=2n$, when the graph is a cycle of length $g=2n$. (In both of these cases
\eqref{eq:max-girth} becomes an equality.) Next, the permitted values of $g$ decrease quickly as $m$ increases. For example, if $m=3n$ (average degree $d_A=d_B=3$), \eqref{eq:max-girth} implies
$g \leq 2 \log_2(n+1)$; this bound is (approximately) saturated by Ramanujan expander graphs \cite{LPS}, which can be exhibited explicitly.
For $s=3$ (i.e. if the graphs do not contain cycles of length $6$ or smaller), \eqref{eq:max-girth-simple}
reduces to $m\leq 2n^{4/3}$ and so the classical winning probability is upper-bounded as follows
\begin{equation}
p_{Cl} = \frac{m}{n^2} \leq \frac{2}{n^{2/3}}.
\end{equation}
\subsection{Prospects towards unbounded quantum violation.}
\label{subsec:biases}
For nonlocal games, the most important feature is how much the quantum value exceeds the classical one. As an indicator we can use the following ratio of biases:
\begin{equation}
R=\frac{p_Q-p_{rand}}{p_{Cl}-p_{rand}}
\end{equation}
where $p_{rand}$ is a random strategy (i.e. Alice and Bob provide always a random output).
We say that there is an unbounded violation if $R$ grows to infinity for growing number of inputs and outputs.
As already mentioned, there are very few explicit examples of games which exhibit unbounded violation. Let us now argue that our results give a hope to obtain a new class of unbounded violation with unique features. Namely, in Ref. \cite{RAM} there is the following upper bound on quantum winning probability for linear games.
\begin{equation}
p_Q\leq \bar p_Q\equiv\frac1d\left(1+ \frac{1}{n_An_B}\sum_{k=1}^{d-1}||M_k||\right)
\end{equation}
where $M_k$ is entrywise $k$-th power, i.e. $(M_k)_{ij}=(M_{ij})^k$,
and the norm is operator one. We consider now the case $n_A=n_B=n$.
Using the fact that the operator norm is no smaller than the maximal norm of the columns, which is $\sqrt{n}$, we obtain that $\bar p_Q\geq \frac{1}{\sqrt{n}}$.
We shall now suppose that the upper bound on the quantum winning probability is saturated for a given game.
In such case the ratio of the biases $R$
(and the ratio of probabilities) would satisfy
\begin{equation}
R=\frac{\overline{p}_Q -\frac1d}{p_{Cl}-\frac1d}\geq \frac{\overline{p}_Q}{p_{Cl}} \geq
\frac {n^{-\frac12}}{2n^{-\frac{2}{3}}}= \frac{n^{1/6}}{2} .
\end{equation}
We can therefore hope for $R = \Omega( n^{1/6})$ with the number of outputs $d\sim O(n^6)$ (as given by \eqref{dbound1}).
Note that although $R$ is unbounded, both the quantum value and the classical value go to zero with growing number of inputs and outputs. In this regard our game is like Khot-Vishnoy game
\cite{Khot2005,Kempe2008a},
and should be contrasted with e.g. Hidden Matching game of \cite{Buhrman} where the quantum value stays constant. This means that in our case, one would need to repeat the experiment approximately $\sqrt{n}$ times to demonstrate quantum advantage.
For a general $s$, one may hope to achieve (by the same argument)
\begin{equation}
R = \Omega\big( n^{\frac12 - \frac1s}\big) .
\end{equation}
In particular, if we take $s=\Theta(\log(n))$, the ratio $R$ is nearly $n^{1/2}$.
While, for small $n$, it is possible to achieve the maximum contradiction number with a significantly smaller number of outputs, it is conceivable that
the above scaling (i.e., $\log d \sim n \log n$) is asymptotically optimal.
As a matter of fact, if the bound of \cite{RAM} was saturated for our games (or even approximately saturated), one would obtain unbounded violation for a regime, which is not covered in existing explicit constructions.
Indeed, one can measure the efficiency of obtaining large violation by considering $R$ as a function of $\rm io=\# \text{ inputs} \times \# \text{ outputs}$.
Table \ref{tab:io} shows comparison of the existing results on explicit constructions with the unbounded violation that we are aware of with those potentially offered by our results.
\begin{table*}
\begin{center}
\begin{tabular}{|c|c|c|c|}
\hline
& parallel repetition techniques& Khot-Vishnoi game & our (provided saturation holds)\\
\hline\hline
$R(\rm io) \gtrsim $ & $\rm io^{10^{-5}/2}$ & $ \frac{\log(\rm io)}{[\log\log(\rm io)]^2} $ & $\rm io^{1/10}$\\
\hline
\end{tabular}
\end{center}
\caption{\label{tab:io} Violation as a function of the product of the number of inputs and outputs (denoted by ``io''). The best ratio for non-constructive games is
$R(\rm io)=\rm io^{1/4}/\log(\rm io)$ of \cite{Junge2011}.}
\end{table*}
Here we give the value of the
Khot-Vishnoi game
\cite{Khot2005,Kempe2008a} in the version from \cite{Buhrman}
(the construction of
\cite{Regev} is slightly worse according to the criterion indicated in the previous paragraph). By the ``parallel repetition techniques''
we mean the games that originate from the
Magic Square game played in parallel (cf. \cite{Raz-parallel} and \cite{Junge2010}).
We hope therefore that our results will stimulate the search for the quantum value of linear games, especially of the type constructed by us.
\subsection{Lower bounds on the number of outputs}
Both methods described above provide explicit constructions of matrices with maximum contradictions and reasonably low numbers of outputs. However, neither of them is optimal.
It is, in fact, possible to achieve the maximum number of contradictions with a much smaller number of outputs. For example, the matrix
\begin{align}
\left(\begin{array}{ccc}
1 & 1 & 1\\
1 & \omega & \omega^{3}\\
1 & \omega^4 & \omega^5
\end{array}\right)
\end{align}
\noindent with $d = 6$ outputs has $4$ contradictions, see Section \ref{3x3}, Table \ref{tab:3x3}.
We later show (see Proposition \ref{prop:chromatic}) that $6$ is the minimum number of outputs necessary to achieve maximum number of contradictions in a $3\times 3$ matrix. The exact values of $d_{min}$ for larger matrix sizes remain unknown. We do however provide a lower bound on the number of outputs. To this end we construct a graph $G_n$ such that $d$ can be bounded from below in terms of its chromatic number.
Every set $X$ within a matrix which corresponds to a cycle in $H_{opt}$ is defined by a pair of permutations $\pi_1,\pi_2 \in S_n$. The set $X$ consists of elements $x_{i\pi_j(i)}$ for $i\in [n], j\in\{1,2\}$. If $\pi_1(i)=\pi_2(i)$ for some $i\in [n]$, the element $x_{i\pi_j(i)}$ is removed from $X$.
It follows that if for each pair of permutations $\pi_1,\pi_2 \in S_n$ we have
\begin{center}
\begin{equation}
\label{permut}
\sum\limits_{j=0}^{n-1} k_{j,\pi_1(j)} \neq \sum\limits_{j=0}^{n-1} k_{j,\pi_2(j)} \mod d,
\end{equation}
\end{center}
\noindent then the graph contains no cycles without contradictions, and thus $\beta_C=(n-1)^2.$
However, not every such pair of permutations defines exactly one cycle in $K_{n,n}$. For a pair $\pi_1,\pi_2$ defining two or more disjoint cycles it is possible that (\ref{permut}) holds but no cycle without contradiction exists.
\begin{proposition}
\label{prop:chromatic}
The number $d$ of outputs necessary to achieve the maximum number of contradictions in an $n\times n$ matrix is at least the chromatic number of the graph $G_n$, in which the vertices are all permutations $\pi \in S_n$ and two vertices $\pi_1,\pi_2$ are adjacent if and only if they define exactly one cycle in $K_{n,n},$ i.e. $\pi=\pi_1\pi_2^{-1}$ is a cyclic permutation.
That in turn is bounded from below by the maximum of:
\begin{enumerate}
\item $\left|C\right|+1$, where $C\subset S_n$ is the largest set of cyclic permutations such that $\pi_i\pi_j^{-1}$ is a cycle for any $\pi_i, \pi_j\in C$.
\item $\frac{n!}{\left|J\right|},$ where $J$ is the largest set of cyclic permutations such that $\pi_i\pi_j^{-1}$ is not cyclic for any $\pi_i,\pi_j\in J.$
\end{enumerate}
\end{proposition}
\begin{center}
\begin{figure}
\caption{The graph $G_n$ for $n=4$ is $K_{4,4,4,4,4,4}$.}
\label{fig:G4}
\end{figure}
\end{center}
The chromatic number of the graph $G_n$ in Proposition \ref{prop:chromatic} can also be bounded in terms of eigenvalues of its adjacency matrix $A(G)$. For example in \cite{Hoffman} it is shown that $\chi_{G}\geq 1+\frac{\lambda_1}{\left|\lambda_n\right|}$, where $\lambda_1$ and $\lambda_n$ are the largest and smallest eigenvalues of $A(G).$ Since $G_n$ is a regular graph, it can be shown that $\lambda_1$ is equal to the degree of the vertices, i.e. $\lambda_1=\sum_{i=2}^{n}\frac{n!}{(n-i)!i}$.
For $n=3$ the graph $G_n$ is the complete graph $K_6$. Its chromatic number is $6$, which is, in fact, the minimum number of outputs necessary to construct a $3\times 3$ matrix with four contradictions. In the case of the graph $G_4$ in Fig. \ref{fig:G4}, however, the chromatic number is still $6$, which does not appear to be a sufficient number of outputs to achieve the maximum contradiction number in a $4\times 4$ matrix. We believe that for larger values of $n$ the chromatic number of $G_n$ increases, providing a reasonable bound on $d$, although we do not yet know whether this bound is achieved for any $n>3$.
\section{Connection to a labeled graph framework}
\label{sec:graphs}
In this section we compare the matrix-based approach to the framework described in \cite{RS} and \cite{RRGHS}, in which linear games
are described in terms of graphs with permutations assigned to the edges. In many ways the two frameworks are similar. A labeled graph can be easily translated into a matrix and vice-versa. However, each approach has its own strengths and weaknesses. Some properties of these games are easier to prove in graph-theoretic terms, others are more evident in a matrix.
Actually, the labeled graph approach is suitable for a slightly more general family of games, called ``unique games.''
A~unique game is a game in which the variables can take values from the set $[n]=\{0,...,n-1\}$ and the constraints are in the form $\pi_{xy}(a)=b,$ where $a$ and $b$ are values assigned to $x$ and $y,$ respectively and each $\pi_{xy}$ is a permutation. Thus we shall recall the approach involving labeled graphs in the more general context of unique games, and then will specialize to the case of linear games.
In subsection \ref{3x3} we provide, as an example, a systematic analysis of games with three inputs on each side, using a combination of graph and matrix tools.
\subsection{Unique games as labeled graphs}
\label{sec:unique}
A unique game can be described in terms of a labeled graph $(G,K)$, where $G$ is a graph with vertex set $V$ and edge set $E$, and the edge-labeling $K:E\mapsto S_n$ assigns a permutation of the set $[n]=\{0,1,...,n-1\}$ to each edge of $G$. Typically, $G$ is a directed graph, but it may be undirected if all permutations used are such that $\pi^{-1}=\pi$. In a bipartite graph, we are going to assume a left to right default orientation for all edges.
A classical strategy for a game defined in these terms corresponds to a vertex-assignment $f:V\mapsto [n].$ By a \textit{contradiction} in a given assignment $f$ we mean an edge $xy\in E$ such that $\pi_{xy}(f(x))\neq f(y).$ The \textit{contradiction number} $\beta_C$ is the minimum number of contradictions over all possible assignments. The classical value of a game represented by $(G,K)$ can be expressed as $p_{Cl}=1-\frac{\beta_C(G,K)}{\left|E(G)\right|}$. We consider two labeled graphs to be equivalent if one can be obtained from the other through the following operations:
\begin{enumerate}
\item Isomorphism between the underlying (unlabeled) graphs
\item In a directed graph, replacing an edge $\overrightarrow{xy}$ labeled with $\pi$ with $\overrightarrow{yx}$ labeled with $\pi^{-1}.$
\item Switching operations $s(x,\sigma)$ for any vertex $x\in V(G)$ and permutation $\sigma\in S_n,$ defined as follows. For every vertex $y\in N_{G}(x)$:
\begin{enumerate}
\item if $\overrightarrow{yx}\in E(G),$ we replace $K(\overrightarrow{yx})=\pi$ with $K'(\overrightarrow{yx})=\sigma\pi$,
\item if $\overrightarrow{xy}\in E(G),$ we replace $K(\overrightarrow{xy})=\pi$ with $K'(\overrightarrow{xy})=\pi\sigma^{-1}$.
\end{enumerate}
\end{enumerate}
Since the equivalence relation preserves the~contradiction number of a labeled graph, it is clear that games defined by equivalent labeled graphs have the same classical value.
The same is true about their quantum values since graph isomorphisms and switches represent the corresponding relabeling of the inputs $x,y$ and outputs $a,b$, respectively. Thus, we only need to calculate the value for one labeled graph in each equivalence class. Hence unique games defined by equivalent labeled graphs are considered equivalent.
\subsection{Linear games: labeled graphs versus matrices }
The complete bipartite graph $K_{n,n}$ labeled with permutations $\sigma_{xy}(a)=a+k_{xy} \mod d$, defines the same type of game introduced in Section \ref{sec:est}, with the predicate given by equation \eqref{eq:V}.
Thus, we can also describe this game in terms of a matrix, in which all elements are complex roots of $1$. Notice that in this case equivalent labeled graphs correspond to equivalent matrices. The matrix operations defined in Section \ref{sec:est} are essentially the same as the graph operations in Section \ref{sec:unique}. The first one is a switch on a vertex. The second one is an isomorphism changing the order of vertices on one side of the graph. The third one swaps the two sides.
This connection allows us to examine the games in question from two different angles, applying both graph and matrix tools to study their properties.
It follows from the results in \cite{RS} that for any bipartite graph $G$ all labelings $K:E\mapsto L_d'=\{\tilde{\sigma_i}\in S_d: \tilde{\sigma_i}(a)=i+a \mod d\}$ with no contradiction are equivalent to the labeling $K_0$, where $K_0(e)=\mathrm{id}$ for all $e\in E$. In terms of matrices this means that every game with no contradiction is equivalent to a matrix in which all elements are equal to 1, i.e. a matrix of rank 1. Thus, the contradiction number of a game, as defined in terms of a labeled graph, is equal to the rigidity ${\rm Rig}(M,1)$ of the corresponding matrix.
It is easy to see that contradictions within a labeled graph arise from cycles. Furthermore, it is shown in \cite{RS} and \cite{RRGHS} that a graph labeled with $L_d'$ has a contradiction if and only if it contains a bad cycle, i.e., a cycle with a contradiction. Every cycle in the labeled graph $(G,K)$ corresponds to a cycle in the matrix $M$. Cycles which do not give rise to contradictions are called good cycles and correspond directly to good cycles in a matrix, i.e.,
those satisfying Eq. \eqref{eq:good-cycles-matrix}.
More precisely, a good cycle in $(G,K)$ corresponds to a cycle in the corresponding graph $H_{opt}$ and, as such, can be defined by a set of matrix elements satisfying the four conditions in Section \ref{sub:max}. A cycle containing a contradiction corresponds to a set satisfying conditions 1-3, but not 4.
Accordingly, in what follows we will consider cycles within the labeled graph.
However, we do not need to consider all cycles in $(G,K)$. It follows from the results of \cite{RS} that a complete bipartite graph in which no cycle of length $4$ contains a contradiction, cannot in fact contain any contradiction. Thus we only need to consider cycles of length $4$, which correspond to minors of size 2 in a matrix.
\subsection{Labeled graph representation versus the graphs $H$ and $H_{opt}$}
\label{sub:label-versus-H}
Notice that the graphs $H$ and $H_{opt}$ introduced in Section \ref{sec:girth-method} can be alternatively defined as subgraphs of the graph $G$ in $(G,K)$. The vertex set of these graphs is the same as that of $G$. The edge set of $H$ is the set of edges of $G$ labeled with $\mathrm{id}$. The graph $H_{opt}$ is a subgraph whose edge set is a maximal set of edges containing no contradiction. It follows that every cycle in $H_{opt}$ corresponds to a good cycle in the labeled graph $(G,K)$, and vice versa.
Some properties of a game can be inferred from $H$ or $H_{opt}$ alone.
However, the labeled graph $(G,K)$ contains more information about the game and
{\it prima facie} it
can be used to show things that are not evident from $H_{opt}$.
As an illustration of the relationship between the two approaches, we will state a fact which is a counterpart of
Corollary \ref{cor:good-cycle} and prove it within the labeled graph framework.
\begin{lem}
\label{lcycles}
A complete bipartite labeled graph $(K_{kl}, K:E\mapsto L_d')$ has the maximum number of contradictions iff every cycle in the graph contains a contradiction.
\end{lem}
\begin{proof}
It is clear that a graph with no cycles cannot contain a contradiction. Let $C$ be a cycle in $K_{kl}$ with no contradiction. We define the subgraph $G$ of $K_{kl}$, where $V(G)=V(K_{kl})$ and $E(G)$ is obtained by taking all edges of $C$ and adding as many other edges from $E(K_{kl})$ as possible without creating another cycle in $G.$ Clearly, the graph $G$ has no contradiction and $\left|E(G)\right|=k+l$, as contradictions can only occur within cycles. It follows that $K_{kl}$ has no more than $kl-(k+l) = (k-1)(l-1)-1$ contradictions. We have already shown in Section \ref{sec:girth-method} that the maximum contradiction number is $(k-1)(l-1)$. Thus the maximum number of contradictions can only be achieved if there is a contradiction in every cycle.
Now we show that $\beta_C<(k-1)(l-1)$ implies the existence of a cycle with no contradiction. If $\beta_C(K_{kl},K)$ is less than the maximum, then there exists a labeling $K'$ of $K_{kl}$ such that $(K_{kl},K')$ is equivalent to $(K_{kl},K)$ and $K'(e)=\mathrm{id}$ for at least $k+l$ edges $e\in E(K_{kl})$. But a graph with $k+l$ vertices and $k+l$ edges must contain a cycle. Thus there is a cycle with no contradiction in $(K_{kl},K')$, which implies the existence of a cycle with no contradiction in $(K_{kl},K)$. Therefore, if $(K_{kl},K)$ has no cycle without contradiction, then $\beta_C(K_{kl},K)=(k-1)(l-1)$.
\end{proof}
Thus every $k\times l$ matrix with $(k-1)(l-1)$ contradictions corresponds to a complete bipartite graph $K_{kl}$ in which every cycle contains a contradiction, and vice versa. The graph $H_{opt}$ corresponding to such a matrix is a tree (cf. Fact \ref{fact:max-contradiction-tree}).
The labeled graph framework can be applied to a wider variety of quantum systems than the matrix framework described in this paper. For example this type of matrix corresponds only to complete bipartite graphs. However, the matrix approach can be modified to describe a wider variety of games. A graph labeled with a different set of permutations than $L_d'$ can also be described in terms of a matrix with elements from a group other than the complex roots of $1$. The matrix approach can also be extended to non-bipartite graphs, as every graph can be represented as an adjacency matrix. A formalism based on adjacency matrices would require us to modify our approach somewhat and, in the specific case of complete bipartite graphs, it would be unnecessarily complicated, but it can be a useful tool for calculations based on the graph framework.
\subsection{Detailed analysis of the case $n=3$}
\label{3x3}
We will now combine matrix and graph methods to study the contradiction number of a game defined by a $3\times 3$ matrix
\begin{align}
\label{eq:general3x3}
M=\left(\begin{array}{ccc}
1 & 1 & 1\\
1 & w & x\\
1 & y & z
\end{array}\right).
\end{align}
The main results of this section are contained in Tables \ref{tab:3x3} and \ref{tab:3x3-ones}, where we provide the contradiction number of $M$ for arbitrary values of the above four parameters $w,x,y,z$.
In Table
\ref{tab:3x3} we present the cases where
none of the numbers $x,y,z,w$ are equal to $1$, while in Table \ref{tab:3x3-ones} we treat the cases where some of them are $1$s.
We will first prove the following proposition, which determines the contradiction numbers of $3\times 3$ diagonal matrices, the size that was not addressed in Proposition \ref{diag}.
\begin{proposition}
\label{prop:diag3by3}
For a $3\times 3$ matrix
\begin{align}
\label{eq:diagonal3x3}
M=\left(\begin{array}{ccc}
x_0 & 1 & 1\\
1 & x_1 & 1\\
1 & 1 & x_2
\end{array}\right).
\end{align}
If $x_i\neq 1$ for $i=0,1,2$, then we have \\
{\rm (i)} $\beta_C(M)= 2$ if $x_1=x_2=x_0^{-1}$ or $x_0=x_2=x_1^{-1}$
or
$x_0=x_1=x_2^{-1}$\\
{\rm (ii)} $\beta_C(M)= 3$ otherwise.\\
If some of the diagonal entries are equal to $1$, $\beta_C(M)$ equals the number of elements different from $1$.
\end{proposition}
Note that since every $3\times 3$ matrix which has at most one element different from $1$ in each row and each column is equivalent to a matrix of form \eqref{eq:diagonal3x3}, Proposition \ref{prop:diag3by3} allows to determine contradiction numbers of all such matrices.
\begin{proof}
Consider a $3\times 3$ matrix $M$ of the form \eqref{eq:diagonal3x3}
and let $M'$ be equivalent to $M$. We first observe that all elements of $M'$ that are different from $1$ can not be located in the same row (or in the same column). Indeed, if that was the case, then $M'$ would contain a $2\times 3$ (or $3\times 2$) submatrix of rank $1$, while all $2\times 3$ or $3\times 2$ submatrices of $M$ are clearly of rank $2$, which would yield a contradiction in view of Remark \ref{minor2minor}.
Consequently, every $M'$ equivalent to $M$ must contain at least two entries different from $1$ (hence $\beta_C(M) \geq 2$) and if it contains exactly two such entries, then those two entries must be in different columns and different rows.
With the above observation in mind, let us investigate the constraints imposed on $M$ by the condition $\beta_C(M) = 2$. As an illustration, suppose that a matrix of the form
\begin{equation}\label{2elements}
M'= (m'_{ij})_{i,j=0}^2 = \left(\begin{array}{ccc}
1 & 1 & 1\\
1 & 1 & y_2\\
1 & y_1 & 1
\end{array}\right)
\end{equation}
is equivalent to $M$. We note that $M'$ has (at least) two $2\times 2$ minors that consist solely of $1$s, namely those given by $i,j \in \{0,1\}$ and by $i,j\in \{0,2\}$. By Remark \ref{minor2minor}, the same must be true for $M$. Now, every $2\times 2$ submatrix of $M$ includes at least one diagonal element $x_i$ and if it includes exactly one, then the corresponding minor can not be zero. So only submatrices containing two diagonal elements may lead to a zero minor. If those two minors are given by $i,j \in \{0,1\}$ and by $i,j\in \{0,2\}$, the requirements for them to be zero are respectively
\begin{equation} \label{conditions}
x_0x_1-1=0 \ \hbox{ and } \ x_0x_2-1=0,
\end{equation}
which is equivalent to $x_1=x_2=x_0^{-1}$. By symmetry, the other two sets of constraints from (i) are related to the remaining choices of two pairs of indices from among $\{0,1\}$, $\{0,2\}$ and $\{1,2\}$.
Conversely, if the
constraints \eqref{conditions} are satisfied, then multiplying
the first row of $M$ by $x_0^{-1}$ and then the second and the third column by $x_0$ we obtain
\begin{equation} \label{suffice}
M' = \left(\begin{array}{ccc}
1 & 1 & 1\\
1 & x_0x_1 & x_0\\
1 & x_0 & x_0x_2
\end{array}\right)=
\left(\begin{array}{ccc}
1 & 1 & 1\\
1 & 1 & x_0\\
1 & x_0 & 1
\end{array}\right),
\end{equation}
which shows that $\beta_C(M')=\beta_C(M) \leq 2$, and hence both are equal to $2$.
The same argument shows that any matrix of form \eqref{2elements} with $y_1, y_2\neq 1$, or equivalent to it, satisfies $\beta_C(M')=2$. Together with Proposition \ref{prop:one_row} (or Remark \ref{minor2minor}), this also justifies the last assertion of the Proposition.
\end{proof}
Now let us return to the contradiction number of the matrix \eqref{eq:general3x3}.
To begin with, note that the number of contradictions in this matrix is at most the number of elements $w,x,y,z$ different from 1. For the time being let us assume that $1\notin\{w,x,y,z\}$.
It is easy to see that $\beta_C = 1$ if $w=x=y=z,$ as multiplying the last two rows by $x^{-1}$ and the first column by $x$ transforms it into
\begin{align}
M'=\left(\begin{array}{ccc}
x & 1 & 1\\
1 & 1 & 1\\
1 & 1 & 1
\end{array}\right).
\end{align}
If any three of the elements are equal, say $w=x=y,$
and the fourth element is different,
multiplying the last two rows by $x^{-1}$ and the first column by $x$ transforms it into
\begin{align}
M'=\left(\begin{array}{ccc}
x & 1 & 1\\
1 & 1 & 1\\
1 & 1 & zx^{-1}
\end{array}\right).
\end{align}
The corresponding labeled graph has multiple bad 4-cycles and no single edge belonging to all of them. Thus, $\beta_C=2.$ (This also follows from Proposition \ref{prop:diag3by3}.)
Consider next the case of exactly one equality among $w,x,y,z$. If the two equal elements are in the same row or column, we can easily reduce the number of non-1's to three by multiplying that row or column by the appropriate factor. For example, if $w=x$, we multiply the second row by $x^{-1}$. The resulting matrix
\begin{align}
M'=\left(\begin{array}{ccc}
1 & 1 & 1\\
x^{-1} & 1 & 1\\
1 & y & z
\end{array}\right)
\end{align}
has precisely one zero minor (this uses $x\neq y$, $x\neq z$) and so, by the argument from the proof of Proposition \ref{prop:diag3by3}, can not be equivalent to a matrix with two (or less) non-$1$ entries. It follows that, in that case, $\beta_C(M)=3$.
If the two equal elements are not in the same row or column, for example $w=z$, we can multiply the last two rows by $z^{-1}$ and the first column by $z$. Then we obtain the matrix
\begin{align}
M'=\left(\begin{array}{ccc}
z & 1 & 1\\
1 & 1 & xz^{-1}\\
1 & yz^{-1} & 1
\end{array}\right).
\end{align}
To show that there is no equivalent matrix with fewer non-1 elements, we use Proposition \ref{prop:diag3by3} and it follows that the original matrix $M$ had the contradiction number equal to $3$.
The above argument also applies if {\sl two pairs} of elements on the diagonals are equal, i.e., if $w=z$ and $x=y$, but $x\neq z$. This is because the condition (i) from Proposition \ref{prop:diag3by3} would imply $xz^{-1}= z^{-1}$ and so it can not be satisfied; accordingly, the contradiction number is likewise equal to $3$.
If $w=x$ and $y=z$, the matrix is clearly equivalent to
\begin{equation}
M'=\left(\begin{array}{ccc}
1 & 1 & 1\\
x^{-1} & 1 & 1\\
z^{-1} & 1 & 1
\end{array}\right).
\end{equation}
If $x\neq z$ (and hence $x^{-1}\neq z^{-1}$), it follows from Proposition \ref{prop:one_row} that $\beta_C=2.$
Similarly, if $w=y$ and $x=z$, but $x\neq y$, then $\beta_C = 2$.
It follows that $\beta_C = 4$ is only achievable if no two elements in the set $\{w,x,y,z\}$ are equal. However, it is still possible that all these elements are different and $\beta_C=3.$ We shall analyze that situation using the labeled graph formalism.
\begin{center}
\begin{figure}
\caption{Each edge color represents a different permutation from the set $L_d'=\{\tilde{\sigma_i}\in S_d: \tilde{\sigma_i}(a)=i+a \mod d\}$.}
\label{fig:K33}
\end{figure}
\end{center}
The labeled graph described by the matrix $M$ (see Fig. \ref{fig:K33}) contains fifteen overlapping cycles (see Fig. \ref{fig:C4} and \ref{fig:C6}).
\begin{center}
\begin{figure}
\caption{Cycles of length 4 within the graph.}
\label{fig:C4}
\end{figure}
\end{center}
If no two elements in the set $\{w,x,y,z\}$ are equal, then all cycles $a)-h)$ and $n)-o)$ contain contradictions. To get rid of all those cycles we must delete at least three edges.
\begin{center}
\begin{figure}
\caption{Cycles of length 6 within the graph.}
\label{fig:C6}
\end{figure}
\end{center}
Deleting any three edges such that the remaining graph does not contain any of the cycles $a)-h)$ and $n)-o)$ leaves us with exactly one of the cycles $i) - m)$. Thus, clearly, $\beta_C = 3$ if at least one of these cycles contains no contradiction, and $\beta_C=4$ otherwise.
Recall that the elements of the matrix correspond directly to permutations from the set $L_d'=\{\tilde{\sigma_i}\in S_d: \tilde{\sigma_i}(a)=i+a \mod d\}$ assigned to the edges of the graph. The number $\omega^i$ represents an edge labeled with the permutation $\tilde{\sigma_i}$. In particular, a $1$ represents the identity, or $\tilde{\sigma_0}.$ Thus, the conditions for specific cycles containing no contradictions can be written in terms of the values in the matrix; for example, $\sigma_w^{-1}\sigma_y\sigma_z^{-1}\sigma_x=\tilde{\sigma_0}$, the condition for the cycle i) being good, can also be written as $w^{-1}yz^{-1}x=1.$
\begin{table}
\begin{center}
\begin{tabular}{|c|c|}
\hline
\textbf{Values of $w,x,y,z$} & \textbf{Number of contradictions}
\\\hline\hline $w=x=y=z$ & $\beta_C = 1$
\\\hline any three values equal & $\beta_C = 2$
\\\hline $w=x$ and $y=z$ & $\beta_C = 2$
\\\hline $x=z$ and $w=y$ & $\beta_C = 2$
\\\hline $x=y$ and $w=z$ & $\beta_C = 3$
\\\hline any two values equal & $\beta_C = 3$
\\\hline all values different and&
\\$w^{-1}yz^{-1}x=1$&
\\or&
\\$yz^{-1}x=1$&
\\or&
\\$w^{-1}z^{-1}x=1$& $\beta_C = 3$
\\or&
\\$w^{-1}yx=1$&
\\or&
\\$w^{-1}yz^{-1}=1$&
\\\hline all values different, otherwise & $\beta_C = 4$
\\\hline
\end{tabular}
\end{center}
\caption{\label{tab:3x3} Characterization of $3\times3$ games \eqref{eq:general3x3} with respect to the contradiction number if $1 \not\in \{w,x,y,z\}$.}
\end{table}
\begin{table}
\begin{center}
\begin{tabular}{|c|c|}
\hline
\textbf{Values of $w,x,y,z$} & \textbf{Number of contradictions}
\\\hline\hline
one non-$1$ & $\beta_C = 1$
\\\hline two equal non-$1$'s &
\\ in the same & $\beta_C = 1$
\\ row or column &
\\ \hline
two non-$1$'s, otherwise &
$\beta_C=2$
\\ \hline
three non-$1$'s and &
\\ $x=y$ &
\\ or $x=z$ & $\beta_C=2$
\\ or $xy=z$ &
\\ \hline
three non-$1$'s, otherwise& $\beta_C=3$
\\\hline
\end{tabular}
\end{center}
\caption{\label{tab:3x3-ones} Complete characterization of $3\times3$ games \eqref{eq:general3x3} with respect to the contradiction number if some of $x,y,z,w$
are equal to $1$.}
\end{table}
The five conditions in the second to last row of Table \ref{tab:3x3} can also be expressed by the equalities:
\begin{enumerate}[i)]
\item $w^{-1}yz^{-1}x=1$ $\Leftrightarrow$ $yw^{-1}=zx^{-1}$;
\item $yz^{-1}x=1$ $\Leftrightarrow$ $zx^{-1}=y$;
\item $w^{-1}z^{-1}x=1$ $\Leftrightarrow$ $xw^{-1}=z$;
\item $w^{-1}yx=1$ $\Leftrightarrow$ $wy^{-1}=x$;
\item $w^{-1}yz^{-1}=1$ $\Leftrightarrow$ $yz^{-1}=w$.
\end{enumerate}
Indeed, the matrix can be transformed as follows
\begin{align}
&\left(\begin{array}{ccc}
1 & 1 & 1\\
1 & w & x\\
1 & y & z
\end{array}\right) \rightarrow
\left(\begin{array}{ccc}
1 & w^{-1} & x^{-1}\\
1 & 1 & 1\\
1 & yw^{-1} & zx^{-1}
\end{array}\right) \rightarrow
\nonumber \\
&\rightarrow \left(\begin{array}{ccc}
1 & w^{-1} & x^{-1}\\
1 & 1 & 1\\
z^{-1}x & yw^{-1}z^{-1}x & 1
\end{array}\right)
\end{align}
or
\begin{align}
&\left(\begin{array}{ccc}
1 & 1 & 1\\
1 & w & x\\
1 & y & z
\end{array}\right) \rightarrow
\left(\begin{array}{ccc}
1 & 1 & x^{-1}\\
1 & w & 1\\
1 & y & zx^{-1}
\end{array}\right) \rightarrow
\nonumber \\
&\rightarrow \left(\begin{array}{ccc}
1 & 1 & x^{-1}\\
1 & w & 1\\
z^{-1}x & yz^{-1}x & 1
\end{array}\right)
\end{align}
\color{black}
Assuming all of the values $\{w,x,y,z\}$ are different, it is clear that the final matrices in the above transformations have three contradictions iff the values satisfy conditions i) and ii), respectively. Conditions iii) - v) can be analyzed via similar transformations.
Finally, let us comment on the case when some of the parameters $w,x,y,z$ in \eqref{eq:general3x3} are equal to $1$. The analysis of most of such instances is implicit in the argument above. For example, if there is only one entry that is different from $1$, then $\beta_C(M)=1$. If there are two such entries contained in a single row or column, then
$\beta_C$ equals $1$ or $2$ depending on whether these entries are equal or not (Proposition \ref{prop:one_row}). If the two entries different from $1$ do not belong to the same row nor column, we have an instance of a matrix of type \eqref{2elements}, which -- as we determined -- has the contradiction number equal to $2$. If exactly three entries are different from $1$, say
\begin{align}
\label{eq:3elements}
M=\left(\begin{array}{ccc}
1 & 1 & 1\\
1 & 1 & x\\
1 & y & z
\end{array}\right),
\end{align}
then -- as in the proof of Proposition \ref{prop:diag3by3} -- a necessary condition for $\beta_C(M)\leq 2$ is that at least one $2\times 2$ minor of $M$ is zero in addition to the obvious one ($i,j \in \{0,1\}$). By direct checking, we see that this happens only if $y=z$ or $x=z$, or if $xy=z$. It is readily verified that all these conditions are also sufficient for $\beta_C(M)= 2$.
{\it Acknowledgments}
We thank Ravishankar Ramanathan for discussions, and for pointing out the
notion of matrix rigidity.
The research of SJS was supported in part by the grant DMS-1600124 from the National Science Foundation (USA).
MR, AR, PG and MH are supported by National Science Centre, Poland,
grant OPUS 9. 2015/17/B/ST2/01945.
M.H. also acknowledges support from the Foundation for Polish Science through IRAP project co-financed by EU within the Smart Growth Operational Programme (contract no.2018/MAB/5).
\bibliographystyle{unsrtnat}
\section*{Appendix }
\begin{proof}[Proof of Proposition \ref{prop1}]
Let
\begin{align}
M=\left(\begin{array}{cccc}
m_{00} & m_{01} & ... & m_{0k}\\
m_{10} & m_{11} & ... & m_{1k}\\
\vdots & \vdots & & \vdots\\
m_{l0} & m_{l1} & ... & m_{lk}
\end{array}\right).
\end{align}
After multiplying each row by $m_{i0}^{-1},$ where $i$ is the number of the row, we obtain the matrix
\begin{align}
\left(\begin{array}{cccc}
1 & m_{01}m_{00}^{-1} & ... & m_{0k}m_{00}^{-1}\\
1 & m_{11}m_{10}^{-1} & ... & m_{1k}m_{10}^{-1}\\
\vdots & \vdots & & \vdots\\
1 & m_{l1}m_{l0}^{-1} & ... & m_{lk}m_{l0}^{-1}
\end{array}\right).
\end{align}
Next we multiply columns $1$ - $k$ by $m_{00}m_{0j}^{-1},$ where $j$ is the number of the column to obtain
\begin{align}
M'=\left(\begin{array}{cccc}
1 & 1 & ... & 1\\
1 & m_{11}m_{10}^{-1}m_{00}m_{01}^{-1} & ... & m_{1k}m_{10}^{-1}m_{00}m_{0k}^{-1}\\
\vdots & \vdots & & \vdots\\
1 & m_{l1}m_{l0}^{-1}m_{00}m_{01}^{-1} & ... & m_{lk}m_{l0}^{-1}m_{00}m_{0k}^{-1}
\end{array}\right).
\end{align}
\noindent $M'$ is a matrix in which all elements of the first row and the first column are equal to $1$ {and it is equivalent to $M$ by construction, as needed.}
\end{proof}
{Note that essentially the same argument allows to obtain $M'$ in which the locations of entries equal to $1$ correspond to any given tree
represented as a bipartite graph on $n_A\times n_B$ vertices. }
\begin{proof}[Proof of Lemma \ref{lll}]
Let $M$ be a matrix of the form \eqref{eq:ones}. If two non-one elements in the same row (or column) are equal, we can transform the matrix as follows
\begin{align}
&\left(\begin{array}{cccccc}
1 & 1 & 1 & 1 & ... & 1\\
1 & x & x & m_{13} & ... & m_{1l}\\
1 & m_{21} & & ... & & m_{2l}\\
\vdots & \vdots & & & & \vdots\\
1 & m_{k1} & & ... & & m_{kl}
\end{array}\right) \rightarrow \nonumber \\
&\rightarrow \left(\begin{array}{cccccc}
1 & 1 & 1 & 1 & ... & 1\\
m^{-1} & 1 & 1 & m_{13}m^{-1} & ... & m_{1l}m^{-1}\\
1 & m_{21} & & ... & & m_{2l}\\
\vdots & \vdots & & & & \vdots\\
1 & m_{k1} & & ... & & m_{kl}
\end{array}\right).
\end{align}
If the two equal elements are neither in the same row, nor in the same column, we have
\begin{align}
&\left(\begin{array}{cccccc}
1 & 1 & 1 & 1 & ... & 1\\
1 & m_{11} & m & m_{13} & ... & m_{1l}\\
1 & m & m_{22} & ... & & m_{2l}\\
\vdots & \vdots & & & & \vdots\\
1 & m_{k1} & m_{k2} & ... & & m_{kl}
\end{array}\right) \rightarrow
\nonumber \\
&\rightarrow \left(\begin{array}{cccccc}
1 & m^{-1} & m^{-1} & m_{13}^{-1} & ... & m_{1l}^{-1}\\
1 & m_{11}m^{-1} & 1 & 1 & ... & 1 \\
1 & 1 & m_{22}m^{-1}& ... & & m_{2l}m_{1l}^{-1}\\
\vdots & \vdots & & & & \vdots\\
1 & m_{k1}m^{-1} & m_{k2}m^{-1}& ... & & m_{kl}m_{1l}^{-1}
\end{array}\right)\rightarrow
\nonumber \\
&\rightarrow \left(\begin{array}{cccccc}
x & 1 & 1 & mm_{13}^{-1} & ... & mm_{1l}^{-1}\\
1 & m_{11}m^{-1} & 1 & 1 & ... & 1 \\
1 & 1 & m_{22}m^{-1}& ... & & m_{2l}m_{1l}^{-1}\\
\vdots & \vdots & & & & \vdots\\
1 & m_{k1}m^{-1} & m_{k2}m^{-1}& ... & & m_{kl}m_{1l}^{-1}
\end{array}\right).
\end{align}
In both cases the contradiction number is shown to be less than the maximum.
\end{proof}
However, simply making all elements different from 1 distinct is not enough to ensure the maximum number of contradictions.
\begin{proof}[Proof of Proposition \ref{prop:chromatic}]
The maximum number of contradictions in an $n\times n$ matrix is only achieved if every cycle in the corresponding graph $K_{n,n}$ contains a contradiction. Thus for every pair of permutations $\pi_1,\pi_2\in S_n$ which defines exactly one cycle the sums
$s_i=\sum\limits_{j=0}^{n-1} k_{j,\pi_i(j)}$
must be different. Therefore, assigning these sums to the vertices of $G_n$ produces a proper coloring. Since $s_i$ can have no more than $d$ different values, this is impossible for $d<\chi(G_n).$
Notice that $N_{G_n}(\mathrm{id})$ is the set of all cyclic permutations $\pi=(x_1\ldots x_t)$. It is easy to see that if $\pi_1\pi_2 = (x_1\ldots x_t)$ is a cycle then $\pi_1\sigma\pi_2\sigma = (y_1\ldots y_t)$ is also a cycle for any permutation $\sigma\in S_n$. It follows that $\pi_1,\pi_2$ are adjacent in $G_n$ if and only if $\pi_1\pi_2^{-1}=(x_1\ldots x_t)$ and that every function $f:S_n\mapsto S_n$, where $f(\pi)=\pi\sigma$, is an automorphism of $G_n.$ Thus, the largest set of cyclic permutations $C\subset S_n$ such that $\pi_i\pi_j^{-1}$ is a cycle for any $\pi_i, \pi_j\in C$, plus $\mathrm{id}$, is the largest clique in $G_n.$ It is well known that for any graph $G$ the chromatic number is at least the size of the largest clique in $G$.
The independence number of $G_n$ is the size of the largest set $J$ of cyclic permutations such that $\pi=\pi_i\pi_j^{-1}$ is not a cyclic permutation for any $\pi_i,\pi_j\in J.$ Since $\chi(G)\geq \frac{\left|V(G)\right|}{\alpha(G)}$ for any $G$, we have $\chi(G_n)\geq\frac{n!}{\left|J\right|}.$
\end{proof}
\end{document}
\begin{document}
\begin{abstract} It was shown by A. Beauville that if the canonical map $\varphi_{|K_M|}$ of a complex smooth projective surface $M$ is generically finite, then $\deg(\varphi_{|K_M|})\leq 36$. The first example of a surface with canonical degree 36 was found by the second author. In this article, we show that for any surface which is a degree four Galois \'etale cover of a fake projective plane $X$ with the largest possible automorphism group ${\rm Aut}(X)=C_7:C_3$ (the unique non-abelian group of order 21), the base locus of the canonical map is finite, and we verify that 35 of these surfaces have maximal canonical degree 36. We also classify all smooth degree four Galois \'etale covers of fake projective planes, which give possible candidates for surfaces of canonical degree $36$. Finally, we also confirm in this paper the optimal upper bound of the canonical degree of smooth threefolds of general type with sufficiently large geometric genus, related to earlier work of C. Hacon and J.-X. Cai.\end{abstract}
\maketitle
\section{Introduction}
Let $M$ be a smooth complex projective minimal surface of general type with $p_g(M)\neq0$. Assume that the canonical map, $$\varphi=\varphi_{|K_M|}:M\dashrightarrow W:=\overline{\varphi(M)}\subseteq\mathbb{P}^{p_g(M)-1}$$
is generically finite onto its image. We are interested in the \emph{canonical degree} of $M$, the degree of $\varphi$. If $\varphi$ is not generically finite, we simply say that $M$ has canonical degree zero. The following proposition was proved in \cite{B}, cf. \cite{Y1}. We include the proof here for completeness.
\begin{proposition}\label{Bea} Let $M$ be a minimal surface of general type whose canonical map $\varphi=\varphi_{|K_M|}$ is generically finite. Then $\deg \varphi\leq 36$. Moreover, $\deg\varphi=36$ if and only if $M$ is a smooth ball quotient $\mathbb{B}_{\mathbb{C}}^2/\Sigma$ with $p_g(M)=3$, $q(M)=0$, and $|K_M|$ is base point free.
\end{proposition}
\begin{proof} Let $P$ be the mobile part of $|K_M|$. Let $S\rightarrow M$ be a resolution of $P$ and let $P_S$ be the induced base point free linear system defining $S\rightarrow W=\overline{\varphi(M)}$. Then
\begin{align*} \deg\varphi\cdot(p_g-2)\leq\deg\varphi\cdot\deg W
= P_S^2\leq P^2\leq K_M^2
\leq9\chi(\mathcal{O}_M)\leq9(1+p_g).
\end{align*}
The first inequality is the degree bound for a non-degenerate surface in ${\rm m}athbb{P}^n$ given in \cite{B}, while
the fourth inequality is the Bogomolov-Miyaoka-Yau inequality. Hence as $p_g\geq3$, we have
\begin{align*} \deg\varphi\leq 9(\frac{1+p_g}{p_g-2})\leq36.
\end{align*}
Moreover, $\deg\varphi=36$ only when $p_g(M)=3$, $q(M)=0$, and $P_S^2=P^2=K_M^2$. This is only possible when $|K_M|$ is base point free. In such a case,
$K_M^2=36=9\chi(\mathcal{O}_M)$ and hence $M$ is a smooth ball quotient $\mathbb{B}_{\mathbb{C}}^2/\Sigma$ by results of Aubin and Yau, cf. \cite{B} or \cite{BHPV}.
\end{proof}
\noindent{\bf Notation.} Throughout this paper, we do not distinguish line bundles with divisors. The linear equivalence and numerical equivalence of divisors are written respectively as $D_1\sim D_2$ and $D_1\equiv D_2$. The cyclic group of order $n$ is denoted by $C_n$. The group $C_7:C_3$ is the unique non-abelian group of order 21. The projective space of dimension $n$ over $\mathbb{C}$ is denoted by $\mathbb{P}^n$. A finite field of order $n$ is denoted by $F_n$.
\vskip 0.2 cm
From Proposition \ref{Bea}, it is an interesting problem to know the geometric realization of possible canonical degrees and many surfaces with canonical degree at most $16$ have been constructed, see \cite{P} or \cite{DG} for more references. However, the first example of a surface with maximal canonical degree 36 was constructed only recently by \cite{Y1} as a suitably chosen $C_2\times C_2$-Galois cover of a special fake projective plane $X$. The fake projective plane $X$ in \cite{Y1} has ${\rm Aut}(X)=C_7:C_3$, and by \cite{LY} it satisfies $h^0(X,2L_X)=0$ for every ample generator $L_X$ of ${\rm NS}(X)$. The choice of the lattice for the ball quotient $M$ is explicitly described in \cite{Y1} via the classifying data of \cite{PY} and \cite{CS}.
Here are the main goals of this paper. The first goal is to construct more examples of surfaces with maximal canonical degree. This is given as Theorem \ref{main} below. Then we examine the corresponding question in complex dimension 3, given as Corollary \ref{3fold} below. A second goal is to identify all potential examples of surfaces of canonical degree 36 constructed as a degree four Galois \'etale cover of a fake projective plane. We prove that for these Galois covers the canonical maps have at worst discrete base locus whenever the underlying fake projective plane has the largest possible automorphism group $C_7:C_3$. This is given as Theorem \ref{iso} and Proposition \ref{all}. For the presentation of this paper, we start with Theorem \ref{iso} hoping that it would give the reader a more comprehensible overall picture.
We remark that our proof of Theorem \ref{main} is essentially independent of Theorem \ref{iso} and Proposition \ref{all}. A reader who is interested only in new surfaces of canonical degree $36$ may briefly go over statements in earlier sections and proceed directly to Section \ref{new} of the paper.
Recall that a fake projective plane is a ball quotient $X=\mathbb{B}_{\mathbb{C}}^2/\Pi$ for some lattice $\Pi\subseteq{\rm PU}(2,1)$, where $\Pi$ is constructed as a subgroup of a maximal arithmetic lattice $\overline \Gamma$. An unramified cover $M$ of $X$ is given by
$\mathbb{B}^2_{\mathbb{C}}/\Sigma$ for a normal subgroup $\Sigma\lhd\Pi$ of finite index. For the sequence of Galois covers
$$M:=\mathbb{B}_{\mathbb{C}}^2/\Sigma\stackrel{p}\rightarrow X=\mathbb{B}_{\mathbb{C}}^2/\Pi\stackrel{q}\rightarrow \mathbb{B}_{\mathbb{C}}^2/\overline \Gamma$$
corresponding to the normal subgroups $\Sigma\lhd\Pi\lhd\overline \Gamma$, one has the covering group ${\rm Gal}(M/X)=\Pi/\Sigma$ and ${\rm Aut}(X)=\overline \Gamma/\Pi$. We focus on the case when $|{\rm Gal}(M/X)|=4$ and ${\rm Aut}(X)=C_7:C_3$. Our first theorem identifies potential examples of surfaces of canonical degree 36.
\begin{theorem}\label{iso} Let $M\rightarrow X$ be a degree four Galois \'etale cover over a fake projective plane $X$ with ${\rm Aut}(X)=C_7:C_3$. Then $q(M)=0$ and the base locus of the linear system $|K_M|$ is discrete.
\end{theorem}
A degree four Galois \'etale cover $M\rightarrow X$ over a fake projective plane $X$ is determined by a quotient of $H_1(X,\mathbb{Z})$ of order four, to be explained in detail in Lemma \ref{cover} of Section \ref{pre}. The degree of this cover is dictated by the possible existence of a surface of maximal canonical degree, i.e., $K_M^2/K_X^2=4$. There are many degree four covers of fake projective planes. For future reference, we classify all such surfaces. In the table below, only lattices of fake projective planes giving rise to Galois \'etale covers of degree four are listed, which is the case if there is a normal subgroup of index four in the lattice $\Pi$ corresponding to a given fake projective plane $X=\mathbb{B}^2_{\mathbb{C}}/\Pi$. This list of the fake projective planes follows the conventions in \cite{PY} and \cite{CS}.
In the following table, we have
\begin{enumerate}
\item column 1: $k$ is a totally real number field, $\ell$ is a totally imaginary extension of $k$, and $\mathcal T$ represents a finite number of places relevant to the classification. These are notations used to classify fake projective planes defined in \cite{PY};
\item column 2: the corresponding naming of classes of maximal arithmetic lattices containing fake projective planes in \cite{CS} corresponding to $\overline{\Gamma}$ in the notation of \cite{PY}, where $a$ and $p$ are data from the first column;
\item column 3: the naming of the individual fake projective planes in each class used in \cite{CS};
\item column 4: ${\rm Aut}(X)$ is the automorphism group of a fake projective plane $X$;
\item column 5: the first homology class of a fake projective plane $X$;
\item column 6: $N_0$ is the number of degree $4$ coverings of $X$, which is the number of subgroups of index four of the lattice $\Pi$;
\item column 7: $N_1$ denotes the number of normal coverings among the degree $4$ coverings above.
\end{enumerate}
All the examples in the last column satisfy $H_1(M,\mathbb{Q})=0$, which implies $q(M)=0$ by Poincar\'e Duality.
\vskip 0.2 cm
\begin{adjustbox}{center, rotate=0, nofloat, caption=[Table 1]}
$\begin{array}{|c|c|c|c|c|c|c|}
\hline
(k,\ell,\mathcal{T})&\mbox{class}&X&{\rm Aut}(X)&H_1(X,\mathbb{Z})&N_0&N_1\\ \hline\hline
(\mathbb{Q},\mathbb{Q}(\sqrt{-1}),\{5\})&(a=1,p=5,\emptyset)&(a=1,p=5,\emptyset, D_3)&C_3&C_2\times C_4\times C_{31}&4&3\\ \cline{2-7}
&(a=1,p=5,\{2\})&(a=1,p=5, \{2\},D_3)&C_3&C_4\times C_{31}&4&1\\ \cline{1-7}
(\mathbb{Q},\mathbb{Q}(\sqrt{-1}),\{2,5\})&(a=1,p=5,\{2I\})&(a=1,p=5,\{2I\})&\{1\}&C_2\times C_3\times C_4^2&47&19\\ \hline
(\mathbb{Q},\mathbb{Q}(\sqrt{-2}),\{3\})&(a=2,p=3,\emptyset)&(a=2,p=3,\emptyset, D_3)&C_3&C_2^2\times C_{13}&4&1\\ \cline{2-7}
&(a=2,p=3,\{2\})&(a=2,p=3, \{2\},D_3)&C_3&C_2^2\times C_{13}&4&1\\ \cline{1-7}
(\mathbb{Q},\mathbb{Q}(\sqrt{-2}),\{2,3\})&(a=2,p=3,\{2I\})&(a=2,p=3,\{2I\})&\{1\}&C_2^4\times C_3&83&35\\ \hline
(\mathbb{Q},\mathbb{Q}(\sqrt{-7}),\{2\})&(a=7,p=2,\emptyset) &(a=7,p=2,\emptyset, D_3 2_7)&C_7:C_3&C_2^4&91&35\\ \cline{3-7}
&&(a=7,p=2,\emptyset,7_{21})&\{1\}&C_2^2\times C_3\times C_7&3&1\\ \cline{2-7}
&(a=7,p=2,\{7\})&(a=7,p=2,\{7\},D_3 2_7)&C_7:C_3&C_2^3&7&7\\ \cline{3-7}
&&(a=7,p=2,\{7\},D_3 7'_7)&C_3&C_2^2\times C_7&2&1\\ \cline{3-7}
&&(a=7,p=2,\{7\},7_{21})&\{1\}&C_2^3\times C_3&19&7\\ \hline
(\mathbb{Q},\mathbb{Q}(\sqrt{-7}),\{2,3\})&(a=7,p=2,\{3\})&(a=7,p=2,\{3\},D_3)&C_3&C_2\times C_4\times C_7&4&3\\ \cline{3-7}
&&(a=7,p=2,\{3\},3_3)&\{1\}&C_2^2\times C_3\times C_4&19&11\\ \cline{2-7}
&(a=7,p=2,\{3,7\})&(a=7,p=2,\{3,7\},D_3)&C_3&C_4\times C_7&2&1\\ \cline{3-7}
&&(a=7,p=2,\{3,7\},3_3)&\{1\}&C_2\times C_3\times C_4&7&3\\ \hline
(\mathbb{Q},\mathbb{Q}(\sqrt{-7}),\{2,5\})&(a=7,p=2,\{5\})&(a=7,p=2,\{5\})&\{1\}&C_2^2\times C_9&3&1\\ \cline{1-7}
(\mathbb{Q},\mathbb{Q}(\sqrt{-15}),\{2\})&(a=15,p=2,\emptyset)&(a=15,p=2,\emptyset,D_3)&C_3&C_2^2\times C_7&2&1\\ \cline{3-7}
&&(a=15,p=2,\emptyset,3_3)&\{1\}&C_2^3\times C_9&11&7\\ \cline{2-7}
&(a=15,p=2,\{3\})&(a=15,p=2,\{3\},3_3)&C_3&C_2^3\times C_3&19&7\\ \cline{2-7}
&(a=15,p=2,\{5\})&(a=15,p=2,\{5\},3_3)&\{1\}&C_2^2\times C_9&3&1\\ \cline{2-7}
&(a=15,p=2,\{3,5\})&(a=15,p=2,\{3,5\},3_3)&C_3&C_2^2\times C_3&1&1\\ \hline
(\mathcal{C}_{18},\{v_3\})&(\mathcal{C}_{18},p=3,\emptyset)&(\mathcal{C}_{18},p=3,\emptyset,d_3 D_3)&C_3\times C_3&C_2^2\times C_{13}&1&1\\ \hline
(\mathcal{C}_{20},\{v_2\})&(\mathcal{C}_{20},\{v_2\},\emptyset)&(\mathcal{C}_{20},\{v_2\},\emptyset,D_3 2_7)&C_7:C_3&C_2^6&651&651\\ \cline{2-7}
&(\mathcal{C}_{20},\{v_2\},\{3+\})&(\mathcal{C}_{20},\{v_2\},\{3+\},D_3)&C_3&C_4\times C_7&2&1\\ \cline{3-7}
&&(\mathcal{C}_{20},\{v_2\},\{3+\},\{3+\}_3)&\{1\}&C_2\times C_3\times C_4&7&3\\ \cline{2-7}
&(\mathcal{C}_{20},\{v_2\},\{3-\})&(\mathcal{C}_{20},\{v_2\},\{3-\},D_3)&C_3&C_4\times C_7&2&1\\ \cline{3-7}
&&(\mathcal{C}_{20},\{v_2\},\{3-\},\{3-\}_3)&\{1\}&C_2\times C_3\times C_4&7&3 \\ \hline
\end{array}$
\end{adjustbox}
\vskip 0.2cm
\begin{center}
{\sc Table 1}
\end{center}
\begin{proposition}\label{all} There are altogether $835$ lattices which give rise to $1670$ non-biholomorphic smooth minimal surfaces as degree four Galois \'etale covers of fake projective planes with $q(M)=0$.
\end{proposition}
From Table 1, there are 35 degree four Galois \'etale covers of the fake projective plane $(a=7, p=2,\emptyset, D_32_7)$, which all have Galois group $C_2\times C_2$. Generalizing the result of \cite{Y1}, we show that these \'etale covers all have canonical degree 36.
\begin{theorem}\label{main} The $35$ degree four Galois \'etale covers of the fake projective plane $(a=7, p=2,\emptyset, D_32_7)$, all with Galois group $C_2\times C_2$, are minimal surfaces of general type with canonical degree $36$.
\end{theorem}
Our result has the implication on the optimal canonical degree for smooth threefolds of general type with large geometric genus. We refer the readers to Section \ref{sec3fold} for more details.
\begin{corollary}\label{3fold} There exist many examples of smooth minimal threefolds of general type $Y$ with the degree of the canonical map $\deg(\Phi_{|K_Y|})=72$. In fact, there exist such threefolds with $p_g(Y)=3g$ and $K_Y^3=72(g-1)$ for each $g\geqslant 2$.
\end{corollary}
The surface studied in \cite{Y1} has Picard number one, which is a deep result in automorphic forms from \cite{Ro}, \cite{BR}, and is used in \cite{Y1} to simplify the geometric arguments. For a general degree four \'etale cover of a fake projective plane, it is not clear whether the Picard number equals one. Comparing to the result in \cite{Y1}, one technical improvement in the present article is to show that any surface as in Theorem \ref{iso} possesses a generically finite canonical map. Continuing from this, mobility of the canonical system is proved but in a different argument from \cite{Y1}. In fact, we can show that any degree four \'etale cover of a fake projective plane with ${\rm Aut}(X)=C_7:C_3$ has generically finite canonical map and at worst discrete base locus. To get rid of the finite number of base points, we need more detailed information about the canonical sections as given in \cite{Y1}, see in particular the corrigendum there. By analyzing carefully the method used in \cite{Y1}, we come up with new examples of surfaces with maximal canonical degree by considering new degree four Galois \'etale covers of the same fake projective plane $X$ used in \cite{Y1}. These new \'etale covers correspond to various $C_2\times C_2$ quotient groups of $H_1(X,\mathbb{Z})=C_2^4$. In such cases, we are able to write down relevant global sections explicitly with the help of Magma and finish the proof of base point freeness. This last step is where we have to restrict further the type of lattice $\Sigma$ associated to $M$.
To find which \'etale cover works for our scheme, as a first step we list all normal subgroups of index four in a lattice associated to a fake projective plane. All fake projective planes supporting such a subgroup are listed in the third column of Table 1 above. Now for each of the listed surfaces, we exhaust all possible normal subgroups of index four. The procedure of finding such a surface as well as verification of necessary conditions stated in Theorem \ref{iso} and Proposition \ref{all} is similar to that in \cite{Y1}. In \cite{Y1}, the choice of the $C_2\times C_2$ Galois \'etale cover is very specific and has to come from killing the $2$-torsion invariant line bundles under a Sylow $3$-subgroup of the automorphism group $C_7:C_3$. In this paper, we obtain more examples by overcoming this technical hurdle, namely, we consider all possible $C_2\times C_2$ Galois \'etale covers of the fake projective plane in \cite{Y1}.
The explicit computation is accomplished by using Magma. The proof of Theorem \ref{main} generalizes the argument of \cite{Y1}.
Here is the organization of this paper. We first prepare some preliminary results related to our construction in Section \ref{pre}. The proofs of Theorem \ref{iso} and \ref{main} are given in Section \ref{seciso} and \ref{new} respectively.
Finally we study the corresponding problem in dimension three in Section \ref{sec3fold}.
\section{Preliminary discussions and idea of proofs}\label{pre}
Let $X=\mathbb{B}^2_{\mathbb{C}}/\Pi$ be a fake projective plane with $\pi_1(X)=\Pi$. It is known from definition that the first Betti number of $X$ is trivial. According to \cite{PY}, there is always a nontrivial
torsion element in $H_1(X,\mathbb{Z})$. The torsion group $H_1(X,\mathbb{Z})$ is available from \cite{CS}.
\begin{lemma}\label{cover} A fake projective plane $X$ possesses a degree four Galois \'etale cover if and only if there is a quotient group of order four of $H_1(X,\mathbb{Z})$.
\end{lemma}
\begin{proof} We know that $H_1(X,\mathbb{Z})$ is a direct sum of finite cyclic abelian groups as the first Betti number of $X$ is trivial. If $Q$ is a quotient group of order four of $H_1(X,\mathbb{Z})$, then there is a homomorphism
$$\rho:\Pi\rightarrow\Pi/[\Pi,\Pi]=H_1(X,\mathbb{Z})\rightarrow Q.$$
The kernel of $\rho$ gives rise to a normal subgroup $\Sigma$ of index four in $\Pi$, with $Q$ as the deck transformation group of the covering map $M=\mathbb{B}^2_{\mathbb{C}}/\Sigma\rightarrow X=\mathbb{B}^2_{\mathbb{C}}/\Pi$.
On the other hand, if there is a normal subgroup $\Sigma$ of index four in $\Pi$, it leads to a homomorphism $\sigma:\Pi\rightarrow \Pi/\Sigma$. As a group of order four is always abelian, $\sigma$ factors through a homomorphism $\Pi/[\Pi,\Pi]\rightarrow\Pi/\Sigma$. We conclude that $\Pi/\Sigma$ lives as a quotient group of order four of $\Pi/[\Pi,\Pi]=H_1(X,\mathbb{Z})$.
\end{proof}
We consider an \'etale cover $\pi:M\rightarrow X$ corresponding to a subgroup $\pi_1(M)\leq\Pi$ of index four. In particular, the finite group $\mathcal{G}=\Pi/\pi_1(M)$ is either $C_2\times C_2$ or $C_4$.
\begin{lemma}\label{gg} Let $M$ be a smooth projective surface and assume that there is an \'etale cover $\pi:M\rightarrow X$ of degree four over a fake projective plane $X$. Suppose that $q(M)=0$, then $p_g(M)=3$.
\end{lemma}
\begin{proof} Since $\pi:M\rightarrow X$ is \'etale and $p_g(X)=q(X)=0$, $\chi(\mathcal{O}_M)=4\chi(\mathcal{O}_X)=4$. It follows that $p_g(M)=3$ if $q(M)=0$.
\end{proof}
Suppose now a surface $M$ is constructed as in Lemma \ref{gg}. We study
the canonical map $\varphi=\varphi_{|K_M|}:M\dashrightarrow\mathbb{P}^2$. We will assume that $\pi:M\rightarrow X$ is a \emph{Galois cover}, i.e., $\Sigma:=\pi_1(M)\leq\Pi$ is normal.
Note that then $|K_M|$ is invariant under the Galois group $\mathcal{G}:={\rm Gal}(M/X)=\Pi/\pi_1(M)$.
Let us relate the canonical sections from Lemma \ref{gg} to divisors on $X$.
It is known from the Universal Coefficient Theorem that torsions in $H_1(X,\mathbb{Z})$ give rise to a torsion line bundle on $X$, cf. Lemma 4 of \cite{LY}. Denote by $\mathcal{L}_\chi$ the invertible sheaf on $X$ corresponding to a torsion line bundle on $X$ given by a character $\chi$. In this case, the trivial character corresponds to $\mathcal{O}_X$, denoted by $\mathcal{L}_1$.
The push forward of the structure sheaf of $M$ splits into eigen-sheaves
$$
\pi_*\mathcal{O}_M=\bigoplus_{\chi:\mathcal{G}\rightarrow \mathbb{C}^*}\mathcal{L}_\chi.
$$
Denote by $\omega_M$ the dualizing sheaf of a surface $M$. Then
$$\pi_*\omega_M=\bigoplus_{\chi:\mathcal{G}\rightarrow \mathbb{C}^*}\omega_X\otimes \mathcal{L}_\chi.$$
It follows from the degeneration of the Leray spectral sequence that
\begin{equation}
H^i(M,\omega_M)=\bigoplus_{\chi:\mathcal{G}\rightarrow \mathbb{C}^*}H^i(X,\omega_X\otimes \mathcal{L}_\chi)
\end{equation}
for all $i$. Hence vanishing of $q(M)$ implies that $H^1(X,\omega_X\otimes \mathcal{L}_\chi)=0$ for
all $\chi:\mathcal{G}\rightarrow \mathbb{C}^*$. By Serre Duality, $h^2(X,\omega_X\otimes\mathcal{L}_\chi)=h^0(X,\mathcal{L}_\chi^{-1})$, which is either
$0$ or $1$ depending on whether $\chi$ is trivial or not. From Riemann-Roch formula and the fact that $X$ is a fake projective plane,
it follows that $h^0(X,\omega_X\otimes\mathcal{L}_\chi)=1$ for each $\chi\neq 1$, which corresponds to three linearly independent sections in Lemma \ref{gg}.
Denote by $D_1, D_2, D_3$ the corresponding curves on $X$. It follows that $H^0(M,K_M)$ is generated by $\pi^*(D_i)$, $i=1,2,3$, noting that
$\pi^* \mathcal{L}_\chi\cong\mathcal{O}_M$.
\begin{lemma}\label{bpf23} Assume that $q(M)=0$ and let $D_1, D_2, D_3$ be divisors obtained as above. Assume that $D_1\cap D_2\cap D_3=\emptyset$. Then
$H^0(M,K_M)$ is base point free and the canonical degree of $M$ is $36$.
\end{lemma}
\begin{proof} Let $x$ be a point in the base point set of $|K_M|$. Since $|K_M|$ is invariant under the Galois group $\mathcal{G}$, $\pi(x)\in D_1\cap D_2\cap D_3$, which
is empty. It follows from Proposition \ref{Bea} that the canonical degree of $M$ is $36$.
\end{proof}
The last lemma would be utilized in Section \ref{new} to give a proof of Theorem \ref{main}. The presentation here is a simplification of the original one,
thanks to the suggestion of the referee.
\section{General constraints on base point set}\label{seciso}
The goal of this section is to give a proof of Theorem \ref{iso}, which gives constraints on the base point set of $|K_M|$ without knowledge on an explicit description of fake projective plane $X$. Here as $\rho(X)=1$, we always denote by $L_X$ an ample generator of ${\rm Pic}(X)$. Also recall that for a fake projective plane $X$, we have $p_g(X)=q(X)=0$ and $L_X^2=1$ by definition. We begin with the following simple observations.
\begin{lemma}\label{gen} Let $X$ be a fake projective plane and let $L_X$ be an ample generator of ${\rm Pic}(X)$. Then $h^0(X,L)\leq1$ for any line bundle $L\equiv L_X$ and $h^0(X,L')\leq 2$ for any line
bundle $L'\equiv 2L_X$.
\end{lemma}
\begin{proof} If $L''$ is a line bundle with $L''\equiv 4L_X$, then by Riemann-Roch formula $h^0(X,L'')=3$. But if $L\equiv L_X$ and $H^0(X,L)$ has two linearly independent sections $x$ and $y$, then
$\{x^4,x^3y,x^2y^2,xy^3,y^4\}$ are five linearly independent sections of $H^0(X,L^{\otimes 4})$, which is absurd. The second statement is proved similarly.
\end{proof}
\begin{lemma}\label{Sch} If $C$ is an irreducible and reduced curve on a fake projective plane $X$ with $C\equiv L_X$, then $C$ is smooth of genus 3.
\end{lemma}
\noindent{\bf Proof.} Given an irreducible and reduced curve $C$, we denote by $C^\nu$ the normalization of $C$ and $\nu:C^\nu\rightarrow C$ the normalization morphism. The $\mathcal{O}_C$ sheaf
$\delta:=\nu_*\mathcal{O}_{C^\nu}/\mathcal{O}_C$ is the cokernel of the natural map $\mathcal{O}_C\rightarrow\nu_*\mathcal{O}_{C^\nu}$ and satisfies
$$g(C^\nu)=p_a(C^\nu)=p_a(C)-h^0(C,\delta).$$
We first remark that $g(C^\nu)\geq2$ as $X$ is hyperbolic. The Ahlfors-Schwarz Lemma applied to the composition map induced by the normalization $\nu':C^\nu\xrightarrow{\nu}C\hookrightarrow X$ (cf. \cite{CCL}) for the manifolds equipped with Poincar\'e metrics implies that the K\"ahler forms satisfy $\nu'^*\omega_X\leq\omega_{C^\nu}$, with equality if and only if it is a holomorphic isometry leading to totally geodesic $C$. Since there is no totally geodesic curve on a fake projective plane from the proof of \cite[Lemma 6]{LY}, the inequality is strict. Hence for $C\equiv kL_X$ with $k\geq1$, integrating over $C^\nu$, we get
$$2k= \frac{2}{3}(K_X\cdot C)<\deg(K_{C^\nu})=2g(C^\nu)-2=k(k+3)-2h^0(C,\delta),$$
where we used the fact that the Ricci curvature is $\frac{3}{2}$ of the holomorphic sectional
curvature for the Poincar\'e metric on $X$ and the adjunction $p_a(C)=\frac{1}{2}C\cdot(K_X+C)$. Hence $k = 1$ implies that $h^0(C,\delta) = 0$ and $C$ is smooth with $g(C)=3$.
\qed
\begin{lemma}\label{inv} Let $X$ be a fake projective plane with a nontrivial automorphism group and let $C$ be an effective divisor such that $C\equiv L_X$. For any nontrivial subgroup $H\leq{\rm Aut}(X)$ with $H\cong C_3$ or $C_7$, $h^*C\neq C$ for any $h\in H-\{e\}$.
\end{lemma}
\begin{proof} Clearly $C$ must be reduced and irreducible as $\rho(X)=1$. From Lemma \ref{Sch}, $C$ is smooth of genus three. Suppose now $h^*C=C$ for all $h\in H$. From \cite[Lemma 6]{LY}, $H$ must act non-trivially on $C$. Note that $H$ can only be $C_3$ or $C_7$ from the list of \cite{CS}.
If $H\cong C_7$, then there exists an $H$-fixed point on $C$, as by the Hurwitz formula there is no \'etale cover of degree 7 from a smooth genus three curve.
By \cite[Lemma 7]{LY}, for $x=\dim_{\mathbb{C}} H^1(C,\mathcal{O}_C)^{\rm inv}$ we have the equation,
$$n=2-2\cdot3+\frac{2\cdot7}{7-1}(3-x)\ \Rightarrow\ 3n+7x=9. $$
The only solution is $(n,x)=(3,0)$ and $C/C_7\subseteq X/C_7$ is a smooth rational curve. But then there is a non-constant lifted map from $\mathbb{P}^1$ to the universal cover $\mathbb{B}^2_{\mathbb{C}}$ of $X/C_7$, which contradicts Liouville's theorem.
If $H\cong C_3$, then there exists an $H$-fixed point on $C$, as by the Hurwitz formula there is no \'etale cover of degree 3 from a smooth genus three curve. By the same argument as above, we see that $(n,x)=(5,0)$ or $(2,1)$. In either case, there is a non-constant lifted map from $\mathbb{P}^1$ or $\mathbb{C}$ to $\mathbb{B}^2_{\mathbb{C}}$, which again contradicts Liouville's theorem.
\end{proof}
\begin{lemma}\label{propgenfin} Let $X$ be a fake projective plane with ${\rm Aut}(X)=C_7:C_3$. Suppose that there is a Galois \'etale cover $\pi:M\rightarrow X$ of degree four and $q(M)=0$, then the canonical map $\varphi:M\dashrightarrow\mathbb{P}^2$ is generically finite.
\end{lemma}
\begin{proof} From Lemma \ref{gg}, we know that $p_g(M)=3$ and hence the canonical map maps $M$ to $\mathbb{P}^2$. Write $|K_M|=P+F$, where $P$ is the mobile part and $F$ is the fixed divisor. By construction, we have $\varphi=\varphi_{|K_M|}=\varphi_{P}:M\dashrightarrow\mathbb{P}^2$. We will abuse the notation: $P$ will be the mobile linear system or a general member in it.
Assume that $\overline{\varphi(M)}=C\subseteq\mathbb{P}^2$ is a curve. We will derive a contradiction.
First of all, we claim that $P$ is not base point free, or equivalently $P^2\neq0$. Assume now $P^2=0$. We consider $\mathcal{G}={\rm Gal}(M/X)$. Since $g^*K_M=K_M$ for any $g\in\mathcal{G}$, we have that $g^*F=F$ for each $g\in\mathcal{G}$. Indeed, $g^*P$ is a mobile sub-linear system of $|K_M|$ and hence $g^*F\geq F$ as Weil divisors. Hence as $\pi$ is Galois, $F=\pi^*F_X$ for an effective divisor $F_X$ on $X$. Moreover, if ${\rm NS}(X)=\langle L_X\rangle$ for an ample divisor $L_X$, then $K_X\equiv3L_X$, $F_X\equiv lL_X$ for some $0\leq l\leq 3$, and $P\equiv\pi^*(3-l)L_X$. Now, $P^2=0$ implies that $l=3$ and hence $P\equiv0$. This is a contradiction as a non-zero effective divisor cannot be numerically trivial.
Since $\varphi:M\dashrightarrow C\subseteq\mathbb{P}^2$ is not a morphism, we take a composition of finitely many smooth blow-ups $\rho:\widehat{M}\rightarrow M$ to resolve $P$ and let $\psi:\widehat{M}\rightarrow C\subseteq\mathbb{P}^2$ be the induced morphism. We have the following diagram after taking the Stein factorization of $\psi:\widehat{M}\rightarrow C$:
\begin{center}
\begin{tikzpicture}
implies/.style={double double equal sign distance, -implies},
\node (m) at (0,2) {$\widehat{M}$};
\node (M) at (0,0) {$M$};
\node (C) at (2,0) {$C$};
\node (in) at (2.5,0) {$\subseteq$};
\node (P) at (3,0) {$\mathbb{P}^2$};
\node (CC) at (2,2) {$\tilde{C}$};
\path[->] (m) edge node[left]{$\rho$}(M);
\path[->] (m) edge node[above]{$\beta$} (CC);
\path[dashed,->] (M) edge node[below]{$\varphi$} (C);
\path[->] (CC) edge node[right]{$\alpha$} (C);
\path[->] (m) edge node[above]{$\psi$} (C);
\end{tikzpicture}
\end{center}
If $\rho^*P=\widehat{P}+\widehat{F}$, where $\widehat{P}=\psi^*|\mathcal{O}_C(1)|$ is base point free, $\widehat{F}\geq0$ is the fixed divisor, and $\psi=\psi_{\widehat{P}}$, then $\widehat{F}$ is a non-trivial effective $\rho$-exceptional divisor with $\beta(\widehat{F})=\tilde{C}$. In particular, $\tilde{C}\cong\mathbb{P}^1$ as all the irreducible components of $\widehat{F}$ are rational. Since $\alpha:\tilde{C}\rightarrow C$ is defined by $\alpha^*|\mathcal{O}_C(1)|\subseteq|\mathcal{O}_{\mathbb{P}^1}(d)|$ for some $d\geq1$ and hence an element in $\widehat{P}$ is given by $\beta^*H$ for some $H\in|\mathcal{O}_{\mathbb{P}^1}(d)|$, we have $\widehat{P}\supseteq\beta^*|\mathcal{O}_{\mathbb{P}^1}(d)|$. In particular, we get
$$\widehat{P}=\psi^*|\mathcal{O}_C(1)|=\beta^*\alpha^*|\mathcal{O}_C(1)|=\beta^*|\mathcal{O}_{\mathbb{P}^1}(d)|.$$
As $\dim \widehat{P}=p_g(M)-1=2$, we get $d=2$ and $C\subseteq\mathbb{P}^2$ being irreducible and non-degenerate is a smooth conic in $\mathbb{P}^2$.
Let $\widehat{M}_{c}$ be a general fibre of $\widehat{M}\rightarrow\tilde{C}$ and $D:=\rho_*(\widehat{M}_{c})\equiv P/2$ be the corresponding prime divisor on $M$. Recall that $\pi:M\rightarrow X$ is Galois, $K_M=\pi^*K_X\equiv\pi^*(3L_X)$ and $P\equiv\pi^*(lL_X)$ for some $1\leq l\leq 3$ as $P^2\neq0$, where ${\rm NS}(X)=\langle L_X\rangle$ and $L_X^2=1$. It follows from the genus formula,
$$(K_M+D)\cdot D=2g_a(D)-2\in2\mathbb{Z}$$
that $l=2$ is the only possibility. Hence $P\equiv\pi^*(2L_X)$, $F=\pi^*F_X\equiv\pi^*L_X$, and $D\equiv\pi^*L_X$. Note that if $h^0(X,2L_X)=0$ for any ample generator $L_X$ on $X$, then we arrive at the required contradiction as $2F_X\neq0$. This is exactly the argument in \cite{Y1}, where the vanishing holds for $X$ a very special fake projective plane as discussed in the introduction. Below we provide a more elementary argument.
It is easy to see that $\mathcal{G}$ acts on $C\cong\mathbb{P}^1$ holomorphically and induces an action on $\tilde{C}$. We claim that there is always a fixed point on $\tilde{C}=\mathbb{P}^1$. If $\mathcal{G}$ acts trivially, then every point is a fixed point.\footnote{In fact, this case is absurd. If $\mathcal{G}$ acts trivially on $C$, then $\mathcal{G}$ also acts trivially on $\tilde{C}\cong\mathbb{P}^1$. Any fibre of $\beta:\widehat{M}\rightarrow\tilde{C}$ as a section of $H^0(\mathbb{P}^1,\mathcal{O}_{\mathbb{P}^1}(1))$ is $\mathcal{G}$-fixed and descends to a $\mathcal{G}$-invariant section $D\equiv\pi^*L_X$ on $M$, which then descends to a section $D_X\equiv L_X$ on $X$. For any two such sections $D$ and $D'$ on $M$, $D\sim D'$ implies that $D_X\equiv D'_X\equiv L_X$ where $\pi^*D_X=D$ and $\pi^*(D_X')=D'$. Since $X$ has only finitely many nontrivial torsion but $H^0(\mathbb{P}^1,\mathcal{O}_{\mathbb{P}^1}(1))$ is infinite, we can find a line bundle $L=L_X+T_X$ for some torsion line bundle $T_X$ on $X$ with $\dim|L|\geq1$. This contradicts Lemma \ref{gen}.} Otherwise, $\mathcal{G}$ has two fixed points on $\tilde{C}$ from the Lefschetz fixed point formula. In particular, the fiber $\widehat{M}_c$ over a fixed point $c$ is $\mathcal{G}$-invariant and descends to an effective divisor $G^X\equiv L_X$ on $X$.\footnote{Up to here everything works for all fake projective planes with a nontrivial automorphism group.}
Suppose now that ${\rm Aut}(X)=C_7:C_3$. Note that in this case a non-trivial torsion element is always a 2-torsion. In particular for any $\sigma\in{\rm Aut}(X)$,
$\sigma^*G^X\sim G^X+T_\sigma$ for some 2-torsion $T_\sigma$ and
$$\sigma^*(2G^X)=2\sigma^*(G^X)\sim 2G^X+2T_\sigma=2G^X.$$
On the other hand, for any non-trivial element $\sigma\in{\rm Aut}(X)$, $G^X\neq\sigma^*G^X$ by Lemma \ref{inv}. The curves $G^X$ and $\sigma^*G^X$ intersect at a unique point $Q_\sigma$ as $G^X\cdot(\sigma^*G^X)=L_X^2=1$. We claim that there are three linearly independent sections of the form $2\sigma^*G^X$ in $|2G^X|$, which then contradicts Lemma \ref{gen}.
We fix one non-trivial $\sigma$ and consider $Q:=Q_\sigma$. Note that then
$2G^X$ intersects with $\sigma^*(2G^X)$ only at $Q$ with multiplicity four. By the result of \cite{PY}, the isotropic group at $Q$ cannot be the whole ${\rm Aut}(X)$. Hence there exists a nontrivial element $\tau\in{\rm Aut}(X)$, $\tau\neq\sigma$, such that $\tau^*Q\neq Q$.
In particular, $\tau^*(2G^X)$ only intersects with $\tau^*\sigma^*(2G^X)$ at $\tau^*Q$ with multiplicity four. Since elements in the pencil $\langle\mu\cdot2G^X+\lambda\cdot2\sigma^*G^X\rangle$ must pass through $Q$ with multiplicity four, one of $\tau^*(2G^X)$ and $\tau^*\sigma^*(2G^X)$ is not in $\langle2G^X,2\sigma^*G^X\rangle$ or otherwise $\tau^*Q=Q$. Hence $h^0(X,2G^X)>2$ and we have a contradiction to Lemma \ref{gen}.
Hence we conclude that $\dim\overline{\varphi(M)}\neq1$. Since $\varphi(M)\subseteq\mathbb{P}^2$ has to be positive dimensional, we conclude that $\varphi:M\dashrightarrow\mathbb{P}^2$ must be dominant and hence generically finite.
\end{proof}
\begin{lemma}\label{codim1} Let $M\rightarrow X$ be a Galois \'etale cover of degree four of a fake projective plane $X$ with ${\rm Aut}(X)=C_7:C_3$. If $q(M)=0$, then the canonical linear system $|K_M|$ is mobile, i.e., there is no codimension one base locus.
\end{lemma}
\begin{proof} We follow the same notation as in the proof of Lemma \ref{propgenfin}: ${\rm NS}(X)=\langle L_X\rangle$ for an ample divisor $L_X$, $K_X\equiv3L_X$, $F_X\equiv lL_X$ for some $0\leq l\leq 3$, and $P\equiv\pi^*(3-l)L_X$. We claim that $l=0$.
Since $\dim P=p_g(M)-1=2>0$, $P$ contains a nontrivial effective divisor and hence $l\neq 3$.
If $l=1$, then we consider the action of ${\rm Aut}(X)=C_7:C_3$ on $F_X=L_X+T$, where $T$ is a 2-torsion. Then the same argument as in the proof of Lemma \ref{propgenfin} produces a line bundle $\mathcal{L}\equiv 2L_X$ with $h^0(X,\mathcal{L})>2$, but this violates Lemma \ref{gen}.
If $l=2$, then we consider the same argument as above on $P_X\equiv L_X$.
Here is an alternate argument. In the above setting, if $H^0(X,2L_X)=0$ for $L_X$ any ample generator of ${\rm Pic}(X)$, then $|K_M|=P$ is mobile. Indeed, the assumption also implies that $H^0(X,L_X)=0$ for any ample generator of ${\rm Pic}(X).$ Hence for $F=\pi^*F_X$ with $F_X\equiv lL_X$, $l=0$ is the only possibility and $F=0$. The hypothesis holds for any fake projective plane with an automorphism group of order 21 by a result of \cite{LY}.
\end{proof}
{\it Proof of Theorem \ref{iso}} First of all, from Magma, all Galois coverings of a fake projective plane of index $4$ can be listed, as is done in the proof of Proposition \ref{all} below.
Furthermore, Magma tells us that abelianization of the lattices associated to such coverings are all trivial. Hence $q(M)=0$ for our examples. Theorem \ref{iso} now follows from Lemma \ref{codim1}.
\begin{proof}[of Proposition \ref{all}] We simply apply the procedure of construction as in \cite{Y1} to each of the fake projective planes listed in column 3 of Table 1. We first need to enumerate all possible surfaces as degree four Galois \'etale covers associated to fake projective planes as listed. It turns out that the number of index four subgroups of the lattice $\Pi$ to a fake projective plane
in the table is recorded in the column $N_1$ in Table 1. This could be seen by considering subgroups of order $4$ in $H_1(X,\mathbb{Z})$ as in Lemma \ref{cover}, or by listing index four subgroups of $\Pi$ from Magma.
Now we claim that all the different sub-lattices of index $4$ of $\Pi$ in Table 1 give rise to non-isometric complex hyperbolic forms in terms of the Killing metrics on the locally symmetric spaces. For this purpose, we assume that $\Lambda_1$ and $\Lambda_2$ are two groups obtained from the above procedure and $B_{\mathbb{C}}^2/\Lambda_1$ is isometric to $B_{\mathbb{C}}^2/\Lambda_2$. From construction, $\Lambda_1$ and $\Lambda_2$ are normal subgroups of index 4 in two lattices $\Pi_1$ and $\Pi_2$ corresponding to the fundamental groups
of fake projective planes. Let $\overline \Gamma_1$ and $\overline \Gamma_2$ be the corresponding maximal arithmetic groups in the respective classes. As $B_{\mathbb{C}}^2/\Lambda_1$ and $B_{\mathbb{C}}^2/\Lambda_2$ are isometric, $\Lambda_1$ is conjugate to $\Lambda_2$ as discrete subgroups of the same algebraic group $G$ with $G\otimes \mathbb{R}\cong PU(2,1)$. Hence the two corresponding maximal lattices satisfy $\overline \Gamma_1\cong \overline \Gamma_2$, and similarly $\Pi_1\cong\Pi_2$. It follows that they have to come from the same row in Table 1 and hence correspond to the same subgroup of index $4$ in the same lattice associated to some fake projective plane. Hence there are altogether $835$ non-isometric complex two ball quotients obtained in this way, by summing over the column of $N_1$ in Table 1.
Now for each locally symmetric space $M=B_{\mathbb{C}}^2/\Lambda$ obtained as above, it gives rise to a pair of complex structures $J_1$ and $ J_2$, which are conjugate to each other.
These two complex structures give rise to two non-biholomorphic complex surfaces $S_1=(M,J_1)$ and $S_2=(M,J_2)$. In fact, if they are biholomorphic, the corresponding
four-fold quotients $S_1/[\Pi,\Lambda]$ and $S_2/[\Pi,\Lambda]$ are biholomorphic and are fake projective planes. This contradicts the results in \cite{KK}, see also the Addendum of \cite{PY}, that conjugate complex structures on a fake projective plane give rise to two different complex structures.
In general, let $(M_1, J_1)$ and $(M_2,J_2)$ be two complex ball quotients obtained from taking degree 4 \'etale covers of some possibly different fake projective planes. If $(M_1, J_1)$ and $(M_2,J_2)$ are biholomorphic, they are isometric with respect to the corresponding Bergman (Killing) metrics. Hence from the earlier argument, $M_1$ is isometric to $M_2$ and we may regard $M_1=M_2$. Now the argument of the last paragraph implies that $J_1=J_2$. In conclusion, the $1670$ complex surfaces obtained from the pair of conjugate complex structures on the $835$ underlying locally symmetric structures give rise to distinct complex surfaces. This concludes the proof of Proposition \ref{all}.
\end{proof}
\section{New examples of surfaces with maximal canonical degree}\label{new}
Our goal in this section is to prove Theorem \ref{main}. The surface studied in \cite{Y1} and here is constructed from the fake projective plane $X$ given in \cite[Section 5.9]{PY} in the class of $(a=7, p=2)$ and is denoted by $(a=7, p=2,\emptyset, D_32_7)$ in the notation of \cite{CS}.
\begin{proof}[of Theorem \ref{main}] We consider $\pi:M\rightarrow X$ a Galois $C_2\times C_2$--\'etale cover of the fake projective plane $X$ in the class $(a=7, p=2,\emptyset, D_32_7)$. From Magma computation, the irregularity $q(M)=0$, cf. Proposition \ref{all}. Hence by Lemma \ref{bpf23}, it suffices for us to prove that the canonical map of $M$ is base point free. From the discussion in Section \ref{pre}, there are non-trivial 2-torsions $\tau_i\in{\rm Pic}^0(X)$ for $i=1,2,3$ corresponding to characters of $\mathcal{G}={\rm Gal}(M/X)=C_2\times C_2$ such that $H^0(X,K_X+\tau_i)=\langle t_i\rangle$ and $H^0(M,K_M)=\langle\pi^*t_i|\ i=1,2,3\rangle$.
For the convenience of the reader, we recall the key steps of the argument in \cite{Y1}. For simplicity, we denote by $G$ the automorphism group ${\rm Aut}(X)=C_7:C_3$. The automorphism group of $X$ has a presentation
$G=\langle a,b|a^7=b^3=1, bab^{-1}=a^2\rangle.$ The group $G$ contains a normal Sylow 7-subgroup $G_7=\langle a\rangle$, and seven conjugate Sylow $3$-subgroups, one of which is $G_3:=\langle b\rangle$. We know from the Riemann-Roch formula that $h^0(X,2K_X)=10$. In terms of the explicit basis of $H^0(X,2K_X)$ given by \cite{BK}, the action of $G$ is presented by
\begin{eqnarray}
&&a(u_0:u_1:u_2:u_3:u_4:u_5:u_6:u_7:u_8:u_9)\nonumber \\
&=&(u_0:\zeta_7^6u_1:\zeta_7^5u_2:\zeta_7^3u_3:\zeta_7u_4:\zeta_7^2u_5:\zeta_7^4u_6:\zeta_7u_7:\zeta_7^2u_8:\zeta_7^4u_9)\\
&&b(u_0:u_1:u_2:u_3:u_4:u_5:u_6:u_7:u_8:u_9) \nonumber \\
&=&(u_0:u_2:u_3:u_1:u_5:u_6:u_4:u_8:u_9:u_7)
\end{eqnarray}
From the Corrigendum of \cite{Y1}, under the action of $G_7$, $S:=\cup_{\Sigma\in C_2^4-\{1\}} H^0(X,K_X+\Sigma)$ consists of 3 orbits, where we recall that a $p$-torsion element $\Sigma\in H_1(X,\mathbb{Z})=C_2^4$ corresponds to a $p$-torsion element $\Sigma\in{\rm Pic}^0(X)$ by the universal coefficient theorem (see \cite[Lemma 4]{LY}).
\begin{enumerate}
\item $\langle\widetilde{t}_0\rangle=H^0(X,K_X+\Sigma_0)$, where $\Sigma_0$ is $G$-invariant corresponding to an element in $H_1(X/G,\mathbb{Z})^\times$ and $\widetilde{t}_0^2=u_0$.
\item Two disjoint $G_7$ orbits $\langle a\rangle\widetilde{t}_1$ and $\langle a\rangle\widetilde{t}_2$, where $\widetilde{t}_i$'s are $G_3$-invariant corresponding to elements in $H_1(X/G_3,\mathbb{Z})^\times-\{\Sigma_0\}$.
\end{enumerate}
Let $v_0=u_0, v_1=u_1+u_2+u_3, v_2=u_4+u_5+u_6$, and $v_3=u_7+u_8+u_9$. From \cite{Y1}, one finds that \begin{equation}\label{4.3}\begin{cases} \widetilde{t}_0^2=v_0,\\ \widetilde{t}_1^2=v_0+\frac12(1+\sqrt{-7})v_1,\\ \widetilde{t}_2^2=v_0+(-5+\sqrt{-7})v_1+4(1-\sqrt{-7})v_2-4(v_3)\end{cases}
\end{equation}
with the help of elementary command \verb'IsDomain' in Magma. It is proved that $\cap_{i=0}^2Z_{t_i^2}=\emptyset$, which was verified in the Corrigendum of \cite{Y1} by checking that $\cap_{i=0}^2Z_{t_i^2}=\emptyset$ on $X$ modulo $p=23$ from the command \verb'HilbertPolynomial' in Magma. We remark that the same example was also studied later in \cite{Ri}, where the author independently verified with more sophisticated techniques in Magma that the sections obtained from the above procedure do give rise to sections in $H^0(M,K_M)$.
Now under the action of $G_7$, the explicit sections $\widetilde{t}_0$ and $a^j\widetilde{t}_i$, $i=1,2$ and $0\leq j\leq 6$, precisely give the effective sections of $S:=\cup_{\Sigma\in C_2^4-\{1\}} H^0(X,K_X+\Sigma)$. We will prove that $\cap_{i=0}^2Z_{t^2_i}=\emptyset$ by considering possible choices of $\{t_1,t_2,t_3\}\subseteq S=\langle\widetilde{t}_0\rangle\cup\langle a\rangle\widetilde{t}_1\cup\langle a\rangle\widetilde{t}_2$ and checking by Magma whether these sections have common intersection.
Conjugating by an element in $G_7$, we may assume that $t_1$ belongs to $\{\widetilde{t}_0,\widetilde{t}_1,\widetilde{t}_2\}$. Suppose $t_1=\widetilde{t}_0$, where $\widetilde{t}_0$ is invariant as a set under $G$, then conjugate by an element in $G_7$, we may assume that $t_2=\widetilde{t}_1$. But by construction $\tau_3=\tau_1\cdot\tau_2$ is determined by $\tau_1=\sigma_0$ and $\tau_2$, which gives $\widetilde{t}_2\in H^0(X,K_X+\tau_0\cdot\tau_1)=H^0(X,K_X+\tau_2)$. In particular, this case was already checked in \cite{Y1} as
$\cap_{i=0}^2Z_{t^2_i}= Z_{v_0}\cap Z_{\widetilde{t}_1^2}\cap Z_{\widetilde{t}^2_2}=\emptyset$
and we are done.
Consider now the case that none of $t_i$'s is $\widetilde{t}_0$. In this scenario, $t_i$ belongs to the orbits of $\widetilde{t}_1$ or $\widetilde{t}_2$. Again we use the fact that effective divisors $D_i$'s have common intersections if and only if $2D_i$'s have common intersections. Hence it suffices for us to prove the following claim.
\begin{lemma}\label{check} Let $i, j\in \{1,\dots,6\}$. Then
\begin{enumerate}[$(a)$]
\item $Z_{\widetilde{t}_1}\cap Z_{a^i\widetilde{t}_1}\cap Z_{a^j\widetilde{t}_2}=\emptyset$ for $1\leq i,j\leq6;$
\item $Z_{\widetilde{t}_2}\cap Z_{a^i\widetilde{t}_1}\cap Z_{a^j\widetilde{t}_2}=\emptyset$ for $1\leq i,j\leq6;$
\item $Z_{\widetilde{t}_1}\cap Z_{a^i\widetilde{t}_1}\cap Z_{a^j\widetilde{t}_1}=\emptyset$ for $1\leq i<j\leq6;$
\item $Z_{\widetilde{t}_2}\cap Z_{a^i\widetilde{t}_2}\cap Z_{a^j\widetilde{t}_2}=\emptyset$ for $1\leq i<j\leq6.$
\end{enumerate}
\end{lemma}
\begin{proof} In terms of the basis chosen with the action of $G_7$ given in equation (4.1) and the explicit sections listed in (4.3), statement $(a)$ in Lemma \ref{check} holds if there is no common intersection for the following sections,
\begin{eqnarray*}
&\{&u_0+\frac12(1+\sqrt{-7})(u_1+u_2+u_3),\\
&&u_0+\frac12(1+\sqrt{-7})(\zeta_7^{-i}u_1+\zeta_7^{-2i}u_2+\zeta_7^{-4i}u_3),\\
&&u_0+(-5+\sqrt{-7})(\zeta_7^{-j}u_1+\zeta_7^{-2j}u_2+\zeta_7^{-4j}u_3)+4(1-\sqrt{-7})(\zeta_7^{j}u_4
+\zeta_7^{2j}u_5+\zeta_7^{4j}u_6)\\&&-4(\zeta_7^{j}u_7+\zeta_7^{2j}u_8+\zeta_7^{4j}u_9)\}.
\end{eqnarray*}
Instead of using the command \verb'HilbertPolynomial' over the cyclotomic field $\mathbb{Q}(\zeta_7)$ on $X$, we specialize it to the finite field $F_{29}$, where $16$ is a primitive $7$-th root of unity and $14$ serves as $\sqrt{-7}$. In this way, computing over the finite field $F_{29}$, we verify from Magma that the above three polynomials do not have common intersection on $X$ for all $i, j\in \{1,\dots,6\}$ in $F_{29}$. This implies that the original
equations do not have common zero over the algebraic number field $\mathbb{Q}(\zeta_7)$. Similar arguments apply to $(b)$, $(c)$, and $(d)$ in Lemma \ref{check}.
\end{proof}
We remark that Lemma \ref{check} actually is stronger than what is sufficient for our purpose. For example, consider the case of $(a)$. It is enough to check $Z_{\widetilde{t}_1}\cap Z_{a^i\widetilde{t}_1}\cap Z_{a^j\widetilde{t}_2}=\emptyset$ for one pair of $(i,j)$ corresponding to the elements $\mathcal{G}-\{1\}=\{\tau_1,\tau_2,\tau_3\}.$ However, since
we are checking by Magma, the extra computation does not make any essential difference in computer time. Similar argument applies to the cases $(b)$, $(c)$, $(d)$ as well.
Theorem \ref{main} follows immediately from Lemma \ref{check}.
\end{proof}
\section{Remark on maximal canonical degree of threefolds}\label{sec3fold}
Theorem \ref{main} has an implication on the canonical degree bound of threefolds. The purpose of this section is to explain the literature in this direction and its relation to Theorem \ref{main}. From this point on, let $Y$ be a Gorenstein minimal complex projective threefold of general type with locally factorial terminal singularities. Suppose that the linear system $|K_Y|$ defines a generically finite map $\Phi=\Phi_{|K_Y|}:Y\dashrightarrow\mathbb{P}^{p_g(Y)-1}.$ M. Chen asked in \cite{Ch} if there is an upper bound of $\deg(\Phi)$. A positive answer was provided in \cite{Hac} with $\deg(\Phi)\leq576.$ Later on, it was improved in \cite{DG2} that $\deg(\Phi)\leq360$ (with equality if and only if $p_g(Y)=4, q(Y)=2, \chi(\omega_Y)=5, K_Y^3=360$, and $|K_Y|$ is base point free.) In \cite{C}, it is shown that $\deg(\Phi)\leq 72$ if the geometric genus satisfies $p_g(Y)>10541$.
As a corollary of Theorem \ref{main} and the above discussion, we conclude that the canonical degree 72 can be achieved as stated in Corollary \ref{3fold}.
\begin{proof}[of Corollary \ref{3fold}] Equipped with Theorem \ref{main}, the corollary follows essentially from an observation of \cite[Section 3]{C}.
Take $C$ a smooth hyperelliptic curve of genus $g\geq2$, then the canonical map $\varphi_{|K_C|}:C\rightarrow\mathbb{P}^{g-1}$ is the composition of the double cover $C\rightarrow\mathbb{P}^1$ with the $(g-1)$-Veronese embedding $\mathbb{P}^1\hookrightarrow\mathbb{P}^{g-1}$. In particular, $\deg(\varphi_{|K_C|})=2$, cf. \cite{Har}. Take $M$ a surface satisfying the optimal degree bound $\deg(\varphi_{|K_M|})=36$ as in Theorem \ref{main}, then $\varphi=\varphi_{|K_M|}:M\rightarrow\mathbb{P}^2$ is a generically finite morphism of $\deg(\varphi)=K_M^2=36$.
Now let $Y=M\times C$, then $Y$ is a smooth projective threefold of general type with $p_g(Y)=3g$ and $\Phi=\Phi_{|K_Y|}:Y\rightarrow\mathbb{P}^{3g-1}$ a morphism. From our construction, it follows that $\Phi$ is generically finite and
\[\deg{\Phi}\cdot\deg W=K_Y^3=3K_M^2\cdot K_C=3\cdot36\cdot(2g-2),\]
where $W=\Phi(Y)$ is the image of the composition of maps $Y\hookrightarrow\mathbb{P}^2\times\mathbb{P}^{g-1}\hookrightarrow\mathbb{P}^{3g-1}$ defined by $|K_Y|$ and ${\mathcal{O}_{\mathbb{P}^2\times\mathbb{P}^{g-1}}(1,1)}$. Hence $\deg W=3(g-1)$ and $\deg(\Phi)=72.$
\end{proof}
\noindent{\bf Acknowledgements.}\label{ackref}
It is a pleasure for the second author to thank Donald Cartwright for his help on Magma commands. The authors would like to express their appreciation and thankfulness to the referee for very helpful comments and suggestions on the paper. This work is partially done during the first author's visit at Research Institute of Mathematical Sciences in Kyoto, National Center of Theoretical Sciences and National Taiwan University in Taiwan, and the second author's visit of the Institute of Mathematics of the University of Hong Kong. The authors thank the warm hospitality of the institutes.
\end{document} |
\begin{document}
\begin{abstract}
We study shrinking targets problems for discrete time flows on a homogeneous space $\Gamma\backslash G$ with $G$ a semisimple group and $\Gamma$ an irreducible lattice. Our results apply to both diagonalizable and unipotent flows, and apply to very general families of shrinking targets. As a special case, we establish logarithm laws for cusp excursions of unipotent flows, answering a question of Athreya and Margulis.\end{abstract}
\title{Shrinking targets problems for flows on homogeneous spaces}
\section{Introduction}
Consider an ergodic dynamical system given by the iteration of a measure preserving map $T: \mathcal{X}\to\mathcal{X}$ on a probability space $(\mathcal{X},\mu)$. From ergodicity, it follows that generic orbits become dense, and shrinking target problems are a way to quantify the rate. That is, to determine how fast we can make a sequence of targets shrink so that a typical orbit will keep hitting the targets infinitely often.
A natural bound for this rate comes from the easy half of Borel-Cantelli, stating that for any sequence of sets, $\{B_m\}_{m\in\mathbb N}$, if
$\sum_{m=1}^{\infty} \mu(B_m)<\infty$ then for a.e. $x\in \mathcal{X}$ from some point on $T^m x\not\in B_m$.
For chaotic dynamical systems, it could be expected that this bound is sharp, and much work has gone into proving this in various examples of fast mixing dynamical systems (under some regularity restrictions on the shrinking sets).
In particular, this was done for shrinking cusp neighborhoods of homogenous spaces \cite{Sullivan1982, KleinbockMargulis1999,GorodnikShah11,AthreyaMargulis09,KelmerMohammadi12, AthreyaMargulis14,Yu17},
and more generally for shrinking metric balls in a metric space \cite{ChernovKleinbock01,Dolgopyat04,Galatolo07,KleinbockZhao2017}.
Recently, in \cite{Kelmer17b}, the first author introduced a new method for attacking this problem for discrete time homogenous flows on (the frame bundle of) finite volume hyperbolic manifolds. This method works for any monotone family of shrinking targets in the hyperbolic manifold, and applies also for unipotent flows with arbitrarily slow polynomial mixing rate. In this paper we adapt this method to treat the general case of discrete time homogenous flows on a homogenous space $\mathcal{X}=\Gamma\backslash G$ with $G$ a connected semisimple Lie group with finite center and no compact factors, and $\Gamma$ an irreducible lattice.
\subsection{General setup, terminology, and notations}
Let $G$ denote a connected semisimple Lie group with finite center and no compact factors, let $\Gamma\leq G$ be an irreducible lattice, and let $\mu$ denote the $G$-invariant probability measure on $\mathcal{X}=\Gamma\backslash G$, coming from the Haar measure of $G$.
We fix once and for all a maximal compact subgroup $K\leq G$ and denote by $\mathcal{H}=G/K$ the corresponding symmetric space. We say that a subset $B\subseteq \mathcal{X}$ is spherical if it is invariant under the right action of $K$ and we identify spherical sets as subsets of the locally symmetric space $\Gamma\backslash\mathcal{H}$.
One-parameter flows on $\mathcal{X}=\Gamma\backslash G$ are given by the right action of one-parameter subgroups of $G$.
Explicitly, the one-parameter group generated by an element, $X_0$, in the Lie algebra, $\mathfrak g=\mathrm{Lie}(G)$, is given by
$\{h_t=\exp(tX_0):t\in \mathbb R\}$. The corresponding discrete time flow is then given by the action of the discrete subgroup $H=\{h_m\}_{m\in \mathbb Z}$.
We will always assume that the subgroup is unbounded and recall that, by Moore's ergodicity theorem, the action of any unbounded subgroup is ergodic and mixing.
We say that a family $\{B_t\}_{t>0}$ is a monotone family of shrinking targets if $B_t\subseteq B_s$ when $t\geq s$ and $\mu(B_t)\to 0$, and we say it is a family of spherical shrinking targets if all sets are spherical.
Given an unbounded discrete time one-parameter flow $\{h_m\}_{m\in \mathbb{Z}}$, following \cite{ChernovKleinbock01}, we say that a sequence of sets $\{B_m\}_{m\in \mathbb N}$
is \textit{Borel-Cantelli} (BC) for $\{h_m\}_{m\in\mathbb{Z}}$, if for a.e. $x\in\mathcal{X}$ the set
$\{m\in \mathbb N\ |\ xh_m\in B_m\}$ is unbounded, and we say it is \textit{strongly Borel-Cantelli} (sBC) if for a.e. $x\in \mathcal{X}$
\begin{equation}\label{e:sBC}
\lim_{m\to\infty}\frac{\#\{1\leq j\leq m: xh_j\in B_j\}}{\sum_{1\leq j\leq m}\mu(B_j)}=1.\end{equation}
We say that a collection of sets is (strongly) Borel-Cantelli (resp. monotone sBC) if any sequence (resp. monotone sequence) from this collection with divergent measure is (strongly) Borel-Cantelli.
In what follows we adopt the notation $A(t)\ll B(t)$ or $A(t)=O(B(t))$ to indicate that there is a constant $c>0$ such that $A(t)\leq cB(t)$, and we write $A(t)\asymp B(t)$ to indicate that
$A(t)\ll B(t)\ll A(t)$. The implied constants may always depend on the group, $G$, the lattice, $\Gamma$, and the flow that we think of as fixed. We will use subscripts to indicate the dependence of the implied constants on any additional parameters.
\subsection{Logarithm laws}
Our first result establishes a logarithm law for the first hitting time function (see \cite{GalatoloKim07} for the relation between such logarithm laws and Borel-Cantelli properties).
Given an unbounded discrete time flow and a subset $B\subseteq \mathcal{X}$ the first hitting time function is defined for $x\in \mathcal{X}$ by
\begin{equation}\label{e:hitting}
\tau_{B}(x)=\min\{m\in \mathbb N: xh_m\in B\}.
\end{equation}
\begin{Thm}\label{t:hittingtime}
Assume that either $G$ has property $(T)$, or that $G$ is of real rank one.
Let $\{B_t\}_{t>0}$ denote a monotone family of spherical shrinking targets in $\mathcal{X}=\Gamma\backslash G$. Let $\{h_m\}_{m\in \mathbb Z}$ denote an unbounded discrete time flow on $\mathcal{X}$. Then for a.e. $x\in \mathcal{X}$
\begin{equation}
\label{hit}\lim_{t\to\infty}\frac{\log(\tau_{B_t}(x))}{-\log(\mu(B_t))}=1.
\end{equation}
\end{Thm}
\begin{Rem}\label{r:conditional}
Since all simple groups without property $(T)$ are of real rank one, Theorem \ref{t:hittingtime} holds for all noncompact connected simple groups with finite center. In fact, the only groups of higher rank without property $(T)$ are (almost) products $G=\prod G_i$ with at least one of the factors a simple rank one group without property $(T)$. In this case, the same result still holds, unless the flow is a unipotent flow on one of those rank one factors. Even then, according to the congruence subgroup conjecture (see e.g. \cite{Rapinchuk92,Raghunathan04}), for higher rank groups the only irreducible lattices are congruence lattices. For congruence lattices, if we further assume the Selberg-Ramanujan Conjecture (see \cite{Shahidi04,Sarnak05}), the resulting bounds on decay of matrix coefficients are sufficient to obtain the same result also in these cases (in fact, with the exception of $G_i$ locally isomorphic to $\SL_2(\mathbb R)$ for some $i$ the unconditional bounds towards the Selberg-Ramanujan Conjecture obtained in \cite{KimSarnak03, BlomerBrumley11,BergeronClozel13,BergeronClozel17} are already sufficient). Hence, conditional on these conjectures our result holds in full generality.
\end{Rem}
Since spherical sets in the homogenous space, $\Gamma\backslash G$, can be naturally identified with subsets of the locally symmetric space, $\Gamma\backslash \mathcal{H}$, our result holds for any monotone sequence of shrinking targets in $\Gamma\backslash \mathcal{H}$. A special case of such a sequence, that has received much attention, occurs when $\Gamma\backslash \mathcal{H}$ is not compact and the shrinking sets are cusp neighborhoods \cite{Sullivan1982, KleinbockMargulis1999,AthreyaMargulis09, KelmerMohammadi12,AthreyaMargulis14,Yu17}. For these problems, logarithm laws can be expressed in terms of a distance function measuring how far out a point is in the cusp. Explicitly, given a continuous distance function
on $\Gamma\backslash \mathcal{H}$ that we lift to a $K\times K$-invariant function $d(\cdot,\cdot)$ on $\Gamma\backslash G\times \Gamma\backslash G$, we can define spherical shrinking cusp neighborhoods of $\mathcal{X}$ by
$$B_{t}=\{x\in \mathcal{X}: d(x,x_0)>t\},$$
where $x_0\in \mathcal{X}$ is some fixed base point.
We assume that the measure of these cusp neighborhoods decay exponentially with rate $\varkappa>0$ in the sense that
\begin{equation}\label{e:cuspdecay}
\lim_{t\to\infty} \frac{-\log\mu(B_t)}{t}=\varkappa.
\end{equation}
In particular, this holds when the distance function\footnote{While these distance functions are defined on $\Gamma\backslash G$ and are not necessarily lifts from a distance function on $\Gamma\backslash \mathcal{H}$, the corresponding cusp neighborhoods can always be approximated by spherical cusp neighborhoods as explained in Remark \ref{r:sphericalcusps} below.} is determined by a right $G$-invariant, bi $K$-invariant Riemannian
metric on $G$ as in \cite{KleinbockMargulis1999}, or more generally by a norm-like pseudometric on $G$ as in \cite{AthreyaMargulis14}.
For any such distance function, a consequence of Theorem \ref{t:hittingtime} applied to this family of shrinking targets is the following.
\begin{Cor}\label{loglaws}
For $G$ a connected semisimple Lie group with finite center and no compact factors, and $\Gamma\leq G$ an irreducible lattice,
for any unbounded one-parameter flow $\{h_t\}_{t\in\mathbb R}$ on $\mathcal{X}= \Gamma\backslash G$, for a.e. $x\in \mathcal{X}$
\begin{equation}\label{e:loglawcusp}
\limsup_{t\to\infty}\frac{d(xh_t,x_0)}{\log t}=\frac{1}{\varkappa}.
\end{equation}
\end{Cor}
For diagonalizable flows, the logarithm law for cusp excursions \eqref{e:loglawcusp} was established by Sullivan \cite{Sullivan1982} for $G=\SO_0(d,1)$, and by Kleinbock and Margulis \cite{KleinbockMargulis1999} in general.
For unipotent flows, Athreya and Margulis \cite{AthreyaMargulis14} showed that there is some $c\in (0,1]$ so that for a.e. $x\in \mathcal{X}$,
$\limsup_{t\to\infty}\frac{d(xh_t,x_0)}{\log t}=\frac{c}{\varkappa}$, and raised the question if it is always the case that the constant $c=1$. This question was previously answered affirmatively in some specific examples, such as unipotent flows on the space of lattices and on hyperbolic manifolds \cite{AthreyaMargulis09,KelmerMohammadi12,Yu17}. Our result settles this problem and gives an affirmative answer to their question in general.
\subsection{Summable decay of matrix coefficients}
A key ingredient in the proof of Theorem \ref{t:hittingtime} is a uniform rate on decay of spherical matrix elements under one-parameter flows.
We say $\varphi\in L^2(\Gamma\backslash G)$ is spherical if it is invariant under the action of $K$, and we identify the space of spherical functions with $L^2(\Gamma\backslash \mathcal{H})=L^2(\Gamma\backslash G/K )$.
Let $\pi$ denote the regular representation of $G$ on $L^2(\Gamma\backslash G)$ and let $L^2_0(\Gamma\backslash G)$ denote the space orthogonal to the constant functions. For any $\psi,\varphi\in L^2(\Gamma\backslash G)$ and $g\in G$ the corresponding matrix element is given by
$$\left\langle\pi(g)\varphi,\psi\right\rangle=\int_{\Gamma\backslash G}\varphi(xg)\overline{\psi(x)}d\mu(x).$$
By using the results of Oh \cite{Oh98} on effective property $(T)$, we show the following uniform bound on decay of matrix coefficients for one-parameter flows.
\begin{Thm}\label{t:edecay}
Assume that $G$ has property $(T)$ and let $\{h_t\}_{t\in \mathbb R}$ denote an unbounded one-parameter subgroup. For all sufficiently small $\epsilon>0$,
for any spherical $\varphi,\psi\in L^2_0(\Gamma\backslash G)$ and for all $|t|\geq 1$ we have
$$|\left\langle\pi(h_t)\varphi,\psi\right\rangle|\ll_\epsilon\frac{\|\varphi\|_2\|\psi\|_2}{|t|^{1-\epsilon}}.$$
\end{Thm}
It is remarkable that this uniform rate, which is exactly the rate needed for Theorem \ref{t:hittingtime}, can be obtained for all one-parameter flows for all semisimple groups with property $(T)$. The rate of decay here is $\epsilon$ away from being summable, and, while this is good enough to establish a logarithm law, for other applications it just falls short. In many cases, however, we can establish a slightly better rate. In order to distinguish these cases we use the following terminology.
\begin{Def}
We say that a one-parameter flow on $\Gamma\backslash G$ has \emph{Summable Decay} (or SD) if there is $\eta>1$
such that for any spherical $\varphi,\psi\in L^2_0(\Gamma\backslash G)$, for all $|t|\geq 1$ we have
$$|\left\langle\pi(h_t)\varphi,\psi\right\rangle|\ll_\eta\frac{\|\varphi\|_2\|\psi\|_2}{|t|^{\eta}}.$$
\end{Def}
For SD flows we can show the stronger result implying that the bound coming from Borel-Cantelli is sharp.
\begin{Thm}\label{t:SBC}
For any discrete time one-parameter SD flow on $\Gamma\backslash G$, the collection of spherical subsets is strongly Borel-Cantelli.
\end{Thm}
Following this result it would be useful to categorize precisely which one-parameter homogenous flows are SD.
For diagonalizable flows, the exponential decay of matrix coefficients clearly imply summable decay.
For unipotent flows, the decay is polynomial and we don't always have summable decay.
When the group $G$ is a simple Lie group of rank $\geq 2$, the following result gives explicit conditions for when a one-parameter flow has summable decay, in terms of the restricted root system of $G$ and the adjoint representation of the generator of the flow (see section \ref{s:pLie} for more details).
\begin{Thm}\label{t:csd}
Assume that $G$ is simple with real rank $\geq 2$. If the restricted root system is of type $B_n (n\geq 4), D_n (n\geq 4), E_6, E_7, E_8$ or $F_4$, then all unbounded one-parameter flows on $\Gamma\backslash G$ are SD. When the restricted root system is not of the above types, any unipotent one-parameter flow, $h_t=\exp(tX_0)$, with $ad(X_0)^3\neq 0$ is SD.
\end{Thm}
\begin{Rem}
For example, on $\Gamma\backslash SL_3(\mathbb R)$ the unipotent flow given by $h_t=\left(\begin{smallmatrix} 1 & 2t & 2t^2\\ 0 & 1& 2t\\ 0 &0 &1\end{smallmatrix}\right)$ is SD, while the flow given by $h_t=\left(\begin{smallmatrix} 1 & 0 &t\\ 0 & 1 & 0\\ 0 &0 &1\end{smallmatrix}\right)$ is not SD.
\end{Rem}
\begin{Rem}
For some applications it is useful to know the precise rate of decay for matrix coefficients along
unipotent flows, and we remark that
our method gives the following very explicit rate: When $h_t = \exp(tX_0)$ is unipotent, let $l$ be the largest positive integer such that $\textrm{ad}(X_0)^l\neq 0$. Then our method shows that for any spherical $\varphi,\psi\in L^2_0(\Gamma\backslash G)$,
the matrix coefficients $\left\langle\pi(h_t)\varphi,\psi\right\rangle$ are bounded by
$O_{\epsilon}\left(\frac{\|\varphi\|_2\|\psi\|_2}{|t|^{l(1-\epsilon)}}\right)$ when the restricted root system is $B_n (n\geq 4), D_n (n\geq 4), E_6, E_7, E_8$ or $F_4$ and by $O_{\epsilon}\left(\frac{\|\varphi\|_2\|\psi\|_2}{|t|^{l/2(1-\epsilon)}}\right)$ otherwise.
\end{Rem}
\begin{Rem}
We can also characterize SD flows when $G$ is semisimple with property (T). In such a case the flow will be SD unless the flow is essentially trivial except in one of the factors, with the restriction of the flow to this factor not SD.
\end{Rem}
\subsection{Groups of real rank one}
For a group of real rank one with property $(T)$, every discrete time homogenous flow is SD, and hence the collection of spherical sets is sBC. For rank one groups without property $(T)$, the question if all one-parameter flows on $\Gamma\backslash G$ are SD, depends on the spectral gap of $\Gamma$, that is, the size of the smallest non-trivial eigenvalue of the Laplacian on $L^2(\Gamma\backslash \mathcal{H})$. When the spectral gap is sufficiently large all flows are SD (see Corollary \ref{uod} below). However, there are also examples with a small spectral gap, for which unipotent flows may not be SD. Nevertheless, using a spectral decomposition it is still possible to prove that the collection of spherical sets is monotone Borell-Cantelli. This was done in \cite{Kelmer17b} for $G=\SO_0(d+1,1)$ with $d\geq 2$ and the proof is similar for $G$ locally isomorphic to $\SU(d,1)$. Explicitly, for these groups we have the following.
\begin{Thm}\label{t:MBC}
Let $G$ be locally isomorphic to $\SO(d+1,1)$ or $\SU(d,1)$ with $d\geq 2$. Then for any unbounded discrete time one-parameter flow on $\Gamma\backslash G$, the collection of spherical subsets is monotone Borel-Cantelli.
\end{Thm}
\begin{Rem}
In fact, our proof gives something stronger. For a sequence of spherical sets, $\{B_m\}_{m\in \mathbb N}$ (not necessarily monotone) with $\sum_m\mu(B_m)=\infty$, if we assume that $\left\{m\mu(B_m)\right\}_{m\in\mathbb{N}}$ is bounded then we can show this sequence is strongly Borel-Cantelli.
If the sequence is monotone and $\left\{m\mu(B_m)\right\}_{m\in\mathbb{N}}$ is unbounded, we can show instead that there is a subsequence $\{m_j\}$ such that for a.e. $x\in \mathcal{X}$
$$\lim_{j\to\infty}\frac{\#\{1\leq i\leq m_j: xh_i\in B_{m_j}\}}{m_j \mu(B_{m_j})}=1.$$
\end{Rem}
\begin{Rem}
The case when $G$ is locally isomorphic to $\SO(2,1)
$ was considered in \cite{Kelmer17b}. In this case one needs some additional assumptions on the shrinking rate of family $\{B_m\}_{m\in \mathbb N}$ to show that it is BC for a unipotent flow.
\end{Rem}
\subsection{Orbits eventually always hitting}
Assume now that our flow is either SD or that $G$ is of real rank one, so that any monotone sequence of spherical shrinking targets $\{B_m\}_{m\in \mathbb N}$ is BC. In such cases we wish to study the subtler point, of whether the finite orbits
\begin{equation}\label{e:Hm}
xH_m^+=\{xh_j: 1\leq j\leq m\},
\end{equation}
eventually always hit or miss the targets $B_m$.
Using the terminology introduced in \cite{Kelmer17b} we say that an orbit of a point $x\in \mathcal{X}$ is {\em{eventually always hitting}} if $xH_m^+\cap B_m\neq\emptyset$ for all sufficiently large $m$, and {\em{eventually always missing}} if
$xH_m^+\cap B_m=\emptyset$ for all sufficiently large $m$. We
denote by $\mathcal{A}_{\rm ah}$ and $\mathcal{A}_{\rm am}$ the set of points with such orbits respectively.
For the eventually always missing set, we have the dichotomy given by the dynamical Borel-Cantelli Lemma: if $\sum_{m=1}^{\infty}\mu(B_m)<\infty$ then $\mathcal{A}_{\rm am}$ is of full measure and otherwise $\mathcal{A}_{\rm am}$ is a null set.
The eventually always hitting set, $\mathcal{A}_{\rm ah}$, is also either a null set or a set of full measure, but in this case we are not able to establish such an explicit dichotomy.
Here we have the following partial result (extending the result of \cite{Kelmer17b} dealing with the case of $G=\SO_0(d+1,1)$).
\begin{Thm}\label{t:ae}
Fix an unbounded discrete time one-parameter flow on $\Gamma\backslash G$ and assume that either the flow is SD, or that $G$ is of real rank one, not locally isomorphic to $\SO(2,1)$.
Let $\{B_m\}_{m\in \mathbb N}$ denote a monotone sequence of spherical shrinking targets.
If \begin{equation}\label{e:summable}
\sum_{j=0}^\infty \frac{1}{2^{j}\mu(B_{2^j})}<\infty,
\end{equation}
then $\mathcal{A}_{\rm ah}$ is of full measure. Moreover, if we further assume that
$\mu(B_{2m})\asymp \mu(B_m)$, then for a.e. $x\in \mathcal{X}$,
for all sufficiently large $m$
$$\#\{1\leq j\leq m: xh_j\in B_m\}\asymp m\mu(B_m).$$
\end{Thm}
\begin{Rem}
In the other direction, it was shown in \cite{Kelmer17b} that for any ergodic one-parameter flow, for any monotone sequence, $\{B_m\}_{m\in \mathbb N}$, of shrinking targets, if there is $c<1$ such that the set
$\{m: m\mu(B_m)\leq c\}$ is unbounded then $\mathcal{A}_{\rm ah}$ is a null set.
In particular, if we assume that $\mu(B_m)$ decays polynomially in the sense that $\mu(B_m)\asymp m^{-\eta}$ for some fixed $\eta$,
then Theorem \ref{t:ae}, implies that $\mathcal{A}_{\rm ah}$ is a set of full measure when $\eta<1$, and a null set when $\eta>1$.
In this case, however, the same result already follows from Theorem \ref{t:hittingtime} and hence holds also for flows that are not SD.
\end{Rem}
\begin{Rem}
Finally we remark that in a recent work of Kleinbock and Wadleigh \cite{KleinbockWadleigh2017}, they give examples of dynamical systems where they manage to get an explicit dichotomy for $\mathcal{A}_{\rm ah}$, and the dichotomy depends exactly on the convergence and divergence of the series of the type $(\ref{e:summable})$.
\end{Rem}
\section{Shrinking target problems}
We start by taking a closer look at shrinking target problems for a general ergodic $\mathbb{Z}$-action given by the action of a group, $H=\{h_m\}_{m\in\mathbb Z}$, on a probability space $(\mathcal{X},\mu)$. Though we will later apply these results for the case of $\mathcal{X}=\Gamma\backslash G$, in this section we will not assume anything about the space $\mathcal{X}$ and the flow other than ergodicity.
\subsection{The hitting time problem}
Fix a positive integer $i$ throughout this subsection. Instead of the first hitting time function given in \eqref{e:hitting}, we consider the more general $i$th hitting time function:
For any $x\in \mathcal{X}$ and any set $B\subset \mathcal{X}$ let
\begin{equation}\label{e:ihitting}
\tau^{i}_{B}(x):=\min\{m\in\mathbb{N}\ |\ |xH_m^+\cap B|=i\}\end{equation}
measure the time needed for the orbit of $x$ to enter the target $B$ for the $i$th time (here $xH_m^+$ is as in \eqref{e:Hm}). In preparation for Theorem \ref{t:hittingtime}, our goal in this section is to give sufficient conditions on a monotone family of shrinking targets, $\{B_t\}_{t> 0}$, implying that for a.e. $x\in \mathcal{X}$
\begin{equation}\label{e:limtaui}
\lim\limits_{t\to \infty}\frac{\log \tau^{i}_{B_t}(x)}{-\log\mu(B_t)}=1.
\end{equation}
For any $0<\delta<\frac12$ we define the sets
$$\mathcal{L}_{\delta}^i:=\{x\in\mathcal{X}\ |\ \liminf_{t\to\infty}\frac{\log \tau_{B_t}^i(x)}{-\log \mu(B_t)}\leq 1-2\delta\},$$
$$\mathcal{U}_{\delta}^{i}:=\{x\in\mathcal{X}\ |\ \limsup_{t\to\infty}\frac{\log \tau_{B_t}^i(x)}{-\log \mu(B_t)}\geq 1+2\delta\},$$
and note that the condition $\mu(\mathcal{U}_{\delta}^i)=0$ for all $0<\delta<\frac12$ implies that for a.e. $x\in \mathcal{X}$
\begin{equation}\label{upper.bound}
\limsup\limits_{t\to\infty}\frac{\log \tau^{i}_{B_t}(x)}{-\log\mu(B_t)}\leq 1
\end{equation}
and similarly the condition that $\mu(\mathcal{L}_{\delta}^i)=0$ for all $0<\delta<\frac12$ implies that for a.e. $x\in \mathcal{X}$
\begin{equation}\label{lower.bound}
\liminf\limits_{t\to \infty}\frac{\log \tau^{i}_{B_t}(x)}{-\log\mu(B_t)}\geq 1.
\end{equation}
Now, for any integer $m\geq 1$ and measurable subset $B\subset \mathcal{X}$, define the hitting set
\begin{equation}\label{e:hitting1}\mathcal{H}_{m,B}^i:=\left\{x\in\mathcal{X}\ |\ \tau_B^i(x)\leq m \right\}
\end{equation}
and its complement
\begin{equation}\label{e:missing}\mathcal{M}^i_{m,B}:=\left\{x\in \mathcal{X}\ |\ \tau_B^i(x)> m\right\}.\end{equation}
Note that
$x\in \mathcal{H}^i_{m,B}$ (resp. $x\in\mathcal{M}^i_{m,B}$) means the first $m$ steps in the orbit of $x$ hit the set $B$ at least (resp. strictly less than) $i$ times. For any $0<\delta<\frac12$ and any $t>0$ let
$$m^{\pm}_{\delta}(t)=\floor{\frac{1}{\mu(B_t)^{(1\pm\delta)}}}.$$
If $x\in \mathcal{L}_{\delta}^i$, then there exists an unbounded sequence of $t$ such that $\frac{\log\tau_{B_t}^i(x)}{-\log\mu(B_t)}< 1-\delta$, or equivalently, $\tau_{B_t}^i(x)<\frac{1}{\mu(B_t)^{(1-\delta)}}$. Since $\tau_{B_t}^i(x)$ is integer-valued, this implies that $\tau_{B_t}^i(x)\leq \floor{\frac{1}{\mu(B_t)^{(1-\delta)}}}$ for unbounded values of $t$. Hence, $x\in \mathcal{L}_{\delta}^i$ implies that $x\in \mathcal{H}_{m^{-}_{\delta}(t),B_t}^i$ for unbounded values of $t$. Similarly, $x\in \mathcal{U}_{\delta}^i$ implies that $x\in \mathcal{M}^i_{m^{+}_{\delta}(t), B_t}$ for unbounded values of $t$. Let $\mathcal{N}$ denote the set of integers $\ell\geq 0$ such that $\{\mu(B_t)\ |\ t> 0\}\cap [\frac{1}{2^{\ell+1}}, \frac{1}{2^{\ell}})$ is nonempty.
Note that $\mathcal{N}$ is unbounded since $\lim\limits_{t\to\infty}\mu(B_t)= 0$. Thus
$$\mathcal{L}_{\delta}^i\subset \bigcap_{m=0}^{\infty}\bigcup_{\substack{\ell\geq m\\\ell\in\mathcal{N}}}\bigcup_{\frac{1}{2^{\ell+1}}\leq \mu(B_t)< \frac{1}{2^{\ell}}}\mathcal{H}_{m^-_{\delta}(t),B_t}^i$$
and
$$\mathcal{U}_{\delta}^i\subset \bigcap_{m=0}^{\infty}\bigcup_{\substack{\ell\geq m\\\ell\in\mathcal{N}}}\bigcup_{\frac{1}{2^{\ell+1}}\leq \mu(B_t)< \frac{1}{2^{\ell}}}\mathcal{M}_{m^+_{\delta}(t),B_t}^i.$$
For each $\ell\in\mathcal{N}$, let
$$\overline{B}_{\ell}:= \bigcup_{\frac{1}{2^{\ell+1}}\leq \mu(B_t)< \frac{1}{2^{\ell}}} B_t\quad\textrm{and}\quad \underline{B}_{\ell}:= \bigcap_{\frac{1}{2^{\ell+1}}\leq \mu(B_t)< \frac{1}{2^{\ell}}} B_t.$$
Since $\{B_t\}_{t>0}$ is monotone,
$$\frac{1}{2^{\ell+1}}\leq \mu\left(\underline{B}_{\ell}\right)\leq \mu\left(\overline{B}_{\ell}\right)\leq \frac{1}{2^{\ell}}.$$
Moreover, for any $t$ such that $\mu(B_t)\in [\frac{1}{2^{\ell+1}},\frac{1}{2^{\ell}})$,
$$\floor{2^{\ell(1\pm\delta)}}< m^{\pm}_{\delta}(t)\leq \floor{2^{(\ell+1)(1\pm\delta)}}.$$
By construction, for any $m\leq m'$ and $B\subset B'$, $\mathcal{H}_{m,B}^i\subset \mathcal{H}_{m',B'}^i$ and $\mathcal{M}_{m,B}^i\supset \mathcal{M}_{m',B'}^i$. Hence for any $\ell\in\mathcal{N}$, we have
$$\bigcup_{\frac{1}{2^{\ell+1}}\leq \mu(B_t)< \frac{1}{2^{\ell}}}\mathcal{H}_{m^-_{\delta}(t),B_t}^i\subset \mathcal{H}^i_{\floor{2^{(\ell+1)(1-\delta)}}, \overline{B}_{\ell}}\quad\textrm{and}\quad \bigcup_{\frac{1}{2^{\ell+1}}\leq \mu(B_t)< \frac{1}{2^{\ell}}}\mathcal{M}_{m^+_{\delta}(t),B_t}^i\subset \mathcal{M}^i_{\floor{2^{\ell(1+\delta)}},\underline{B}_{\ell}}.$$
Combining the above arguments gives the following:
\begin{Lem}\label{ght}
Let $\{B_t\}_{t>0}$ be a monotone family of shrinking targets in $\mathcal{X}$. If for all sufficiently small $\delta>0$
\begin{equation}\label{lb}
\sum_{\ell\in\mathcal{N}}\mu\left(\mathcal{H}^i_{\floor{2^{(\ell+1)(1-\delta)}}, \overline{B}_\ell}\right)< \infty,
\end{equation}
then the lower bound $(\ref{lower.bound})$ holds for a.e. $x\in\mathcal{X}$. Similarly, if for all sufficiently small $\delta>0$
\begin{equation}\label{up}
\sum_{\ell\in\mathcal{N}}\mu\left(\mathcal{M}^i_{\floor{2^{\ell(1+\delta)}},\underline{B}_\ell}\right)<\infty,
\end{equation}
\end{equation}
then the upper bound $(\ref{upper.bound})$ holds for a.e. $x\in\mathcal{X}$.
\end{Lem}
In the following sections we shall show that \eqref{up} holds for one-parameter flows on homogeneous spaces. The condition \eqref{lb}, on the other hand, holds in general without any extra assumption on the flow or the shrinking targets. Hence, the following lower bound holds in general.
\begin{Lem}
\label{lower bound} Let $\{B_t\}_{t>0}$ be a monotone family of shrinking targets in $\mathcal{X}$. Then $(\ref{lb})$ holds for all $0< \delta< \frac12$. In particular, for a.e. $x\in \mathcal{X}$
$$\liminf\limits_{t\to \infty}\frac{\log \tau^i_{B_t}(x)}{-\log\mu(B_t)}\geq 1.$$
\end{Lem}
\begin{proof}
For any integer $m\geq 1$ and any measurable set $B\subset \mathcal{X}$, we first show the trivial estimate
$\mu\left(\mathcal{H}_{m,B}^i\right)\leq m\mu(B)$. By definition $\mathcal{H}_{m,B}^i= \bigcup_{k=i}^m\left\{x\in\mathcal{X}\ |\ \tau_B^i(x)=k\right\}$ and by minimality $\left\{x\in\mathcal{X}\ |\ \tau_B^i(x)=k\right\}\subset \left\{x\in\mathcal{X}\ |\ xh_k\in B\right\}=Bh_{-k}$. Hence indeed $\mu\left(\mathcal{H}_{m,B}^i\right)\leq m\mu(B)$. For each $\ell\in\mathcal{N}$ applying this estimate to $\mathcal{H}^i_{\floor{2^{(\ell+1)(1-\delta)}}, \overline{B}_{\ell}}$ we get
$$\mu\left(\mathcal{H}^i_{\floor{2^{(\ell+1)(1-\delta)}}, \overline{B}_{\ell}}\right)\leq \floor{2^{(\ell+1)(1-\delta)}}\mu(\overline{B}_{\ell})\ll 2^{-\delta\ell},$$
where for the last inequality we used $\mu(\overline{B}_{\ell})\leq \frac{1}{2^{\ell}}$.
Hence
\begin{displaymath}
\sum_{\ell\in\mathcal{N}}\mu\left(\mathcal{H}^i_{\floor{2^{(\ell+1)(1-\delta)}}, \overline{B}_{\ell}}\right)\ll \sum_{\ell=0}^{\infty} 2^{-\delta\ell}<\infty.\qedhere
\end{displaymath}
\end{displaymath}
\end{proof}
\subsection{Orbits eventually always hitting}
Given a monotone sequence of shrinking targets $\left\{B_m\right\}_{m\in\mathbb N}$, we defined the eventually always hitting set to be
$$\mathcal{A}_{\mathrm{ah}}=\left\{x\in \mathcal{X}\ |\ \textrm{$xH_m^+\cap B_m\neq \emptyset$ for all sufficiently large $m$}\right\}.$$
In \cite[Proposition 12 and Lemmas 13,14]{Kelmer17b} the first author gave sufficient conditions implying that $\mathcal{A}_{\mathrm{ah}}$ is a null or co-null set. For the reader's convenience we summarize these results in the following:
\begin{Lem}\label{l:EventuallyHitting}
Given a measure preserving ergodic $\mathbb Z$-action of a group $H$ on a probability space $(\mathcal{X},\mu)$, and a monotone sequence, $\{B_m\}_{m\in \mathbb N}$, of shrinking targets.
\begin{enumerate}
\item
If along some subsequence, we have that $m_j\mu(B_{m_j})\leq c<1$, then $\mu(\mathcal{A}_{\rm ah})=0$.
\item If
$\sum_j \mu(\mathcal{M}^1_{2^{j-1},B_{2^{j}}})<\infty$ then $\mu(\mathcal{A}_{\rm ah})=1$. If in addition also $\mu(B_{2^j})\asymp \mu(B_{2^{j+1}})$ and $\sum_j \mu(\mathcal{M}^1_{2^{j+1},B_{2^{j}}})<\infty$ then for a.e. $x\in \mathcal{X}$, for all sufficiently large $m$
$$\#(xH^+_m\cap B_m)\asymp m\mu(B_m).$$
\end{enumerate}
\end{Lem}
The results of \cite{Kelmer17b} were given for a more general setting of $\mathbb Z^d$-actions. For $\mathbb Z$-actions we also have the following lemma stating that $\mathcal{A}_{\mathrm{ah}}$ is always either null or co-null.
\begin{Lem}
\label{zol}
Let $H$ be a measure preserving ergodic $\mathbb Z$-action on a probability space $(\mathcal{X},\mu)$ and let $\{B_m\}_{m\in \mathbb N}$ denote a monotone sequence of shrinking targets.
Then $\mathcal{A}_{\mathrm{ah}}$ has measure either zero or one.
\end{Lem}
\begin{proof}
Suppose $\mu(\mathcal{A}_{\mathrm{ah}})>0$, we want to show that $\mu(\mathcal{A}_{\mathrm{ah}})=1$.
Define the set
$$\mathcal{A}':=\{x\in\mathcal{A}_{\mathrm{ah}}\ |\ xh_k\in \mathcal{A}_{\mathrm{ah}}\ \textrm{for all $k\geq 1$}\}.$$
It is clear that $\mathcal{A}'h_1\subset \mathcal{A}'$. Hence by ergodicity it suffices to show that $\mu(\mathcal{A}')>0$. To show this, we prove that $\mu(\mathcal{A}_{\mathrm{ah}}\backslash\mathcal{A}')=0$. Note that
$$\mathcal{A}_{\mathrm{ah}}\backslash\mathcal{A}'=\{x\in\mathcal{A}_{\mathrm{ah}}\ |\ xh_k\notin\mathcal{A}_{\mathrm{ah}}\ \textrm{for some $k\geq 1$}\}=\bigcup_{k=1}^{\infty}\{x\in\mathcal{A}_{\mathrm{ah}}\ |\ xh_k\notin \mathcal{A}_{\mathrm{ah}}\}.$$
Hence, it suffices to show that for any fixed $k$, the set $\{x\in\mathcal{A}_{\mathrm{ah}} |\ xh_k\notin \mathcal{A}_{\mathrm{ah}}\}$ has measure zero.
We note that $x\in\mathcal{A}_{\mathrm{ah}}$ means that for all sufficiently large $m$, there exists some $1\leq j(m)\leq m$ such that $xh_{j(m)}\in B_m$. If for all sufficiently large $m$ we can take $j(m)> k$, then we have $xh_kh_{j(m)-k}\in B_m$ with $1\leq j(m)-k< m$, thus $xh_k\in \mathcal{A}_{\mathrm{ah}}$. Hence, if $x\in\mathcal{A}_{\mathrm{ah}}$ but $xh_k\notin\mathcal{A}_{\mathrm{ah}}$, then there exist infinitely many values of $m$ for which there exists some $1\leq j(m)\leq k$ such that $xh_{j(m)}\in B_m$ but $xh_i\notin B_m$ for all $k< i\leq m$. In particular, for such $x$, there exists some $1\leq j\leq k$ such that $xh_j\in B_m$ for infinitely many values of $m$. Since $\{B_m\}_{m\in\mathbb N}$ is monotone, this implies that $xh_j\in \bigcap_{m=1}^{\infty}B_m$. Hence the set
$$\left\{x\in\mathcal{A}_{\mathrm{ah}}\ |\ xh_k\notin \mathcal{A}_{\mathrm{ah}}\right\}\subset \bigcup_{j=1}^{k}\left(\bigcap_{m=1}^{\infty}B_m\right)h_{-j},$$
is of measure zero.
\end{proof}
\subsection{Dynamical Borel-Cantelli and Quasi-independence}
The second part of the classical Borel-Cantelli lemma requires pairwise independence.
The following argument, going back to Schmidt, shows that a weaker condition of quasi-independence is enough.
Explicitly, let $\mathcal{F}=\{f_m\}_{m\in\mathbb{N}}$ denote a sequence of functions on the probability space $(\mathcal{X},\mu)$ taking values in $[0,1]$. For $m\in \mathbb{N}$ let $E_m^{\mathcal{F}}= \sum_{1\leq j\leq m}\mu(f_j)$ and $S_m^{\mathcal{F}}(x)= \sum_{1\leq j\leq m}f_j(x)$. We then have:
\begin{Lem}{\cite[Chapter I, Lemma 10]{Sp79}}
\label{gbc}Assuming that for some constant $C>0$, for all $m,n\in \mathbb{N}$
\begin{equation}
\label{keybound}\int_{\mathcal{X}}\left(\sum_{i=m}^nf_i(x)-\sum_{i=m}^n\mu(f_i)\right)^2d\mu(x)\leq C\sum_{i=m}^n\mu(f_i),
\end{equation}
then for any $\epsilon>0$ for a.e. $x\in\mathcal{X}$
$$S_m^{\mathcal{F}}(x)= E_m^{\mathcal{F}}+ O_{\epsilon}\bigg(\sqrt{E_m^{\mathcal{F}}}\log^{\frac32+\epsilon}(E_m^{\mathcal{F}})\bigg).$$
In particular, if $\left\{E_m^{\mathcal{F}}\right\}_{m\in\mathbb{N}}$ is unbounded, then for a.e. $x\in\mathcal{X}$
$$\lim_{m\to\infty}\frac{S_{m}^{\mathcal{F}}(x)}{E_m^{\mathcal{F}}}= 1.$$
\end{Lem}
Given a $\mathbb Z$-action of a group $H=\{h_m\}_{m\in \mathbb Z}$ on $\mathcal{X}$, and a sequence of targets, $\{B_m\}_{m\in\mathbb{N}}$, let $f_j(x)=\chi_{B_j}(xh_j)$ so that $S_m^{\mathcal{F}}(x)=\#\{1\leq j\leq m\ |\ xh_j\in B_j\}$. If the events, $xh_j\in B_j$, were pairwise independent, then the left hand side of \eqref{keybound} would be zero. This result shows that the weaker quasi-independence bound \eqref{keybound}, is enough to show that the sequence $\{B_m\}_{m\in \mathbb N}$ is sBC for the flow.
\section{Decay of matrix coefficients}
We now collect the needed results on the decay of matrix coefficients for representations of semisimple Lie groups, and use them to prove Theorem \ref{t:edecay}, giving a uniform bound for decay of matrix coefficients for all one-parameter flows, as well as Theorem \ref{t:csd} classifying SD flows. We first give some background on semisimple Lie groups, their Lie algebras and restricted root systems.
We then focus on simple groups and treat the cases of simple groups of real rank one and simple groups of higher rank separately. Finally, we combine our results on simple groups to handle the general case of semisimple groups with property $(T)$.
\subsection{Preliminaries on Lie groups}\label{s:pLie}
Let $G$ be a connected semisimple Lie group with finite center and no compact factors,
and let $\mathfrak g$ denote its Lie algebra. Fix a Cartan involution, $\theta$, on $\mathfrak g$ and let
$$\mathfrak g=\mathfrak{k}\oplus \mathfrak{p}$$
denote the corresponding Cartan decomposition, where $\mathfrak{k}$ is the $+1$ eigenspace and $\mathfrak{p}$ is the $-1$ eigenspace of $\theta$. Let $K\leq G$ denote the maximal compact subgroup with Lie algebra $\mathfrak k$. Let $\mathfrak a$ be a maximal abelian subspace of $\mathfrak{p}$, and $\mathfrak{m}$ the centralizer of $\mathfrak a$ in $\mathfrak{k}$. Denote by $\Phi=\Phi_{\mathbb R}(\mathfrak a,\mathfrak g)$ the set of restricted roots with respect to the pair $(\mathfrak a, \mathfrak g)$. Fix a set of simple roots $\Delta=\left\{\alpha_i\ |\ i\in I\right\}$ and let $\Phi^+=\Phi_{\mathbb R}^+(\mathfrak a,\mathfrak g)$ be the corresponding set of positive roots.
Then $\mathfrak{g}$ has a root-space decomposition
$$\mathfrak{g}=\mathfrak{m}\oplus\mathfrak{a}\oplus\bigoplus_{\lambda\in\Phi_{\mathbb R}(\mathfrak{a},\mathfrak{g})}\mathfrak{g}_{\lambda}$$ and an Iwasawa decomposition
$$\mathfrak{g}=\mathfrak{n}\oplus\mathfrak{a}\oplus\mathfrak{k},$$
where $\mathfrak{n}=\bigoplus_{\lambda\in\Phi_{\mathbb R}^{+}(\mathfrak{a},\mathfrak{g})}\mathfrak{g_{\lambda}}$. Let $G=NAK$ be the corresponding Iwasawa decomposition of $G$. Let $\mathfrak a^{+}=\left\{X\in\mathfrak a\ |\ \alpha(X)\geq 0\ \textrm{for all $\alpha\in \Delta$}\right\}$ be the positive Weyl chamber determined by $\Delta$, and $A^{+}$ the corresponding positive Weyl chamber in $A$. The choice of $\Delta$ determines a partial order on $\Phi^+_{\mathbb R}(\mathfrak a,\mathfrak g)$ in the sense that $\lambda\geq \lambda'$ if and only if $\lambda(X)\geq \lambda'(X)$ for any $X\in \mathfrak a^+$, and we can fix a total order
\begin{equation}\label{e:order}
\lambda_1\geq \lambda_2\geq \cdots\geq \lambda_L
\end{equation}
on $\Phi^+_{\mathbb R}(\mathfrak a,\mathfrak g)$ that is compatible with this partial order, where $L=|\Phi_{\mathbb R}^+(\mathfrak a,\mathfrak g)|$ (see \cite[p. 155]{Knapp02} for an example of such an order). Let $d_i=\mathrm{dim}( \mathfrak{g}_{\lambda_i})$ and $d_0=\mathrm{dim} (\mathfrak{m}\oplus \mathfrak{a})$. The following lemma gives a nice matrix representation of the adjoint Lie algebra.
\begin{Lem}{$($cf. \cite[Lemma 6.45]{Knapp02}$)$}
\label{reflemma} There exists a basis of $\mathfrak{g}$ compatible with the above root-space decomposition such that the matrices representing $\textrm{ad}( \mathfrak{g})$ have the following properties:
\begin{enumerate}
\item The matrices of ad $\mathfrak{k}$ are skew symmetric,
\item the matrices of ad $\mathfrak{n}$ are upper triangular with $0$ on the diagonal.
\item the matrices of ad $\mathfrak{a}$ are diagonal with real entries with
\end{enumerate}
$$\textrm{ad}(X)=\begin{pmatrix}
\lambda_1(X)I_{d_1} & & & & & & &\\
&\ddots& & & & & &\\
& &\lambda_L(X)I_{d_L}& & & & &\\
& & & 0I_{d_0}& & & &\\
& & & & -\lambda_L(X)I_{d_L}& & &\\
& & & & & \ddots& & \\
& & & & & & -\lambda_1(X)I_{d_1}
\end{pmatrix},$$
for $X\in \mathfrak a$ where $I_{d_i}$ is the $d_i\times d_i$ identity matrix.
\end{Lem}
As an immediate consequence, we have the following description of $\textrm{Ad}(G)$.
\begin{Cor}
\label{cor 1}There exists a basis of $\mathfrak{g}$ such that the matrices representing $\textrm{Ad} (G)$ have the following properties:
\begin{enumerate}
\item The matrices of $\textrm{Ad} (K)$ are orthogonal,
\item the matrices of $\textrm{Ad} (N)$ are upper triangular with $1$'s on the diagonal,
\item the matrices of $\textrm{Ad} (A)$ are diagonal with real entries with
\end{enumerate}
$$\mathrm{Ad}(\exp(X))=\begin{pmatrix}
e^{\lambda_1(X)}I_{d_1} & & & & & & &\\
&\ddots& & & & & &\\
& &e^{\lambda_L(X)}I_{d_L}& & & & &\\
& & & I_{d_0}& & & &\\
& & & & e^{-\lambda_L(X)}I_{d_L}& & &\\
& & & & & \ddots& & \\
& & & & & & e^{-\lambda_1(X)}I_{d_1}
\end{pmatrix},$$
for all $X\in \mathfrak{a}$.
\end{Cor}
We will fix such a basis once and for all and use it to identify $\textrm{ad}(\mathfrak g)$ and $\textrm{Ad}(G)$ with the corresponding matrix groups. For future reference, we prove the following short lemma.
\begin{Lem}\label{Lem 2}\label{l:nilpotnent}
For any nonzero $X\in\mathfrak g$
we have that $\left(\textrm{ad}(X)\right)^2\neq 0$.
\end{Lem}
\begin{proof}
It suffices to show that $\textrm{ad}(X)\circ\textrm{ad}(X)$ is nontrivial as an endomorphism of $\mathfrak g$. If $\textrm{ad}(X)$ is not nilpotent there is nothing to show. If it is nilpotent, by \cite[Theorem 7.4, p. 432]{Helgason78} there exist elements $H, Y\in\mathfrak g$ such that
$$[H,X]=2X,\qquad [H,Y]=-2Y,\qquad [X,Y]=H.$$
Applying $\textrm{ad}(X)\circ\textrm{ad}(X)$ to $Y$ we get $$\left(\textrm{ad}(X)\circ\textrm{ad}(X)\right)(Y)= \left[X,[X,Y]\right]=[X,H]= -2X\neq 0,$$
thus completing the proof.
\end{proof}
\subsection{Cartan decomposition of one-parameter subgroups}
We now consider the case where $G$ is simple, that is $\mathfrak g$ is simple as a real Lie algebra, and study the Cartan decomposition of a one-parameter subgroup $\{h_t=\exp(tX_0)\}$, with $X_0\in\mathfrak g$.
Fix an order $\lambda_1\geq \lambda_2\geq \ldots\geq \lambda_L$ on the set of positive roots $\Phi^+$ as in \eqref{e:order}. Since $G$ is simple, $\Phi$ is irreducible and the root $\lambda_1\in \Phi^+$ is the highest root, characterized by the property that $\lambda_1(X)\geq \lambda(X)$ for any $\lambda\in \Phi^+$ and $X\in \mathfrak a^+$. Recalling the Cartan decomposition $G=KA^+K$, we can write
\begin{equation}
\label{cartan}h_t= k_1(t)\exp\left(X(t)\right)k_2(t)
\end{equation}
with $k_i(t)\in K$ and $X(t)\in \mathfrak a^{+}$, and note that $X(t)\in\mathfrak a^+$ is uniquely determined by $(\ref{cartan})$. Now, by the complete additive Jordan decomposition, there exist three pairwise commuting elements $X_{\mathfrak n}$, $X_{\mathfrak a}$ and $X_{\mathfrak k}$ in $\mathfrak g$ such that $X_0=X_{\mathfrak n}+X_{\mathfrak a}+X_{\mathfrak k}$ with $\textrm{ad}(X_{\mathfrak n}), \textrm{ad}(X_{\mathfrak a})$ and $\textrm{ad}(X_{\mathfrak k})$ $\textrm{Ad}(G)$-conjugate to elements in $\textrm{ad}(\mathfrak n), \textrm{ad}(\mathfrak a)$ and $\textrm{ad}(\mathfrak k)$ respectively.
We say a one-parameter subgroup is \emph{quasi-diagonalizable} if $X_{\mathfrak a}\neq 0$, and that is \emph{quasi-unipotent} if $X_{\mathfrak a}=0$ and $X_{\mathfrak n}\neq 0$ (note that any unbounded subgroup is either quasi-diagonalizable or quasi-unipotent). We first prove the following:
\begin{Prop}\label{mprop}
Given an unbounded one-parameter subgroup $h_t= k_1(t)\exp\left(X(t)\right)k_2(t)
$, if it is quasi-diagonalizable then
there exists some constant $c>0$ such that
$$e^{\lambda_1(X(t))}\gg e^{ct},$$
and if it is quasi-unipotent then there exists some integer $l\geq 2$ such that
$$e^{\lambda_1(X(t))}\asymp |t|^l.$$
\end{Prop}
\begin{proof}
For any $g\in G$, let $\|g\|^2:=\textrm{tr}\left(\textrm{Ad}(g)^t\textrm{Ad}(g)\right)$. That is, $\|g\|^2$ is the Hilbert-Schmidt norm of $\textrm{Ad}(g)$, and equals the sum of squares of entries of $\textrm{Ad}(g)$.
By Cauchy-Schwartz, $\|g_1g_2\|\leq \|g_1\|\|g_2\|$ for any $g_1, g_2\in G$, so in particular, for any fixed $g_0\in G$ and all $h\in G$ we have
\begin{equation}\label{hsb}
\|g_0^{-1}hg_0\|\asymp_{g_0}\|h\|.
\end{equation}
Now, on one hand, in view of $(\ref{cartan})$ and Corollary \ref{cor 1},
$$\|h_t\|^2=\textrm{tr}\left(\textrm{Ad}\left(\exp(X(t))\right)^2\right)=\sum_{i=1}^L d_i\left(e^{2\lambda_i\left(X(t)\right)}+ e^{-2\lambda_i\left(X(t)\right)}\right)+d_0,$$
and since $X(t)\in \mathfrak a^+$ we have that $\lambda_1\left(X(t)\right)\geq \lambda_i\left(X(t)\right)\geq 0$ for all $\lambda_i\in \Phi^+$, hence,
$$\|h_t\|^2\asymp e^{2\lambda_1\left(X(t)\right)}.$$
On the other hand, since $X_{\mathfrak n}, X_{\mathfrak a}$ and $X_{\mathfrak k}$ are pairwise commuting,
$$\|h_t\|=\|\exp(tX_0)\|=\|\exp(tX_{\mathfrak n})\exp(tX_{\mathfrak a})\exp(tX_{\mathfrak k})\|.$$
Moreover, since $\textrm{ad}(X_{\mathfrak k})$ is $\textrm{Ad}(G)$-conjugate to some element in $\textrm{ad}(\mathfrak k)$, the one-parameter subgroup $\{\exp(tX_{\mathfrak k})\}_{t\in\mathbb R}$ is compact, and $\{\|\exp(tX_{\mathfrak k})\|\}_{t\in \mathbb R}$ is uniformly bounded from above (by constants depending only on the generator $X_0$). Hence by Cauchy-Schwartz
$$\|h_t\|\asymp \|\exp(tX_{\mathfrak n})\exp(tX_{\mathfrak a})\|.$$
If $X_{\mathfrak a}$ is nonzero, then there exists some $g\in G$ such that $\textrm{Ad}(g)^{-1}\mathrm{ad}(X_{\mathfrak a})\textrm{Ad}(g)=\mathrm{ad}(X_{\mathfrak a}')$ with $X_{\mathfrak a}'\in \mathfrak a$ nonzero. Thus by $(\ref{hsb})$,
$$\|h_t\|^2\asymp \|g^{-1}\exp(tX_{\mathfrak n})g\exp(tX_{\mathfrak a}')\|^2.$$
Since $\textrm{ad}(X_{\mathfrak n})$ is nilpotent, the entries of $\textrm{Ad}(g^{-1}\exp(tX_{\mathfrak n})g)$ are all polynomials in $t$ and since it is invertible, each row has at least one nonzero entry. Since $X_{\mathfrak a}'\in \mathfrak a$, there exists some root $\lambda\in \Phi^+$ such that $|\lambda(X_{\mathfrak a}')|=c>0$. In view of Corollary \ref{cor 1}, the matrix $\textrm{Ad}\left(g^{-1}\exp(tX_{\mathfrak n})g\exp(tX_{\mathfrak a}')\right)$ has at least one entry of the form $e^{ct}P(t)$, where $P(t)$ is some nontrivial polynomial in $t$. Hence in this case, for all sufficiently large $t$,
$$e^{2\lambda_1(X(t))}\asymp \|h_t\|^2\geq e^{2ct}.$$
If $X_{\mathfrak a}=0$, then $X_{\mathfrak n}$ is nonzero. Let $l$ be the unique integer such that $\left(\textrm{ad}(X_{\mathfrak n})\right)^{l}\neq 0$ and $\left(\textrm{ad}(X_{\mathfrak n})\right)^{l+1}= 0$ and note that $l\geq 2$ by Lemma \ref{l:nilpotnent}. In this case, the entries of $\textrm{Ad}\left(\exp(tX_{\mathfrak n})\right)$ are all polynomials in $t$ with degree less than or equal to $l$, and there exists some entry with degree exactly $l$. Hence in this case, for all sufficiently large $t$
\begin{displaymath}
e^{2\lambda_1(X(t))}\asymp \|h_t\|^2\asymp \|\exp(tX_{\mathfrak n})\|^2\asymp t^{2l}.\qedhere
\end{displaymath}
\end{proof}
\subsection{Simple groups of real rank one}
We recall that, up to local isomorphism, a rank one group $G$ is in one of the following four families of groups: $\SO(d,1), \SU(d,1)$, $\Sp(d,1),$ with $d\geq 2$ and $\mathrm{F}_4^{-20}$. In these cases, the positive restricted root system $\Phi_{\mathbb R}^{+}(\mathfrak a,\mathfrak g)$ consists of one or two elements. Let $\alpha$ be the unique element in $\Phi_{\mathbb R}^{+}(\mathfrak a,\mathfrak g)$ such that $\frac12 \alpha\notin \Phi_{\mathbb R}^{+}(\mathfrak a,\mathfrak g)$. Let $p$ be the dimension of $\mathfrak g_{\alpha}$, $q$ be the dimension of $\mathfrak g_{2\alpha}$ and $\rho=\frac12(p+2q)\alpha$ be half the sum of the positive roots with multiplicities. Explicitly we have that $p, q$ and $\rho$ are as follows:
\begin{displaymath}
\begin{tabular}{| c | c | c | c | c | }
\hline
& $SO(d,1)$ & $SU(d,1)$ & $Sp(d,1)$ & $F_4^{-20}$ \\
\hline
$p$ & $d-1$ & $2(d-1)$ &$4(d-1)$ & $8$\\
\hline
$q$ & $0$ & $1$ & $3$ & $7$\\
\hline
$\rho$ & $\frac{d-1}{2}\alpha$ & $d\alpha$ & $(2d+1)\alpha$ & $11\alpha$\\
\hline
\end{tabular}
\end{displaymath}
Let $\mathfrak a_{\mathbb C}^{\ast}$ be the complexified dual of $\mathfrak a$ and fix $X_1$ to be the unique element in $\mathfrak a$ such that $\alpha(X_1)=1$. We identify $\mathfrak a_{\mathbb C}^{\ast}$ with $\mathbb C$ via their values at $X_1$. Denote by $\widehat{G}$ the unitary dual of $G$ and $\widehat{G}_{K}$ the spherical unitary dual. The spherical unitary dual can be parameterized by $\mathfrak a_{\mathbb C}^{\ast}/W$ where $W$ is the Weyl group. Let $\rho_0\in \mathfrak a_{\mathbb C}^{\ast}$ be defined by
\[\rho_0 = \left\{
\begin{array}{lr}
\rho & \textrm{$\mathfrak g=\mathfrak{so}(d,1)$ or $\mathfrak{su}(d,1)$}\\
\rho-2\alpha & \textrm{$\mathfrak g= \mathfrak{sp}(d,1)$}\\
\rho-6\alpha & \textrm{$\mathfrak g= \mathfrak{f}_4^{-20}.$}
\end{array}
\right.
\]
Then with the above identification between $\mathfrak a_{\mathbb C}^{\ast}$ and $\mathbb C$ we have the parametrization
$$\widehat{G}_{K}=\{\pi_s\ |\ s\in i\mathbb R_{\geq0} \cup (0, \rho_0)\}\cup \{\pi_{\rho}\},$$
where the representations $\pi_s, s\in i\mathbb R_{\geq 0}$ are the (tempered) principal series representations, the representations $\pi_s, s\in (0, \rho_0)$ are the (non-tempered) complementary series (cf. \cite{Kostant69}), and $\pi_{\rho}$ is the trivial representation. We note that in each representation $\pi_s\in \widehat{G}_{K}$ there is a unique (up to scaling) spherical vector.
For any lattice $\Gamma\leq G$ consider the right regular representation of $G$ on $L^2(\Gamma\backslash G)$. We denote by
$L^2_{\mathrm{temp}}(\Gamma\backslash G)$ the subspace that weakly contains only tempered representations. We then have a spectral decomposition for any spherical $f\in L^2(\Gamma\backslash G)$
\begin{equation}
\label{sdc}f= \langle f,1\rangle +\sum_{k}\langle f,\varphi_k\rangle \varphi_k + f_{\textrm{temp}},
\end{equation}
with $f_{\textrm{temp}}\in L^2_{\textrm{temp}}(\Gamma\backslash G)$ and $\varphi_k\in\pi_{s_k}$ with $s_k\in (0,\rho_0)$.
After identifying spherical functions in $L^2(\Gamma\backslash G)$ with functions in $L^2(\Gamma\backslash \mathcal{H})$, the vectors $\varphi_k\in\pi_{s_k}$ occurring in this decomposition are the exceptional Laplacian eigenfunctions in $L^2(\Gamma\backslash \mathcal{H})$, with corresponding eigenvalues $\rho^2-s_k^2$.
In particular, the \emph{spectral gap} for $\Gamma$, i.e., the gap between the trivial eigenvalue and the first non-trivial eigenvalue of the Laplacian on $L^2(\Gamma\backslash \mathcal{H})$, is $\tau(2\rho-\tau)$ where the parameter $\tau=\tau(\Gamma)$ is given by
\betagin{equation}\langlebel{e:SpectralGap}
\tau(\Gammaamma):=\min_{k}(\rho-s_k).
\end{equation}
The (spherical) exceptional forms $\varphi_k\in \pi_{s_k}$ above are either cusp forms (vanishing at all cusps) or residual forms (obtained as residues of Eisenstein series). The exceptional cusp forms are uniformly bounded, but the residual forms can blow up at the cusps.
To control their growth we recall the following result from \cite[Lemma 7]{Kelmer17b} on the $L^p$ norms of exceptional forms.\footnote{The result in \cite{Kelmer17b} is stated for $G=\SO_0(d,1)$ but the proof is identical.}
\begin{Prop}
\label{nE} For any spherical exceptional form $\varphi_k\in \pi_{s_k}$ with $s_k\in(0,\rho_0)$ we have that $\varphi_k\in L^p(\Gamma\backslash G)$ for any $p< \frac{2\rho}{\rho-s_k}$.
\end{Prop}
We now turn to estimate the decay of matrix coefficients for flows on $L^2(\Gamma\backslash G)$ for $G$ a rank one group.
For each $s\in i\mathbb R_{\geq 0}\cup (0, \rho_0)$ consider the spherical function defined by
\[\phi_s(g)= \langle\pi_s(g)v, v\rangle,\]
where $v\in V_{\pi_s}$ is the unique unit spherical vector. For any $\epsilon> 0$, $\phi_s$ decays like
\begin{equation}
\label{pd}|\phi_s(\exp(tX_1))|\ll_{\epsilon} e^{-\rho(1-\epsilon)|t|}
\end{equation}
if $s\in i\mathbb R_{\geq 0}$ and
\begin{equation}
\label{cd}|\phi_s(\exp(tX_1))|\ll_{\epsilon} e^{-(\rho-s)(1-\epsilon)|t|}
\end{equation}
if $s\in (0,\rho_0)$. We note that \eqref{pd} follows from the asymptotic behavior of the Harish-Chandra $\Xi$ function (cf. \cite[Section 7]{Howe82}), while for \eqref{cd} we refer to \cite[(5.1.18)]{RV12}. Let $\lambda_1\in\Phi_{\mathbb R}^{+}(\mathfrak a,\mathfrak g)$ be the highest root as before, and define $\kappa=\kappa(G)$ to be the unique integer such that $\alpha=\frac{\kappa}{2}\lambda_1$. Explicitly, $\kappa=2$ if $G$ is locally isomorphic to $\SO(d,1)$ and $\kappa=1$ otherwise. Applying \eqref{pd} and \eqref{cd} to the regular representation $\left(\pi, L^2_0(\Gamma\backslash G)\right)$, we get the following:
\begin{Prop}\label{rod}
Let $G$ be a connected simple Lie group with finite center and real rank one, and let $\Gamma\leq G$ be a lattice in $G$. Let $\{h_t\}_{t\in \mathbb R}$ be an unbounded one-parameter subgroup of $G$. For any spherical tempered $\psi,\phi\in L^2_{\mathrm{temp}}(\Gamma\backslash G)$, for all $|t|\geq 1$ we have
\begin{equation}
\label{tede}\left|\langle\pi(h_t)\psi,\phi\rangle\right|\ll_{\epsilon} \frac{\|\psi\|_2\|\phi\|_2}{|t|^{\kappa\rho(1-\epsilon)}}.
\end{equation}
For any spherical non-tempered exceptional form $\varphi\in \pi_s$ with $s\in (0,\rho_0)$, for all $|t|\geq 1$ we have
\begin{equation}
\label{comde}|\langle\pi(h_t)\varphi,\varphi\rangle|\ll_{\epsilon}\frac{\|\varphi\|_2^2}{|t|^{\kappa(\rho-s)(1-\epsilon)}}.
\end{equation}
\end{Prop}
\begin{proof}
Recall the Cartan decomposition $h_t=k_1(t)\exp(X(t))k_2(t)$ with $k_i(t)\in K$ and $X(t)\in \mathfrak a^{+}$. In view of \eqref{pd} and \eqref{cd}, it suffices to show that $e^{\alpha(X(t))}\gg |t|^{\kappa}$. Recall that $\kappa$ is defined precisely such that $\alpha=\frac{\kappa}{2}\lambda_1$ where $\lambda_1$ is the highest root. By Proposition \ref{mprop}, $e^{\lambda_1(X(t))}\gg t^2$, hence $e^{\alpha(X(t))}\gg (t^2)^{\frac{\kappa}{2}}=|t|^{\kappa}$.
\end{proof}
Using Proposition \ref{rod} together with the spectral decomposition, we see that the rate of decay is controlled by the spectral gap parameter $\tau(\Gamma)$ as follows:
\begin{Thm}
For $G$ and $\Gamma$ as in Proposition \ref{rod}, for all sufficiently small $\epsilon>0$, for any spherical $f_1,f_2\in L^2_0(\Gamma\backslash G)$ and for all $|t|\geq 1$, we have
\[\left|\langle \pi(h_t)f_1,f_2\rangle\right|\ll_{\epsilon}\frac{\|f_1\|_2\|f_2\|_2}{|t|^{\kappa\tau(\Gamma)(1-\epsilon)}}.\]
\end{Thm}
\begin{proof}
For any spherical $f_1,f_2\in L^2_0(\Gamma\backslash G)$, applying the spectral decomposition \eqref{sdc} and Proposition \ref{rod} we get for any $|t|\geq 1$
\begin{align*}
|\langle \pi(h_t)f_1, f_2\rangle|&\leq\sum_{k}\left|\langle f_1, \varphi_k\rangle \overline{\langle f_2, \varphi_k\rangle}\right|\left| \langle \pi(h_t)\varphi_k, \varphi_k\rangle\right| +\left|\langle \pi(h_t)f^1_{\textrm{temp}}, f^2_{\textrm{temp}}\rangle\right|\\
&\ll_{\epsilon} \sum_k \frac{\left|\langle f_1, \varphi_k\rangle \overline{\langle f_2, \varphi_k\rangle}\right|}{|t|^{\kappa(\rho-s_k)(1-\epsilon)}}+ \frac{\|f^1_{\textrm{temp}}\|_2\|f^2_{\textrm{temp}}\|_2}{|t|^{\kappa\rho(1-\epsilon)}}\\
&\leq \frac{\sum_k\left|\langle f_1, \varphi_k\rangle \overline{\langle f_2, \varphi_k\rangle}\right|+\|f^1_{\textrm{temp}}\|_2\|f^2_{\textrm{temp}}\|_2}
{|t|^{\kappa\tau(\Gamma)(1-\epsilon)}}\\
&\leq \frac{\|f_1\|_2\|f_2\|_2}{|t|^{\kappa\tau(\Gamma)(1-\epsilon)}},
\end{align*}
where for the last inequality we used the Cauchy--Schwarz inequality.
\end{proof}
As a direct corollary, we have the following:
\begin{Cor}
\label{uod}
Let $G$ be a connected simple Lie group with finite center and real rank one, not locally isomorphic to $\SO(2,1)$, and let $\Gamma\leq G$ be a lattice in $G$. If $\tau(\Gamma)> \frac{1}{\kappa(G)}$, then any unbounded one-parameter flow is SD for $\Gamma\backslash G$. In particular, if $G$ is locally isomorphic to $\Sp(d,1)$ with $d\geq 2$ or $\mathrm{F}_4^{-20}$, then any unbounded one-parameter flow is SD for $\Gamma\backslash G$ for any lattice $\Gamma$.
\end{Cor}
\begin{Rem}
For $G$ locally isomorphic to $\SO(d+1,1)$ and $\Gamma$ a congruence lattice, the best known bounds towards the Selberg-Ramanujan conjecture \cite{BurgerSarnak1991,KimSarnak03,BlomerBrumley11,BergeronClozel13,BergeronClozel17} imply that $\tau(\Gamma)\geq \tfrac{25}{64}$ for $d=1$, that $\tau(\Gamma)\geq \tfrac{25}{32}$ for $d=2$ and that $\tau(\Gamma)\geq 1$ when $d\geq 3$. Similarly, when $G$ is locally isomorphic to $\SU(d,1)$, we have that $\tau(\Gamma)\geq \tfrac{6}{5}$ when $d=2$ and that $\tau(\Gamma)\geq 2$ for $d\geq 3$.
In particular, it follows that for any rank one group, $G$, not locally isomorphic to $\SL_2(\mathbb R)$, for any congruence lattice $\Gamma\leq G$, we have that $\kappa \tau(\Gamma)>1$ and all unbounded one-parameter flows on $\Gamma\backslash G$ are SD.
\end{Rem}
\subsection{Simple groups of higher rank}
Next, we consider the case where $G$ is a connected simple Lie group with finite center and real rank $\geq 2$.
Following \cite{Oh98}, two roots $\alpha$ and $\beta$ are called strongly orthogonal if neither one of $\alpha\pm\beta$ is a root. Let $S(\Phi)$ denote the family of all subsets of $\Phi^{+}$ whose elements are pairwise strongly orthogonal. We call an element $\mathcal{O}$ in $S(\Phi)$ a strongly orthogonal system. Let $\varrho$ be the function on $S(\Phi)$ given by $\varrho(\mathcal{O})=\sum_{\alpha\in\mathcal{O}}\alpha$.
\begin{Prop}\cite[Proposition 2.3]{Oh98}
\label{so} Let $\Delta=\{\alpha_i\ |\ i\in I\}$ be the set of simple roots as above. There exists a maximal strongly orthogonal system $\mathcal{Q}(\Phi)$ in $S(\Phi)$ in the sense that for any $\mathcal{O}\in S(\Phi)$, for each $i\in I$, the coefficient of $\alpha_i$ in $\varrho(\mathcal{Q}(\Phi))$ is greater than or equal to the coefficient of $\alpha_i$ in $\varrho(\mathcal{O})$.
\end{Prop}
Let $\xi=\frac12 \varrho(\mathcal{Q}(\Phi))=\frac12\sum_{\alpha\in\mathcal{Q}(\Phi)}\alpha$, and consider the $K$-bi-invariant function, $F\colon G\to \mathbb R^+$, defined on $A^+$ via
\begin{equation}\label{e:F}
F(\exp(X))=e^{-\xi(X)}.
\end{equation}
We then have the following result from \cite{Oh98}.
\begin{Thm}\label{d.decay}
For $G$ a connected simple group with finite center and real rank $\geq 2$, for any $\epsilon>0$ sufficiently small, for any nontrivial $\sigma\in \widehat{G}_{K}$ and $K$-invariant unit vector $v_\sigma$ of $\sigma$,
\begin{equation}
\left|\langle\sigma(g)v_{\sigma},v_{\sigma}\rangle\right|\ll_\epsilon F(g)^{1-\epsilon}\qquad \textrm{for any}\ g\in G.
\end{equation}
\end{Thm}
\begin{proof}
When $G$ is a linear group this is \cite[Theorem A]{Oh98}.
In general, by the Langlands classification theorem, any $\sigma\in \widehat{G}_{K}$ appears as some quotient of the induced representation $\textrm{Ind}_{NAM}^G\left(\chi\otimes 1_{M}\right)$, where $N, A$ are as above, $M$ is the centralizer of $A$ in $K$, $\chi$ is some character of $A$ and $1_M$ is the trivial representation of $M$. Elements in $\textrm{Ind}_{NAM}^G\left(\chi\otimes 1_{M}\right)$ are measurable functions $f\colon G\to \mathbb{C}$ satisfying
\[f(namg)= \chi(a)f(g)\ \textrm{for a.e. $g\in G$, with $n\in N, a\in A$ and $m\in M$},\]
and $G$ acts on $\textrm{Ind}_{NAM}^G\left(\chi\otimes 1_{M}\right)$ via the right regular action.
Since $G$ has finite center, the maximal compact subgroup $K$ contains the center (\cite[Theorem 6.31]{Knapp02}). In particular, $M$ also contains the center. Hence for any $z$ in the center, $z\cdot f(g)= f(gz)= f(zg)= f(g)$. Thus $\sigma\in \widehat{G}_{K}$ descends to an irreducible unitary representation of the adjoint group $\textrm{Ad}(G)$. Identifying $\mathfrak g$ with $\textrm{ad}({\mathfrak g})$ and applying \cite[Theorem A]{Oh98} to the linear group $\textrm{Ad}(G)$, we get that for any $g\in G$
\[|\langle\sigma(g)v_{\sigma}, v_{\sigma}\rangle|= |\langle\sigma\left(\textrm{Ad}(g)\right)v_{\sigma},v_{\sigma}\rangle|\ll_{\epsilon}F\left(\textrm{Ad}(g)\right)^{1-\epsilon}=F(g)^{1-\epsilon}.\qedhere\]
\end{proof}
Applying these results to one-parameter subgroups we get:
\begin{Prop}\label{decay}
Let $G$ denote a connected simple Lie group with finite center and real rank $\geq 2$ and $\{h_t\}_{t\in \mathbb R}$ an unbounded one-parameter subgroup.
For any $\epsilon> 0$ sufficiently small, for any nontrivial $\sigma\in\widehat{G}_{K}$ and $|t|\geq 1$,
\begin{equation}\label{u.decay}
|\langle\sigma(h_t)v_{\sigma}, v_{\sigma}\rangle|\ll_\epsilon |t|^{\epsilon-1}.
\end{equation}
\end{Prop}
\begin{proof}
Since the singleton $\{\lambda_1\}$ constitutes a strongly orthogonal system, Proposition \ref{so} implies that $\xi(X)\geq \frac12\lambda_1(X)$ for any $X\in\mathfrak a^+$, leading to the bound $F(\exp(X))\leq e^{-\lambda_1(X)/2}$.
Now using the Cartan decomposition $h_t=k_1(t)\exp(X(t))k_2(t)$ and Theorem \ref{d.decay} we get that
\begin{equation*}
\left|\langle\sigma(h_t)v_{\sigma},v_{\sigma}\rangle\right|\ll_\epsilon \exp((\epsilon-1)\xi(X(t)))\leq \exp(\tfrac{(\epsilon-1)}{2}\lambda_1(X(t))).
\end{equation*}
Now, by Proposition \ref{mprop}, we have that $e^{\lambda_1(X(t))}\gg t^2$, thus concluding the proof.
\end{proof}
\begin{Rem}
\label{fde}In \cite[pp.~187--190]{Oh02} both $\xi$ and the highest root $\lambda_1$ are explicitly given in terms of linear combinations of simple roots. By comparing $\xi$ and $\lambda_1$ directly, we note that when $\Phi_{\mathbb R}(\mathfrak a,\mathfrak g)$ is of type $B_n (n\geq 4)$, $D_n (n\geq 4)$, $E_6$, $E_7$, $E_8$ or $F_4$, we have that $\xi(X)\geq \lambda_1(X)$ for any $X\in \mathfrak a^{+}$. Hence in these cases we have the following summable decay:
\begin{equation}
\label{fd}|\langle\sigma(h_t)v_{\sigma},v_{\sigma}\rangle|\ll_{\epsilon}|t|^{2(\epsilon-1)}
\end{equation}
for any nontrivial $\sigma\in \widehat{G}_{K}$ and any $|t|\geq 1$. When $\Phi_{\mathbb R}(\mathfrak a,\mathfrak g)$ is not of the above types, if the one-parameter subgroup is quasi-diagonalizable then the matrix coefficients decay exponentially, and if it is quasi-unipotent with $\left(\textrm{ad}(X_{\mathfrak n})\right)^3\neq 0$, then we can take $l=3$ in Proposition \ref{mprop} leading to the summable decay:
\begin{equation}\label{fd2}
|\langle\sigma(h_t)v_{\sigma},v_{\sigma}\rangle|\ll_{\epsilon}|t|^{\frac32(\epsilon-1)}
\end{equation}
for any nontrivial $\sigma\in\widehat{G}_K$ and any $|t|\geq 1$.
\end{Rem}
\subsection{Semisimple groups}
We now consider the general case where $G$ is a connected semisimple Lie group with finite center and no compact factors. Let $\Gamma\leq G$ denote an irreducible lattice, and $\{h_t\}_{t\in \mathbb R}$ an unbounded one-parameter subgroup of $G$ as before.
To deal with this case, first note that there is a surjective homomorphism $\prod_{i=1}^mG_i\rightarrow G$ with finite kernel such that each $G_i$ is a noncompact connected simple Lie group with finite center.
Let $\tilde{\Gamma}$ be the preimage of $\Gamma$ and $\{\tilde{h}_t\}$ be the identity component of the pre-image of $\{h_t\}$. By replacing $(\Gamma\backslash G, h_t)$ by $(\tilde{\Gamma}\backslash \prod_{i=1}^m G_i, \tilde{h}_t)$ we may assume, without loss of generality, that $G=\prod_{i=1}^m G_i$ and $h_t=(h_t^1,\ldots, h_t^m)$ is unbounded as a one-parameter subgroup of $G$.
Next, our maximal compact subgroup is of the form $K=\prod_{i=1}^mK_i$, with each $K_i$ a maximal compact subgroup of $G_i$.
By slightly abusing the terminology in \cite{KleinbockMargulis1996}\footnotemark, we say $G_i$ is an \textit{essential factor}
of $G$ if $\{h_t^i\}_{t\in \mathbb R}$ is unbounded in $G_i$. After reordering the factors, we can assume that $G_1,\cdots, G_k$ are all the essential factors (since $\{h_t\}$ is unbounded, we have $k\geq 1$). Let $\widehat{G}_{\Gamma}\subset \widehat{G}$ be the set of irreducible unitary representations that are weakly contained in $L^2_0(\Gamma\backslash G)$ and $\widehat{G}_{K, \Gamma}=\widehat{G}_{\Gamma}\cap \widehat{G}_{K}$. We first note that for any $\sigma\in \widehat{G}_{K,\Gamma}$, $\sigma$ is of the form $\sigma=\otimes_{i=1}^m \sigma_i$ with each $\sigma_i\in \widehat{G_i}_{K_i}$. Since $\Gamma\leq G$ is irreducible, each $\sigma_i$ is nontrivial.
\footnotetext{In \cite{KleinbockMargulis1996}, $G_i$ is called an essential factor if $\{h_t^i\}_{t\in\mathbb{R}}$ is quasi-diagonalizable, while here we allow $\{h_t^i\}_{t\in\mathbb{R}}$ to be quasi-unipotent.}
Moreover, a $K$-invariant unit vector $v_{\sigma}$ of $\sigma$ is of the form $v_{\sigma}=\otimes_{i=1}^mv_{\sigma_i}$, where $v_{\sigma_i}$ is the $K_i$-invariant unit vector of $\sigma_i$. Thus
\[|\langle \sigma(h_t)v_{\sigma}, v_{\sigma}\rangle|=\prod_{i=1}^m|\langle \sigma_i(h_t^i)v_{\sigma_i},v_{\sigma_i}\rangle|.\]
First, we consider the case that $G$ (and hence each of its factors) has property $(T)$, that is, each $G_i$ is either of real rank $\geq 2$ or locally isomorphic to $\Sp(d,1)$ or $\mathrm{F}_4^{-20}$.
In this case, for $1\leq i\leq k$ the factor $\{h_t^i\}$ is unbounded and by Proposition \ref{decay} and Corollary \ref{uod}, for any $|t|\geq 1$ we have $|\langle \sigma_i(h_t^i)v_{\sigma_i},v_{\sigma_i}\rangle|\ll_{\epsilon} \frac{1}{|t|^{(1-\epsilon)}}$. While for $k+1\leq i\leq m$, we bound $|\langle \sigma_i(h_t^i)v_{\sigma_i},v_{\sigma_i}\rangle|\leq 1$. We thus have for any $|t|\geq 1$
\begin{equation}\label{e:factor}
|\langle \sigma(h_t)v_{\sigma}, v_{\sigma}\rangle|\ll_{\epsilon} \frac{1}{|t|^{k(1-\epsilon)}}.
\end{equation}
\begin{Prop}\label{p:decay}
Let $G$ be a connected semisimple Lie group with finite center and no compact factors,
$\Gamma\leq G$ an irreducible lattice, and $\{h_t\}_{t\in \mathbb R}$ an unbounded one-parameter subgroup of $G$.
If $G$ has property $(T)$, then for any spherical $\varphi, \psi\in L^2_0(\Gamma\backslash G)$ and for any $|t|\geq 1$, we have
\begin{equation}\label{e:decay}
|\langle \pi(h_t)\varphi, \psi\rangle|\ll_\epsilon \frac{\|\varphi\|_2\|\psi\|_2}{|t|^{k(1-\epsilon)}},
\end{equation}
where $k\geq 1$ is the number of essential factors of the flow.
\end{Prop}
\begin{proof}
Using the direct integral decomposition of $L^2_0(\Gamma\backslash G)$, any spherical $\varphi\in L^2_0(\Gamma\backslash G)$ can be written as
\[\varphi= \int_{\sigma\in \widehat{G}_{K,\Gamma}}\varphi_{\sigma}\, d\nu(\sigma),\]
where $\varphi_{\sigma}\in\sigma$ is spherical and $\nu$ is some Borel measure on $\widehat{G}_{\Gamma}$. Hence for any spherical $\varphi, \psi\in L^2_0(\Gamma\backslash G)$, we have
\[\langle \pi(h_t)\varphi, \psi\rangle=\int_{\sigma\in\widehat{G}_{K,\Gamma}}\langle\sigma(h_t)\varphi_{\sigma},\psi_{\sigma}\rangle\, d\nu(\sigma).\]
By \eqref{e:factor} for any $|t|\geq 1$ we can bound
\begin{align*}
\left|\langle \pi(h_t)\varphi, \psi\rangle\right|&\leq \int_{\sigma\in \widehat{G}_{K,\Gamma}}\left|\left\langle\sigma(h_t)\varphi_{\sigma},\psi_{\sigma}\right\rangle\right| d\nu(\sigma)\\
&\ll_{\epsilon} \frac{1}{|t|^{k(1-\epsilon)}}\int_{\sigma\in \widehat{G}_{K,\Gamma}}\|\varphi_{\sigma}\|_2\|\psi_{\sigma}\|_2\, d\nu(\sigma)\\
&\leq\frac{\|\varphi\|_2\|\psi\|_2}{|t|^{k(1-\epsilon)}},
\end{align*}
where for the last inequality we used the Cauchy--Schwarz inequality.
\end{proof}
\begin{proof}[Proof of Theorem \ref{t:edecay}]
The result follows from Proposition \ref{p:decay} (after noting that for an unbounded flow we have at least one essential factor, so $k\geq 1$).
We further note that the only possibility that the flow is not SD is that there is only one essential factor, say $G_1$, and $\{h_t^1\}_{t\in \mathbb{R}}$ (viewed as a one-parameter flow on $\Gamma\backslash G$) is not SD.
\end{proof}
\begin{proof}[Proof of Theorem \ref{t:csd}]
The result follows from the above argument and Remark \ref{fde}.
\end{proof}
\begin{Rem}\label{r:conditional2}
For a semisimple group $G$ of real rank $\geq 2$ without property $(T)$ our result is only conditional. In this case, the Margulis Arithmeticity Theorem states that any irreducible lattice $\Gamma\leq G$ is arithmetic in the sense that it is commensurable to a congruence lattice defined over some number field. Moreover, Serre's congruence subgroup conjecture (which is settled when $\Gamma\backslash G$ is not compact \cite{Rapinchuk92}) states that in fact all irreducible lattices are congruence lattices. Now, when $\Gamma$ is a congruence lattice the generalized Selberg-Ramanujan conjecture \cite{Shahidi04,Sarnak05} gives very precise restrictions on which non-tempered representations may occur in the decomposition of $L^2(\Gamma\backslash G)$, and these representations all have fast decay of matrix coefficients. In particular, when $\Gamma$ is a congruence lattice, the Selberg-Ramanujan conjecture implies that \eqref{e:decay} holds for all flows on $\Gamma\backslash G$, and moreover, when $G$ is not locally isomorphic to a product of copies of $\SL_2(\mathbb R)$ and $\SL_2(\mathbb C)$, this already follows from the known bounds \cite{KimSarnak03, BlomerBrumley11,BergeronClozel13,BergeronClozel17} towards the Selberg-Ramanujan conjecture.
\end{Rem}
\section{Effective mean ergodic theorems and consequences}
Let $G$ be a connected semisimple Lie group with finite center and no compact factors, $\Gamma\leq G$ an irreducible lattice, $\mathcal{X}=\Gamma\backslash G$, and $H=\{h_m\}_{m\in \mathbb Z}$ an unbounded discrete time one-parameter flow on $\mathcal{X}$ generated by a one-parameter subgroup as before.
For any $f\in L^2(\mathcal{X})$ and any integer $m\geq 1$, define the averaging operator
\[\beta_{m}^+(f)(x):=\frac{1}{m}\sum_{j=1}^{m}f(xh_j).\]
Since $H$ acts ergodically on $\mathcal{X}$, the mean ergodic theorem states that
\[\|\beta_m^+(f)- \mu(f)\|_2\to 0,\]
as $m\to\infty$ for any $f\in L^2(\mathcal{X})$, where $\mu(f):=\int_{\mathcal{X}}f\,d\mu$. In this section we adapt the method introduced in \cite{GhoshKelmer17} and \cite{Kelmer17b} to prove two effective mean ergodic theorems, using the explicit rate of decay of
matrix coefficients obtained in the previous section. The arguments are slightly different for rank one groups and for groups with property $(T)$, so we will treat them separately.
\subsection{Groups with Property (T)}
When the group $G$ has property $(T)$, we can use the uniform result on decay of matrix coefficients for one-parameter flows to show the following.
\begin{Prop}
\label{pwt}Assume that $G$ has property (T). Then for any unbounded discrete time one-parameter flow $H=\{h_m\}_{m\in\mathbb{Z}}$, for any $\epsilon> 0$ and for any spherical $f\in L^2(\mathcal{X})$ we have
\begin{equation}
\label{key estimate}\|\beta_m^+(f)-\mu(f)\|_2\ll_{\epsilon}\frac{\|f\|_2}{m^{\frac12(1-\epsilon)}}.
\end{equation}
If the flow is SD we have the slightly stronger bound
\begin{equation}
\label{bmet}\|\beta_m^+(f)-\mu(f)\|_2\ll\frac{\|f\|_2}{\sqrt{m}}.
\end{equation}
\end{Prop}
\begin{proof}
Let $f_0=f-\mu(f)\in L^2_0(\mathcal{X})$. Proposition \ref{decay} and Corollary \ref{uod} imply that for any small $\epsilon>0$, for all
$|k|\geq 1$,
\[\left|\left\langle\pi(h_k)f_0,f_0\right\rangle\right|\ll_{\epsilon}\frac{\|f_0\|_2^2}{|k|^{1-\epsilon}}.\]
Noting that $\beta_m^+(f_0)= \beta_m^+(f)-\mu(f)$ we have
\begin{align*}
\|\beta_m^+(f_0)\|_2^2
&=\frac{1}{m^2}\left\langle\sum_{1\leq i\leq m}\pi(h_i)f_0,\sum_{1\leq j\leq m}\pi(h_j)f_0\right\rangle
= \frac{1}{m^2}\sum_{1\leq i,j\leq m}\left\langle\pi(h_{i-j})f_0,f_0\right\rangle\\
&= \frac{1}{m^2}\sum_{|k|\leq m}\left\langle\pi(h_k)f_0,f_0\right\rangle\#\{(i,j)\ |\ 1\leq i,j\leq m, i-j=k\}\\
&\leq \frac{1}{m}\sum_{|k|\leq m}\left|\left\langle\pi(h_k)f_0,f_0\right\rangle\right|
\ll_{\epsilon} \frac{1}{m}\Bigl(1+2\sum_{k=1}^{m}\frac{1}{k^{1-\epsilon}}\Bigr)\|f_0\|_2^2
\ll\frac{\|f_0\|_2^2}{m^{1-\epsilon}}\leq \frac{\|f\|_2^2}{m^{1-\epsilon}}.
\end{align*}
If the flow is SD, the same argument with the stronger bound $\left|\left\langle\pi(h_k)f_0,f_0\right\rangle\right|\ll_{\eta}\frac{\|f_0\|_2^2}{|k|^{\eta}}$
gives $\|\beta_m^+(f)-\mu(f)\|_2^2\ll \frac{\|f\|_2^2}{m}$.
\end{proof}
\subsection{Groups of real rank one}
For groups of real rank one without property $(T)$ we have to take into account the contribution of the possible exceptional spectrum.
The argument is similar to the one used in \cite[Theorem 15]{Kelmer17b} for the orthogonal groups, and we include the details for the reader's convenience.
Doing this leads to the following.
\begin{Prop}
\label{ewot}Let $G$ be locally isomorphic to $\SO(d+1,1)$ or $\SU(d,1)$ with $d\geq 2$. Then for any unbounded discrete time one-parameter flow $H=\{h_m\}_{m\in\mathbb{Z}}$, for any sufficiently small $\epsilon> 0$ and for any spherical $f\in L^2(\mathcal{X})$ we have
\[\|\beta_m^+(f)-\mu(f)\|_2\ll_{\epsilon} \frac{\|f\|_2}{\sqrt{m}}+\sum_{s_k\in [\rho-\frac{1}{\kappa}, \rho)}\frac{|\langle f,\varphi_k\rangle|}{m^{\frac{\kappa}{2}(\rho-s_k)(1-\epsilon)}},\]
where $\varphi_k$ are the finitely many spherical exceptional forms. When $G$ is locally isomorphic to $\SO(2,1)$ we have the same result with the first term replaced by $\frac{\|f\|_2}{m^{\frac12(1-\epsilon)}}$.
\end{Prop}
\begin{proof}
Write $f= \langle f, 1\rangle +\sum_{k}\langle f,\varphi_k\rangle \varphi_k+ f_0$ with $f_0\in L^2_{\textrm{temp}}(\mathcal{X})$. Then
\[\|\beta_m^+(f)-\mu(f)\|_2\leq \sum_{k}|\langle f,\varphi_k\rangle| \|\beta_m^+(\varphi_k)\|_2+ \|\beta_m^+(f_0)\|_2.\]
Using \eqref{tede} and the same argument as in Proposition \ref{pwt}, we get
\begin{equation}
\label{tdecay}
\|\beta_m^+(f_0)\|_2\ll_\epsilon \left\{
\begin{array}{lr}
\frac{\|f\|_2}{m^{\frac12(1-\epsilon)}} & \textrm{$\mathfrak g=\mathfrak{so}(2,1)$}\\
\\
\frac{\|f\|_2}{\sqrt{m}}& \textrm{otherwise.}
\end{array}
\right.
\end{equation}
Similarly, for each of the spherical exceptional forms $\varphi_k$ in $\pi_{s_k}$ we can bound
\[\|\beta_m^+(\varphi_k)\|_2^2\ll_{\epsilon} \frac{1}{m}\Bigl(1+\sum_{j=1}^{m} \frac{1}{j^{\kappa(\rho-s_k)(1-\epsilon)}}\Bigr)\ll \left\{
\begin{array}{lr}
\frac{1}{m} & \textrm{$s_k< \rho-\frac{1}{\kappa}$}\\
\frac{1}{m^{\kappa(\rho-s_k)(1-\epsilon)}} &\ \textrm{$s_k\geq \rho-\frac{1}{\kappa}$.}
\end{array}
\right.
\]
Hence
\[\|\beta_m^+(\varphi_k)\|_2\ll_{\epsilon} \left\{
\begin{array}{lr}
\frac{1}{\sqrt{m}} & \textrm{$s_k< \rho-\frac{1}{\kappa}$}\\
\frac{1}{m^{\frac{\kappa}{2}(\rho-s_k)(1-\epsilon)}} &\ \textrm{$s_k\geq \rho-\frac{1}{\kappa}$.}
\end{array}
\right.
\]
Combining this with \eqref{tdecay} and using the bound $|\langle f, \varphi_k\rangle|\leq \|f\|_2$ for $s_k< \rho-\frac{1}{\kappa}$ concludes the proof.
\end{proof}
\subsection{A variance estimate}
We now apply the above mean ergodic theorems to obtain a variance estimate. Following \cite{Kelmer17b}, for any integer $m\geq 1$ and $f\in L^2(\mathcal{X})$, we define the set
\[\mathcal{C}_{m,f}=\left\{x\in \mathcal{X}\ |\ \left|\beta^+_m(f)(x)-\mu(f)\right|\geq \tfrac{\mu(f)}{2}\right\}.\]
Applying the mean ergodic theorem we obtain the following estimate for the measure of $\mathcal{C}_{m,f}$ when $f$ is an indicator function of a spherical set.
\begin{Prop}
\label{est}
For any spherical set $B\subset \mathcal{X}$ and $f=\chi_B$ its indicator function:
\begin{enumerate}
\item[(1)] If the flow is SD, then
\begin{equation}
\label{estwt2}\mu(\mathcal{C}_{m,f})\ll\frac{1}{m\mu(B)}.
\end{equation}
\item[(2)] If $G$ has property (T), then for all sufficiently small $\epsilon>0$
\begin{equation}
\label{estwt1}\mu(\mathcal{C}_{m,f})\ll_{\epsilon}\frac{1}{m^{1-\epsilon}\mu(B)}.
\end{equation}
\item[(3)] If $G$ is locally isomorphic to $\SO(d+1,1)$ or $\SU(d,1)$ with $d\geq 2$, then
\begin{equation}
\label{estwot1}\mu(\mathcal{C}_{m,f})\ll \frac{1}{m\mu(B)}+ \frac{1}{\left(m\mu(B)^{\frac23}\right)^{2\tau(\Gamma)/3}}.
\end{equation}
\item[(4)] If $G$ is locally isomorphic to $\SO(2,1)$, then for all sufficiently small $\epsilon>0$
\begin{equation}
\label{estwot2} \mu(\mathcal{C}_{m,f})\ll_{\epsilon}\frac{1}{m^{1-\epsilon}\mu(B)}+\frac{1}{\left(m^{1-\epsilon}\mu(B)^{1+\frac{\epsilon}{2}}\right)^{2\tau(\Gamma)}},
\end{equation}
where for the last two cases, $\tau(\Gamma)$ denotes the spectral gap parameter defined in \eqref{e:SpectralGap}.
\end{enumerate}
\end{Prop}
\begin{proof}
Since $\mu(\mathcal{C}_{m,f})\leq 1$ we may assume that $m\mu(B)\geq 1$, since otherwise the result holds trivially.
Now, by definition, if $x\in \mathcal{C}_{m,f}$ then $\left|\beta_m^+(f)-\mu(B)\right|\geq \frac{\mu(B)}2$, and hence
\begin{equation}
\label{estt}\left\|\beta_m^+(f)-\mu(B)\right\|_2^2\geq \frac{1}{4}\int_{\mathcal{C}_{m,f}}\mu(B)^2d\mu(x)=\frac{\mu(\mathcal{C}_{m,f})\mu(B)^2}4.
\end{equation}
On the other hand, for SD flows, by Proposition \ref{pwt} we have
\begin{equation}
\label{e3}\|\beta_m^+(f)-\mu(B)\|_2^2\ll\frac{\|f\|_2^2}{m}= \frac{\mu(B)}{m}.
\end{equation}
Combining \eqref{estt} and \eqref{e3} we get \eqref{estwt2}.
If $G$ has property (T), then again by Proposition \ref{pwt} we have
\begin{equation}
\label{e2}\|\beta_m^+(f)-\mu(B)\|_2^2\ll_{\epsilon}\frac{\|f\|_2^2}{m^{1-\epsilon}}= \frac{\mu(B)}{m^{1-\epsilon}}.
\end{equation}
Combining \eqref{estt} and \eqref{e2}, we get \eqref{estwt1}.
If $G$ is locally isomorphic to $\SO(d+1,1)$ or $\SU(d,1)$ with $d\geq 2$, then by Proposition \ref{ewot} we have
\[\|\beta_m^+(f)-\mu(B)\|_2^2\ll_{\epsilon} \frac{\|f\|^2_2}{m}+\sum_{s_k\in [\rho-\frac{1}{\kappa}, \rho)}\frac{|\langle f,\varphi_k\rangle|^2}{m^{\kappa(\rho-s_k)(1-\epsilon)}}.\]
Recall that by Proposition \ref{nE}, the exceptional forms satisfy $\varphi_k\in L^p(\mathcal{X})$ for any $1< p< \frac{2\rho}{\rho-s_k}$.
For $\epsilon>0$ sufficiently small let $p_k=\frac{2\rho}{(1+\epsilon/2)(\rho-s_k)}$, let $q_k=\frac{p_k}{p_k-1}$ be its H\"older conjugate, and use H\"older's inequality to bound
\[|\langle f,\varphi_k\rangle|\leq \|\varphi_k\|_{p_k} \|f\|_{q_k}\ll_\epsilon \|f\|_{q_k}=\mu(B)^{\frac{1}{q_k}}.\]
With this bound we get
\begin{equation}
\label{enot1}\|\beta_m^+(f)-\mu(B)\|_2^2\ll_{\epsilon}\frac{\mu(B)}{m}+ \sum_{s_k\in [\rho-\frac{1}{\kappa}, \rho)}\frac{\mu(B)^{\frac{2}{q_k}}}{m^{\kappa(\rho-s_k)(1-\epsilon)}}.
\end{equation}
Combining \eqref{estt} and \eqref{enot1} we get
\begin{equation}\label{arro}
\mu(\mathcal{C}_{m,f})\ll_{\epsilon} \frac{1}{m\mu(B)}+ \sum_{s_k\in[\rho-\frac{1}{\kappa},\rho)}\frac{1}{\left(m^{\kappa(1-\epsilon)}\mu(B)^{\frac{1+\frac{1}{2}\epsilon}{\rho}}\right)^{\rho-s_k}}.
\end{equation}
Note that we always have $\kappa\rho\geq 2$, so fixing $\epsilon<1/3$ sufficiently small so that $\frac{1+\frac12\epsilon}{\kappa\rho(1-\epsilon)}<\frac23$ we get
\begin{align*}
\mu(\mathcal{C}_{m,f})&\ll \frac{1}{m\mu(B)}+ \sum_{s_k\in[\rho-\frac{1}{\kappa},\rho)}\frac{1}{\left(m^{\kappa(1-\epsilon)}\mu(B)^{\frac{1+\frac{1}{2}\epsilon}{\rho}}\right)^{\rho-s_k}}\\
&=\frac{1}{m\mu(B)}+\sum_{s_k\in[\rho-\frac{1}{\kappa},\rho)}\frac{1}{\left(m\mu(B)^{\frac{1+\frac12\epsilon}{\kappa\rho(1-\epsilon)}}\right)^{\kappa(\rho-s_k)(1-\epsilon)}}\\
&<\frac{1}{m\mu(B)}+\sum_{s_k\in[\rho-\frac{1}{\kappa},\rho)}\frac{1}{\left(m\mu(B)^{\frac23}\right)^{2\kappa(\rho-s_k)/3}}.
\end{align*}
Finally, note that for all terms in the sum we have $\tau(\Gamma)\leq \rho-s_k\leq \frac{1}{\kappa}$, so that
\begin{align*}
\mu(\mathcal{C}_{m,f})&\ll\frac{1}{m\mu(B)}+\frac{1}{\left(m\mu(B)^{\frac23}\right)^{2\tau(\Gamma)/3}}.
\end{align*}
For $\SO(2,1)$ we have $\kappa= 2$ and $\rho=\frac12$, and a similar argument gives \eqref{estwot2}.
\end{proof}
\section{Applications to shrinking target problems}
We now combine all the ingredients and apply them to the shrinking target problems we described in the introduction. As before, throughout this section we let $G$ denote a connected semisimple Lie group with finite center and no compact factors, $\Gamma\leq G$ an irreducible lattice $\{h_t\}_{t\in \mathbb R}$ an unbounded one-parameter subgroup, generating a discrete time flow given by the action of the discrete group $H=\{h_m\}_{m\in \mathbb Z}$ on the homogenous space $\mathcal{X}=\Gamma\backslash G$.
\subsection{The hitting time problem}
Let $\{B_t\}_{t>0}$ be a monotone family of spherical shrinking targets in $\mathcal{X}$.
We recall that by Lemma \ref{ght} and \ref{lower bound}, in order to prove Theorem \ref{t:hittingtime} it is enough to show that the estimate \eqref{up} holds. We show this in the following Lemma.
\betagin{Lem}
Assume that either $G$ has property (T) or that it is simple of real rank one. Then $(\ref{up})$ holds for any $0<\deltalta<\frac12$.
\end{Lem}
\betagin{proof}
Given a spherical measurable set, $B\subset \mathcal{X}$, let $f=\chi_B$ denote its indicator function, and for any $m\in \mathbb N$ let $\mathcal{M}^i_{m,B}$ be as defined in \eqref{e:missing}. In particular, for any $x\in\mathcal{M}_{m,B}^i$ we have that $\betata_m^+(f)(x)=\frac{k}{m}$ for some $0\leq k< i$. Thus for any $m>\frac{2i}{\mu(B)}$, if $x\in \mathcal{M}^i_{m,B}$ then
$$|\betata_m^+(f)(x)-\mu(f)|=\mu(B)-\frac{k}{m}> \mu(B)-\frac{i}{m}> \frac12\mu(B),$$
so $\mathcal{M}_{m,B}^i\subseteq \mathcal{C}_{m,f}$.
We recall that $\mathcal{N}$ is the set of integers $\ell\geq 0$ such that $\{\mu(B_t)\ |\ t> 0\}\cap [\frac{1}{2^{\ell}}, \frac{1}{2^{\ell+1}})$ is nonempty, and for each $\ell\in\mathcal{N}$, $\underline{B}_{\ell}=\bigcap_{\frac{1}{2^{\ell+1}}\leq\mu(B_t)<\frac{1}{2^{\ell}}}B_t$ with measure $\frac{1}{2^{\ell+1}}\leq \mu\left(\underline{B}_{\ell}\right)\leq \frac{1}{2^{\ell}}$. For each $\ell\in\mathcal{N}$, let $f_{\ell}=\chi_{\underline{B}_{\ell}}$. Fix $0<\deltalta<\frac12$ and let $j=\floor{\frac{\log 4i}{\deltalta\log 2}}$. Then for any $\ell\in \mathcal{N}\cap[j,\infty)$, $\mathcal{M}^i_{\floor{2^{\ell(1+\deltalta)}},\underline{B}_{\ell}}\subset \mathcal{C}_{\floor{2^{\ell(1+\deltalta)}}, f_{\ell}}$.
Hence to prove $(\ref{up})$ it is enough to show that
$$\sum_{\ell\in\mathcal{N}\cap\left[j,\infty\right)}\mu\left(\mathcal{C}_{\floor{2^{\ell(1+\delta)}}, f_{\ell}}\right)<\infty.$$
We now use Proposition \ref{est} to estimate $\mu\left(\mathcal{C}_{\floor{2^{\ell(1+\delta)}}, f_{\ell}}\right)$.
First, when $G$ has property $(T)$, Proposition \ref{est} with $\epsilon=\frac{\delta}{2(1+\delta)}$ gives that
$$\mu\left(\mathcal{C}_{\floor{2^{\ell(1+\delta)}}, f_{\ell}}\right)
\ll\frac{1}{\floor{2^{\ell(1+\delta)}}^{1-\epsilon}\mu\left(\underline{B}_{\ell}\right)}
\ll 2^{-\delta \ell/2},
$$
is summable.
Next, for $G$ locally isomorphic to $SO(d+1,1)$ or $SU(d,1)$ with $d\geq 2$, Proposition \ref{est} implies that
\begin{align*}
\mu\left(\mathcal{C}_{\floor{2^{\ell(1+\delta)}}, f_{\ell}}\right)&
\ll_{\epsilon}
\frac{1}{\floor{2^{\ell(1+\delta)}}\mu\left(\underline{B}_{\ell}\right)}+ \frac{1}{\left(\floor{2^{\ell(1+\delta)}}\mu\left(\underline{B}_{\ell}\right)^{\frac23}\right)^{2\tau(\Gamma)/3}}\\
&\ll 2^{-\delta\ell}+ 2^{-2\tau(\Gamma)\ell/9}
\end{align*}
is also summable.
Finally, when $G$ is locally isomorphic to $SO(2,1)$, Proposition \ref{est} with $\epsilon=\frac{\delta}{3+2\delta}$ implies that
$$\mu\left(\mathcal{C}_{\floor{2^{\ell(1+\delta)}}, f_{\ell}}\right)\ll_{\delta}2^{-\delta\ell/2}+2^{-\delta\tau(\Gamma)\ell}$$
is summable.
\end{proof}
\subsection{Logarithm laws}
We now apply our results to the special case where $\Gamma\backslash G$ is not compact and the shrinking targets are cusp neighborhoods, to prove Corollary \ref{loglaws}, establishing logarithm laws for one-parameter flows.
Since $\Gamma\backslash G$ is not compact when $G$ is of higher rank, we may assume that $\Gamma$ is a congruence group. We also assume here that $G$ is not locally isomorphic to a product of copies of $\SL_2(\mathbb R)$ and $\SL_2(\mathbb C)$, and note that, in that case, Corollary \ref{loglaws} already follows from \cite{KelmerMohammadi12}.
Now, fix a $K\times K$-invariant function $d(\cdot,\cdot)$ on $\Gamma\backslash G\times \Gamma\backslash G$ (coming from a distance function on $\Gamma\backslash \mathcal{H}$) satisfying $\eqref{e:cuspdecay}$ as in the introduction. For a fixed reference point $x_0\in \mathcal{X}$ let
$$B_t=\{x\in \mathcal{X}\ |\ d(x,x_0)> t\}$$
be the corresponding spherical cusp neighborhood. We first note that the easy half of Borel-Cantelli lemma, together with a standard continuity argument, implies that the upper bound,
$$\limsup_{t\to\infty}\frac{d(xh_t,x_0)}{\log t}\leq \frac{1}{\varkappa},$$
holds for a.e. $x\in \mathcal{X}$. For the lower bound, note that by Theorem \ref{t:hittingtime}, when $G$ has property $(T)$ or is of real rank one the limit $(\ref{hit})$ holds for a.e. $x\in \mathcal{X}$. When $G$ is of higher rank without property $(T)$, since $\Gamma$ is a congruence subgroup and $G$ is not locally isomorphic to a product of copies of $SL_2(\mathbb R)$ and $SL_2(\mathbb C)$, there is no factor of $G$ locally isomorphic to $SL_2(\mathbb R)$. Thus, using the known bounds towards the Selberg-Ramanujan conjecture (see Remarks \ref{r:conditional} and \ref{r:conditional2}) the limit \eqref{hit} still holds for a.e. $x\in \mathcal{X}$.
The proof of Corollary \ref{loglaws} now follows immediately from the following Lemma.
\begin{Lem}
For any $x\in \mathcal{X}$ satisfying $(\ref{hit})$ we have that
$$\limsup_{t\to\infty}\frac{d(xh_t,x_0)}{\log t}\geq \frac{1}{\varkappa}.$$
\end{Lem}
\begin{proof}
By $(\ref{hit})$ and $(\ref{e:cuspdecay})$, for any small $\epsilon>0$, there exists $t_0>0$ such that for any $t\geq t_0$ we have that $\frac{\log\tau_{B_t}(x)}{-\log(\mu(B_t))}< 1+\epsilon$ and $\frac{-\log\mu(B_t)}{t}< \varkappa+\epsilon$, hence, $\frac{\log\tau_{B_t}(x)}{t}<(\varkappa+\epsilon)(1+\epsilon)$. Moreover, by the minimality of $\tau_{B_t}(x)$ we have that $xh_{\tau_{B_t}(x)}\in B_t$, or equivalently that $d(xh_{\tau_{B_t(x)}},x_0)> t$. For any integer $\ell\in \mathbb N$ let $s_{\ell}=\tau_{B_{\ell}}(x)$. The condition that $x$ satisfies \eqref{hit}, implies that $s_{\ell}\to \infty$ as $\ell\to\infty$, and for any $\ell\geq t_0$, we have
$$\frac{d(xh_{s_{\ell}}, x_0)}{\log s_{\ell}}=\frac{d(xh_{\tau_{B_{\ell}}(x)}, x_0)}{\log \tau_{B_{\ell}}(x)}> \frac{d(xh_{\tau_{B_{\ell}}(x)}, x_0)}{(\varkappa+\epsilon)(1+\epsilon)\ell}> \frac{1}{(\varkappa+\epsilon)(1+\epsilon)}.$$
To conclude, for any $x$ satisfying $(\ref{hit})$ and for any $\epsilon> 0$ we can find an unbounded sequence $\{s_{\ell}\}$ such that $\frac{d(xh_{s_{\ell}}, x_0)}{\log s_{\ell}}>\frac{1}{(\varkappa+\epsilon)(1+\epsilon)}$, implying that $\limsup_{t\to\infty}\frac{d(xh_t,x_0)}{\log t}\geq \frac{1}{\varkappa}$.
\end{proof}
\begin{Rem}\label{r:sphericalcusps}
In \cite{KleinbockMargulis1999,AthreyaMargulis14}, the cusp neighborhoods are defined by a distance function $\tilde{d}(\cdot,\cdot)$ on $\Gamma\backslash G$ induced from a left $G$-invariant and bi-$K$-invariant metric (resp. norm like pseudo metric) on $G$, rather than a distance function on $\Gamma\backslash \mathcal{H}$ as in our case. In order to apply the above argument in this case we can replace the non spherical distance function $\tilde{d}(x, x_0)$ by the spherical distance function $d(x, x_0):=\inf_{k\in K}\tilde d(xk,x_0)$. Since $\tilde d(\cdot, \cdot)$ is induced from a bi-$K$-invariant metric, there exists some constant $C>0$ such that for any $x\in \mathcal{X}$, $0\leq \tilde{d}(x,x_0)-d(x,x_0)\leq C$. Thus the corresponding cusp neighborhoods satisfy $\tilde B_{t+C}\subset B_t\subset \tilde B_t$ for any $t>0$, where $\tilde{B_t}=\{x\in\mathcal{X}\ |\ \tilde{d}(x,x_0)> t\}$. Now from \cite{KleinbockMargulis1999,AthreyaMargulis14} we have that $\mu(\tilde B_t)\asymp e^{-\varkappa t}$ for some $\varkappa>0$ and hence also have that $\mu(B_t)\asymp e^{-\varkappa t}$. Since we also have $\tilde{d}(x,x_0)=d(x,x_0)+O(1)$, we get that for a.e. $x\in \mathcal{X}$
$$\limsup_{t\to\infty}\frac{\tilde{d}(xh_t,x_0)}{\log t}=\limsup_{t\to\infty}\frac{d(xh_t,x_0)}{\log t}= \frac{1}{\varkappa}.$$
\end{Rem}
\subsection{Orbits eventually always hitting}
We now give the proof of Theorem \ref{t:ae}.
Given a monotone sequence of spherical shrinking targets $\{B_m\}_{m\in\mathbb N}$, let $f_m=\chi_{B_m}$.
By Proposition \ref{est}, for SD flows
$$\mu\left(\mathcal{C}_{2^{j\pm 1},f_{2^j}}\right)\ll\frac{1}{2^{j\pm 1}\mu(B_{2^j})}\asymp \frac{1}{2^j\mu(B_{2^j})}.$$
and if $G$ is locally isomorphic to $\SO(d+1,1)$ or $\SU(d,1)$ with $d\geq 2$, then
\begin{align*}
\mu\left(\mathcal{C}_{2^{j\pm 1},f_{2^j}}\right)&\ll \frac{1}{2^j\mu(B_{2^j})}+\frac{1}{(2^{j}\mu(B_{2^j})^{\frac23})^{2\tau(\Gamma)/3}}\\
&< \frac{1}{2^j\mu(B_{2^j})}+\frac{2^{-2j/9}}{(2^{j}\mu(B_{2^j}))^{4\tau(\Gamma)/9}}.
\end{align*}
Since $\sum_{j=0}^{\infty}\frac{1}{2^j\mu(B_{2^j})}<\infty$, there exists $j_0$ such that for any $j\geq j_0$, $2^j\mu(B_{2^j})>1$. Hence for all $j\geq j_0$, we have
$$\mu\left(\mathcal{C}_{2^{j\pm 1},f_{2^j}}\right)\ll_{\epsilon}\frac{1}{2^j\mu(B_{2^j})}+2^{-2j/9}.$$
Thus in both cases, \eqref{e:summable} implies that
$\sum_{j=0}^{\infty}\mu\left(\mathcal{C}_{2^{j\pm 1},f_{2^j}}\right)<\infty$, and since $\mathcal{M}^1_{m,B}\subseteq \mathcal{C}_{m,f}$ with $f= \chi_B$, then
$\sum_{j=0}^{\infty}\mu\left(\mathcal{M}^1_{2^{j\pm 1},B_{2^j}}\right)<\infty$ and Theorem \ref{t:ae} follows from Lemma \ref{l:EventuallyHitting}.
\subsection{Dynamical Borel-Cantelli for SD flows.}
We now give the proof of Theorem \ref{t:SBC}, by showing that rate of decay of matrix coefficients for SD flows is sufficient to show that for any sequence of spherical shrinking targets, $\{B_m\}_{m\in\mathbb{N}}$,
the family of functions $f_m(x)=\chi_{B_m}(xh_m)$ satisfy condition \eqref{keybound} of Lemma \ref{gbc}.
\begin{Prop}
\label{quasi.prop} Let $\{B_m\}_{m\in\mathbb{N}}$ denote any sequence of spherical sets in $\mathcal{X}$ and let $f_m(x)=\chi_{B_m}(xh_m)$.
If the flow is SD then
there exists some constant $C>0$ such that for all $n> m\geq 1$
$$\int_{\mathcal{X}}\left(\sum_{i=m}^nf_i(x)-\sum_{i=m}^n\mu(f_i)\right)^2d\mu(x)\leq C\sum_{i=m}^n\mu(f_i).$$
\end{Prop}
\begin{proof}
We first note that by direct computation
$$\int_{\mathcal{X}}\left(\sum_{i=m}^nf_i(x)-\sum_{i=m}^n\mu(f_i)\right)^2d\mu(x)=\sum_{m\leq i,j\leq n}\left(\int_{\mathcal{X}}f_i(x)f_j(x)d\mu(x)-\mu(f_i)\mu(f_j)\right).$$
To simplify notation, let $\mu_i=\mu(f_i)=\mu(B_i)$ and $\mu_{i,j}=\int_{\mathcal{X}}f_i(x)f_j(x)d\mu(x)$, and note that $\mu_{i,i}=\mu_i$ (since for $f_i(x)=\chi_{B_i}(xh_i)$, $f_i^2=f_i$). With these notations we have
\begin{align*}
\int_{\mathcal{X}}\left(\sum_{i=m}^nf_i(x)-\sum_{i=m}^n\mu(f_i)\right)^2d\mu(x)&=\sum_{i=m}^n\mu_i+\sum_{m\leq i\neq j\leq n}\mu_{i,j}-\sum_{1\leq i,j\leq n}\mu_i\mu_j\\
&\leq\sum_{i=m}^n\mu_i+\sum_{m\leq i\neq j\leq n}(\mu_{i,j}-\mu_i\mu_j).
\end{align*}
Thus it suffices to show that
$$\sum_{m\leq i\neq j\leq n}(\mu_{i,j}-\mu_i\mu_j)\ll \sum_{i=m}^n\mu_i.$$
Since we assume the flow is SD, there exists some constant $\eta>1$ such that for any spherical $\varphi,\psi \in L^2_0(\Gamma\backslash G)$, for all $|t|\geq 1$
$$|\langle \pi(h_t)\varphi, \psi\rangle |\ll \frac{\|\varphi\|_2\|\psi\|_2}{|t|^{\eta}}.$$
In particular, for any $i\neq j$, taking $\varphi=\chi_{B_i}-\mu(B_i)$ and $\psi=\chi_{B_j}-\mu(B_j)$ we get that
\begin{align*}
\left|\mu_{i,j}-\mu_i\mu_j\right|&=\left|\langle \pi(h_i)\varphi, \pi(h_j)\psi\rangle\right|\\
&=\left|\langle\pi(h_{i-j})\varphi, \psi\rangle\right|\\
&\ll \frac{\|\varphi\|_2\|\psi\|_2}{|i-j|^{\eta}}
\leq \frac{\sqrt{\mu_i\mu_j}}{|i-j|^{\eta}}.
\end{align*}
It now suffices to show that
$$\sum_{m\leq i\neq j\leq n}\frac{\sqrt{\mu_i\mu_j}}{|j-i|^{\eta}}\ll \sum_{i=m}^n \mu_i.$$
We rewrite the sum on the left as
$$\sum_{m\leq i\neq j\leq n}\frac{\sqrt{\mu_i\mu_j}}{|j-i|^{\eta}}
=\sum_{\substack{m\leq i\neq j\leq n \\ \mu_i\leq \mu_j}}\frac{\sqrt{\mu_i\mu_j}}{|j-i|^{\eta}}+ \sum_{\substack{m\leq i\neq j\leq n \\ \mu_i> \mu_j}}\frac{\sqrt{\mu_i\mu_j}}{|j-i|^{\eta}},$$
and using symmetry we can bound
\begin{align*}
\sum_{m\leq i\neq j\leq n}\frac{\sqrt{\mu_i\mu_j}}{|j-i|^{\eta}}&\leq 2\sum_{\substack{m\leq i\neq j\leq n \\ \mu_i\leq \mu_j}}\frac{\sqrt{\mu_i\mu_j}}{|j-i|^{\eta}}\\
&\leq 2\sum_{\substack{m\leq i\neq j\leq n \\ \mu_i\leq \mu_j}}\frac{\mu_j}{|j-i|^{\eta}}\\
&= 2\sum_{m\leq j\leq n}\mu_j(\sum_{\substack{m\leq i\leq n \\ \mu_i\leq \mu_j, i\neq j}}\frac{1}{|j-i|^{\eta}}).
\end{align*}
We can bound the inner sum by the convergent series $2\sum_{i=1}^{\infty}\frac{1}{i^{\eta}}$
thus concluding the proof.
\end{proof}
\begin{proof}[Proof of Theorem \ref{t:SBC}]
Let $\{B_m\}_{m\in\mathbb{N}}$ denote any sequence of spherical sets in $\mathcal{X}$ satisfying that $\sum_{m=1}^{\infty}\mu(B_m)=\infty$.
For $f_m(x)=\chi_{B_m}(xh_m)$ we have that
$$\sum_{1\leq j\leq m}f_j(x)=\#\{1\leq j\leq m: xh_j\in B_j\},$$ and hence, Proposition \ref{quasi.prop} and Lemma \ref{gbc} imply that for a.e. $x\in \mathcal{X}$
\begin{displaymath}
\lim_{m\to\infty}\frac{\#\{1\leq j\leq m: xh_j\in B_j\}}{\sum_{1\leq j\leq m} \mu(B_j)}= 1.\qedhere
\end{displaymath}
\end{proof}
\subsection{Dynamical Borel-Cantelli for rank one groups}
We now turn to the case where $G$ is locally isomorphic to $\SO(d+1,1)$ or $\SU(d,1)$ with $d\geq 2$
(if $G$ is of real rank one with property $(T)$, then every unbounded flow is SD and we can apply Theorem \ref{t:SBC}). Combining bounds coming from the mean ergodic theorem, together with Lemma \ref{gbc} we show that any monotone sequence of spherical shrinking targets, $\{B_m\}_{m\in \mathbb N}$, is BC. In this case, the argument is different when the sequence $\{m\mu(B_m)\}_{m\in\mathbb N}$ is bounded or unbounded, and we treat these cases separately.
We first deal with the bounded case by showing the following.
\begin{Prop}\label{p:bounded}
For $G$ locally isomorphic to $\SO(d+1,1)$ or $\SU(d,1)$ with $d\geq 2$, let $\{B_m\}_{m\in \mathbb N}$ denote a sequence of spherical sets satisfying that $\left\{m\mu(B_m)\right\}_{m\in\mathbb N}$ is uniformly bounded. Let $f_m(x)=\chi_{B_m}(xh_m)$,
then there exists some constant $C>0$ such that for all $n> m\geq 1$
$$\int_{\mathcal{X}}\left(\sum_{i=m}^nf_i(x)-\sum_{i=m}^n\mu(f_i)\right)^2d\mu(x)\leq C\sum_{i=m}^n\mu(f_i).$$
\end{Prop}
\begin{proof}
As in the proof of Proposition \ref{quasi.prop}, denote by $\mu_i=\mu(f_i)=\mu(B_i)$ and $\mu_{i,j}=\int_{\mathcal{X}}f_i(x)f_j(x)d\mu(x)$.
Using the spectral decomposition we can write
\begin{displaymath}
\chi_{B_m}=\mu(B_m)+\sum_{k}\langle \chi_{B_m},\varphi_k\rangle \varphi_k + f_m^0,
\end{displaymath}
with $f_m^{0}\in L^2_{\textrm{temp}}(\Gamma\backslash \mathcal{H})$. Hence for any $i\neq j$,
\begin{eqnarray*}
\mu_{i,j}&=&\langle f_i, f_j\rangle=\langle \pi(h_i)\chi_{B_i}, \pi(h_j)\chi_{B_j}\rangle=\langle \pi(h_{i-j})\chi_{B_i}, \chi_{B_j}\rangle\\
&=&\mu_i\mu_j+ \sum_k\langle\chi_{B_i},\varphi_k\rangle\overline{\langle \chi_{B_j},\varphi_k\rangle}\langle\pi(h_{i-j})\varphi_k,\varphi_k\rangle+ \langle\pi(h_{i-j})f_i^0, f_j^0\rangle.
\end{eqnarray*}
Thus by Proposition \ref{rod} we have for any small $\epsilon>0$
$$
|\mu_{i,j}-\mu_i\mu_j|\ll_{\epsilon}
\sum_k\frac{|\langle\chi_{B_i},\varphi_k\rangle\langle \chi_{B_j},\varphi_k\rangle|}{|i-j|^{\kappa(\rho-s_k)(1-\epsilon)}}+ \frac{\sqrt{\mu_i\mu_j}}{|i-j|^{\kappa\rho(1-\epsilon)}}.
$$
Let $\eta=\kappa\rho(1-\epsilon)$ and note that for all $0<\epsilon<1/4$ we have that $\eta>3/2$ (since $G$ is not locally isomorphic to $SO(2,1)$). Hence, by the same arguments
as in the proof of Proposition \ref{quasi.prop}, for any $0<\epsilon<\frac14$ we can bound
$$\sum_{m\leq i\neq j\leq n}\frac{\sqrt{\mu_i\mu_j}}{|j-i|^{\kappa\rho(1-\epsilon)}}\ll \sum_{i=m}^n \mu_i.$$
Hence, it suffices to show that for each of the finitely many exceptional forms we have
$$\sum_{m\leq i\neq j\leq n}\frac{|\langle\chi_{B_i},\varphi_k\rangle\langle \chi_{B_j},\varphi_k\rangle|}{|i-j|^{\kappa\rho(1-\epsilon)}}\ll_{\epsilon} \sum_{i=m}^n\mu_i.$$
Now let $0<\epsilon_0<\frac14$ be sufficiently small such that $\frac{2\rho}{(\rho+s_k)(1-\epsilon_0)}<2$ for all $s_k$. For $0<\epsilon\leq \epsilon_0$ (to be determined later) let $q_k=\frac{2\rho}{(\rho+s_k)(1-\epsilon)}$ and let $\eta_k=\kappa(\rho-s_k)(1-\epsilon)$. Note that $\frac12<\frac{1}{q_k}<1$.
Recalling that the exceptional form $\varphi_k\in\pi_{s_k}$ is contained in $L^p(\mathcal{X})$ for any $1<p<\frac{2\rho}{\rho-s_k}$ we can bound
$$|\langle \chi_{B_i}, \varphi_k\rangle \langle \chi_{B_j},\varphi_k\rangle|\ll_{\epsilon} \mu_i^{\frac{1}{q_k}}\mu_j^{\frac{1}{q_k}},$$
and hence together with symmetry we can bound
\begin{displaymath}
\sum_{m\leq i\neq j\leq n}\frac{|\langle\chi_{B_i},\varphi_k\rangle\langle \chi_{B_j},\varphi_k\rangle|}{|i-j|^{\kappa(\rho-s_k)(1-\epsilon)}}
\ll \sum_{m\leq i\neq j\leq n}\frac{\mu_i^{\frac{1}{q_k}}\mu_j^{\frac{1}{q_k}}}{|j-i|^{\eta_k}}\\
\ll \mathop{\sum_{m\leq i\neq j\leq n}}_{\mu_j\leq \mu_i}\frac{\mu_i^{\frac{1}{q_k}}\mu_j^{\frac{1}{q_k}}}{|j-i|^{\eta_k}}.\\
\end{displaymath}
Since $\frac{1}{q_k}<1$, for $\mu_j\leq \mu_i$ we can bound
$\mu_i^{\frac{1}{q_k}}\mu_j^{\frac{1}{q_k}}\leq \mu_i\mu_j^{\frac{2}{q_k}-1}$
to get that
\begin{align*}
\mathop{\sum_{m\leq i\neq j\leq n}}_{\mu_j\leq \mu_i}\frac{\mu_i^{\frac{1}{q_k}}\mu_j^{\frac{1}{q_k}}}{|j-i|^{\eta_k}}\leq \mathop{\sum_{m\leq i\neq j\leq n}}_{\mu_j\leq \mu_i}\frac{\mu_i \mu_j^{\frac{2}{q_k}-1}}{|j-i|^{\eta_k}}.
\end{align*}
Now using the assumption that $\{\ell \mu_{\ell}\}_{\ell\in\mathbb N}$ is uniformly bounded, (and noting that $\tfrac{2}{q_k}>1$) we can bound
$\mu_j^{\frac{2}{q_k}-1}\ll
\frac{1}{j^{\frac{2}{q_k}-1}}$ so that
$$\mathop{\sum_{m\leq i\neq j\leq n}}_{\mu_j\leq \mu_i}\frac{\mu_i \mu_j^{\frac{2}{q_k}-1}}{|j-i|^{\eta_k}}\ll \sum_{m\leq i\leq n}\mu_i\left(\mathop{\sum_{m\leq j\leq n}}_{\mu_j\leq \mu_i, j\neq i}\frac{1}{j^{\frac{2}{q_k}-1}|j-i|^{\eta_k}}\right).$$
For the cases we consider we have that $\kappa-\frac{1}{\rho}\geq 1/2$ and we can estimate the exponent
$$\frac{2}{q_k}-1+\eta_k=\tfrac{(\rho+s_k)(1-\epsilon)}{\rho}-1+\kappa(\rho-s_k)(1-\epsilon)\geq(1-\epsilon)\left(2+\tfrac{\tau}{2}\right)-1,$$
where $\tau=\tau(\Gamma)$ is the spectral gap parameter for $\Gamma$.
Taking $\epsilon=\min\{\frac12\epsilon_0, \frac{\tau}{2\tau+8}\}$ we get that
$\frac{2}{q_k}-1+\eta_k\geq 1+\frac14\tau$. Hence for any $m\leq i\leq n$, the sum
\begin{align*}
\mathop{\sum_{m\leq j\leq n}}_{\mu_j\leq \mu_i, j\neq i}\frac{1}{j^{\frac{2}{q_k}-1}|j-i|^{\eta_k}}&\leq\mathop{\sum_{j> 0}}_{ j\neq i}\left(\frac{1}{j^{\frac{2}{q_k}-1+\eta_k}}+\frac{1}{|i-j|^{\frac{2}{q_k}-1+\eta_k}}\right)\\
&\ll\sum_{\ell\neq 0}\frac{1}{|\ell|^{\frac{2}{q_k}-1+\eta_k}}\leq \sum_{\ell\neq 0}\frac{1}{|\ell|^{1+\frac14\tau}}<\infty,
\end{align*}
is uniformly bounded and thus concluding the proof.
\end{proof}
Next, we consider the case where $\{m\mu(B_m)\}_{m\in\mathbb{N}}$ is unbounded. For this case we use results of the effective mean ergodic theorem to show the following.
\begin{Prop}\label{p:unbounded}
For $G$ locally isomorphic to $\SO(d+1,1)$ or $\SU(d,1)$ with $d\geq 2$, let $\{B_m\}_{m\in\mathbb{N}}$ be a monotone family of spherical shrinking targets in $\mathcal{X}=\Gamma\backslash G$ satisfying that $\sum_{m=1}^{\infty}\mu(B_m)=\infty$ and that $\left\{m\mu(B_m)\right\}_{m\in\mathbb{N}}$ is unbounded. Then there is a subsequence $m_j$ with $m_j\mu(B_{m_j})\to \infty$ satisfying that for a.e. $x\in\mathcal{X}$
$$\lim_{j\to\infty}\frac{\#(xH^+_{m_j}\cap B_{m_j})}{m_j\mu(B_{m_j})}=1.$$
\end{Prop}
\begin{proof}
For any $m\in \mathbb{N}$ let $f_m=\frac{\chi_{B_m}}{\mu(B_m)}$ and note that $\beta_m^+(f_m)(x)=\frac{\#(xH^+_m\cap B_m)}{m\mu(B_{m})}$. Hence it suffices to show that there is some subsequence $\left\{m_j\right\}$ such that $\beta_{m_j}(f_{m_j})(x)\to 1$ as $j\to\infty$ for a.e. $x\in \mathcal{X}$. By Proposition \ref{ewot} we have
$$\|\beta_m^+(f_m)-1\|_2\ll_{\epsilon} \frac{1}{\sqrt{m\mu(B_m)}}+\sum_{s_k\in [\rho-\frac{1}{\kappa},\rho)}\frac{\left|\langle f_m,\varphi_k\rangle\right|}{m^{\frac{\kappa}{2}(\rho-s_k)(1-\epsilon)}}.$$
Recall that we can bound $\left|\langle f_m,\varphi_k\rangle\right|\ll_q \|f_m\|_q$ for any $q> \frac{2\rho}{\rho+s_k}$. Now for $0<\epsilon< 1-\frac{1}{\kappa\rho}$, for any $s_k\in [\rho-\frac{1}{\kappa}, \rho)$ we have $q_k=\frac{1}{1-\frac{\kappa}{2}(\rho-s_k)(1-\epsilon)}> \frac{2\rho}{\rho+s_k}$ and hence $\left|\langle f_m,\varphi_k\rangle\right|\ll_{\epsilon}\|f_m\|_{q_k}=\frac{1}{\mu(B_m)^{1-\frac{1}{q_k}}}=\frac{1}{\mu(B_m)^{\frac{\kappa}{2}(\rho-s_k)(1-\epsilon)}}$. Thus for any $0<\epsilon< 1-\frac{1}{\kappa\rho}$ we have
$$\|\beta_m^+(f_m)-1\|_2\ll_{\epsilon} \frac{1}{\sqrt{m\mu(B_m)}}+\sum_{s_k\in [\rho-\frac{1}{\kappa},\rho)}\frac{1}{\left(m\mu(B_m)\right)^{\frac{\kappa}{2}(\rho-s_k)(1-\epsilon)}}.$$
Since $\left\{m\mu(B_m)\right\}_{m\in \mathbb{N}}$ is unbounded, there is a subsequence satisfying that $m_j\mu(B_{m_j})\to\infty$, for which $\|\beta_{m_j}(f_{m_j})-1\|_2\to 0$ as $j\to \infty$. Passing to another subsequence, if necessary, we get that $\beta_{m_j}(f_{m_j})(x)\to 1$ for a.e. $x\in \mathcal{X}$.
\end{proof}
We now combine the two cases to complete the proof.
\begin{proof}[Proof of Theorem \ref{t:MBC}]
Let $G$ be locally isomorphic to $\SO(d+1,1)$ or $\SU(d,1)$ with $d\geq 2$, and let $\{B_m\}_{m\in \mathbb N}$ denote a monotone family of spherical shrinking targets with $\sum_m \mu(B_m)=\infty$.
Now, if the sequence $\{m\mu(B_m)\}_{m\in \mathbb N}$ is bounded then by Proposition \ref{p:bounded} we have that
for a.e. $x\in \mathcal{X}$,
$$\lim_{m\to\infty} \frac{\#\{1\leq j\leq m: xh_j\in B_j\}}{\sum_{1\leq j\leq m} \mu(B_j)}=1.$$
If the sequence $\{m\mu(B_m)\}_{m\in \mathbb N}$ is unbounded then by Proposition \ref{p:unbounded}
there is a subsequence $m_j$ such that for a.e. $x\in \mathcal{X}$
$$\lim_{j\to\infty}\frac{\#\{1\leq i\leq m_j: xh_i\in B_{m_j}\}}{m_j \mu(B_{m_j})}=1.$$
In both cases, for a.e. $x\in \mathcal{X}$ the set $\{m\in\mathbb N: xh_m\in B_m\}$ is unbounded, so $\{B_m\}_{m\in \mathbb N}$ is BC for this flow.
\end{proof}
\end{document} |
\begin{document}
\title[Poloids]{Poloids from the Points of View of Partial Transformations and Category
Theory}
\author{Dan Jonsson}
\address{Dan Jonsson, Department of Sociology, University of Gothenburg, Sweden.}
\email{dan.jonsson@gu.se}
\begin{abstract}
Monoids and groupoids are examples of poloids. On the one hand, poloids
can be regarded as one-sorted categories; on the other hand, poloids
can be represented by partial magmas of partial transformations. In
this article, poloids are considered from these two points of view.
\end{abstract}
\maketitle
\section{Introduction}
While category theory is, in a sense, a mathematical theory of mathematics,
there does also exist a mathematical (algebraic) theory of (small)
categories. The phrase ``categories are just monoidoids'' summarizes
this theory in a somewhat cryptic manner. One part of this article
is concerned with clarifying this statement, systematically developing
definitions of category-related algebraic concepts such as semigroupoids,
poloids and groupoids, and deriving results that we recognize from
category theory. While no new results are presented, the underlying
notion that (small) categories are ``just webs of monoids'' \textendash{}
or partial magmas generalizing monoids, semigroups, groups etc. \textendash{}
may deserve more systematic attention than it has received.
The other part of the article deals with the link between abstract
algebraic structures such as poloids and concrete systems of partial
transformations on some set. We obtain systems of partial transformations
that satisfy the axioms of poloids as abstract algebraic structures
by successively adding constraints on partial magmas of partial transformations;
it is also shown that every poloid is isomorphic to such a system
of partial transformations. This procedure provides an intuitive interpretation
of the poloid axioms, helping to motivate the axioms and making it
easier to discover important concepts and results. As is known, this
approach has shown its usefulness in the study of semigroups, for
example, in the work of Wagner \cite{key-9}.
At a late stage in the preparation of the manuscript, I became aware
of related work on constellations \cite{key-4,key-5,key-7}. Constellations
turned out to generalize poloids in a way that I had not considered,
yet had several points of contact with my concepts and results. I
have added an Appendix where these matters are discussed.
\section{Poloids and transformation magmas}
\subsection{(Pre)functions, (pre)transformations and magmas}
\begin{defn}
\label{def1a} A \emph{(partial) prefunction}, $\mathsf{f}:X\nRightarrow Y$
is a set $\mathfrak{X}\subseteq X$ and a rule $\overline{\mathsf{f}}$
that assigns exactly one $\overline{\mathsf{f}}\!\left(x\right)\in Y$
to each $x\in\mathcal{\mathfrak{X}}$; to simplify the notation we
may write $\overline{\mathsf{f}}\!\left(x\right)$ as $\mathsf{f}\!\left(x\right)$.
We call $\mathfrak{X}$ the \emph{domain} of $\mathsf{f}$, denoted
$\mathrm{dom}\!\left(\mathsf{f}\right)$. The \emph{image} of $\mathsf{f}$,
denoted $\mathrm{im}\!\left(\mathsf{f}\right)$, is the set $\mathsf{f}\!\left(\mathrm{dom}\!\left(\mathsf{f}\right)\right)=\left\{ \mathsf{f}\!\left(x\right)\mid x\in\mathcal{\mathfrak{X}}\right\} $.
\end{defn}
\begin{defn}
\label{def1}A \emph{(partial) function} $f:X\nrightarrow Y$ is a
prefunction $\mathsf{f}:X\nRightarrow Y$ and a set $\mathfrak{Y}$
such that $\mathrm{im}\!\left(\mathsf{f}\right)\subseteq\mathfrak{Y}\subseteq Y$.
The \emph{domain} of $f$, denoted $\mathrm{dom}\!\left(f\right)$,
is the domain of $\mathsf{f}$, and $\mathfrak{Y}$ is called the
\emph{codomain} of $f$, denoted $\mathrm{cod}\!\left(f\right)$.
The \emph{image} of $f$, denoted $\mathrm{im}\!\left(f\right)$,
is defined to be the image of $\mathsf{f}$.
\end{defn}
Although this terminology will not be used below, $X$ may be called
the \emph{total domain} for $\mathsf{f}:X\nRightarrow Y$ or $f:X\nrightarrow Y$,
and $Y$ may be called the \emph{total codomain} for $\mathsf{f}:X\nRightarrow Y$
or $f:X\nrightarrow Y$.
A \emph{total} \emph{prefunction} $\mathsf{f}:X\Rightarrow Y$ is
a prefunction such that $\mathrm{dom}\!\left(\mathsf{f}\right)=X$.
A \emph{non-empty} \emph{prefunction} $\mathsf{f}$ is a prefunction
such that $\mathrm{dom}\!\left(\mathsf{f}\right)\neq\emptyset$. The
\emph{restriction} of $\mathsf{f}:X\nRightarrow Y$ to $X'\subset X$
is the prefunction $\mathfrak{\mathsf{f}}\raise-.2ex\hbox{\ensuremath{|}}_{X'}:X'\nRightarrow Y$
such that $\mathrm{dom}\!\left(\mathsf{\mathsf{f}}\raise-.2ex\hbox{\ensuremath{|}}_{X'}\right)=\mathrm{dom}\!\left(\mathsf{f}\right)\cap X'$
and $\mathsf{f}\raise-.2ex\hbox{\ensuremath{|}}_{X'}\!\left(x\right)=\mathsf{f}\!\left(x\right)$
for all $x\in\mathrm{dom}\!\left(\mathsf{f}\raise-.2ex\hbox{\ensuremath{|}}_{X'}\right)$.
A\emph{ pretransformation} on $X$ is a prefunction $\mathsf{f}:X\nRightarrow X$;
a \emph{total} pretransformation on $X$ is a total prefunction $\mathsf{f}:X\Rightarrow X$.
An \emph{identity pretransformation} $\mathsf{Id}_{S}$ is a pretransformation
on $X\supseteq S$ such that $\mathrm{dom}\!\left(\mathsf{Id}_{S}\right)=S$
and $\mathsf{Id}_{S}\!\left(x\right)=x$ for all $x\in\mathrm{dom}\!\left(\mathsf{Id}_{S}\right)$.
Similarly, a \emph{total} \emph{function} $f:X\rightarrow Y$ is a
function such that $\mathrm{dom}\!\left(f\right)=X$ and $\mathrm{cod}\!\left(f\right)=Y$.
A \emph{non-empty function} $f$ is a function such that $\mathrm{dom}\!\left(f\right)~\neq~\emptyset$.
The \emph{restriction} of $f:X\nrightarrow Y$ to $X'\subset X$ is
the function $f\raise-.2ex\hbox{\ensuremath{|}}_{X'}:X'\nrightarrow Y$
such that $\mathrm{dom}\!\left(f\raise-.2ex\hbox{\ensuremath{|}}_{X'}\right)=\mathrm{dom}\!\left(f\right)\cap X'$,
$\mathrm{cod}\!\left(f\raise-.2ex\hbox{\ensuremath{|}}_{X'}\right)=\mathrm{cod}\!\left(f\right)$
and $f\raise-.2ex\hbox{\ensuremath{|}}_{X'}\!\left(x\right)=f\!\left(x\right)$
for all $x\in\mathrm{dom}\!\left(f\raise-.2ex\hbox{\ensuremath{|}}_{X'}\right)$.
A\emph{ transformation} on $X$ is a function $f:X\nrightarrow X$;
a \emph{total} transformation on $X$ is a total function $f:X\rightarrow X$.
An \emph{identity transformation} $I\!d_{S}$ is a transformation
on $X\supseteq S$ such that $\mathrm{dom}\!\left(I\!d_{S}\right)=\mathrm{cod}\!\left(I\!d_{S}\right)=S$
and $I\!d_{S}\!\left(x\right)=x$ for all $x\in\mathrm{dom}\!\left(I\!d_{S}\right)$.
Given a pretransformation $\mathsf{f}$ on $X$, $\mathsf{f}\!\left(x\right)$
denotes some $x\in X$ if and only if $x\in\mathrm{dom}\!\left(\mathsf{f}\right)$;
$\mathsf{f}\!\left(\mathsf{f}\!\left(x\right)\right)$ denotes some
$x\in X$ if and only if $x,\mathsf{\mathsf{f}}\!\left(x\right)\in\mathrm{dom}\!\left(\mathsf{f}\right)$;
etc. We describe such situations by saying that $\mathsf{f}\!\left(x\right)$,
$\mathsf{f}\!\left(\mathsf{f}\!\left(x\right)\right)$, etc. are \emph{defined}.
Similarly, given a transformation $f$ on $X$, $f\!\left(x\right)$,
$f\!\left(f\!\left(x\right)\right)$, etc. are said to be defined
if the corresponding pretransformations $\mathsf{f}\!\left(x\right)$,
$\mathsf{f}\!\left(\mathsf{f}\!\left(x\right)\right)$, etc. are defined\emph{.}
\begin{defn}
\label{def2}A \emph{(partial) binary operation} on a set $X$ is
a non-empty prefunction
\[
\uppi:X\times X\nRightarrow X,\qquad\left(x,y\right)\mapsto xy.
\]
\end{defn}
A \emph{total} binary operation on $X$ is a total prefunction $\uppi:X\times X\Rightarrow X$.
A \emph{(partial) magma} $P$ is a non-empty set $\left|P\right|$
equipped with a binary operation on $\left|P\right|$; a \emph{total}
magma $P$ is a non-empty set $\left|P\right|$ equipped with a total
binary operation on $\left|P\right|$. A \emph{submagma} $P'$ of
a magma $P$ is a set $\left|P'\right|\subseteq\left|P\right|$ such
that if $x,y\in\left|P'\right|$ then $xy\in\left|P'\right|$, with
the restriction of $\uppi$ to $\left|P'\right|\times\left|P'\right|$
as a binary operation. (By an abuse of notation, $P$ will also denote
the set $\left|P\right|$ henceforth.)
The notion of being defined for expressions involving a pretransformation
can be extended in a natural way to expressions involving a binary
operation. We say that $xy$ is defined if and only if $\left(x,y\right)\in\mathrm{dom}\!\left(\uppi\right)$;
that $\left(xy\right)\!z$ is defined if and only if $\left(x,y\right),\left(xy,z\right)\in\mathrm{dom}\!\left(\uppi\right)$;
that $z\!\left(xy\right)$ is defined if and only if $\left(x,y\right),\left(z,xy\right)\in\mathrm{dom}\!\left(\uppi\right)$;
and so on. Thus, if $\left(xy\right)\!z$ or $z\!\left(xy\right)$
is defined then $xy$ is defined.
\begin{rem}
{\small{}To avoid tedious repetition of the word ``partial'', we
speak about (pre)functions and magmas as opposed to total (pre)functions
and total magmas rather than partial (pre)functions and partial magmas
as opposed to (pre)functions and magmas. Note that a binary operation
$\uppi:P\times P\nRightarrow P$ can always be regarded as a total
binary operation $\uppi^{0}:P^{0}\times P^{0}\Rightarrow P^{0}$,
where $P^{0}=P\cup\left\{ 0\right\} $ and $0x=x0=0$ for each $x\in P$,
considering $xy$ to be defined if and only if $xy\neq0$. If we let
$P^{0}$ represent $P$ in this way, it becomes a theorem that if
$\left(xy\right)\!z$ or $z\!\left(xy\right)$ is defined then $xy$
is defined.}{\small \par}
\end{rem}
\subsection{Semigroupoids, poloids and groupoids }
We say that $x$ \emph{precedes} $y$, denoted $x\prec y$, if and
only if $xy$ or $x\!\left(yz\right)$ or $\left(zx\right)\!y$ is
defined, and we write $x\prec y\prec z$ if and only if $x\prec y$
and $y\prec z$, meaning that $xy$ and $yz$ are defined or $x\!\left(yz\right)$
is defined or $\left(xy\right)\!z$ is defined.
\begin{defn}
\label{def3}A \emph{semigroupoid} is a magma $P$ such that for any
\linebreak{}
$x\prec y\prec z\in P$, $\left(xy\right)\!z$ and $x\!\left(yz\right)$
are defined and $\left(xy\right)\!z=x\!\left(yz\right)$.
\end{defn}
A\emph{ unit} in a magma $P$ is any $e\in P$ such that $ex=x$ for
all $x$ such that $ex$ is defined and $xe=x$ for all $x$ such
that $xe$ is defined.
\begin{defn}
\label{def4}A \emph{poloid} is a semigroupoid $P$ such that for
any $x\in P$ there are units $\epsilon_{\!x},\varepsilon_{\!x}\in P$
such that $\epsilon_{x}x$ and $x\varepsilon_{\!x}$ are defined.
\end{defn}
For any $x\in P$, we have $\epsilon_{x}x=x=x\varepsilon_{x}$ since
$\epsilon_{x}$ and $\varepsilon_{x}$ are units; we may call $\epsilon_{x}$
an \emph{effective left unit} for $x$ and $\varepsilon_{x}$ an \emph{effective
right unit} for $x$.
\begin{defn}
\label{def5}A \emph{groupoid} is a poloid $P$ such that for every
$x\in P$ there is a unique $x^{-1}\in P$ such that $xx^{-1}$ and
$x^{-1}x$ are defined and units.
\end{defn}
\begin{rem}
{\small{}Recall that groups, monoids and semigroups are total magmas
with additional properties. Each kind of total magma can be generalized
to a (partial) magma with similar properties, sometimes named by adding
the ending ``-oid'', as in group/groupoid and semigroup/semigroupoid,
so that the process of generalizing to a not necessarily total magma
has become known as ``oidification''. (See the table below.) However,
the terminology is not consistent \textendash{} for example, a monoid
is not a (partial) magma. I prefer ``poloid'' to the rather clumsy
and confusing term ``monoidoid'', which suggests some kind of ``double
oidification''. An important concept should have a short name, and
the idea behind the current terminology is that a monoid has a single
unit, whereas a poloid may have more than one unit.
{}
}{\small \par}
{\small{}}
\begin{tabular}{ll}
\hline
{\small{}total magma (magma, groupoid)} & {\small{}magma (partial magma, halfgroupoid)}\tabularnewline
{\small{}semigroup} & {\small{}semigroupoid}\tabularnewline
{\small{}monoid} & {\small{}poloid (monoidoid)}\tabularnewline
{\small{}group} & {\small{}groupoid}\tabularnewline
\hline
\end{tabular}{\small \par}
{}
\end{rem}
{\small{}It should be kept in mind that semigroups, monoids and groups
can be generalized to other (partial) magmas than semigroupoids, poloids
and groupoids, respectively. For example, if we do not require that
if $x\prec y\prec z$ then $x\!\left(yz\right)$ and $\left(xy\right)\!z$
are defined and equal but only that if $x\!\left(yz\right)$ or $\left(xy\right)\!z$
is defined then $x\!\left(yz\right)$ and $\left(xy\right)\!z$ are
defined and equal, we obtain a semigroup generalized to a certain
(partial) magma but this is not a semigroupoid as defined here. The
specific definitions given in this section are suggested by category
theory.}{\small \par}
\subsection{(Pre)transformation magmas}
Recall that the \emph{full transformation monoid} $\overline{\mathcal{F}}{}_{\!X}$
on a non-empty set $X$ is the set $\overline{\mathcal{F}}{}_{\!X}$
of all total functions $f:X\rightarrow X$, equipped with the total
binary operation
\[
\circ:\overline{\mathcal{F}}{}_{\!X}\times\overline{\mathcal{F}}{}_{\!X}\Rightarrow\overline{\mathcal{F}}{}_{\!X},\qquad\left(f,g\right)\mapsto f\circ g,
\]
where $\left(f\circ g\right)\!\left(x\right)=f\!\left(g\!\left(x\right)\right)$
for all $x\in X$. More generally, a \emph{transformation semigroup}
$\mathcal{F}{}_{\!X}$ is a set of total functions $f:X\rightarrow X$
with $\circ$ as binary operation and such that $f,g\in\mathcal{F}{}_{\!X}$
implies $f\circ g\in\mathcal{F}{}_{\!X}$, and a
\emph{transformation monoid} $\mathcal{M}_{\!X}$ is a transformation
semigroup such that $I\!d{}_{\!X}\in\mathcal{M}{}_{\!X}$.
\begin{example}
\label{exa1}Set $X=\left\{ 1,2\right\} $, let $e:X\rightarrow X$
be defined by $e\!\left(1\right)=e\!\left(2\right)=1$ and let $M_{X}$
be the magma with $\left\{ e\right\} $ as underlying set and function
composition $\circ$ as binary operation. Then $M_{X}$ is a (trivial)
\emph{monoid of transformations}, but it is not a \emph{transformation
monoid}.
\end{example}
When we generalize from total functions $X\rightarrow X$ to functions
$X\nrightarrow X$ or prefunctions $X\nRightarrow X$, $\mathcal{F}{}_{\!X}$
is generalized from a transformation semigroup to a transformation
magma $\mathscr{F}_{\!X}$ or a pretransformation magma $\mathscr{R}_{\!X}$.
\begin{defn}
\label{def6a}Let $X$ be a non-empty set. A\emph{ pretransformation
magma} $\mathscr{R}_{\!X}$ on $X$ is a set $\mathscr{R}_{X}$ of
non-empty pretransformations $\mathsf{f}:X\nRightarrow X$, equipped
with the binary operation
\[
\circ:\mathscr{R}_{\!X}\times\mathscr{R}_{\!X}\nRightarrow\mathscr{R}_{\!X},\qquad\left(\mathsf{f},\mathsf{g}\right)\mapsto\mathsf{f}\circ\mathsf{g},
\]
where $\mathrm{dom}\!\left(\circ\right)=\left\{ \left(\mathsf{f},\mathsf{g}\right)\mid\mathrm{dom}\!\left(\mathsf{f}\right)\supseteq\mathsf{im}\!\left(\mathsf{g}\right)\right\} $
and $\mathsf{f}\circ\mathsf{g}$ if defined is given by $\mathsf{\mathrm{dom}}\!\left(\mathsf{f}\circ\mathsf{g}\right)=\mathrm{dom}\!\left(\mathsf{g}\right)$
and $\left(\mathsf{f}\circ\mathsf{g}\right)\!\left(x\right)=\mathsf{f}\!\left(\mathsf{g}\!\left(x\right)\right)$
for all $x\!\in\!\mathrm{dom}\!\left(\mathsf{f}\circ\mathsf{g}\right)$.
\end{defn}
The \emph{full} pretransformation magma on $X$, denoted $\overline{\mathscr{R}}_{\!X}$,
is the pretransformation magma whose underlying set is the set of
all non-empty pretransformations of the form $\mathsf{f}:X\nRightarrow X$.
\begin{defn}
\label{def6}Let $X$ be a non-empty set. A\emph{ transformation magma}
$\mathscr{F}_{\!X}$ on $X$ is a set $\mathscr{F}_{X}$ of non-empty
transformations $f:X\nrightarrow X$, equipped with the binary operation
\[
\circ:\mathscr{F}_{\!X}\times\mathscr{F}_{\!X}\nRightarrow\mathscr{F}_{\!X},\qquad\left(f,g\right)\mapsto f\circ g,
\]
where $\mathrm{dom}\!\left(\circ\right)=\left\{ \left(f,g\right)\mid\mathrm{dom}\!\left(f\right)\supseteq\mathsf{im}\!\left(g\right)\right\} $
and $f\circ g$ if defined is given by $\mathsf{\mathrm{dom}}\!\left(f\circ g\right)=\mathrm{dom}\!\left(g\right)$,
$\mathrm{cod}\!\left(f\circ g\right)=\mathrm{cod}\!\left(f\right)$
and $\left(f\circ g\right)\!\left(x\right)=f\!\left(g\!\left(x\right)\right)$
for all $x\!\in\!\mathrm{dom}\!\left(f\circ g\right)$.
\end{defn}
The \emph{full} transformation magma on $X$, denoted $\mathscr{\overline{F}}_{\!X}$,
is the transformation magma whose underlying set is the set of all
non-empty transformations $f:X\nrightarrow X$.
A (pre)transformation magma is clearly a magma as described in Definition
\ref{def2}.
The plan in this section, derived from the view that categories are
``webs of monoids'', is to construct transformation magmas that
relate to poloids in the same way that transformation monoids relate
to monoids. As a monoid is an \emph{associative} magma with a \emph{unit},
we look for appropriate generalizations of these two notions.
\begin{fact}
\label{f1}Let $\mathsf{f},\mathsf{g},\mathsf{h}$ be elements of
a pretransformation magma.\emph{ }If $\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}$
and $\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)$ are defined
then $\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}=\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)$.
\end{fact}
\begin{proof}
We have
\begin{gather*}
\mathrm{dom}\!\left(\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}\right)=\mathrm{dom}\!\left(\mathsf{h}\right)=\mathrm{dom}\!\left(\mathsf{g}\circ\mathsf{h}\right)=\mathrm{dom}\!\left(\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)\right),
\end{gather*}
and
\[
\left(\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}\right)\!\left(x\right)=\left(\mathsf{f}\circ\mathsf{g}\right)\left(\mathsf{h}\!\left(x\right)\right)=\mathsf{f}\!\left(\mathsf{g}\!\left(\mathsf{h}\!\left(x\right)\right)\right)=\mathsf{f}\!\left(\left(\mathsf{g}\circ\mathsf{h}\right)\!\left(x\right)\right)=\left(\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)\right)\!\left(x\right)
\]
for all $x\in\mathrm{dom}\!\left(\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}\right)=\mathrm{dom}\!\left(\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)\right)$.
\end{proof}
\begin{lem}
\label{lem1}Let $\mathsf{f,g}$ be elements of a pretransformation
magma. If $\mathsf{f}\circ\mathsf{g}$ is defined then $\mathsf{im}\!\left(\mathsf{f}\right)\supseteq\mathsf{im}\!\left(\mathsf{f}\circ\mathsf{g}\right)$.
\end{lem}
\begin{proof}
Since $\mathrm{dom}\!\left(\mathsf{f}\right)\supseteq\mathsf{im}\!\left(\mathsf{g}\right)$
by definition, we have $\mathsf{im}\!\left(\mathsf{f}\right)=\mathsf{f}\!\left(\mathrm{dom}\!\left(\mathsf{f}\right)\right)\supseteq\mathsf{f}\!\left(\mathsf{im}\!\left(\mathsf{g}\right)\right)=\mathsf{f}\!\left(\mathsf{g}\!\left(\mathrm{dom}\!\left(\mathsf{g}\right)\right)\right)=\left(\mathsf{f}\circ\mathsf{g}\right)\left(\mathrm{dom}\!\left(\mathsf{f}\circ\mathsf{g}\right)\right)=\mathsf{im}\!\left(\mathsf{f}\circ\mathsf{g}\right)$.
\end{proof}
\begin{fact}
\label{f2}Let $\mathsf{f},\mathsf{g},\mathsf{h}$ be elements of
a pretransformation magma. If $\mathsf{f}\circ\mathsf{g}$ and $\mathsf{g}\circ\mathsf{h}$
are defined then $\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}$
and $\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)$ are defined.
\end{fact}
\begin{proof}
We have $\mathrm{dom}\!\left(\mathsf{f}\circ\mathsf{g}\right)=\mathrm{dom}\!\left(\mathsf{g}\right)\supseteq\mathsf{im}\!\left(\mathsf{h}\right)$,
so $\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}$ is defined.
Also, $\mathrm{dom}\!\left(\mathsf{f}\right)\supseteq\mathsf{im}\!\left(\mathsf{g}\right)$
and by Lemma \ref{lem1} $\mathsf{im}\!\left(\mathsf{g}\right)\supseteq\mathsf{im}\!\left(\mathsf{g}\circ\mathsf{h}\right)$,
so $\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)$ is defined.
\end{proof}
\begin{fact}
\label{f3}Let $\mathsf{f},\mathsf{g},\mathsf{h}$ be elements of
a pretransformation magma{\small{}.} If $\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}$
is defined then $\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)$
is defined.
\end{fact}
\begin{proof}
If $\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}$ is defined
so that $\mathsf{f}\circ\mathsf{g}$ is defined then $\mathrm{dom}\!\left(\mathsf{g}\right)=\mathrm{dom}\!\left(\mathsf{f}\circ\mathsf{g}\right)\supseteq\mathsf{im}\!\left(\mathsf{h}\right)$.
Thus, $\mathsf{g}\circ\mathsf{h}$ is defined so Fact \ref{f2} implies
that $\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)$ is defined.
\end{proof}
The implication in the opposite direction does not hold.
\begin{example}
\label{exa2}Let $\mathsf{f,g,h}$ be pretransformations on $\left\{ 1,2\right\} $;
specifically, $\mathsf{f}=\mathsf{h}=\mathsf{Id}_{\left\{ 1\right\} }$
and $\mathsf{g}=\mathsf{Id}_{\left\{ 1,2\right\} }$. Then, $\mathrm{dom}\!\left(\mathsf{g}\right)\supseteq\mathsf{im}\!\left(\mathsf{h}\right)$
and $\mathsf{im}\!\left(\mathsf{g}\circ\mathsf{h}\right)=\left\{ 1\right\} $,
so $\mathrm{dom}\!\left(\mathsf{f}\right)\supseteq\mathsf{im}\!\left(\mathsf{g}\circ\mathsf{h}\right)$.
Hence, $\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)$ is
defined, but we do not have $\mathrm{dom}\!\left(\mathsf{f}\right)\supseteq\mathsf{im}\!\left(\mathsf{g}\right)$,
so $\mathsf{f}\circ\mathsf{g}$ is not defined and hence $\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}$
is not defined.
\end{example}
So, somewhat surprisingly, pretransformation magmas do not have a
two-sided notion of associativeness. We need the notion of a transformation
magma and an additional assumption to derive the complement of Fact
\ref{f3}.
\begin{defn}
\label{def7}A\emph{ transformation semigroupoid} $\mathscr{S}_{\!X}$
on $X$ is a transformation magma $\mathscr{F}_{\!X}$ such that if
$\mathrm{dom}\!\left(f\right)\supseteq\mathsf{im}\!\left(g\right)$
for some $f,g\in\mathscr{F}_{\!X}$ then $\mathrm{dom}\!\left(f\right)=\mathsf{\mathrm{cod}}\!\left(g\right)$.
\end{defn}
Of course, if $\mathrm{dom}\!\left(f\right)=\mathrm{cod}\!\left(g\right)$
then $\mathrm{dom}\!\left(f\right)\supseteq\mathsf{im}\!\left(g\right)$.
Thus, in a transformation semigroupoid $f\circ g$ is defined if and
only if $\mathrm{dom}\!\left(f\right)=\mathsf{\mathrm{cod}}\!\left(g\right)$.
If $\left(f\circ g\right)\circ h$ and $f\circ\left(g\circ h\right)$
are defined then $\mathrm{cod}\!\left(\left(f\circ g\right)\circ h\right)=\mathrm{cod}\!\left(f\circ g\right)=\mathrm{cod}\!\left(f\right)=\mathrm{cod}\!\left(f\circ\left(g\circ h\right)\right)$,
so Fact \ref{f1} holds for transformation magmas as well. It is also
clear that the proofs of Facts \ref{f2} and \ref{f3} apply to transformation
magmas as well. Thus, we can use Facts \ref{f1}\textendash\ref{f3}
also when dealing with transformation magmas. On the other hand, Example
\ref{exa2} applies to transformation magmas as well, but not to
transformation semigroupoids.
\begin{fact}
\label{f4}Let $f,g,h$ be elements of a transformation semigroupoid.
If $f\circ\left(g\circ h\right)$ is defined then $\left(f\circ g\right)\circ h$
is defined.
\end{fact}
\begin{proof}
If $f\circ\left(g\circ h\right)$ is defined then $\mathrm{dom}\!\left(f\right)=\mathrm{cod}\!\left(g\circ h\right)=\mathsf{\mathrm{cod}}\!\left(g\right)$.
Thus, $f\circ g$ is defined, and as $g\circ h$ is defined as well
Fact \ref{f2} implies that {\small{}$\left(f\circ g\right)\circ h$
}is defined.
\end{proof}
\begin{thm}
\label{the1}A transformation semigroupoid is a semigroupoid.
\end{thm}
\begin{proof}
By Facts \ref{f2}, \ref{f3} and \ref{f4}, if $f\prec g\prec h$ then $\left(f\circ g\right)\circ h$
and $f\circ\left(g\circ h\right)$ are defined, and by Fact \ref{f1} this
implies that $\left(f\circ g\right)\circ h=f\circ\left(g\circ h\right)$.
\end{proof}
Poloids are semigroupoids with effective left and right units. Such
units can be added to transformation semigroupoids in a quite natural
way.
\begin{defn}
\label{def8}A\emph{ transformation poloid} $\mathscr{P}_{\!X}$ is
a\emph{ }transformation semigroupoid $\mathscr{S}_{\!X}$ such that
if $f\in\mathscr{S}_{\!X}$ then $I\!d_{\mathrm{dom}\left(f\right)},I\!d_{\mathrm{cod}\left(f\right)}\in\mathscr{S}_{\!X}$.
\end{defn}
\begin{fact}
\label{f5}Let $\mathscr{P}_{X}$ be a transformation poloid. For
any $f\in\mathscr{P}_{X}$, $I\!d_{\mathrm{dom}\left(f\right)}$ and
$I\!d_{\mathrm{cod}\left(f\right)}$ are units.
\end{fact}
\begin{proof}
If $f,g\in\mathscr{P}_{X}$ and $I\!d_{\mathrm{dom}\left(f\right)}\circ g$
is defined then
\begin{gather*}
\mathrm{dom}\!\left(I\!d_{\mathrm{dom}\left(f\right)}\circ g\right)=\mathrm{dom}\!\left(g\right),\\
\mathrm{cod}\!\left(I\!d_{\mathrm{dom}\left(f\right)}\circ g\right)=\mathrm{cod}\!\left(I\!d_{\mathrm{dom}\left(f\right)}\right)=\mathrm{dom}\!\left(I\!d_{\mathrm{dom}\left(f\right)}\right)=\mathrm{cod}\!\left(g\right),
\end{gather*}
and $I\!d{}_{\mathrm{dom}\left(\mathsf{f}\right)}\!\left(g\!\left(x\right)\right)=g\!\left(x\right)$
for all $x\in\mathrm{dom}\!\left(g\right)$. Hence, $I\!d_{\mathrm{dom}\left(f\right)}\circ g=g$.
Also, if $f,h\in\mathscr{P}_{X}$ and $h\circ I\!d_{\mathrm{dom}\left(f\right)}$
is defined then
\begin{gather*}
\mathrm{dom}\!\left(h\right)=\mathrm{cod}\!\left(I\!d_{\mathrm{dom}\left(f\right)}\right)=\mathrm{dom}\!\left(I\!d_{\mathrm{dom}\left(f\right)}\right)=\mathrm{dom}\!\left(h\circ I\!d_{\mathrm{dom}\left(f\right)}\right),\\
\mathrm{cod}\!\left(h\right)=\mathrm{cod}\!\left(h\circ I\!d_{\mathrm{dom}\left(f\right)}\right),
\end{gather*}
and $h\!\left(I\!d{}_{\mathrm{dom}\left(\mathsf{f}\right)}\!\left(x\right)\right)=h\!\left(x\right)$
for all $x\in\mathrm{dom}\!\left(I\!d_{\mathrm{dom}\left(f\right)}\right)=\mathrm{dom}\!\left(h\right)$,
so $h\circ I\!d_{\mathrm{dom}\left(f\right)}=h$.
We have thus shown that $I\!d{}_{\mathrm{dom}\left(f\right)}$ is
a unit.
It is shown similarly that if $I\!d{}_{\mathrm{cod}\left(f\right)}\circ g$
is defined then $I\!d{}_{\mathrm{cod}\left(f\right)}\circ g=g$, and
if $h\circ I\!d_{\mathrm{cod}\left(f\right)}$ is defined then $h\circ I\!d_{\mathrm{cod}\left(f\right)}=h$,
so $I\!d_{\mathrm{cod}\left(f\right)}$ is a unit as well.
\end{proof}
\begin{fact}
\label{f6}Let $\mathscr{P}_{X}$ be a transformation poloid. For
any $f\in\mathscr{P}_{X}$, $f\circ I\!d_{\mathrm{dom}\left(f\right)}$
and $I\!d_{\mathrm{cod}\left(f\right)}\circ f$ are defined.
\end{fact}
\begin{proof}
We have $\mathrm{dom}\!\left(f\right)=\mathrm{dom}\!\left(I\!d_{\mathrm{dom}\left(f\right)}\right)=\mathrm{cod}\!\left(I\!d_{\mathrm{dom}\left(f\right)}\right)$
and $\mathrm{dom}\!\left(I\!d_{\mathrm{cod}\left(f\right)}\right)=\mathrm{cod}\!\left(f\right)$.
\end{proof}
\begin{thm}
\label{the2}A transformation poloid is a poloid.
\end{thm}
\begin{proof}
Immediate from Facts \ref{f5} and \ref{f6}.
\end{proof}
\begin{rem}
{\small{}We have considered two requirements for $\mathsf{f}\circ\mathsf{g}$
or $f\circ g$ being defined, namely that $\mathrm{dom}\!\left(\mathsf{f}\right)\supseteq\mathsf{im}\!\left(\mathsf{g}\right)$
or that $\mathrm{dom}\!\left(f\right)=\mathsf{\mathrm{cod}}\!\left(g\right)$.
Other definitions are common in the literature. Instead of requiring
that $\mathrm{dom}\!\left(\mathsf{f}\right)\supseteq\mathsf{im}\!\left(\mathsf{g}\right)$,
it is often required that $\mathrm{dom}\!\left(\mathsf{f}\right)\cap\mathsf{im}\!\left(\mathsf{g}\right)\neq\emptyset$,
and instead of requiring that $\mathrm{dom}\!\left(f\right)=\mathsf{\mathrm{cod}}\!\left(g\right)$,
it is sometimes required that $\mathrm{dom}\!\left(f\right)=\mathsf{\mathrm{im}}\!\left(g\right)$.
Of these alternative definitions, the first one tends to be too weak
for present purposes, while the second one tends to be too restrictive.}{\small \par}
{\small{}For example, if we stipulate that $\mathsf{f}\circ\mathsf{g}$
is defined if and only if $\mathrm{dom}\!\left(\mathsf{f}\right)\cap\mathsf{im}\!\left(\mathsf{g}\right)\neq\emptyset$
then $\mathsf{f}\circ\mathsf{g}$ and $\mathsf{g}\circ\mathsf{h}$
being defined does not imply that $\left(\mathsf{f}\circ\mathsf{g}\right)\circ\mathsf{h}$
and $\mathsf{f}\circ\left(\mathsf{g}\circ\mathsf{h}\right)$ are defined,
contrary to Fact \ref{f2}.}{\small \par}
{\small{}Also, if we stipulate that $f\circ g$ is defined if and
only if $\mathrm{dom}\!\left(f\right)=\mathsf{\mathrm{im}}\!\left(g\right)$
and let $f,g$ be total transformations on $X$, then $f\circ g$
is defined only if $g$ is surjective so that $\mathrm{im}\!\left(g\right)=X$.
Thus, $\overline{\mathcal{F}}{}_{\!X}=\left\{ f\mid f:X\rightarrow X\right\} $
is not a monoid under this function composition. As monoids are poloids,
this anomaly suggests that the condition $\mathrm{dom}\!\left(f\right)=\mathsf{\mathrm{im}}\!\left(g\right)$
for $f\circ g$ to be defined is not appropriate in the context of
poloids.}{\small \par}
{\small{}On the other hand, stipulating that $f\circ g$ is defined
if and only if $\mathrm{dom}\!\left(f\right)\supseteq\mathsf{im}\!\left(g\right)$
does not give a fully associative binary operation (Example \ref{exa2}). This
is a fatal flaw for many purposes, including representing poloids
as magmas of transformations.}{\small \par}
{\small{}We note that the exact formalization of the notion of ``partial
function'' is important. A ``partial function'' $\mathfrak{f}$
is often defined as being equipped only with a domain and an image
(range), and then there are only three reasonable ways of composing
``partial transformations'': $\mathfrak{f}\circ\mathfrak{g}$ is
defined if and only if $\mathrm{dom}\!\left(\mathfrak{f}\right)\cap\mathsf{im}\!\left(\mathsf{\mathfrak{g}}\right)\neq\emptyset$
or $\mathrm{dom}\!\left(\mathfrak{f}\right)\supseteq\mathsf{im}\!\left(\mathfrak{g}\right)$
or $\mathrm{dom}\!\left(\mathfrak{f}\right)=\mathsf{\mathrm{im}}\!\left(\mathsf{\mathfrak{g}}\right)$.
But according to Definition \ref{def1}, ``partial functions'' have
codomains of their own, so we can stipulate that $\mathfrak{f}\circ\mathfrak{g}$
is defined if and only if $\mathrm{dom}\!\left(\mathfrak{f}\right)=\mathsf{\mathrm{cod}}\!\left(\mathfrak{g}\right)$,
and this turns out to be just what we need when specializing magmas
of ``partial transformations'' to semigroupoids and poloids.}{\small \par}
\end{rem}
\section{Poloids and categories}
\subsection{Elementary properties of abstract poloids}
Recall that a poloid $P$ is a magma satisfying the following conditions:
\begin{lyxlist}{1}
\item [{(P1).}] For any $x\prec y\prec z\in P$, $\left(xy\right)\!z$
and $x\!\left(yz\right)$ are defined and $\left(xy\right)\!z=x\!\left(yz\right)$.
\item [{(P2).}] \noindent For any $x\!\in\!P$ there are units $\epsilon_{\!x},\varepsilon_{\!x}\!\in\!P$
such that $\epsilon_{\!x}x$ and $x\varepsilon_{\!x}$ are defined.
\end{lyxlist}
Let us derive some elementary properties of poloids as abstract algebraic
structures.
{}
\begin{prop}
\label{pro1}Let $P$ be a poloid and $e\in P$ a unit. Then $ee$
is defined and $ee=e$.
\end{prop}
\begin{proof}
Let $\epsilon_{e}\in P$ be an effective left unit for the unit $e$.
Then, $\epsilon_{e}e$ is defined and $e=\epsilon_{e}e=\epsilon_{e}$,
implying the assertion.
\end{proof}
By Proposition \ref{pro1}, every unit is an effective left and right
unit for itself.
\begin{prop}
\label{pro2}Let $P$ be a poloid. If $\epsilon_{x}$ and $\epsilon_{x}'$
are effective left units for $x\in P$ then $\epsilon_{x}=\epsilon_{x}'$,
and if $\varepsilon{}_{x}$ and $\varepsilon_{x}'$ are effective
right units for $x\in P$ then $\varepsilon_{x}=\varepsilon_{x}'$.
\end{prop}
\begin{proof}
By assumption, $\epsilon_{x}x$ and $\epsilon_{x}'x$ are defined
and equal to $x$, so $\epsilon_{x}\!\left(\epsilon_{x}'x\right)$
is defined. Thus, $\left(\epsilon_{x}\epsilon_{x}'\right)\!x$ is
defined, so $\epsilon_{x}\epsilon_{x}'$ is defined. As $\epsilon_{x}$
and $\epsilon_{x}'$ are units, this implies $\epsilon_{x}=\epsilon_{x}\epsilon_{x}'=\epsilon_{x}'$.
The uniqueness of the effective right unit for $x$ is proved in the
same way.
\end{proof}
Note that if $xy$ is defined then $\left(\epsilon_{x}x\right)\!y$
is defined so $\epsilon_{x}\!\left(xy\right)$ is defined and $\epsilon_{x}\!\left(xy\right)=\left(\epsilon_{x}x\right)\!y=xy=\epsilon_{xy}\!\left(xy\right)$,
so by Proposition \ref{pro2} we have $\epsilon_{x}=\epsilon_{xy}$.
A similar argument shows that $\varepsilon_{y}=\varepsilon_{xy}$.
Also note that in a groupoid, where $xx^{-1}$ and $x^{-1}x$ are
defined and units, we have $x\left(x^{-1}x\right)=x$, $x^{-1}\left(xx^{-1}\right)=x^{-1}$,
$\left(xx^{-1}\right)x=x$, and $\left(x^{-1}x\right)x^{-1}=x^{-1}$,
where the four left-hand sides are defined. Thus, by Proposition \ref{pro2}
we have $xx^{-1}=\epsilon_{x}=\varepsilon_{x^{-1}}$ and $x^{-1}x=\varepsilon_{x}=\epsilon_{x^{-1}}$.
\begin{prop}
\label{pro3}Every poloid $P$ can be equipped with surjective functions
\begin{gather*}
s:P\rightarrow E,\qquad x\mapsto\epsilon_{x},\\
t:P\rightarrow E,\qquad x\mapsto\varepsilon_{x},
\end{gather*}
where $E$ is the set of all units in $P$ and $s\!\left(e\right)=t\!\left(e\right)=e$
for all $e\in E$.
\end{prop}
\begin{proof}
Immediate from (P2), Proposition \ref{pro1} and Proposition \ref{pro2}.
\end{proof}
\begin{prop}
\label{pro4}Let $P$ be a poloid. For any $x,y\in P$, $xy$ is defined
if and only if $\varepsilon_{x}=\epsilon_{y}$.
\end{prop}
\begin{proof}
If $xy$ is defined then $\left(x\varepsilon_{x}\right)\!y$ is defined,
so $\varepsilon_{x}y$ is defined and as $\varepsilon_{x}$ is a unit
we have $\varepsilon_{x}y=y=\epsilon_{y}y$, so $\varepsilon_{x}=\epsilon_{y}$
by Proposition \ref{pro2}. Conversely, if $\varepsilon_{x}=\epsilon_{y}$
then $\varepsilon_{x}y$ is defined, and as $x\varepsilon_{x}$ is
defined, $\left(x\varepsilon_{x}\right)\!y=xy$ is defined.
\end{proof}
A \emph{total poloid} is a poloid $P$ whose binary operation $\uppi$
is a total function.
\begin{prop}
A total poloid has only one unit.
\end{prop}
\begin{proof}
For any pair $e,e'\in P$ of units, $ee'$ is defined so $e=ee'=e'$.
\end{proof}
\begin{prop}
\label{pro5}A poloid with only one unit is a monoid.
\end{prop}
\begin{proof}
Let $P$ be a poloid. By assumption, there is a unique unit $e\in P$
such that $e=\varepsilon_{x}=\epsilon_{y}$ for any $x,y\in P$. Therefore,
it follows from Proposition \ref{pro4} that $xy$ and $yz$ are defined
for any $x,y,z\in P$. Hence, $\left(xy\right)\!z$ and $x\!\left(yz\right)$
are defined and equal for any $x,y,z\in P$. Also, $x=\epsilon_{x}x=x\varepsilon_{x}$
for any $x\in P$ implies $x=ex=xe$ for any $x\in P$.
\end{proof}
A poloid can thus be regarded as a generalized monoid, and also as
a generalized groupoid; in fact, poloids generalize groups via monoids
and via groupoids.
\begin{prop}
\label{pro6}A groupoid with only one unit is a group.
\end{prop}
\begin{proof}
A monoid with inverses is a group.
\end{proof}
\subsection{Subpoloids, poloid homomorphisms and poloid actions}
Recall that a submonoid of a monoid $M$ is a monoid $M'$ such that
$M'$ is a submagma of $M$ and the unit in $M'$ is the unit in $M$.
Subpoloids can be defined similarly.
\begin{defn}
\label{def9}A \emph{subpoloid} of a poloid $P$ is a poloid $P'$
such that $P'$ is a submagma of $P$ and every unit in $P'$ is a
unit in $P$.
\end{defn}
Homomorphisms and actions of poloids similarly generalize homomorphisms
and actions of monoids.
\begin{defn}
\label{def10}Let $P$ and $Q$ be poloids. A \emph{poloid homomorphism}
from $P$ to $Q$ is a total function $\phi:P\rightarrow Q$ such
that
\begin{lyxlist}{1}
\item [{(1)}] if $x,y\in P$ and $xy$ is defined then $\phi\!\left(x\right)\!\phi\!\left(y\right)$
is defined and $\phi\!\left(xy\right)=\phi\!\left(x\right)\!\phi\!\left(y\right)$;
\item [{(2)}] if $e$ is a unit in $P$ then $\phi\!\left(e\right)$ is
a unit in $Q$.
\end{lyxlist}
A \emph{poloid isomorphism} is a poloid homomorphism $\phi$ such
that the inverse function $\phi^{-1}$ exists and is a poloid homomorphism.
\end{defn}
Note that $\phi\!\left(x\right)=\phi\!\left(\epsilon_{x}x\right)=\phi\!\left(\epsilon_{x}\right)\!\phi\!\left(x\right)$
by (1) and $\phi\!\left(\epsilon_{x}\right)$ is a unit by (2) in
Definition \ref{def10}, so by Proposition \ref{pro2} we have $\phi\!\left(\epsilon_{x}\right)=\epsilon_{\phi\left(x\right)}$.
Dually, $\phi\!\left(\varepsilon_{x}\right)=\varepsilon_{\phi\left(x\right)}$.
Let $P$ be a poloid, let $Q$ be a magma and assume that there exists
a total function $\phi:P\rightarrow Q$ satisfying (1) and (2) in
Definition \ref{def10} and also such that (1') if $\phi\!\left(x\right)\!\phi\!\left(y\right)$
is defined then $xy$ is defined. It is easy to verify that then $\phi\!\left(P\right)$
is a magma, (P1) is satisfied in $\phi\!\left(P\right)$, and if $x'=\phi\!\left(x\right)\in\phi\!\left(P\right)$
then $\phi\!\left(\epsilon_{x}\right)$ ($\phi\!\left(\varepsilon_{x}\right)$)
is an effective left (right) unit for $x'$, so $\phi\!\left(P\right)$
is a poloid.
\begin{defn}
\label{def11}A \emph{poloid action} of a poloid $P$ on a set $X$
is a total function
\[
\alpha:P\rightarrow\alpha\!\left(P\right)\subseteq\overline{\mathscr{F}}_{\!X}
\]
which is a poloid homomorphism such that if $e\in P$ is a unit then
$\alpha\!\left(e\right)\in\alpha\!\left(P\right)$ is an identity
transformation $I\!d_{\mathrm{dom}\left(\alpha\left(e\right)\right)}$
on $X$.
A \emph{prefunction poloid action} of a poloid $P$ on $X$ is similarly
a total function
\[
\upalpha:P\rightarrow\upalpha\!\left(P\right)\subseteq\overline{\mathscr{R}}_{\!X}
\]
which is a poloid homomorphism such that if $e\in P$ is a unit then
$\upalpha\!\left(e\right)\in\upalpha\!\left(P\right)$ is an identity
pretransformation $\mathsf{Id}{}_{\mathrm{dom}\left(\upalpha\left(e\right)\right)}$
on $X$.
\end{defn}
A poloid action $\alpha$ thus assigns to each $x\in P$ a non-empty
transformation
\[
\alpha\!\left(x\right):X\nrightarrow X,\qquad t\mapsto\alpha\!\left(x\right)\!\left(t\right)
\]
such that if $xy$ is defined then $\alpha\!\left(x\right)\circ\alpha\!\left(y\right)$
is defined and $\alpha\!\left(xy\right)=\alpha\!\left(x\right)\circ\alpha\!\left(y\right)$,
and for each unit $e$ in $P$ its image $\alpha\!\left(e\right)$
is a unit in $\alpha\!\left(P\right)$ such that $\alpha\!\left(e\right)\!\left(t\right)=t$
for each $t\in\mathrm{dom}\!\left(\alpha\!\left(e\right)\right)$.
\begin{rem}
{\small{}The definition of a poloid homomorphism given here implies
the usual definition of a monoid homomorphism. The definition of a
monoid action obtained from Definition \ref{def11} is also the usual
one. Specifically, a monoid action $\alpha$ of $M$ on a set $X$
is a function
\[
\alpha:M\rightarrow\alpha\!\left(M\right)\subseteq\overline{\mathcal{F}}_{\!X}
\]
such that $\alpha\!\left(xy\right)\!\left(t\right)=\alpha\!\left(x\right)\circ\alpha\!\left(y\right)\!\left(t\right)$
and $\alpha\!\left(e\right)\!\left(t\right)=t$ for all $x,y\in M$
and all $t\in X$. Denoting $\alpha\!\left(x\right)\!\left(t\right)$
by $x\cdot t$, this is rendered as $\left(xy\right)\cdot t=x\cdot\left(y\cdot t\right)$
and $e\cdot t=t$. Note that $\alpha\!\left(e\right)=I\!d_{X}$ is
a unit in $\overline{\mathcal{F}}_{\!X}$ and thus in $\alpha\!\left(M\right)$.}{\small \par}
\end{rem}
\subsection{Poloids as transformation poloids}
Recall that every transformation poloid is, indeed, a poloid. Up to
isomorphism, there are, in fact, no other poloids.
\begin{lem}
\label{lem2}For any poloid $P$, there is a prefunction poloid action
\[
\upmu:P\rightarrow\upmu\!\left(P\right)\subseteq\mathscr{\overline{R}}\!_{P},\qquad x\mapsto\upmu\!\left(x\right)
\]
of $P$ on $P$ such that $\upmu$ is a poloid isomorphism.
\end{lem}
\begin{proof}
Set $\upmu\!\left(x\right)=\left(\overline{\upmu\!\left(x\right)},\mathrm{dom}\!\left(\upmu\!\left(x\right)\right)\right)$,
where $\overline{\upmu\!\left(x\right)}\!\left(t\right)=xt$ for all
$t\in\mathrm{dom}\!\left(\upmu\!\left(x\right)\right)$ and $\mathrm{dom}\!\left(\upmu\!\left(x\right)\right)=\left\{ t\!\mid\!xt\;\mathrm{defined}\right\} $.
Then $\upmu\!\left(x\right)$ is a prefunction $P\nRightarrow P$,
and $\upmu\!\left(x\right)$ is non-empty for each $x\in P$ since
$x\varepsilon_{x}$ is defined for each $x\in P$.
Furthermore, $\overline{\upmu\!\left(x\right)}\!\left(\varepsilon{}_{x}\right)=x\varepsilon{}_{x}=x$
for any $x\in P$, and also $\overline{\upmu\!\left(y\right)}\!\left(\varepsilon{}_{x}\right)=y\varepsilon{}_{x}=y$
for any $y\in P$ such that $y\varepsilon{}_{x}$ is defined since
$\varepsilon{}_{x}$ is a unit. Hence, if $x\neq y$ and $y\varepsilon{}_{x}$
is defined then $\overline{\upmu\!\left(x\right)}\!\left(\varepsilon{}_{x}\right)\neq\overline{\upmu\!\left(y\right)}\!\left(\varepsilon{}_{x}\right)$,
so $\overline{\upmu\!\left(x\right)}\neq\overline{\upmu\!\left(y\right)}$;
if $x\neq y$ and $y\varepsilon{}_{x}$ is not defined then $\mathrm{dom}\left(\upmu\!\left(x\right)\right)\neq\mathrm{dom}\left(\upmu\!\left(y\right)\right)$
since $x\varepsilon{}_{x}$ is defined. Thus, $\upmu$ is a bijection.
For any fixed $x,y\in P$ such that $xy$ is defined, $\left(xy\right)\!t$
is defined if and only if $t\in P$ is such that $yt$ is defined.
Thus, $\mathrm{im}\!\left(\upmu\!\left(y\right)\right)=\left\{ yt\mid yt\:\mathrm{defined}\right\} =\left\{ yt\mid x\!\left(yt\right)\;\mathrm{defined}\right\} \subseteq\left\{ t\mid xt\;\mathrm{defined}\right\} =$
$\mathrm{dom}\!\left(\upmu\!\left(x\right)\right)$ and $\left\{ t\mid\left(xy\right)\!t\:\mathrm{defined}\right\} =\left\{ t\mid yt\:\mathrm{defined}\right\} $,
so if $xy$ is defined then $\upmu\!\left(x\right)\circ\upmu\!\left(y\right)$
is defined and $\mathrm{dom}\!\left(\upmu\!\left(xy\right)\right)=\mathrm{dom}\!\left(\upmu\!\left(y\right)\right)=\mathrm{dom}\!\left(\upmu\!\left(x\right)\circ\upmu\!\left(y\right)\right)$.
Also, if $xy$ is defined and $t\in\mathrm{dom}\!\left(\upmu\!\left(xy\right)\right)=\mathrm{dom}\!\left(\upmu\!\left(y\right)\right)$,
meaning that $yt$ is defined, then $\left(xy\right)\!t$ and $x\!\left(yt\right)$
are defined and equal, and as $\left(xy\right)\!t=\overline{\upmu\!\left(xy\right)}\!\left(t\right)$
for all $t\in\mathrm{dom}\!\left(\upmu\!\left(xy\right)\right)$ and
$x\!\left(yt\right)=\overline{\upmu\!\left(x\right)}\circ\overline{\upmu\!\left(y\right)}\left(t\right)$
for all $t\in\mathrm{dom}\!\left(\upmu\!\left(x\right)\circ\upmu\!\left(y\right)\right)$,
this implies that if $xy$ is defined then $\upmu\!\left(xy\right)=\upmu\!\left(x\right)\circ\upmu\!\left(y\right)$.
Conversely, if $\upmu\!\left(x\right)\circ\upmu\!\left(y\right)$
is defined so that $\left\{ t\mid yt\;\mathrm{defined}\right\} =\mathrm{dom}\!\left(\upmu\!\left(y\right)\right)=\mathrm{dom}\!\left(\upmu\!\left(x\right)\circ\upmu\!\left(y\right)\right)=\left\{ t\mid x\!\left(yt\right)\;\mathrm{defined}\right\} $,
then $yt$ defined implies $x\!\left(yt\right)$ defined for any fixed
$x,y\in P$. But this implication does not hold if $xy$ is not defined;
then, $x\!\left(y\varepsilon_{y}\right)$ is not defined although
$y\varepsilon_{y}$ is defined. Hence, if $\upmu\!\left(x\right)\circ\upmu\!\left(y\right)$
is defined then $xy$ must be defined. Therefore, $\upmu\!\left(x\right)\circ\upmu\!\left(y\right)=\upmu\!\left(xy\right)\in\upmu\!\left(P\right)$,
so $\upmu\!\left(P\right)$ is a magma with $\circ$ as binary operation.
Let $e\in P$ be a unit. If $\upmu\!\left(e\right)\circ\upmu\!\left(x\right)$
is defined then $ex$ is defined so $\upmu\!\left(x\right)=\upmu\!\left(ex\right)=\upmu\!\left(e\right)\circ\upmu\!\left(x\right)$,
and if $\upmu\!\left(x\right)\circ\upmu\!\left(e\right)$ is defined
then $xe$ is defined so $\upmu\!\left(x\right)=\upmu\!\left(xe\right)=\upmu\!\left(x\right)\circ\upmu\!\left(e\right)$.
Thus, $\upmu\!\left(e\right)\in\upmu\!\left(P\right)$ is a unit.
Conversely, if $f'=\upmu\!\left(f\right)\in\upmu\!\left(P\right)$
is a unit and $fx$ is defined then $\upmu\!\left(f\right)\circ\upmu\!\left(x\right)$
is defined and $\upmu\!\left(fx\right)=\upmu\!\left(f\right)\circ\upmu\!\left(x\right)=\upmu\!\left(x\right)$,
so $fx=x$ since $\upmu$ is injective. Similarly, if $\upmu\!\left(f\right)\in\upmu\!\left(P\right)$
is a unit and $xf$ is defined then $xf=x$. Hence, $f\in P$ is a
unit.
Thus, we have shown that $\upmu$ satisfies the conditions labeled
(1), (1') and (2) in Section 3.2, so $\upmu\!\left(P\right)$ is a
poloid. Also, (1) and (2) in Definition \ref{def10} are satisfied
by both $\upmu$ and $\upmu^{-1}$, so $\upmu:P\rightarrow\upmu\!\left(P\right)$
is a poloid isomorphism.
The observation that if $e\in P$ is a unit then $\upmu\!\left(e\right)\!\left(t\right)=et=t$
for all $t\in\mathrm{dom}\!\left(\upmu\!\left(e\right)\right)$, so
that $\upmu\!\left(e\right)$ is an identity pretransformation $\mathsf{Id}{}_{\mathrm{dom}\left(\upmu\left(e\right)\right)}$,
completes the proof.
\end{proof}
\begin{lem}
\label{lem3}For any poloid $P$ and function $\upmu$ defined as
in Lemma \ref{lem2}, there is a total function $\tau:\upmu\!\left(P\right)\rightarrow\tau\!\left(\upmu\!\left(P\right)\right)\subseteq\mathscr{\overline{F}}\!_{P}$
such that
\begin{enumerate}
\item $\tau$ is bijective;
\item $\upmu\!\left(x\right)\circ\upmu\!\left(y\right)$ is defined if and
only if $\tau\!\left(\upmu\!\left(x\right)\right)\circ\tau\!\left(\upmu\!\left(y\right)\right)$
is defined;
\item if $\upmu\!\left(x\right)\circ\upmu\!\left(y\right)$ is defined then
$\tau\!\left(\upmu\!\left(x\right)\circ\upmu\!\left(y\right)\right)=\tau\!\left(\upmu\!\left(x\right)\right)\circ\tau\!\left(\upmu\!\left(y\right)\right)$;
\item if $e\in P$ is a unit then $\tau\!\left(\upmu\!\left(e\right)\right)\in\mathscr{\overline{F}}\!_{P}$
is a unit and identity transformation $I\!d{}_{\mathrm{dom}\left(\tau\left(\upmu\left(e\right)\right)\right)}$.
\end{enumerate}
\end{lem}
\begin{proof}
For any prefunction $\upmu\!\left(x\right)=\left(\overline{\upmu\!\left(x\right)},\mathrm{dom}\!\left(\upmu\!\left(x\right)\right)\right):P\nRightarrow P$,
$x\in P$, the tuple
\[
\left/\upmu\!\left(x\right)\right/=\left(\overline{\upmu\!\left(x\right)},\mathrm{dom}\!\left(\upmu\!\left(x\right)\right),\mathrm{dom}\!\left(\upmu\!\left(\epsilon_{x}\right)\right)\right)
\]
is a function $P\nrightarrow P$ for which $\overline{\left/\upmu\!\left(x\right)\right/}=\overline{\upmu\!\left(x\right)}$,
$\mathrm{dom}\!\left(\left/\upmu\!\left(x\right)\right/\right)=\mathrm{dom}\!\left(\upmu\!\left(x\right)\right)$
and $\mathrm{cod}\!\left(\left/\upmu\!\left(x\right)\right/\right)=\mathrm{dom}\!\left(\upmu\!\left(\epsilon_{x}\right)\right)$.
In fact, $\epsilon_{x}x$ is defined, so $\upmu\!\left(\epsilon_{x}\right)\circ\upmu\!\left(x\right)$
is defined, so $\mathrm{cod}\!\left(\left/\upmu\!\left(x\right)\right/\right)\!=\mathrm{dom}\!\left(\upmu\!\left(\epsilon_{x}\right)\right)\supseteq\mathrm{im}\!\left(\upmu\!\left(x\right)\right)=\overline{\upmu\!\left(x\right)}\!\left(\mathrm{dom}\!\left(\upmu\!\left(x\right)\right)\right)=\overline{\left/\upmu\!\left(x\right)\right/}\!\left(\mathrm{dom}\!\left(\left/\upmu\!\left(x\right)\right/\right)\right)$\linebreak{}
$=\mathrm{im}\!\left(\left/\upmu\!\left(x\right)\right/\right)$,
as required. Thus, there is a total function
\[
\tau:\mathscr{\overline{R}}\!_{P}\supseteq\upmu\!\left(P\right)\rightarrow\tau\!\left(\upmu\!\left(P\right)\right)\subseteq\mathscr{\overline{F}}\!_{P},\qquad\upmu\!\left(x\right)\mapsto\left/\upmu\!\left(x\right)\right/.
\]
It remains to prove (1) \textendash{} (4). (1) and (2) are obvious.
Also, $\mathrm{dom}\!\left(\left/\upmu\!\left(x\right)\circ\upmu\!\left(y\right)\right/\right)=\mathrm{dom}\!\left(\upmu\!\left(x\right)\circ\upmu\!\left(y\right)\right)=\mathrm{dom}\!\left(\upmu\!\left(y\right)\right)=\mathrm{dom}\!\left(\left/\upmu\!\left(y\right)\right/\right)=\mathrm{dom}\!\left(\left/\upmu\!\left(x\right)\right/\circ\left/\upmu\!\left(y\right)\right/\right)$
and\linebreak{}
$\mathrm{cod}\!\left(\left/\upmu\!\left(x\right)\circ\upmu\!\left(y\right)\right/\right)=\mathrm{cod}\!\left(\left/\upmu\!\left(xy\right)\right/\right)=\mathrm{dom}\!\left(\upmu\!\left(\epsilon_{xy}\right)\right)=\mathrm{dom}\!\left(\upmu\!\left(\epsilon_{x}\right)\right)=\mathrm{cod}\!\left(\left/\upmu\!\left(x\right)\right/\right)=\mathrm{cod}\!\left(\left/\upmu\!\left(x\right)\right/\circ\left/\upmu\!\left(y\right)\right/\right),$
so $\left/\upmu\!\left(x\right)\circ\upmu\!\left(y\right)\right/=\left/\upmu\!\left(x\right)\right/\circ\left/\upmu\!\left(y\right)\right/$.
Concerning (4), $\upmu\!\left(e\right)$ is a unit and identity pretransformation
$\mathsf{Id}{}_{\mathrm{dom}\left(\upmu\left(e\right)\right)}$ in
$\mathscr{\overline{R}}\!_{P}$, so it suffices to note that $\mathrm{cod}\!\left(\left/\upmu\!\left(e\right)\right/\right)=\mathrm{dom}\!\left(\upmu\!\left(\epsilon_{e}\right)\right)=\mathrm{dom}\!\left(\upmu\!\left(e\right)\right)=\mathrm{dom}\!\left(\left/\upmu\!\left(e\right)\right/\right)$.
\end{proof}
\begin{thm}
\label{lem2-1}For any poloid $P$, there is a poloid action
\[
\alpha:P\rightarrow\alpha\!\left(P\right)\subseteq\mathscr{\overline{F}}\!_{P},\qquad x\mapsto\alpha\!\left(x\right)
\]
of $P$ on $P$ such that $\alpha$ is a poloid isomorphism and $\alpha\!\left(P\right)$
equipped with $\circ$ is a transformation poloid.
\end{thm}
\begin{proof}
First set $\alpha=\tau\circ\upmu$ and use Lemmas \ref{lem2} and
\ref{lem3} to prove the first part of the theorem. It remains to
show that $\alpha\!\left(P\right)$ is a transformation poloid. Recall
that $\upmu$ and $\tau$ are injective so that $\alpha$ is injective,
and note that $\mathrm{dom}\!\left(\alpha\!\left(x\right)\right)=\mathrm{dom}\!\left(\alpha\!\left(x\varepsilon_{x}\right)\right)=\mathrm{dom}\!\left(\alpha\!\left(x\right)\circ\alpha\!\left(\varepsilon_{x}\right)\right)=\mathrm{dom}\!\left(\alpha\!\left(\varepsilon_{x}\right)\right)$
and that identity transformations, such as $\alpha\!\left(\varepsilon_{x}\right)$
and $\alpha\!\left(\epsilon_{y}\right)$, are determined by their
domains. Hence, we have
\begin{align*}
& \mathrm{dom}\!\left(\alpha\!\left(x\right)\right)=\mathrm{cod}\!\left(\alpha\!\left(y\right)\right)\\
\Longleftrightarrow\quad & \mathrm{dom}\!\left(\alpha\!\left(\varepsilon_{x}\right)\right)=\mathrm{dom}\!\left(\alpha\!\left(\epsilon_{y}\right)\right)\\
\Longleftrightarrow\quad & \alpha\!\left(\varepsilon_{x}\right)=\alpha\!\left(\epsilon_{y}\right)\\
\Longleftrightarrow\quad & \varepsilon_{x}=\epsilon_{y}\\
\Longleftrightarrow\quad & xy\;\mathrm{defined}\\
\Longleftrightarrow\quad & \alpha\!\left(x\right)\circ\alpha\!\left(y\right)\;\mathrm{defined}.
\end{align*}
Thus the poloid of transformations $\alpha\!\left(P\right)$ is a
transformation semigroupoid by \linebreak{}
Definition \ref{def7}. Also, if $\alpha\!\left(x\right)\in\alpha\!\left(P\right)$
then $\alpha\!\left(\epsilon_{x}\right),\alpha\!\left(\varepsilon_{x}\right)\in\alpha\!\left(P\right)$,
$\alpha\!\left(\epsilon_{x}\right)=I\!d_{\mathrm{dom}\left(\alpha\left(\epsilon_{x}\right)\right)}=I\!d{}_{\mathrm{cod}\left(\alpha\left(x\right)\right)}$
and $\alpha\!\left(\varepsilon{}_{x}\right)=I\!d{}_{\mathrm{dom}\left(\alpha\left(\varepsilon{}_{x}\right)\right)}=I\!d{}_{\mathrm{dom}\left(\alpha\left(x\right)\right)}$,
so the transformation semigroupoid $\alpha\!\left(P\right)$ is a
transformation poloid by Definition \ref{def8}.
\end{proof}
\begin{cor}
\label{the3-1}Any poloid is isomorphic to a transformation poloid.
\end{cor}
This is a ``Cayley theorem'' for poloids; it generalizes similar isomorphism
theorems for groupoids, monoids and groups. Note, though, that $\alpha\!\left(P\right)$
is not only a \emph{poloid of transformations} isomorphic to $P$,
but actually a \emph{transformation poloid} isomorphic to $P$, so
Corollary \ref{the3-1} is stronger than a straightforward generalization
of the ``Cayley theorem'' as usually stated.
\subsection{Categories as poloids}
It is no secret that a poloid is the same as a small arrows-only category.
In various guises, (P1), (P2) and Propositions \ref{pro1} \textendash{}
\ref{pro4} appear as axioms or theorems in category theory. The two-axiom
system proposed here is related to the set of ``Gruppoid'' axioms
given by Brandt \cite{key-1}, and essentially equivalent to axiom
systems used by Freyd \cite{key-2}, Hastings \cite{key-6}, and others.
By Proposition 3, one can define functions $s:x\mapsto\epsilon_{x}$
and $t:x\mapsto\varepsilon_{x}$; axiom systems using these two functions
but equivalent to the one given here, as used by Freyd and Scedrov
\cite{key-3}, currently often serve to define arrows-only categories.
Concepts from category theory can be translated into the language
of poloids and vice versa. For example, an initial object in a category
corresponds to some unit $\epsilon\in P$ such that for every unit
$e\in P$ there is a unique $x\in P$ such that $\epsilon x$ and
$xe$ are defined (hence, $\epsilon x=x=xe$). More significantly,
in the language of category theory a subpoloid is a subcategory, and
a poloid homomorphism is a functor.
Looking at categories as ``webs of monoids'' does lead to some shift
of emphasis and perspective, however. In particular, whereas the notion
of a category acting on a set is not emphasized in texts on category
theory, the corresponding notion of a poloid action is central when
regarding categories as poloids. For example, recall that letting
a group act on itself we obtain Cayley's theorem for groups. Similarly,
letting a poloid act on itself we have obtained a Cayley theorem for
poloids \cite{key-7}, corresponding to Yoneda's lemma for categories.
Poloid actions are also a tool that can be used to define ordinary
(small) two-sorted categories in terms of poloids \textendash{} we
let a poloid $P$ act on a set $O$ in a special way, then interpreting
the elements of $P$ as morphisms and the elements of $O$ acted on
by $P$ as objects.
Applying an algebraic perspective on category theory may thus lead
to more than merely a reformulation of category theory, especially
as the algebraic structures related to categories are also linked
to specific magmas of transformations.
{}
\appendix
\section{Constellations}
A\emph{ constellation} \cite{key-4,key-7}, is defined in \cite{key-5}
as follows:
\begin{quote}
A {[}\emph{left}{]}\emph{ constellation} is a structure $P$ of signature
$(\cdot,D)$ consisting of a class $P$ with a partial binary operation
and unary operation $D$ {[}...{]} that maps onto the set of \emph{projections}
$E\subseteq P$, so that $E=\left\{ D(x)\mid x\in P\right\} $, and
such that for all $e\in E$, $ee$ exists and equals $e$, and for
which, for all $x,y,z\in P$:
\begin{lyxlist}{00.00.0000}
\item [{(C1)}] if $x\cdot(y\cdot z)$ exists then so does $(x\cdot y)\cdot z$,
and then the two are equal;
\item [{(C2)}] $x\cdot(y\cdot z)$ exists if and only if $x\cdot y$ and
$y\cdot z$ exist;
\item [{(C3)}] for each $x\in P$, $D(x)$ is the unique left identity
of $x$ in $E$ (i.e. it satisfies $D(x)\cdot x=x$);
\item [{(C4)}] for $a\in P$ and $g\in E$, if $a\cdot g$ exists then
it equals $a$.
\end{lyxlist}
\end{quote}
It turns out that constellations generalize poloids. Recall that by
Definition \ref{def3} a semigroupoid is a partial magma such that
if (a) $x\!\left(yz\right)$ is defined or (b) $\left(xy\right)\!z$
is defined or (c) $xy$ and $yz$ are defined then $x\!\left(yz\right)$
and $\left(xy\right)\!z$ are defined and $x\!\left(yz\right)=\left(xy\right)\!z$.
Removing (a), we obtain the following definition.
\begin{defn}
\label{def12}A \emph{right-directed semigroupoid} is a magma $P$
such that, for any $x,y,z\in P$, if $\left(xy\right)\!z$ is defined
or $xy$ and $yz$ are defined then $\left(xy\right)\!z$ and $x\!\left(yz\right)$
are defined and $x\!\left(yz\right)=\left(xy\right)\!z$.
\end{defn}
The condition in this definition corresponds to conditions (C1) and
(C2) in \cite{key-5} except for some non-substantial differences.
First, we are defining here the left-right dual of the notion defined
by (C1) and (C2). This amounts to a difference in notation only, deriving
from the fact that functions are composed from left to right in \cite{key-5}
while they are composed from right to left here. Second, it is not
necessary to postulate that if $\left(xy\right)\!z$ is defined then
$xy$ and $yz$ are defined, in accordance with (C2), because, by
Definition \ref{def12}, if $\left(xy\right)\!z$ is defined then
$x(yz)$ is defined, so $xy$ and $yz$ are defined. Finally, in \cite{key-5}
$P$ is assumed to be a class rather than a set; this difference has
to do with set-theoretic considerations that need not concern us here.
We shall need some generalizations of the unit concept. First, a \emph{left
unit} in $P$ is an element $\epsilon$ of $P$ such that $\epsilon x=x$
for all $x\in P$ such that $\epsilon x$ is defined, while a \emph{right
unit} in $P$ is an element $\varepsilon$ of $P$ such that $x\varepsilon=x$
for all $x\in P$ such that $x\varepsilon$ is defined. Also, a \emph{local
left unit} $\lambda_{x}$ for $x\in P$ is an element of $P$ such
that $\lambda_{x}x$ is defined and $\lambda_{x}x=x$, while a \emph{local
right unit} $\rho_{x}$ for $x\in P$ is an element of $P$ such that
$x\rho_{x}$ is defined and $x\rho_{x}=x$.
\begin{defn}
\label{def13}A \emph{right poloid} is a right-directed semigroupoid
$P$ such that for any $x\in P$ there is a unique left unit $\varphi_{x}\in P$
such that $\varphi_{x}$ is a local right unit for $x$.
\end{defn}
\begin{prop}
\label{pro7}Let $P$ be a right poloid. If $\epsilon\in P$ is a
left unit then $\epsilon\epsilon$ is defined and $\epsilon\epsilon=\epsilon$.
\end{prop}
\begin{proof}
Let $\varphi_{\epsilon}\in P$ be a local right unit for the left
unit $\epsilon$. Then $\epsilon\varphi_{\epsilon}$ is defined and
$\varphi_{\epsilon}=\epsilon\varphi_{\epsilon}=\epsilon$, and this
implies the assertion.
\end{proof}
Thus, the left unit $\epsilon$ is the unique local right unit $\varphi_{\epsilon}$
for itself.
Disregarding (C1) and (C2), which were incorporated in Definition
\ref{def12}, the requirements stated in the definition cited above
can be summed up as follows:
\begin{lyxlist}{00.00.0000}
\item [{(C)}] For each $x\in P$, there is exactly one $D\!\left(x\right)\in E=\left\{ D\!\left(x\right)\mid x\in P\right\} $
such that $D\!\left(x\right)\cdot x$ is defined and $D\!\left(x\right)\cdot x=x$,
and every $e\in E$ is a right unit in $P$ and such that $e\cdot e$
is defined and equal to $e$.
\end{lyxlist}
Using (C), it can be proved as in Proposition \ref{pro7} that if
$f\in P$ is a right unit then $f\cdot f$ is defined and $f\cdot f=f$,
so $f$ is the unique local left unit $D\!\left(f\right)$ for itself.
Thus, $E$ equals the set of right units in $P$, since conversely
every $e\in E$ is a right unit in $P$ by (C). As all right units
are idempotent, this means that the requirement that all elements
of $E$ are idempotent is redundant, so (C) can be simplified to:
\begin{lyxlist}{00.00.0000}
\item [{(C{*})}] For each $x\in P$, there is exactly one right unit $D\!\left(x\right)\in P$
such that $D\!\left(x\right)\cdot x$ is defined and $D\!\left(x\right)\cdot x=x$.
\end{lyxlist}
In our terminology, this means, of course, that for any $x\in P$
there is a unique left unit $\varphi_{x}$ in $P$ such that $\varphi_{x}$
is a local right unit for $x$. We conclude that a (small) constellation
is just a right poloid; note that $D$ is just the function $x\mapsto\varphi_{x}$.
Proposition \ref{pro7} generalizes Proposition \ref{pro1}, and there
are also natural generalizations of Propositions \ref{pro2} \textendash{}
\ref{pro4} to right poloids.
It should be pointed out that in \cite{key-5} an alternative definition
of constellations is also given; this definition is essentially the
same as Definition \ref{def13} here (see Proposition 2.9 in \cite{key-5}).
So while the definition of constellations cited above reflects the
historical development of that notion, it has been shown here and
in \cite{key-5} that a more direct approach can also be used.
{}
Let us also look at the transformation systems corresponding to constellations.
{}
\begin{thm}
\label{the3}A pretransformation magma is a right-directed semigroupoid.
\end{thm}
\begin{proof}
Use Facts \ref{f1} \textendash{} \ref{f3} in Section 2.3.
\end{proof}
A \emph{domain} pretransformation magma is a pretransformation magma
$\mathscr{R}_{\!X}$ such that if $\mathsf{f}\!\in\!\mathscr{R}_{\!X}$
then $\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\!\in\!\mathscr{R}_{\!X}$.
Corresponding to Theorem \ref{the2} in Section 2.3, we have the following
result.
\begin{thm}
\label{the4}A domain pretransformation magma is a right poloid.
\end{thm}
\begin{proof}
In view of Theorem \ref{the3}, it suffices to show that for any $\mathsf{f}\in\mathscr{R}_{\!X}$
there is a unique left unit $\upvarphi_{\mathsf{f}}\in\mathscr{R}_{\!X}$
such that $\mathsf{f}\circ\upvarphi_{\mathsf{f}}$ is defined and
equal to $\mathsf{f}$, namely $\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}$.
If $\mathsf{f},\mathsf{g}\in\mathscr{R}_{\!X}$ and $\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\circ\mathsf{g}$
is defined so that $\mathrm{dom}\!\left(\mathsf{Id}_{\mathrm{dom}\left(\mathfrak{\mathsf{f}}\right)}\right)\supseteq\mathrm{im}\!\left(\mathsf{g}\right)$
then
\begin{gather*}
\mathrm{dom}\!\left(\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\circ\mathsf{g}\right)=\mathrm{dom}\!\left(\mathfrak{\mathsf{g}}\right),\\
\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\circ\mathsf{g}\left(x\right)=\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\!\left(\mathsf{g}\!\left(x\right)\right)=\mathsf{g}\!\left(x\right)
\end{gather*}
for all $x\!\in\!\mathrm{dom}\!\left(\mathsf{g}\right)$, meaning
that $\mathfrak{\mathsf{Id}}_{\mathrm{dom}\left(\mathsf{f}\right)}\!\circ\mathsf{g}=\mathsf{g}$.
Thus, $\mathfrak{\mathsf{Id}}_{\mathrm{dom}\left(\mathsf{f}\right)}$
is a left unit in $\mathscr{R}_{\!X}$.
Also, $\mathrm{dom}\!\left(\mathsf{f}\right)=\mathrm{dom}\!\left(\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\right)=\mathrm{im}\!\left(\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\right)$,
so $\mathsf{f}\circ\mathsf{Id}_{\mathrm{dom}\left(\mathfrak{\mathsf{f}}\right)}$
is defined, and
\begin{gather*}
\mathrm{dom}\!\left(\mathfrak{\mathsf{f}}\circ\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\right)=\mathrm{dom}\!\left(\mathsf{Id}_{\mathrm{dom}\left(\mathfrak{\mathsf{f}}\right)}\right)=\mathrm{dom}\!\left(\mathfrak{\mathsf{f}}\right),\\
\mathfrak{\mathsf{f}}\circ\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\left(x\right)=\mathsf{f}\!\left(\mathsf{Id}{}_{\mathrm{dom}\left(\mathsf{f}\right)}\!\left(x\right)\right)=\mathfrak{\mathsf{f}}\!\left(x\right)
\end{gather*}
for all $x\in\mathrm{dom}\!\left(\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\right)=\mathrm{dom}\!\left(\mathfrak{\mathsf{f}}\right)$.
Thus, $\mathsf{f}\circ\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}$
is defined and equal to $\mathsf{f}$, so $\mathsf{Id}{}_{\mathrm{dom}\left(\mathsf{f}\right)}\in\mathscr{R}_{\!X}$
is a left unit $\upvarphi_{\mathsf{f}}$ such that $\mathsf{f}\circ\upvarphi_{\mathsf{f}}$
is defined and equal to $\mathsf{f}$.
It remains to show that $\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}$
is the only such $\upvarphi_{\mathsf{f}}$. Let $\upepsilon\in\mathscr{R}_{\!X}$
be a left unit. Then $\mathfrak{\mathsf{Id}}_{\mathrm{dom}\left(\upepsilon\right)}\in\mathscr{R}_{\!X}$
and as $\mathrm{dom}\!\left(\upepsilon\right)=\mathrm{dom}\!\left(\mathfrak{\mathsf{Id}}_{\mathrm{dom}\left(\upepsilon\right)}\right)=\mathrm{im}\!\left(\mathfrak{\mathsf{Id}}_{\mathrm{dom}\left(\upepsilon\right)}\right)$,
so that $\upepsilon\circ\mathfrak{\mathsf{Id}}_{\mathrm{dom}\left(\upepsilon\right)}$
is defined, we have $\upepsilon\circ\mathfrak{\mathsf{Id}}_{\mathrm{dom}\left(\upepsilon\right)}=\mathfrak{\mathsf{Id}}_{\mathrm{dom}\left(\upepsilon\right)}$.
On the other hand,
\begin{gather*}
\mathrm{dom}\left(\upepsilon\circ\mathsf{Id}_{\mathrm{dom}\left(\upepsilon\right)}\right)=\mathrm{dom}\left(\mathsf{Id}_{\mathrm{dom}\left(\upepsilon\right)}\right)=\mathrm{dom}\!\left(\upepsilon\right),\\
\upepsilon\circ\mathsf{Id}_{\mathrm{dom}\left(\upepsilon\right)}\left(x\right)=\upepsilon\!\left(\mathsf{Id}_{\mathrm{dom}\left(\upepsilon\right)}\!\left(x\right)\right)=\upepsilon\!\left(x\right)
\end{gather*}
for all $x\in\mathrm{dom}\!\left(\mathsf{Id}_{\mathrm{dom}\left(\upepsilon\right)}\right)=\mathrm{dom}\!\left(\upepsilon\right)$,
so $\upepsilon\circ\mathsf{Id}_{\mathrm{dom}\left(\upepsilon\right)}=\upepsilon$.
Thus $\upepsilon=\mathsf{Id}_{\mathrm{dom}\left(\upepsilon\right)}$,
so $\upvarphi_{\mathsf{f}}=\mathsf{Id}_{\mathrm{dom}\left(\upvarphi_{\mathsf{f}}\right)}=\mathsf{Id}_{\mathrm{dom}\left(\mathfrak{\mathsf{f}}\circ\upvarphi_{\mathsf{f}}\right)}=\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}$.
\end{proof}
With Theorem \ref{the2} and Corollary \ref{the3-1} in mind, one
might expect, given Theorem \ref{the4}, that conversely every right
poloid is isomorphic to some domain pretransformation magma (regarded
as a right poloid). Indeed, any poloid can be embedded in a pretransformation
magma by Lemma \ref{lem2}, and it can be shown that $\upmu\!\left(\varepsilon_{x}\right)=\mathsf{Id}_{\mathrm{dom}\left(\upmu\left(x\right)\right)}$,
so any poloid can actually be embedded in a domain pretransformation
magma. Also, the proof of Lemma \ref{lem2} uses almost only properties
of poloids that they share with right poloids. There is one crucial
exception, though: both $\varepsilon_{x}$ and $\varphi_{x}$ are
local right units, but in addition $\varepsilon_{x}$ is a unit while
$\varphi_{x}$ is just a left unit. The fact that $\varepsilon_{x}$
is a unit is used to prove that $x\mapsto\upmu\!\left(x\right)$ is
injective, and this is not true for all right poloids.
\begin{example}
\label{ex3} The magma defined by the Cayley table below is a right
poloid with $x=\varphi_{x}$ and $y=\varphi_{y}$, but $\upmu\!\left(x\right)=\upmu\!\left(y\right)$.
\[
\begin{array}{ccc}
& x & y\\
x & x & y\\
y & x & y
\end{array}
\]
\end{example}
This suggests that we look for an additional condition on right poloids
to ensure that $x\mapsto\upmu\!\left(x\right)$ is injective. On finding
such a condition, we can prove a weakened converse of Theorem \ref{the4}
by an argument similar to the proof of Lemma \ref{lem2}.
Adapting a definition in \cite{key-5}, we say that a right poloid
such that if $\varphi_{x}\varphi_{y}$ and $\varphi_{y}\varphi_{x}$
are defined then $\varphi_{x}=\varphi_{y}$ is \emph{normal}. (The
poloid in Example \ref{ex3} is not normal.) This notion is the key
to the following three results:
\begin{thm}
\label{the6}A domain pretransformation magma is a normal right poloid.
\end{thm}
\begin{proof}
In a domain pretransformation magma, $\upvarphi_{\mathsf{f}}=\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}$.
Thus, $\upvarphi_{\mathsf{f}}\circ\upvarphi_{\mathsf{g}}$ is defined
if and only if $\mathrm{dom}\!\left(\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}\right)\supseteq\mathrm{im}\!\left(\mathsf{Id}_{\mathrm{dom}\left(\mathsf{g}\right)}\right)=\mathrm{dom}\!\left(\mathsf{Id}_{\mathrm{dom}\left(\mathsf{g}\right)}\right)$,
or equivalently $\mathrm{dom}\!\left(\mathsf{f}\right)\supseteq\mathrm{dom}\!\left(\mathsf{g}\right)$,
so if $\upvarphi_{\mathsf{f}}\circ\upvarphi_{\mathsf{g}}$ and $\upvarphi_{\mathsf{g}}\circ\upvarphi_{\mathsf{f}}$
are defined then $\mathrm{dom}\!\left(\mathsf{f}\right)=\mathrm{dom}\!\left(\mathsf{g}\right)$,
so $\mathsf{Id}_{\mathrm{dom}\left(\mathsf{f}\right)}=\mathsf{Id}_{\mathrm{dom}\left(\mathsf{g}\right)}$
or equivalently $\upvarphi_{\mathsf{f}}=\upvarphi_{\mathsf{g}}$.
\end{proof}
\begin{lem}
\label{lem4} In a normal right poloid, the correspondence $x\mapsto\upmu\!\left(x\right)$
is injective.
\end{lem}
\begin{proof}
Assume that $x\neq y$. If $\mathrm{dom}\!\left(\upmu\!\left(x\right)\right)\neq\mathrm{dom}\!\left(\upmu\!\left(y\right)\right)$
then $\upmu\!\left(x\right)\neq\upmu\!\left(y\right)$ as required.
Otherwise, $\mathrm{dom}\!\left(\upmu\!\left(x\right)\right)=\mathrm{dom}\!\left(\upmu\!\left(y\right)\right)$,
and as $x\varphi_{x}$ and $y\varphi_{y}$ are defined we have $\varphi_{x},\varphi_{y}\in\mathrm{dom}\!\left(\upmu\!\left(x\right)\right)=\mathrm{dom}\!\left(\upmu\!\left(y\right)\right)$.
Thus, $x\varphi_{y}$ is defined, so $\left(x\varphi_{x}\right)\!\varphi_{y}$
is defined, so $x\!\left(\varphi_{x}\varphi_{y}\right)$ is defined,
so $\varphi_{x}\varphi_{y}$ is defined. Similarly, $y\varphi_{x}$
is defined, so $\varphi_{y}\varphi_{x}$ is defined. Therefore, $\varphi_{x}=\varphi_{y}$,
so $\upmu\!\left(x\right)\!\left(\varphi_{x}\right)=x$ and $\upmu\!\left(y\right)\!\left(\varphi_{x}\right)=\upmu\!\left(y\right)\!\left(\varphi_{y}\right)=y$,
so again $\upmu\!\left(x\right)\neq\upmu\!\left(y\right)$.
\end{proof}
Using Lemma \ref{lem4} and proceeding as in the proof of Lemma \ref{lem2},
keeping in mind that $\varphi_{\upmu\left(x\right)}=\upmu\!\left(\varepsilon_{x}\right)=\mathsf{Id}_{\mathrm{dom}\left(\upmu\left(x\right)\right)}$,
we obtain the following result:
\begin{thm}
\label{the7}A normal right poloid can be embedded in a domain pretransformation
magma.
\end{thm}
Theorems \ref{the6} and \ref{the7} correspond to Proposition 2.23
in \cite{key-5}.
Let us look at another way of narrowing down the notion of a right
poloid so that any right poloid considered can be embedded in a domain
pretransformation magma. Consider the relation $\leq$ on a right
poloid $P$ given by $x\leq y$ if and only if $y\varphi_{x}$ is
defined and $x=y\varphi_{x}$. The relation $\leq$ is obviously reflexive,
and if $x\leq y$ and $y\leq z$ then (a) $y\varphi_{x}=\left(z\varphi_{y}\right)\!\varphi_{x}$
is defined so that $z\!\left(\varphi_{y}\varphi_{x}\right)=z\varphi_{x}$
is defined and (b) $x=y\varphi_{x}=\left(z\varphi_{y}\right)\!\varphi_{x}=z\!\left(\varphi_{y}\varphi_{x}\right)=z\varphi_{x}$,
so $\leq$ is transitive as well. Hence, $\leq$ is a preorder, called
the \emph{natural preorder} on $P$, so $\leq$ is a partial order
if and only if it is antisymmetric. A right poloid such that
$\epsilon\leq\epsilon'$ and $\epsilon'\leq\epsilon$ implies $\epsilon=\epsilon'$
for any left units $\epsilon,\epsilon'\in P$ is said to be \emph{unit-posetal}.
Recall that for any left unit $\epsilon\in P$ we have $\varphi_{\epsilon}=\epsilon$,
so $\varphi_{\varphi_{x}}=\varphi_{x}$. Thus, $\varphi_{x}\leq\varphi_{y}$
if and only if $\varphi_{y}\varphi_{x}$ is defined and $\varphi_{x}=\varphi_{y}\varphi_{x}$,
so as $\varphi_{y}$ is a left unit we have $\varphi_{x}\leq\varphi_{y}$
if and only if $\varphi_{y}\varphi_{x}$ is defined. Hence, we obtain
the following results.
\begin{thm}
\label{the8}A right poloid is unit-posetal if and only if it is normal.
\end{thm}
\begin{thm}
\label{the9}A domain pretransformation magma is a unit-posetal right
poloid.
\end{thm}
\begin{thm}
\label{the10}A unit-posetal right poloid can be embedded in a domain
pretrans\-formation magma.
\end{thm}
If we specialize the concept of a unit-posetal right poloid by adding
more requirements, the analogue of Theorem \ref{the9} need of course
not hold. In particular, the partial order on the left units is not
necessarily a semilattice.
\begin{example}
Set $X=\left\{ 1,2,3\right\} $ and $\mathscr{R}_{\!X}=\left\{ \mathsf{Id}{}_{\left\{ 1,2\right\} },\mathsf{Id}{}_{\left\{ 2,3\right\} }\right\} $
with $\mathsf{f}\circ\mathsf{g}$ defined as usual when $\mathrm{dom}\left(\mathsf{f}\right)\supseteq\mathrm{im}\left(\mathsf{g}\right)$.
Then $\mathscr{R}_{\!X}$ is a domain pretransformation magma where
$\mathsf{Id}{}_{\left\{ 1,2\right\} }\leq\mathsf{Id}{}_{\left\{ 1,2\right\} }$
and $\mathsf{Id}{}_{\left\{ 2,3\right\} }\leq\mathsf{Id}{}_{\left\{ 2,3\right\} }$,
but this partial order is not a semilattice.
\end{example}
More broadly, let $\boldsymbol{A}$ denote a class of abstract algebraic
structures corresponding to a class $\boldsymbol{C}$ of concrete
magmas of correspondences (functions, prefunctions etc.) in the sense
that any $\boldsymbol{c}$ in $\boldsymbol{C}$ belongs to $\boldsymbol{A}$
when certain operations in $\boldsymbol{C}$ are interpreted as the
operations in $\boldsymbol{A}$. Note that this does not imply that
any $\boldsymbol{a}$ in $\boldsymbol{A}$ can be embedded in some
$\boldsymbol{c}$ in $\boldsymbol{C}$. In particular, if $\boldsymbol{A}$
is a class of generalized groups, with axioms merely defining a generalized
group operation and (optional) generalized identities and inverses,
then the fact that the axioms defining $\boldsymbol{A}$ are satisfied
for any concrete magma $\boldsymbol{c}$ in $\boldsymbol{C}$ does
not provide a strong reason to expect that any $\boldsymbol{a}$ satisfying
these axioms can be embedded in some $\boldsymbol{c}$ in $\boldsymbol{C}$.
As we have just seen, the relation between right poloids and domain
pretransformation magmas is asymmetrical in this respect. (One-sided)
restriction semigroups \cite{key-4} provide another example of this
phenomenon.
\begin{example}
Let $\boldsymbol{A}$ be the class of semigroups such that for each
$\boldsymbol{a}$ in $\boldsymbol{A}$ and each $x\in\boldsymbol{a}$
there is a unique local right unit for $x$ in $\boldsymbol{a}$.
Let $\boldsymbol{C}$ be the class of semigroups of functional relations
on a given set where the binary operation is composition of relations
and such that for each $\boldsymbol{c}$ in $\boldsymbol{C}$ and
each $\mathtt{f}\in\boldsymbol{c}$ the functional relation $\mathtt{Id}{}_{\mathrm{dom}\left(\mathtt{f}\right)}$
belongs to $\boldsymbol{c}$. Then any $\boldsymbol{c}$ in $\boldsymbol{C}$
belongs to $\boldsymbol{A}$, with $\mathtt{Id}{}_{\mathrm{dom}\left(\mathtt{f}\right)}$
the local right unit for $\mathtt{f}$, but it is not the case that
any $\boldsymbol{a}$ in $\boldsymbol{A}$ can be embedded in some
$\boldsymbol{c}$ in $\boldsymbol{C}$. To ensure embeddability, $\boldsymbol{A}$
needs to be narrowed down by additional conditions, subject to the
restriction that $\boldsymbol{A}$ remains wide enough to accommodate
all $\boldsymbol{c}$ in $\boldsymbol{C}$.
\end{example}
We have seen that any transformation poloid is a poloid and that those
poloids which can be embedded in a transformation poloid are simply
all poloids, and a similar elementary symmetry exists for inverse
semigroups, but such cases are perhaps best regarded as ideal rather
than normal, reflecting the fact that poloids and inverse semigroups
are particularly natural algebraic structures.
\end{document} |
\begin{document}
\author{Howard Nuer}
\address{H.N.: Department of Mathematics, Statistics, and Computer Science,
University of Illinois at Chicago,
851 S. Morgan Street
Chicago, IL 60607}
\email{hjnuer@gmail.com}
\author{K\={o}ta Yoshioka}
\address{K.Y.: Department of Mathematics, Faculty of Science, Kobe University, Kobe, 657, Japan}
\email{yoshioka@math.kobe-u.ac.jp}
\title[MMP via wall-crossing for moduli of sheaves on Enriques surfaces]{MMP via wall-crossing for moduli spaces of stable sheaves on an Enriques surface}
\begin{abstract}
We use wall-crossing in the Bridgeland stability manifold to systematically study the birational geometry of the moduli space $M_\sigma(\mathbf{v})$ of $\sigma$-semistable objects of class $\mathbf{v}$ for a generic stability condition $\sigma$ on an arbitrary Enriques surface $X$. In particular, we show that for any other generic stability condition $\tau$, the two moduli spaces $M_\tau(\mathbf{v})$ and $M_\sigma(\mathbf{v})$ are birational. As a consequence, we show that for primitive $\mathbf{v}$ of odd rank $M_\sigma(\mathbf{v})$ is birational to a Hilbert scheme of points. Similarly, in even rank we show that $M_\sigma(\mathbf{v})$ is birational to a moduli space of torsion sheaves supported on a hyperelliptic curve when $\ell(\mathbf{v})=1$. As an added bonus of our work, we prove that the Donaldson--Mukai map $\theta_{\mathbf{v},\sigma}:\mathbf{v}^\perp\to\mathop{\mathrm{Pic}}\nolimits(M_\sigma(\mathbf{v}))$ is an isomorphism for these classes. Finally, we use our classification to fully describe the geometry of the only two examples of moduli of stable sheaves on $X$ that are uniruled (and thus not K-trivial).
\end{abstract}
\maketitle
\setcounter{tocdepth}{1}
\tableofcontents
\section{Introduction}
For almost forty years, moduli spaces of stable sheaves have attracted great interest from mathematicians and physicists alike. They have been studied using vastly different mathematical disciplines, but more recently the development and application of Bridgeland stability conditions to their study has unified most of these tools. Introduced by Bridgeland \cite{Bri07} to formulate a rigorous definition of Douglas's $\pi$-stability for branes in string theory, stability conditions on the derived category $\ensuremath{\mathbb{D}}b(X)$ of a smooth projective variety $X$ provide an adequately robust arena in which to study moduli spaces of sheaves using tools such as Fourier-Mukai transforms, enumerative and motivic invariants, and the minimal model program. In particular, as stability conditions move around in a complex manifold $\mathop{\mathrm{Stab}}\nolimits(X)$ which admits a wall-and-chamber structure for any given Chern character, there is a strong connection between wall-crossing in $\mathop{\mathrm{Stab}}\nolimits(X)$ on the one hand, and wall-crossing formulae for enumerative invariants and birational transformations on moduli spaces on the other.
In this paper, we bring this toolbox to bear on the study of moduli spaces of sheaves on an Enriques surface $X$. In previous work \cite{Nue14a,Nue14b,Yos03,Yos14,Yos16b,Yos16a}, we had shown that for a generic stability condition $\sigma$ in a certain distinguished connected component $\mathop{\mathrm{Stab}}\nolimitsd(X)\subset\mathop{\mathrm{Stab}}\nolimits(X)$ there exist projective coarse moduli spaces $M_\sigma(\mathbf{v})$ parametrizing (S-equivalence classes) of $\sigma$-semistable objects of Mukai vector $\mathbf{v}$ (see \cref{sec:ReviewStabilityK3Enriques} for definitions). We also classified precisely for which Mukai vectors $\mathbf{v}$ (or equivalently Chern character) the moduli space $M_\sigma(\mathbf{v})$ is nonempty, and we studied some coarse geometric and topological invariants of $M_\sigma(\mathbf{v})$ (refer again to \cref{sec:ReviewStabilityK3Enriques} for a brief recap of these results). In particular, for the Mukai vectors (or Chern characters) of stable sheaves, we described some of these invariants for the moduli space of stable sheaves using a combination of modern and classical techniques in conjunction with Bridgeland stability.
We continue our investigation of these moduli spaces in this paper with two more specific goals in mind. The first is to intimately study the effect on moduli spaces of crossing a wall $\ensuremath{\mathcal W}$. More specifically, given a Mukai vector $\mathbf{v}$, a wall $\ensuremath{\mathcal W}\subset\mathop{\mathrm{Stab}}\nolimitsd(X)$ for $\mathbf{v}$, and two stability conditions $\sigma_\pm$ in the opposite and adjacent chambers separated by $\ensuremath{\mathcal W}$, we seek a precise answer to the question: how are $M_{\sigma_+}(\mathbf{v})$ and $M_{\sigma_-}(\mathbf{v})$ related? The basic answer is given by \cref{Thm:MainTheorem1} which says that $M_{\sigma_+}(\mathbf{v})$ and $M_{\sigma_-}(\mathbf{v})$ are birational. This question, and others associated with it, are motivated by a larger trend in moduli theory, wherein minimal models of a given moduli space are shown to be moduli spaces in and of themselves, just of slightly different objects. In the case of K3 surfaces, such a result, stating that all minimal models of $M_\sigma(\mathbf{v})$ are isomorphic to some $M_\tau(\mathbf{v})$ for a different $\tau\in\mathop{\mathrm{Stab}}\nolimitsd(X)$, was shown to be true by Bayer and Macr\`{i} in \cite{BM14b}. While we had hoped to prove such a result in the case of Enriques surfaces, our investigation instead led to a possible counterexample, see \cref{Ex:ConfusingSmallContraction}.
Our second goal is to use wall-crossing, Fourier-Mukai transforms, and \cref{Thm:MainTheorem1} to pin-point precisely where the moduli spaces $M_\sigma(\mathbf{v})$ live in the classification of algebraic varieties. We accomplish this goal with success for almost all Mukai vectors in \cref{Thm:application1,Thm:application2}. It has become more apparent with these results and similar results for other surfaces that Bridgeland stability conditions are a crucial tool not only for studying the birational geometry of moduli spaces, but also more intrinsic and more classical questions about the geometry of moduli spaces.
\subsection*{Summary of Results and Techniques}
Let us turn now to stating our main results and the main tool we use to prove them. While we briefly introduce notation, the reader is invited to see \cref{sec:ReviewStabilityK3Enriques} for more details.
For an Enriques surface $X$, denote by $\varpi:\widetilde{X}\to X$ the two-to-one K3 universal covering. The topological invariants of a coherent sheaf or object $E$ in the derived category $\ensuremath{\mathbb{D}}b(X)$ are encoded in its Mukai vector $$\mathbf{v}(E):=\mathop{\mathrm{ch}}\nolimits(E)\sqrt{\mathop{\mathrm{td}}\nolimits(X)}\in H^*(X,\ensuremath{\mathbb{Q}}).$$ We consider the Mukai lattice $$\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}}):=\mathbf{v}(K(X)),$$ where $K(X)$ is the Grothendieck group, along with the induced pairing $\langle\mathbf{v}(E),\mathbf{v}(F)\rangle=-\chi(E,F)$. For any primitive $\mathbf{v}\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$, if we write $\varpi^*\mathbf{v}=\ell(\mathbf{v})\mathbf{w}\in\ensuremath{\mathbb{H}}al(\widetilde{X},\ensuremath{\mathbb{Z}})$ with $\mathbf{w}$ primitive, then $\ell(\mathbf{v})=1$ or 2 (see \cref{primitive}). The stability conditions that we consider are all contained in the distinguished connected component $\mathop{\mathrm{Stab}}\nolimitsd(X)$ of $\mathop{\mathrm{Stab}}\nolimits(X)$ containing those stability conditions $\sigma$ such that skyscraper sheaves of points are $\sigma$-stable. For $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(X)$, we denote by $M_\sigma(\mathbf{v})$ the moduli space of (S-equivalence classes of) $\sigma$-semistable objects $E\in\ensuremath{\mathbb{D}}b(X)$ with $\mathbf{v}(E)=\mathbf{v}$. These moduli spaces admit a decomposition $$M_\sigma(\mathbf{v})=M_{\sigma}(\mathbf{v},L)\bigsqcup M_\sigma(\mathbf{v},L+K_X),$$ where $L\in\ensuremath{\mathbb{P}}ic(X)$ satisfies $c_1(L)=c_1(\mathbf{v})$ in $H^2(X,\ensuremath{\mathbb{Q}})$ and $M_\sigma(\mathbf{v},L)$ parametrizes $E\in M_\sigma(\mathbf{v})$ such that $\det(E)=L$.
Our first main result states that any two Bridgeland moduli spaces are birational.
\begin{Thm}\label{Thm:MainTheorem1}
Let $\mathbf{v}\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ satisfy $\mathbf{v}^2>0$, and let $\sigma,\tau\in\mathop{\mathrm{Stab}}\nolimitsd(X)$ be generic stability conditions with respect to $\mathbf{v}$ (that is, they are not contained on any wall for $\mathbf{v}$).
\begin{enumerate}
\item\label{enum:MT1-two moduli are birational} The two moduli spaces $M_{\sigma}(\mathbf{v})$ and $M_{\tau}(\mathbf{v})$ of Bridgeland-semistable objects are birational to each other.
\item\label{enum:MT1-birational map given by FM transform} More precisely, there is a birational map induced by a derived (anti-)autoequivalence $\Phi$ of $\ensuremath{\mathbb{D}}b(X)$ in the following sense: there exists a common open subset $U\subset M_{\sigma}(\mathbf{v}),U\subset M_{\tau}(\mathbf{v})$ such that for any $u\in U$, the corresponding objects $E_u\in M_{\sigma}(\mathbf{v})$ and $F_u\in M_{\tau}(\mathbf{v})$ satisfy $F_u=\Phi(E_u)$. If $\ensuremath{\mathbb{P}}ic(\widetilde{X})=\varpi^*\ensuremath{\mathbb{P}}ic(X)$, then the complements of $U$ in $M_{\sigma}(\mathbf{v})$ and $M_{\tau}(\mathbf{v})$ have codimension at least two.
\end{enumerate}
\end{Thm}
We remark that the generic Enriques surface $X$ satisfies $\varpi^*\ensuremath{\mathbb{P}}ic(X)=\ensuremath{\mathbb{P}}ic(\widetilde{X})$. The proof of the analogous statement to \cref{Thm:MainTheorem1} for K3 surfaces \cite[Theorem 1.1]{BM14b} relies heavily on the fact that $M_\sigma(\mathbf{v})$ is a projective hyperk\"{a}hler manifold in the K3 case. Our proof is based on studying stacks of Harder-Narasimhan filtrations and is thus more universal. In fact, the first author has already used this tool to obtain analogous results for bielliptic surfaces \cite{Nue18}.
Let $Y$ be a smooth projective variety, and suppose we are given a stability condition $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(Y)$ on a wall $\ensuremath{\mathcal W}$ for $\mathbf{v}$ and a sequence of Mukai vectors (or Chern characters) $\mathbf{v}_1,\dots,\mathbf{v}_s$ of descending slope with respect to a nearby generic stability condition $\sigma_-$. Then under some mild assumptions on $Y$, we prove in \cref{Thm:DimensionOfHNFiltrationStack} that the substack of $\sigma$-semistable objects whose Harder-Narasimhan filtration with respect to $\sigma_-$ has $i$-th $\sigma_-$-semistable factor of class $\mathbf{v}_i$ has dimension $$\sum_{i=1}^s\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_i)+\sum_{i<j}\langle\mathbf{v}_i,\mathbf{v}_j\rangle.$$ See \cref{sec:DimensionsOfHarderNarasimhan} for more details. In particular, in the case of an Enriques surface, \cref{Thm:DimensionOfHNFiltrationStack} allows us to determine what objects in $M_{\sigma_+}(\mathbf{v},L)$ are destabilized when crossing the wall $\ensuremath{\mathcal W}$, and most importantly the dimension of this locus, purely in terms of a hyperbolic lattice $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ associated with the wall. This is the content of \cref{classification of walls} which gives a much more refined and detailed classification of the type of birational map in \cref{Thm:MainTheorem1} in terms of the lattices $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ associated to the walls $\ensuremath{\mathcal W}$ that are crossed along a path from $\sigma$ to $\tau$.
One of the most powerful uses of a result like \cref{Thm:MainTheorem1} appears in the next three results. They all follow a similar pattern. To study a given moduli space $M_\sigma(\mathbf{v},L)$, we apply a Fourier-Mukai transform $\Phi:\ensuremath{\mathbb{D}}b(X)\to\ensuremath{\mathbb{D}}b(X)$ inducing an isomorphism $M_\sigma(\mathbf{v},L)\mor[\sim] M_{\Phi(\sigma)}(\Phi_*(\mathbf{v}),L')$, where $\Phi_*(\mathbf{v})$ has a more familiar form, say that of a well-understood type of coherent sheaf. Using \cref{Thm:MainTheorem1}, we know that there is a birational map $M_{\Phi(\sigma)}(\Phi_*(\mathbf{v}),L')\dashrightarrow M_\tau(\Phi_*(\mathbf{v}),L')$, where $\tau$ is now in the so-called Gieseker chamber so that $M_\tau(\Phi_*(\mathbf{v}),L')$ is simply the moduli space of stable sheaves of Mukai vector $\Phi_*(\mathbf{v})$ and determinant $L'$. An argument exactly along these lines gives the next two results:
\begin{Thm}\label{Thm:application1}
Let $\mathbf{v}$ be a primitive Mukai vector such that $\mathbf{v}^2>0$ is odd.
Then for a general $\sigma$,
there is an (anti-)autoequivalence $\Phi$ of $\ensuremath{\mathbb{D}}b(X)$
which induces an isomorphism $\Phi:U \to U'$
where $U \subset M_\sigma(\mathbf{v},L)$ and
$U' \subset \ensuremath{\mathbb{H}}ilb^{\frac{\mathbf{v}^2+1}{2}}(X)$ are dense open subsets.
In particular, $M_\sigma(\mathbf{v},L)$ is birationally equivalent to
$\ensuremath{\mathbb{H}}ilb^{\frac{\mathbf{v}^2+1}{2}}(X)$,
$\pi_1(M_\sigma(\mathbf{v},L)) \cong \ensuremath{\mathbb{Z}}/2 \ensuremath{\mathbb{Z}}$,
$K_{M_\sigma(\mathbf{v},L)} \not \cong \ensuremath{\mathcal O}_{M_\sigma(\mathbf{v},L)}$ and
$K_{M_\sigma(\mathbf{v},L)}^{\otimes 2} \cong \ensuremath{\mathcal O}_{M_\sigma(\mathbf{v},L)}$
for $\mathbf{v}^2 \geq 1$.
Moreover, if $\ensuremath{\mathbb{P}}ic(\widetilde{X})=\varpi^*\ensuremath{\mathbb{P}}ic(X)$, then the complements of $U,U'$ have codimension at least two.
\end{Thm}
\begin{Thm}\label{Thm:application2}
Let $\mathbf{v}$ be a primitive Mukai vector such that $\mathbf{v}^2$ is even
and $\ell(\mathbf{v})=1$.
Then there is an elliptic fibration $\pi:X \to \ensuremath{\mathbb{P}}^1$, a curve $C\subset X$,
and a Mukai vector $\mathbf{w}=(0,C,\chi)$ with $\chi\neq0$
such that
(1) $\pi|_C:C \to \ensuremath{\mathbb{P}}^1$ is a double cover, and
(2) there is an (anti-)autoequivalence $\Phi$ of $\ensuremath{\mathbb{D}}b(X)$
which induces an isomorphism $\Phi:U \to U'$
where $U \subset M_\sigma(\mathbf{v},L)$ and
$U' \subset M_H(\mathbf{w},L')$ are open subsets. In particular,
$M_\sigma(\mathbf{v},L)$ is
birationally equivalent to
$M_H(\mathbf{w},L')$
for a general $\sigma$.
If $\ensuremath{\mathbb{P}}ic(\widetilde{X})=\varpi^*\ensuremath{\mathbb{P}}ic(X)$ and $\mathbf{v}^2 \geq 2$, then the complements of $U$ and $U'$ are of codimension at least two, $\pi_1(M_\sigma(\mathbf{v},L)) \cong \ensuremath{\mathbb{Z}}/2 \ensuremath{\mathbb{Z}}$,
$K_{M_\sigma(\mathbf{v},L)} \cong \ensuremath{\mathcal O}_{M_\sigma(\mathbf{v},L)}$,
and $h^{p,0}(M_\sigma(\mathbf{v},L))=0$ for $p \ne 0, \mathbf{v}^2+1$.
\end{Thm}
The remaining case, when $\mathbf{v}^2$ is even and $\ell(\mathbf{v})=2$, can still be attacked in the same way, but the corresponding moduli space of torsion sheaves is not well understood due to the presence of non-reduced curves of larger multiplicity.
Our third use of \cref{Thm:MainTheorem1} goes in a slightly different direction. To prove that all minimal models of $M_\sigma(\mathbf{v},L)$ come from Bridgeland wall-crossing, an important step, arguably a necessary one, is to show that the Donaldson-Mukai map is surjective. Set $K(X)_\mathbf{v}:=\Set{x \in K(X) \ |\ \langle \mathbf{v}(x),\mathbf{v} \rangle=0}$.
For a universal family $\ensuremath{\mathcal E}$ on $M_{\sigma}(\mathbf{v},L) \times X$,
the Donaldson-Mukai map is defined by
\begin{equation}
\begin{matrix}
\theta_{\mathbf{v},\sigma}:& K(X)_\mathbf{v} & \to & \ensuremath{\mathbb{P}}ic(M_{\sigma}(\mathbf{v},L))\\
& x &\mapsto & \det (p_{M_\sigma(\mathbf{v},L)!}(\ensuremath{\mathcal E} \otimes p_X^*(x^{\vee}))).
\end{matrix}
\end{equation}
Then using \cref{Thm:MainTheorem1}, we obtain the following result.
\begin{Cor}\label{Cor:Picard}
Let $\mathbf{v}$ be a primitive Mukai vector with $\mathbf{v}^2 \geq 3$ and $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(X)$ a generic stability condition with respect to $\mathbf{v}$. If either
\begin{enumerate}
\item
$\mathbf{v}^2$ is odd, or
\item
$\mathbf{v}^2$ is even, $\ell(\mathbf{v})=1$, and $\ensuremath{\mathbb{P}}ic(\widetilde{X})=\varpi^*\ensuremath{\mathbb{P}}ic(X)$,
\end{enumerate}
then $\theta_{\mathbf{v}, \sigma}$ is an isomorphism.
\end{Cor}
In an appendix, we use our results to describe the geometry of the two examples of moduli spaces of sheaves on an Enriques surface $X$ that are uniruled. These constitute the only counterexamples to the fact that moduli spaces of stable sheaves on an Enriques surface have numerically trivial canonical divisors. One of these examples only exists on nodal Enriques surfaces, while the other is ubiquitous, but only applies for Mukai vectors of the form $\mathbf{v}=2\mathbf{v}_0$ where $\mathbf{v}_0^2=1$. In each case we show that there is always a wall $\ensuremath{\mathcal W}\subset\mathop{\mathrm{Stab}}\nolimitsd(X)$ that induces the structure of a $\ensuremath{\mathbb{P}}^1$-fibration on $M_\sigma(\mathbf{v})$. Correspondingly, these are the only counterexamples to the conjecture of the first author in \cite{Nue14a} that the Bayer-Macr\`{i} divisor at the wall is big.
\subsection*{Relation to other work} The relation between wall-crossing and the minimal model program studied here has been explored for many other surfaces. We refer the reader to \cite{BM14a,BM14b,MYY14b,MYY14} for K3 surfaces, to \cite{ABCH13,BMW14,CH15,CHW14,LZ13} for $\ensuremath{\mathbb{P}}^2$, to \cite{BC13,AM17} for Hirzebruch and del Pezzo surfaces, and to \cite{MM13,YY14,Yos12} for abelian surfaces.
During the writing phase of this project, the authors became aware of the recent preprint \cite{Bec18}. In that paper, the author proves part \ref{enum:MT1-two moduli are birational} of \cref{Thm:MainTheorem1} as well as \cref{Thm:application1,Thm:application2} for generic Enriques surfaces (that is, when $\ensuremath{\mathbb{P}}ic(\mathbf{w}idetilde{X})=\mathbf{v}arpi^*\ensuremath{\mathbb{P}}ic(X)$) and for Mukai vectors $\mathbf{v}$ such that $\mathbf{v}arpi^*\mathbf{v}$ is primitive (see Theorem 4.4, Proposition 4.6, and Proposition 4.7, respectively, in \cite{Bec18} for the precise results). The approach is entirely different from ours and is again based on hyperk\"{a}hler geometry, specifically the concept of a constant cycle subvariety of a hyperk\"{a}hler manifold. Nevertheless, our approach has numerous advantages. The first is that we do not assume in \cref{Thm:MainTheorem1} that $\mathbf{v}$ is primitive or that $X$ is generic.\footnote{In the case of \cite[Proposition 4.6]{Bec18}, the author actually does not need to assume that $X$ is generic, but instead uses a clever deformation argument with relative moduli spaces of stable sheaves to prove his analogue of \cref{Thm:application1} only for moduli of Gieseker stable sheaves. This is another benefit of our method; although, as the author points out, one can obtain the same generality as in \cref{Thm:application1} using the construction of relative moduli spaces of Bridgeland stable objects in the forthcoming article \cite{BLMNPS19}}
The second advantage, which is a consequence of the first, is that we obtain (in \cref{classification of walls}) a complete classification of the birational behavior induced by any wall $\ensuremath{\mathcal W}$ without any assumptions on $X$ or the divisibility of $\mathbf{v}$. Even when $\mathbf{v}$ is primitive, our approach has the added advantage of giving necessary and sufficient conditions for a potential numerical wall $\ensuremath{\mathcal W}$ to actually be a wall, which is not guaranteed by just using the corresponding condition on the covering K3 surface $\mathbf{w}idetilde{X}$.
\subsection*{Notation}
For a complex number $z\in\ensuremath{\mathbb{C}}$ we denote its real and imaginary parts by $\Re z$ and $\Im z$, respectively.
We will denote by $\ensuremath{\mathbb{D}}b(X)$ the bounded derived category of coherent sheaves on a smooth projective variety $X$. On occasion we will consider the bounded derived category $\ensuremath{\mathbb{D}}b(X,\alpha)$ of the abelian category $\operatorname{Coh}(X,\alpha)$ of $\alpha$-twisted coherent sheaves, where $\alpha\in\mathop{\mathrm{Br}}\nolimits(X)$ is a Brauer class. See \cite[pp. 515-516]{BM14b} and the references contained therein for more background on twisted sheaves.
We will use non-script letters ($E,F,G,\dots$) for objects on a fixed scheme and reserve curly letters ($\ensuremath{\mathcal E},\ensuremath{\mathcal F},\ensuremath{\mathcal G},\dots$) for families of such objects.
For a vector $\mathbf{v}$ in a lattice $\ensuremath{\mathbb{H}}H$ with pairing $\langle\underline{\hphantom{A}},\underline{\hphantom{A}}\rangle$, we abuse notation and write $$\mathbf{v}^2:=\langle\mathbf{v},\mathbf{v}\rangle.$$
For a given lattice $\ensuremath{\mathbb{H}}H$ and integer $k\in\ensuremath{\mathbb{Z}}$, we denote by $\ensuremath{\mathbb{H}}H(k)$ the same underlying lattice with pairing multiplied by the integer $k$.
The intersection pairing on a smooth surface $X$ will be denoted by $(\underline{\hphantom{A}},\underline{\hphantom{A}})$ and the self-intersection of a divisor $D$ by $(D^2)$. The fundamental class of a smooth projective variety $X$ will be denoted by $\varrho_X$.
By an \emph{irreducible} object of an abelian category, we mean an object that has no non-trivial subobjects. These are sometimes called \emph{simple} objects in the literature.
Recall that an object $S$ in the derived category of a K3 or Enriques surface is called \emph{spherical} if $\operatorname{RHom}(S,S)=\ensuremath{\mathbb{C}}\oplus\ensuremath{\mathbb{C}}[-2]$. We denote the associated spherical reflection by $R_S$; it is defined by
\begin{equation}\label{eqn:spherical reflection}
R_S(E):=\operatorname{Cone}(\operatorname{RHom}(S,E)\otimes S\to E).
\end{equation}
Similarly, an object $E_0$ in the derived category of an Enriques surface is called \emph{exceptional} if $\operatorname{RHom}(E_0,E_0)=\ensuremath{\mathbb{C}}$. In analogy to the spherical case, we denote the associated exceptional, or weakly spherical, reflection by $R_{E_0}$; it is defined by
\begin{equation}\label{eqn:weakly spherical reflection}
R_{E_0}(E):=\operatorname{Cone}(\operatorname{RHom}(E_0,E)\otimes E_0\oplus\operatorname{RHom}(E_0(K_X),E)\otimes E_0(K_X)\to E).
\end{equation}
This has also been called the Fourier-Mukai transform associated to $(-1)$-reflection in the literature.
\section{Review: Bridgeland stability conditions}
In this section, we summarize the notion of Bridgeland stability conditions on a triangulated category $\ensuremath{\mathbb{D}}D$. The main reference is \cite{Bri07}.
\begin{Def}\label{def:slicing}
A slicing $\ensuremath{\mathbb{P}}P$ of the category $\ensuremath{\mathbb{D}}D$ is a collection of full extension-closed subcategories $\ensuremath{\mathbb{P}}P(\phi)$ for $\phi\in\ensuremath{\mathbb{R}}$ with the following properties:
\begin{enumerate}
\item $\ensuremath{\mathbb{P}}P(\phi+1)=\ensuremath{\mathbb{P}}P(\phi)[1]$.
\item If $\phi_1>\phi_2$, then $\ensuremath{\mathbb{H}}om(\ensuremath{\mathbb{P}}P(\phi_1),\ensuremath{\mathbb{P}}P(\phi_2))=0$.
\item For any $E\in\ensuremath{\mathbb{D}}D$, there exists a collection of real numbers $\phi_1>\phi_2>\cdots>\phi_n$ and a sequence of triangles
\begin{equation} \label{eq:HN-filt}
\ensuremath{\mathbb{T}}FILTB E A n
\end{equation}
with $A_i \in \ensuremath{\mathbb{P}}P(\phi_i)$.
\end{enumerate}
\end{Def}
The subcategory $\ensuremath{\mathbb{P}}P(\phi)$ is abelian; its nonzero objects are called semistable of phase $\phi$, and its simple objects are called stable. Appropriately, the collection of triangles in \eqref{eq:HN-filt} is called the \emph{Harder-Narasimhan (HN) filtration} of $E$, and we define $\phi_{\max}(E):=\phi_1$ and $\phi_{\mathop{\mathrm{min}}\nolimits}(E):=\phi_n$. For any $\phi\in\ensuremath{\mathbb{R}}$, we denote by $\ensuremath{\mathbb{P}}P(\phi-1,\phi]$ the full subcategory of objects with $\phi_{\mathop{\mathrm{min}}\nolimits}>\phi-1$ and $\phi_{\max}\leq\phi$. This is the heart of a bounded t-structure. We usually consider $\ensuremath{\mathcal A}=\ensuremath{\mathbb{P}}P(0,1]$.
Let us fix a lattice of finite rank $\Lambda$ and a surjective map $\mathbf{v}:K(\ensuremath{\mathbb{D}}D)\twoheadrightarrow\Lambda$.
\begin{Def}\label{def:stability condition}
A Bridgeland stability condition on $\ensuremath{\mathbb{D}}D$ is a pair $\sigma=(Z,\ensuremath{\mathbb{P}}P)$, where
\begin{itemize}
\item the \emph{central charge} $Z:\Lambda\to \ensuremath{\mathbb{C}}$ is a group homomorphism, and
\item $\ensuremath{\mathbb{P}}P$ is a slicing of $\ensuremath{\mathbb{D}}D$,
\end{itemize}satisfying the following compatibility conditions:
\begin{enumerate}
\item For all non-zero $E\in\ensuremath{\mathbb{P}}P(\phi)$, $\frac{1}{\pi}\arg Z(\mathbf{v}(E))=\phi$;
\item For a fixed norm $\lvert\underline{\hphantom{A}}\rvert$ on $\Lambda_\ensuremath{\mathbb{R}}$, there exists a constant $C>0$ such that
\begin{equation*}|Z(\mathbf{v}(E))|\geq C\lvert\mathbf{v}(E)\rvert
\end{equation*}
for all semistable $E$.
\end{enumerate}
\end{Def}
We will write $Z(E)$ for $Z(\mathbf{v}(E))$ from here on. Furthermore, when we wish to refer to the central charge, the heart, or the slicing of a stability condition $\sigma$, we will denote it by $Z_{\sigma}$, $\ensuremath{\mathcal A}_{\sigma}$, and $\ensuremath{\mathbb{P}}P_{\sigma}$, respectively. It is worth noting that giving a stability condition $(Z,\ensuremath{\mathbb{P}}P)$ is equivalent to giving a pair $(Z,\ensuremath{\mathcal A})$ where $Z:\Lambda\to\ensuremath{\mathbb{C}}$ is a stability function with the HN-property in the sense of \cite{Bri08}. See \cite[Proposition 3.5]{Bri08} specifically for this equivalence.
The main theorem in \cite{Bri07} asserts that the set $\mathop{\mathrm{Stab}}\nolimits(\ensuremath{\mathbb{D}}D)$ of stability conditions is a complex manifold of dimension $\mathop{\mathrm{rk}}(\Lambda)$. The manifold $\mathop{\mathrm{Stab}}\nolimits(\ensuremath{\mathbb{D}}D)$ carries two group actions: the group $\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}D)$ of autoequivalences acts on the left by $\Phi(Z,\ensuremath{\mathbb{P}}P)=(Z\circ\Phi_*^{-1},\Phi(\ensuremath{\mathbb{P}}P))$, where $\Phi\in\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}D)$ and $\Phi_*$ is the induced action on $K(\ensuremath{\mathbb{D}}D)$, and the universal cover $\widetilde{\mathop{\mathrm{GL}}\nolimits}_2^+(\ensuremath{\mathbb{R}})$ of matrices in $\mathop{\mathrm{GL}}\nolimits_2(\ensuremath{\mathbb{R}})$ with positive determinant acts on the right. This second action lifts the action of $\mathop{\mathrm{GL}}\nolimits_2(\ensuremath{\mathbb{R}})$ on $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D),\ensuremath{\mathbb{C}})=\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D),\ensuremath{\mathbb{R}}^2)$.
\section{Review: Stability conditions on Enriques surfaces, K3 surfaces, and moduli spaces}\label{sec:ReviewStabilityK3Enriques}We give here a review of Bridgeland stability conditions on Enriques surfaces and K3 surfaces and their moduli spaces of stable complexes. The main references are \cite{Nue14a,Yos16b}. Throughout this section, $Y$ will denote a K3 or Enriques surface.
\subsection{The algebraic Mukai lattice}\label{subsec:algMukaiLattice}Let $X$ be an Enriques surface over an algebraically
closed field $k$ of $\operatorname{char}(k) \ne 2$, and let $\widetilde{X}$ be its covering K3 surface with covering map $\varpi:\widetilde{X}\to X$ and covering involution $\iota$. We denote by $H^*_{\mathrm{alg}}(\widetilde{X},\ensuremath{\mathbb{Z}})$ the algebraic part of the whole cohomology of $\widetilde{X}$, namely
\begin{equation}\label{eq:AlgebraicMukaiLattice}
H^*_\mathrm{alg}(\widetilde{X},\ensuremath{\mathbb{Z}}) = H^0(\widetilde{X},\ensuremath{\mathbb{Z}}) \oplus \mathrm{NS}(\widetilde{X}) \oplus H^4(\widetilde{X},\ensuremath{\mathbb{Z}}).
\end{equation}
Similarly, for the Enriques surface $X$, we define \begin{equation}\label{EnriquesLattice}
H^*_{\mathrm{alg}}(X,\ensuremath{\mathbb{Z}}):=\Set{(r,D,\tfrac{s}{2}) \ |\
r,s \in \ensuremath{\mathbb{Z}}, r \equiv s \mathop{\mathrm{mod}}\nolimits 2, D \in \operatorname{Num}(X) }\subset H^{*}(X,\ensuremath{\mathbb{Q}}),\end{equation} where $\operatorname{Num}(X)=\mathrm{NS}(X)/\langle K_X\rangle$.
\begin{Def} Let $Y=X$ or $\widetilde{X}$.
\begin{enumerate}
\item We denote by $\mathbf{v}:K(Y)\twoheadrightarrow\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})$, the \emph{Mukai vector} $$\mathbf{v}(E):=\mathop{\mathrm{ch}}\nolimits(E)\sqrt{\mathop{\mathrm{td}}\nolimits(Y)}.$$ When $Y=\widetilde{X}$, the Mukai vector takes the form \[\mathbf{v}(E)=(r(E),c_1(E),r(E)+\mathop{\mathrm{ch}}\nolimits_2(E)),\] in the decomposition \eqref{eq:AlgebraicMukaiLattice}, and when $Y=X$ it takes the form \[\mathbf{v}(E)=(r(E),c_1(E),\frac{r(E)}{2}+\mathop{\mathrm{ch}}\nolimits_2(E)),\] in the decomposition \eqref{EnriquesLattice}.
\item The \emph{Mukai pairing} $\langle\underline{\hphantom{A}},\underline{\hphantom{A}}\rangle$ is defined on $\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})$ by $$\langle(r,c,s),(r',c',s')\rangle:=(c,c')-rs'-r's\in\ensuremath{\mathbb{Z}},$$ where $(\underline{\hphantom{A}},\underline{\hphantom{A}})$ is the intersection pairing on $H^2(Y,\ensuremath{\mathbb{Z}})$. The Mukai pairing has signature $(2,\rho(Y))$ and satisfies $\langle\mathbf{v}(E),\mathbf{v}(F)\rangle=-\chi(E,F)=-\sum_i(-1)^i\mathop{\mathrm{ext}}\nolimits^i(E,F)$ for all $E,F\in\ensuremath{\mathbb{D}}b(Y)$.
\item The \emph{algebraic Mukai lattice} is defined to be the pair $(\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}}),\langle\underline{\hphantom{A}},\underline{\hphantom{A}}\rangle)$.
\end{enumerate}
\end{Def}
Given a Mukai vector $\mathbf{v}\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})$, we denote
its orthogonal complement by
\[
\mathbf{v}^\perp:=\Set{\mathbf{w}\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})\ | \ \langle\mathbf{v},\mathbf{w}\rangle=0 }.
\]
We call a Mukai vector $\mathbf{v}$ \emph{primitive} if it is not divisible in $\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})$.
The covering map $\varpi$ induces an embedding $$\varpi^*:\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})\ensuremath{\hookrightarrow}\ensuremath{\mathbb{H}}al(\widetilde{X},\ensuremath{\mathbb{Z}})$$ such that $\langle\varpi^*\mathbf{v},\varpi^*\mathbf{w}\rangle=2\langle\mathbf{v},\mathbf{w}\rangle$ and identifies $\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ with an index 2 sublattice of the $\iota^*$-invariant component of $\ensuremath{\mathbb{H}}al(\widetilde{X},\ensuremath{\mathbb{Z}})$. The following lemma makes this precise:
\begin{Lem}[{\cite[Lem. 2.1]{Nue14a}}]\label{primitive} A Mukai vector $\mathbf{v}=(r,c_1,\frac{s}{2})\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ is primitive if and only if $$\gcd(r,c_1,\frac{r+s}{2})=1.$$ For primitive $\mathbf{v}$, we define $\ell(\mathbf{v})$ by $\varpi^*\mathbf{v}=\ell(\mathbf{v})\mathbf{w}$, where $\mathbf{w}$ is primitive in $\ensuremath{\mathbb{H}}al(\widetilde{X},\ensuremath{\mathbb{Z}})$. Then $\ell(\mathbf{v})=\gcd(r,c_1,s)$ and can be either $1$ or $2$. Moreover,
\begin{itemize}
\item if $\ell(\mathbf{v})=1$, then either $r$ or $c_1$ is not divisible by 2;
\item if $\ell(\mathbf{v})=2$, then $c_2$ must be odd and $r+s\equiv 2\pmod{4}$.
\end{itemize}
\end{Lem}
In particular, for odd rank Mukai vectors or Mukai vectors with $c_1$ primitive, $\varpi^*\mathbf{v}$ is still primitive, while primitive Mukai vectors with $\gcd(r,c_1)=2$ (and thus necessarily $\gcd(r,c_1,s)=2$) must satisfy $\mathbf{v}^2\equiv 0 \pmod 8$, as can be easily seen.
\subsection{Stability conditions on Enriques and K3 surfaces}\label{subsec:StabilityCondOnEnriquesK3}We continue to let $Y=X$ or $\widetilde{X}$.
\begin{Def}
A (full, numerical) \emph{stability condition} on $Y$ is a Bridgeland stability condition on $\ensuremath{\mathbb{D}}b(Y)$ with $\Lambda=\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})$ and $\mathbf{v}$ defined as in \cref{subsec:algMukaiLattice}.
\end{Def}
In particular, for a stability condition $\sigma=(Z,\ensuremath{\mathbb{P}}P)$ on $Y$, the category $\ensuremath{\mathbb{P}}P(\phi)$ has finite length for $\phi\in\ensuremath{\mathbb{R}}$, so any $\sigma$-semistable object $E\in\ensuremath{\mathbb{P}}P(\phi)$ admits a filtration with $\sigma$-stable objects $E_i\in\ensuremath{\mathbb{P}}P(\phi)$. While the filtration itself, called a \emph{Jordan-H\"{o}lder (JH) filtration}, is not unique, the $\sigma$-stable factors $E_i$ are unique, up to reordering.
A connected component $\mathop{\mathrm{Stab}}\nolimitsd(Y)$ of the space of full numerical stability conditions on $\ensuremath{\mathbb{D}}b(Y)$ is described in \cite{Bri08,Yos16b}. Let $\mathbf{b}eta,\mathop{\ord(\omega_S)}\nolimitsmega\in\ensuremath{\mathbb{N}}S(Y)_\ensuremath{\mathbb{R}}$ be two real divisor classes, with $\mathop{\ord(\omega_S)}\nolimitsmega$ ample. For $E\in\ensuremath{\mathbb{D}}b(Y)$, define $$Z_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}(E):=\langle e^{\mathbf{b}eta+i\mathop{\ord(\omega_S)}\nolimitsmega},\mathbf{v}(E)\rangle,$$ and consider the heart $\ensuremath{\mathcal A}_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}$ defined by
\mathbf{b}egin{equation*} \label{eq:AK3}
\ensuremath{\mathcal A}_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}:=\Set{E\in\ensuremath{\mathbb{D}}b(Y)\ |\ \mathbf{b}egin{array}{l}
\mathbf{b}ullet\;\;\ensuremath{\mathbb{H}}H^p(E)=0\mbox{ for }p\not\in\{-1,0\},\\\mathbf{b}ullet\;\;
\ensuremath{\mathbb{H}}H^{-1}(E)\in\ensuremath{\mathcal F}_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta},\\\mathbf{b}ullet\;\;\ensuremath{\mathbb{H}}H^0(E)\in\ensuremath{\mathbb{T}}T_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}\end{array}},
\end{equation*} where $\ensuremath{\mathcal F}_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}$ and $\ensuremath{\mathbb{T}}T_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}$ are defined by
\mathbf{b}egin{enumerate}
\item $\ensuremath{\mathcal F}_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}$ is the set of torsion-free sheaves $F$ such that every subsheaf $F'\subseteq F$ satisfies $\Im Z_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}(F')\leq 0$;
\item $\ensuremath{\mathbb{T}}T_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}$ is the set of sheaves $T$ such that, for every non-zero torsion-free quotient $T\mathop{\ord(\omega_S)}\nolimitsnto Q$, we have $\Im Z_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}(Q)>0$.
\end{enumerate}
We have the following result:
\mathbf{b}egin{Thm}[{\cite[Prop. 10.3]{Bri08},\cite{Yos16b}}]\label{thm:GeometricStabilityConditions}
Let $\sigma$ be a stability condition such that all skyscraper sheaves $k(y)$ of points $y\in Y$ are $\sigma$-stable. Then there are $\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta\in\ensuremath{\mathbb{N}}S(Y)_\ensuremath{\mathbb{R}}$ with $\mathop{\ord(\omega_S)}\nolimitsmega$ ample, such that, up to the $\mathbf{w}idetilde{\mathop{\mathrm{GL}}\nolimits}_2^+(\ensuremath{\mathbb{R}})$-action, $Z_\sigma=Z_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}$ and $\ensuremath{\mathbb{P}}P_{\sigma}(0,1]=\ensuremath{\mathcal A}_{\mathop{\ord(\omega_S)}\nolimitsmega,\mathbf{b}eta}$.
\end{Thm}
We call such stability conditions \emph{geometric}, and we let $U(Y)$ be the open subset of $\mathop{\mathrm{Stab}}\nolimits(Y)$ consisting of geometric stability conditions and denote by $\mathop{\mathrm{Stab}}\nolimitsd(Y)$ the connected component of $\mathop{\mathrm{Stab}}\nolimits(Y)$ containing $U(Y)$.
Using the Mukai pairing, for any stability condition $\sigma=(Z_\sigma,\ensuremath{\mathbb{P}}P_\sigma)$,
we can find $\mho_\sigma \in \ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{C}})$
such that $$Z_\sigma(\mathbf{u}nderline{\hphantom{A}})=\langle \mho_\sigma,\mathbf{v}(\mathbf{u}nderline{\hphantom{A}}) \rangle.$$ For $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(Y)$, $\ensuremath{\mathbb{R}}e\mho_\sigma$ and $\Im\mho_\sigma$ span a positive definite 2-plane in $\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{R}})$. In fact, we can say more. Let $P(Y)$ be the set of $\mho\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{C}})$ with components spanning a positive definite 2-plane, which consists of two connected components, and denote by $P^+(Y)$ the component containing vectors of the form $e^{\mathbf{b}eta+i\mathop{\ord(\omega_S)}\nolimitsmega}$. When $Y=\ensuremath{\mathbf{w}idetilde{X}}$, define the subset
\mathbf{b}egin{equation}\label{eqn:DefOfK3Roots}
\ensuremath{\mathbb{D}}elta(Y):=\Set{\mathbf{w}\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})\ | \ \mathbf{w}^2=-2}.
\end{equation}
When $Y=X$, we define the subsets
\mathbf{b}egin{equation}\label{eqn:DefOfEnriquesRootsSep}
\mathbf{b}egin{split}
\ensuremath{\mathbb{D}}elta(Y)_{-1}&:=\Set{\mathbf{w}\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})\ |\ \mathbf{w}^2=-1}\\
\ensuremath{\mathbb{D}}elta(Y)_{-2}&:=\Set{\mathbf{w}\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})\ |\ \mathbf{b}egin{array}{c}\mathbf{w}^2=-2 \mbox{ and }c_1(\mathbf{v})\equiv D\pmod 2,\\
\;\;\;\;\mbox{where $D$ is a nodal cycle}
\end{array}},
\end{split}
\end{equation}
and take their union
\mathbf{b}egin{equation}\label{eqn:DefOfEnriquesRoots}
\ensuremath{\mathbb{D}}elta(Y):=\ensuremath{\mathbb{D}}elta(Y)_{-1}\cup\ensuremath{\mathbb{D}}elta(Y)_{-2}.
\end{equation}
In either case, we consider the subset $$P_0^+(Y):=P^+(Y)\mathbf{b}ackslash\mathbf{b}igcup_{\mathbf{w}\in\ensuremath{\mathbb{D}}elta(Y)}\mathbf{w}^\perp.$$ Then we have the following fundamental theorem:
\mathbf{b}egin{Thm}[{\cite[Prop. 8.3,Thm. 13.2]{Bri08},\cite[Cor. 3.8]{MMS09}}]\label{thm:CoveringMap}
The map $$\ensuremath{\mathbb{Z}}Z:\mathop{\mathrm{Stab}}\nolimitsd(Y)\to\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{C}}),\;\;\;\;\sigma\mapsto\mho_\sigma$$ is a covering map of the open subset $P_0^+(Y)$, where $Z_\sigma(\mathbf{u}nderline{\hphantom{A}})=\langle\mho_\sigma,\mathbf{v}(\mathbf{u}nderline{\hphantom{A}})\rangle$. If we let $$\mathop{\mathrm{Aut}}\nolimits_0^\mathbf{d}agger(Y):=\Set{\ensuremath{\mathbb{P}}hi\in\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}b(Y))\ | \ \ensuremath{\mathbb{P}}hi_*=\mathop{\mathrm{id}}\nolimits_{\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})},\ensuremath{\mathbb{P}}hi(\mathop{\mathrm{Stab}}\nolimitsd(Y))=\mathop{\mathrm{Stab}}\nolimitsd(Y)},$$ then the group of deck transformations for the covering map $\ensuremath{\mathbb{Z}}Z$ is precisely $\mathop{\mathrm{Aut}}\nolimits_0^\mathbf{d}agger(Y)$. Finally, let $\ensuremath{\mathbb{T}}$ be the subgroup of $\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}b(Y))$ generated by $R_{\ensuremath{\mathcal O}_C(k)}$, where $C$ is a $(-2)$-curve, and $R^2_T$, where $T$ is a spherical or exceptional object (of nonvanishing rank). Then $$\mathop{\mathrm{Stab}}\nolimitsd(Y)=\mathbf{b}igcup_{\ensuremath{\mathbb{P}}hi\in\ensuremath{\mathbb{T}}}\ensuremath{\mathbb{P}}hi(\mathop{\ord(\omega_S)}\nolimitsverline{U(Y)}).$$
\end{Thm}
Here $R_T$ is the spherical or exceptional twist.
\subsubsection{Inducing stability conditions}\label{subsubsec:inducing stability}The natural pull-back and push-forward functors relate stability conditions on $\mathbf{w}idetilde{X}$ to those on $X$ and vice-versa. Indeed, a stability condition $\sigma=(Z_\sigma,\ensuremath{\mathbb{P}}P_\sigma)\in\mathop{\mathrm{Stab}}\nolimitsd(X)$ \emph{induces} a stability condition $\mathbf{v}arpi^*(\sigma)=(Z_{\mathbf{v}arpi^*(\sigma)},\ensuremath{\mathbb{P}}P_{\mathbf{v}arpi^*(\sigma)})\in\mathop{\mathrm{Stab}}\nolimitsd(\mathbf{w}idetilde{X})$ by defining
$$Z_{\mathbf{v}arpi^*(\sigma)}:=Z_\sigma\circ\mathbf{v}arpi_*,\;\;\;\ensuremath{\mathbb{P}}P_{\mathbf{v}arpi^*(\sigma)}(\phi):=\Set{E\in\ensuremath{\mathbb{D}}b(\mathbf{w}idetilde{X})\ |\ \mathbf{v}arpi_*(E)\in\ensuremath{\mathbb{P}}P_\sigma(\phi)}.$$ In the opposite direction, for a stability condition $\sigma'=(Z_{\sigma'},\ensuremath{\mathbb{P}}P_{\sigma'})\in\mathop{\mathrm{Stab}}\nolimitsd(\mathbf{w}idetilde{X})$, we can induce a stability condition $\mathbf{v}arpi_*(\sigma')=(Z_{\mathbf{v}arpi_*(\sigma')},\ensuremath{\mathbb{P}}P_{\mathbf{v}arpi_*(\sigma')})\in\mathop{\mathrm{Stab}}\nolimitsd(X)$ via the definition
$$Z_{\mathbf{v}arpi_*(\sigma')}:=Z_{\sigma'}\circ\mathbf{v}arpi^*,\;\;\;\ensuremath{\mathbb{P}}P_{\mathbf{v}arpi_*(\sigma')}(\phi):=\Set{E\in\ensuremath{\mathbb{D}}b(X)\ |\ \mathbf{v}arpi^*(E)\in\ensuremath{\mathbb{P}}P_{\sigma'}(\phi)}.$$
It was shown in \cite{MMS09} that $\mathbf{v}arpi^*:\mathop{\mathrm{Stab}}\nolimitsd(X)\to\mathop{\mathrm{Stab}}\nolimitsd(\mathbf{w}idetilde{X})$ is a closed embedding onto the submanifold of $\mathop{\mathrm{Stab}}\nolimitsd(\mathbf{w}idetilde{X})$ consisting of $\iota^*$-invariant stability conditions, albeit using different notation.
\subsection{Walls}\label{subsec:Walls}Of paramount importance to our investigation here, the space of Bridgeland stability conditions admits a well-behaved wall and chamber structure. For a fixed Mukai vector $\mathbf{v}\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})$, there exists a locally finite set of \emph{walls} (real codimension one submanifolds with boundary) in $\mathop{\mathrm{Stab}}\nolimitsd(Y)$, depending only on $\mathbf{v}$, with the following properties:
\mathbf{b}egin{enumerate}
\item When $\sigma$ varies in a chamber, that is, a connected component of the complement of the union of walls, the sets of $\sigma$-semistable and $\sigma$-stable objects of class $\mathbf{v}$ do not change.
\item When $\sigma$ lies on a single wall $\ensuremath{\mathcal W}\subset\mathop{\mathrm{Stab}}\nolimitsd(Y)$, there is a $\sigma$-semistable object that is unstable in one of the adjacent chambers and semistable in the other adjacent chamber.
\item These same properties remain true for the wall and chamber structure on $\ensuremath{\mathcal W}_1\cap\cdots\cap\ensuremath{\mathcal W}_k$ whose walls are $\ensuremath{\mathcal W}_1\cap\cdots\cap\ensuremath{\mathcal W}_k\cap\ensuremath{\mathcal W}$ for an additional wall $\ensuremath{\mathcal W}$ for $\mathbf{v}$.
\end{enumerate}
These walls were originally defined in \cite[Prop. 2.3]{Bri08} for K3 surfaces, and more generally in \cite{Tod08}. By the construction of these walls, it follows that, for primitive $\mathbf{v}$ and $\sigma$ in a chamber for $\mathbf{v}$, a JH-filtration factor of a $\sigma$-semistable object of class $m\mathbf{v}$ ($m\in\ensuremath{\mathbb{N}}$) must have class $m'\mathbf{v}$ for $1\leq m'<m$. In particular, for $\sigma$ in a chamber for $\mathbf{v}$, $\sigma$-stability coincides with $\sigma$-semistability.
\mathbf{b}egin{Def}\label{def:generic} Let $\mathbf{v}\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})$. A stability condition $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(Y)$ is called \emph{generic} with respect to $\mathbf{v}$ if it does not lie on any wall for $\mathbf{v}$.
\end{Def}
It is worth recalling that, given a polarization $H\in\mathrm{Amp}(Y)$ and the Mukai vector $\mathbf{v}$ of an $H$-Gieseker semistable sheaf, there exists a chamber $\ensuremath{\mathbb{C}}C$ for $\mathbf{v}$, the \emph{Gieseker chamber}, where the set of $\sigma$-semistable objects of class $\mathbf{v}$ coincides with the set of $H$-Gieseker semistable sheaves \cite[Prop. 14.2]{Bri08}.
\subsection{Moduli stacks and moduli spaces}
For $\sigma \in \mathop{\mathrm{Stab}}\nolimits^\mathbf{d}agger(Y)$,
let $\ensuremath{\mathcal M}_\sigma(\mathbf{v})$ be the moduli stack of $\sigma$-semistable objects $E$
with $\mathbf{v}(E)=\mathbf{v}$ and $\ensuremath{\mathcal M}_\sigma^s(\mathbf{v})$
the open substack of $\sigma$-stable
objects. That is, for a scheme $T$,
$\ensuremath{\mathcal M}_{\sigma}(\mathbf{v})(T)$ is the category of $\ensuremath{\mathcal E} \in\ensuremath{\mathbb{D}}b(Y \times T)$ such that $\ensuremath{\mathcal E}$ is relatively perfect over $T$ (\cite[Def. 2.1.1]{Lie}) and $\ensuremath{\mathcal E}_t$ are $\sigma$-semistable objects
with $\mathbf{v}(\ensuremath{\mathcal E}_t)=\mathbf{v}$ for all $t \in T$.
By (the proof of) \cite[Thm. 4.12]{Tod08},
$\ensuremath{\mathcal M}_\sigma(\mathbf{v})$ is an Artin stack of finite type, which is an open substack of Lieblich's ``mother of all moduli spaces'' $\ensuremath{\mathcal M}$, which parametrizes families $\ensuremath{\mathcal E}\in\ensuremath{\mathbb{D}}b(Y\times T)$ with $\ensuremath{\mathcal E}$ relatively perfect over $T$ and such that $\mathop{\mathrm{Ext}}\nolimits^i(\ensuremath{\mathcal E}_t,\ensuremath{\mathcal E}_t)=0$ for all $i<0$ and $t\in T$ (see \cite{Lie}).
We say two objects $E_1$ and $E_2$ in $\ensuremath{\mathcal M}_\sigma(\mathbf{v})(k)$ are S-equivalent if they have the same JH-filtration factors. For $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(Y)$ generic with respect to $\mathbf{v}$, $\ensuremath{\mathcal M}_{\sigma}(\mathbf{v})$ (resp. $\ensuremath{\mathcal M}_\sigma^s(\mathbf{v})$) admits a projective coarse moduli scheme $M_\sigma(\mathbf{v})$ (resp. $M_\sigma^s(\mathbf{v})$), which parametrizes S-equivalence
classes of $\sigma$-semistable (resp. $\sigma$-stable) objects $E$ with $\mathbf{v}(E)=\mathbf{v}$ (see \cite{BM14a} for the K3 case and \cite[sect. 9]{Nue14b},\cite{Yos16b} for the Enriques case).
It was shown in \cite{BM14a} and \cite{Yos16b} that there exists an autoequivalence $\ensuremath{\mathbb{P}}hi\in\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}b(Y))$ inducing an isomorphism between $\ensuremath{\mathcal M}_\sigma(\mathbf{v})$ and the stack $\ensuremath{\mathcal M}_H(\mathbf{v}')$ of $H$-Gieseker semistable sheaves of Mukai vector $\mathbf{v}'=\ensuremath{\mathbb{P}}hi(\mathbf{v})$ for some polarization $H$ generic with respect to $\mathbf{v}'$. Thus $$\ensuremath{\mathcal M}_\sigma(\mathbf{v})\cong[Q(\mathbf{v}')^{ss}/\mathop{\mathrm{GL}}\nolimits_N],$$ where $Q(\mathbf{v}')$ is the open subscheme of the Quot scheme parametrizing quotients $$\lambda:\ensuremath{\mathcal O}_Y(-mH)^{\mathop{\ord(\omega_S)}\nolimitsplus N}\mathop{\ord(\omega_S)}\nolimitsnto E$$ such that
\mathbf{b}egin{enumerate}
\item $\mathbf{v}(E)=\mathbf{v}'$;
\item $\lambda$ induces an isomorphism $H^0(Y,\ensuremath{\mathcal O}_Y^{\mathop{\ord(\omega_S)}\nolimitsplus N})\cong H^0(Y,E(mH))$;
\item $H^i(Y,E(mH))=0$ for $i>0$,
\end{enumerate}
where $m\gg 0$ is fixed and $Q(\mathbf{v}')^{ss}$ (resp. $Q(\mathbf{v}')^s$) is the open sublocus where $E$ is semistable (resp. stable). It follows that \mathbf{b}egin{equation}\label{eqn:DimensionOfStackAndCoarse}\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_\sigma^s(\mathbf{v})=\mathop{\mathrm{dim}}\nolimits Q(\mathbf{v}')^{s}-\mathop{\mathrm{dim}}\nolimits\mathop{\mathrm{GL}}\nolimits_N=(\mathop{\mathrm{dim}}\nolimits Q(\mathbf{v}')^s-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathbb{P}}GL_N)-1=\mathop{\mathrm{dim}}\nolimits M_\sigma^s(\mathbf{v})-1,\end{equation} since $\ensuremath{\mathbb{P}}GL_N$ acts freely on $Q(\mathbf{v}')^s$.
For $L \in \ensuremath{\mathbb{N}}S(Y)$, we let $\ensuremath{\mathcal M}_\sigma(\mathbf{v},L)$ be the substack of $\ensuremath{\mathcal M}_\sigma(\mathbf{v})$
consisting of $E$ with $c_1(E)=L$.
We define $\ensuremath{\mathcal M}_\sigma^s(\mathbf{v},L)$,
$M_\sigma(\mathbf{v},L)$ and $M_\sigma^s(\mathbf{v},L)$ similarly. When the determinant is irrelevant, we drop $L$ from the notation. In particular, as $\ensuremath{\mathbb{N}}S(\mathbf{w}idetilde{X})=\ensuremath{\mathbb{N}}um(\mathbf{w}idetilde{X})$, we drop it from the notation in the K3 case. On the other hand, in the Enriques case, when $Y=X$, $$\ensuremath{\mathcal M}_\sigma(\mathbf{v})=\ensuremath{\mathcal M}_\sigma(\mathbf{v},L)\mathbf{b}igsqcup\ensuremath{\mathcal M}_\sigma(\mathbf{v},L+K_X).$$
\subsection{Some properties of moduli spaces}We recall here what is known about the moduli spaces $\ensuremath{\mathcal M}_\sigma(\mathbf{v})$ and their coarse moduli spaces $M_\sigma(\mathbf{v})$. Before we get into details for K3 surfaces and Enriques surfaces individually, let us point out that using the definition of inducing stability conditions in \cref{subsubsec:inducing stability} we can relate their respective moduli spaces. Indeed, for $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(X)$, $\mathbf{v}\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$, and $\mathbf{w}\in\ensuremath{\mathbb{H}}al(\mathbf{w}idetilde{X},\ensuremath{\mathbb{Z}})$, there are morphisms of stacks
$$\ensuremath{\mathcal M}_{\mathbf{v}arpi^*(\sigma)}(\mathbf{w})\to\ensuremath{\mathcal M}_\sigma(\mathbf{v}arpi_*(\mathbf{w})),\;\;\;E\mapsto\mathbf{v}arpi_*(E)$$
$$\ensuremath{\mathcal M}_\sigma(\mathbf{v})\to\ensuremath{\mathcal M}_{\mathbf{v}arpi^*(\sigma)}(\mathbf{v}arpi^*\mathbf{v}),\;\;\;E\mapsto\mathbf{v}arpi^*(E).$$ Only the second of these requires comment. For the stability condition $\mathbf{v}arpi_*(\mathbf{v}arpi^*(\sigma))$, we have $Z_{\mathbf{v}arpi_*(\mathbf{v}arpi^*(\sigma))}=2Z_\sigma$ and $\ensuremath{\mathbb{P}}P_{\mathbf{v}arpi_*(\mathbf{v}arpi^*(\sigma))}(\phi)=\ensuremath{\mathbb{P}}P_\sigma(\phi)$, so in particular $E\in\ensuremath{\mathbb{D}}b(X)$ is $\sigma$-semistable if and only if $\mathbf{v}arpi^*(E)$ is $\mathbf{v}arpi^*(\sigma)$-semistable.
\subsubsection{$Y=\mathbf{w}idetilde{X}$ is a K3 surface} The following result gives precise conditions on nonemptiness of the moduli spaces $M_\sigma(\mathbf{v})$ in the K3 case and is proven in \cite{BM14a} and \cite{BM14b}.
\mathbf{b}egin{Thm}[{\cite[Thm. 2.15]{Bri08}}]\label{thm:nNnemptinessModuliK3}
Let $\mathbf{w}idetilde{X}$ be a K3 surface over $k$, and let $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(\mathbf{w}idetilde{X})$ be a generic stability condition with respect to $\mathbf{v}=m\mathbf{v}_0\in\ensuremath{\mathbb{H}}al(\mathbf{w}idetilde{X},\ensuremath{\mathbb{Z}})$, where $\mathbf{v}_0$ is primitive and $m>0$.
\mathbf{b}egin{enumerate}
\item The coarse moduli space $M_\sigma(\mathbf{v})$ is non-empty if and only if $\mathbf{v}_0^2\geq-2$.
\item Either $\mathop{\mathrm{dim}}\nolimits M_\sigma(\mathbf{v})=\mathbf{v}^2+2$ and $M_\sigma^s(\mathbf{v})\neq\mathbf{v}arnothing$, or $m>1$ and $\mathbf{v}_0^2\leq0$.
\item When $\mathbf{v}_0^2>0$, $M_\sigma(\mathbf{v})$ is a normal irreducible projective variety with $\ensuremath{\mathbb{Q}}$-factorial singularities.
\end{enumerate}
\end{Thm}
\subsubsection{$Y=X$ is an Enriques surface}
The following results follow from \cite{Nue14a}.
Since $\ensuremath{\mathcal M}_\sigma(\mathbf{v})$ is isomorphic to a moduli stack of Gieseker semistable
sheaves \cite{Yos16b}, they also follow from corresponding results for
Gieseker semistable sheaves \cite{Yos14,Yos16a}. Since the Enriques case is more subtle, we break the statement into smaller pieces, beginning with the primitive case:
\mathbf{b}egin{Thm}[{cf. \cite{Nue14b},\cite[Thm. 3.1]{Yos14},\cite[Theorem 4.10]{Yos16a}}]\label{Thm:exist:nodal}
Let $X$ be an Enriques surface over $k$, and let $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(X)$ be a generic stability condition with respect to primitive $\mathbf{v}\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$. Then for $L\in\ensuremath{\mathbb{N}}S(X)$ such that $[L\mathop{\mathrm{mod}}\nolimits K_X]=c_1(\mathbf{v})$, $M_\sigma(\mathbf{v},L) \ne \mathbf{v}arnothing$ if and only if
\mathbf{b}egin{enumerate}
\item
$\ell(\mathbf{v})=1$ and $\mathbf{v}^2\geq -1$ or
\item
$\ell(\mathbf{v})=2$ and $\mathbf{v}^2>0$ or
\item
$\ell(\mathbf{v})=2$, $\mathbf{v}^2=0$, and $L \equiv \frac{r}{2}K_X \pmod 2$ or
\item\label{enum:SphericalNonemptiness}
$\mathbf{v}^2=-2$,
$L \equiv D+\frac{r}{2}K_X \pmod 2$, where
$D$ is a nodal cycle, that is, $(D^2)=-2$ and $H^1({\mathcal O}_D)=0$.
\end{enumerate}
Furthermore, when non-empty,
\mathbf{b}egin{enumerate}
\item
$M_\sigma(\mathbf{v},L)$ is connected, and
\item
if $X$ is unnodal or $\mathbf{v}^2\geq 4$,
then $M_\sigma(\mathbf{v},L)$ is irreducible.
\end{enumerate}
\end{Thm}
\cref{enum:SphericalNonemptiness} in Theorem \ref{Thm:exist:nodal} only occurs when $X$ contains a smooth rational curve $C$, which necessarily satisfies $C^2=-2$, in which case $X$ is called a \emph{nodal} Enriques surface. An Enriques surface containing no smooth rational curve is called \emph{unnodal}. A Mukai vector $\mathbf{v}$ as in \ref{enum:SphericalNonemptiness} and an object $E\in M_\sigma^s(\mathbf{v})$ are called \emph{spherical}, and it can be shown that the existence of a spherical object on $X$ is equivalent to $X$ being nodal \cite{Kim94}. Similarly, an object $E\in M_\sigma^s(\mathbf{v})$ with $\mathbf{v}^2=-1$ is called \emph{exceptional}.
For non-primitive Mukai vectors, we phrase the results in terms of the moduli stacks, as it is in this form that we will use them. We state the positive square case first:
\mathbf{b}egin{Prop}[{cf. \cite[Lem. 1.5, Cor. 1.6]{Yos16a}, \cite[Thm. 8.2]{Nue14b}}]\label{prop:pss}
Let $\mathbf{v}\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ be a Mukai vector with $\mathbf{v}^2 >0$, and let $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(X)$ be generic with respect to $\mathbf{v}$. For $L\in\ensuremath{\mathbb{N}}S(X)$ such that $[L\mathop{\mathrm{mod}}\nolimits K_X]=c_1(\mathbf{v})$, we set
\mathbf{b}egin{equation*}
\ensuremath{\mathcal M}_\sigma(\mathbf{v},L)^{pss}:=\Set{E \in \ensuremath{\mathcal M}_\sigma(\mathbf{v},L) \ | \
\text{$E$ is properly $\sigma$-semistable}}.
\end{equation*}
Then
\mathbf{b}egin{enumerate}
\item
$\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_\sigma(\mathbf{v},L)^{pss} \leq \mathbf{v}^2-1$.
Moreover $\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_\sigma(\mathbf{v},L)^{pss} \leq \mathbf{v}^2-2$ unless
$\mathbf{v}=2\mathbf{v}_0$ with $\mathbf{v}_0^2=1$.
\item
$\ensuremath{\mathcal M}_\sigma(\mathbf{v},L)^{s} \ne \mathbf{v}arnothing$, $\ensuremath{\mathcal M}_\sigma(\mathbf{v},L)$ is reduced, and
$\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_\sigma(\mathbf{v},L)=\mathbf{v}^2$.
\item
$\ensuremath{\mathcal M}_\sigma(\mathbf{v},L)$ is normal, unless
\mathbf{b}egin{enumerate}
\item $\mathbf{v}=2\mathbf{v}_0$ with $\mathbf{v}_0^2=1$ and
$L \equiv \frac{r}{2}K_X \pmod 2$, or
\item $\mathbf{v}^2=2$.
\end{enumerate}
\end{enumerate}
\end{Prop}
The statements in Proposition \ref{prop:pss} remain true for the coarse moduli spaces with dimensions adjusted in accordance with \eqref{eqn:DimensionOfStackAndCoarse}. In particular, $\mathop{\mathrm{dim}}\nolimits M_\sigma(\mathbf{v},L)=\mathbf{v}^2+1$.
For Mukai vectors with $\mathbf{v}^2\leq 0$ it is particularly useful to use the moduli stacks for dimension estimates as we now see:
\mathbf{b}egin{Prop}[cf. {\cite[Proposition 1.9]{Yos16a}}]\label{prop:isotropic}
Let $\mathbf{u}\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ be an isotropic and primitive Mukai vector, and let $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(X)$ be generic with respect to $\mathbf{u}$.
\mathbf{b}egin{enumerate}
\item
If $\ensuremath{\mathcal M}_\sigma^s(m\mathbf{u}) \ne \mathbf{v}arnothing$, then
$m=1,2$.
\item
$\ensuremath{\mathcal M}_\sigma^s(2\mathbf{u},L) \ne \mathbf{v}arnothing$
if and only if $\ell(\mathbf{u})=1$ and $L \equiv 0 \pmod 2$.
Moreover
$$
\ensuremath{\mathcal M}_\sigma^s(2\mathbf{u})=\Set{\mathbf{v}arpi_*(F) \ | \ F \in
\ensuremath{\mathcal M}_{\mathbf{v}arpi^*(\sigma)}^s(\mathbf{v}arpi^*\mathbf{u}),\;
\iota^*(F) \not \cong F }.
$$
In particular, $\ensuremath{\mathcal M}_\sigma^s(2\mathbf{u})$ is smooth of
dimension 1.
\item
$\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_\sigma(m\mathbf{u}) \leq\lfloor\frac{m\ell(\mathbf{u})}{2}\rfloor$.
\end{enumerate}
\end{Prop}
Finally, we consider the negative square case:
\mathbf{b}egin{Lem}\label{Lem:dimension negative}
Let $\mathbf{w}\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ be a primitive spherical or exceptional class, and let $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(X)$ be a generic stability condition with respect to $\mathbf{w}$. Then
$$\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma}(m\mathbf{w})= \mathbf{b}egin{cases}
-m^2, & \text{ if }\mathbf{w}^2=-2,\\
-\frac{m^2}{2}, & \text{ if }\mathbf{w}^2=-1,m\equiv 0\pmod 2,\\
-\frac{m^2+1}{2}, & \text{ if }\mathbf{w}^2=-1,m\equiv 1\pmod 2.\\
\end{cases}$$
\end{Lem}
\mathbf{b}egin{proof}
By \cite[Proposition 9.9]{Nue14b}, in case $\mathbf{w}^2=-2$, the coarse moduli space $M_{\sigma}(m\mathbf{w})$ consists of a single point, $S^{\mathop{\ord(\omega_S)}\nolimitsplus m}$, where $S$ is the unique $\sigma$-stable spherical object of class $\mathbf{w}$. As $\mathop{\mathrm{Aut}}\nolimits(S^{\mathop{\ord(\omega_S)}\nolimitsplus m})=\mathop{\mathrm{GL}}\nolimits_m(k)$, we get $$\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma}(m\mathbf{w})=\mathop{\mathrm{dim}}\nolimits M_{\sigma}(m\mathbf{w})-\mathop{\mathrm{dim}}\nolimits\mathop{\mathrm{Aut}}\nolimits(S^{\mathop{\ord(\omega_S)}\nolimitsplus m})=-m^2.$$ If $\mathbf{w}^2=-1$, then by \cite[Lemma 9.2]{Nue14b} the coarse moduli space $M_{\sigma}(m\mathbf{w})$ consists of the $m+1$ points $\Set{E^{\mathop{\ord(\omega_S)}\nolimitsplus i}\mathop{\ord(\omega_S)}\nolimitsplus E(K_X)^{\mathop{\ord(\omega_S)}\nolimitsplus m-i}}_{i=0}^m$, where $E$ and $E(K_X)$ are the two $\sigma$-stable exceptional objects of class $\mathbf{w}$. As $E$ and $E(K_X)$ are both exceptional, $\mathop{\mathrm{Aut}}\nolimits(E^{\mathop{\ord(\omega_S)}\nolimitsplus i}\mathop{\ord(\omega_S)}\nolimitsplus E(K_X)^{\mathop{\ord(\omega_S)}\nolimitsplus m-i})=\mathop{\mathrm{GL}}\nolimits_i(k)\times \mathop{\mathrm{GL}}\nolimits_{m-i}(k)$, of dimension $i^2+(m-i)^2$. But then
\mathbf{b}egin{align}
\mathbf{b}egin{split}\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma}(m\mathbf{w})&=\max_{0\leq i\leq m}\mathop{\mathrm{dim}}\nolimits_{E^{\mathop{\ord(\omega_S)}\nolimitsplus i}\mathop{\ord(\omega_S)}\nolimitsplus E(K_X)^{\mathop{\ord(\omega_S)}\nolimitsplus m-i}} M_{\sigma}(m\mathbf{w})-\mathop{\mathrm{dim}}\nolimits\mathop{\mathrm{Aut}}\nolimits(E^{\mathop{\ord(\omega_S)}\nolimitsplus i}\mathop{\ord(\omega_S)}\nolimitsplus E(K_X)^{\mathop{\ord(\omega_S)}\nolimitsplus m-i})\\
&=-\mathop{\mathrm{min}}\nolimits_{0\leq i\leq m}\left(i^2+(m-i)^2\right),\\
\end{split}
\end{align}
which gives the dimension as claimed.
\end{proof}
\subsection{Line bundles on moduli spaces}\label{subsec:LineBundles}
We again let $Y=X$ or $\ensuremath{\mathbf{w}idetilde{X}}$, and we recall the definition of the Donaldson-Mukai morphism. Fix a Mukai vector $\mathbf{v}\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})$, a stability condition $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(Y)$, and a universal family $\ensuremath{\mathcal E}\in\ensuremath{\mathbb{D}}b(M_\sigma(\mathbf{v},L)\times Y)$. Then we have the following definition.
\mathbf{b}egin{Def} Set $K(Y)_\mathbf{v}:=\Set{x\in K(Y)\ |\ \langle\mathbf{v}(x),\mathbf{v}\rangle=0}$. The Donaldson-Mukai morphism from $K(Y)_\mathbf{v}$ to $\ensuremath{\mathbb{P}}ic(M_\sigma(\mathbf{v},L))$ is defined by:
\mathbf{b}egin{equation}\label{eqn:DonaldsonMukai}
\mathbf{b}egin{matrix}
\theta_{\mathbf{v},\sigma}:& K(Y)_\mathbf{v} & \to & \ensuremath{\mathbb{P}}ic(M_{\sigma}(\mathbf{v},L))\\
& x & \mapsto & \mathbf{d}et (p_{M_\sigma(\mathbf{v},L)!}(\ensuremath{\mathcal E} \mathop{\ord(\omega_S)}\nolimitstimes p_Y^*(x^{\mathbf{v}ee}))).
\end{matrix}
\end{equation}
More generally, for a scheme $S$ and a family $\ensuremath{\mathcal E}\in\ensuremath{\mathbb{D}}b(S\times Y)$ over $S$ of objects in $M_\sigma(\mathbf{v},L)$, there is a Donaldson-Mukai morphism $\theta_\ensuremath{\mathcal E}:K(Y)_\mathbf{v}\to\ensuremath{\mathbb{P}}ic(S)$ associated to $\ensuremath{\mathcal E}$, defined as in \eqref{eqn:DonaldsonMukai}, which satisfies $\theta_\ensuremath{\mathcal E}=\lambda_\ensuremath{\mathcal E}^*\theta_{\mathbf{v},\sigma}$, where $\lambda_\ensuremath{\mathcal E}:S\to M_{\sigma}(\mathbf{v},L)$ is the associated classifying map. See \cite[Section 8.1]{HL10} for more details.
Setting
\mathbf{b}egin{equation}\label{eqn:def of xi}
\xi_\sigma:=\Im\frac{ \mho_\sigma}{\langle \mho_\sigma, \mathbf{v} \rangle}
\in \mathbf{v}^\perp,
\end{equation} we define the numerical divisor class
\mathbf{b}egin{equation}\label{eqn:def of ell(sigma)}
\ell_\sigma:=\theta_{\mathbf{v},\sigma}(\xi_\sigma)\in\ensuremath{\mathbb{N}}um(M_\sigma(\mathbf{v},L)),
\end{equation}
where we abuse notation by also using $\theta_{\mathbf{v},\sigma}$ for the extension of \eqref{eqn:DonaldsonMukai} to the Mukai lattice.
\end{Def}
From \cref{subsec:Walls} it follows that the moduli space $M_\sigma(\mathbf{v},L)$ and $\ensuremath{\mathcal E}$ remain constant when varying $\sigma$ in a chamber for $\mathbf{v}$, so for each chamber $\ensuremath{\mathbb{C}}C$, we get a map $$\ell_{\ensuremath{\mathbb{C}}C}:\ensuremath{\mathbb{C}}C\to\ensuremath{\mathbb{N}}S(M_\ensuremath{\mathbb{C}}C(\mathbf{v},L)),\;\;\sigma\mapsto\ell_\sigma,$$ where the notation $M_\ensuremath{\mathbb{C}}C(\mathbf{v},L)$ denotes the fixed moduli space. By the proof of the projectivity of $M_\sigma(\mathbf{v},L)$ in \cite{Yos16b} with the argument
in \cite{MYY14} or \cite{BM14a}, we get a generalization of
\cite[Theorem 10.3]{Nue14b} and can say even more:
\mathbf{b}egin{Thm}[{cf. \cite[Theorem 4.1]{BM14a} in the K3 case}]\label{Thm:NefAmpleDivisor}
For a generic $\sigma$,
$\ell_\sigma=\theta_{\mathbf{v},\sigma}(\xi_\sigma)$ is an ample divisor on $M_\sigma(\mathbf{v},L)$.
\end{Thm}
\subsection{Wall-crossing and birational transformations} In this last subsection, we recall one more result that will be essential for our study of the connection between crossing walls in $\mathop{\mathrm{Stab}}\nolimitsd(Y)$ and birational transformations of the moduli space $M_\sigma(\mathbf{v},L)$. Let $\mathbf{v}\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})$ with $\mathbf{v}^2>0$, and let $\ensuremath{\mathcal W}$ be a wall for $\mathbf{v}$. We say $\sigma_0\in\ensuremath{\mathcal W}$ is \emph{generic} if it does not belong to any other wall, and we denote by $\sigma_+$ and $\sigma_-$ two generic stability conditions nearby $\ensuremath{\mathcal W}$ in two opposite adjacent chambers. Then all $\sigma_\pm$-semistable objects are still $\sigma_0$-semistable, and thus the universal families $\ensuremath{\mathcal E}^\pm$ on $M_{\sigma_\pm}(\mathbf{v})\times Y$ induce nef divisors $\ell_{\sigma_0,\pm}$ on $M_{\sigma_\pm}(\mathbf{v})$ by $$\ell_{\sigma_0,\pm}:=\theta_{\mathbf{v},\sigma_\pm}(\xi_{\sigma_0}).$$ The main result about $\ell_{\sigma_0,\pm}$ is the following:
\mathbf{b}egin{Thm}[{\cite[Thm. 1.4(a)]{BM14a},\cite[Thm. 11.3]{Nue14b}}]\label{Thm:WallContraction} Let $\mathbf{v}\in\ensuremath{\mathbb{H}}al(Y,\ensuremath{\mathbb{Z}})$ satisfy $\mathbf{v}^2>0$, and let $\sigma_\pm$ be two stability conditions in opposite chambers nearby a generic $\sigma_0\in\ensuremath{\mathcal W}$. Then:
\mathbf{b}egin{enumerate}
\item The divisors $\ell_{\sigma_0,\pm}$ are semiample on $M_{\sigma_\pm}(\mathbf{v})$. In particular, they induce contractions $$\pi^\pm:M_{\sigma_\pm}(\mathbf{v})\to\mathop{\ord(\omega_S)}\nolimitsverline{M}_\pm,$$ where $\mathop{\ord(\omega_S)}\nolimitsverline{M}_\pm$ are normal projective varieties. When $Y=\ensuremath{\mathbf{w}idetilde{X}}$, the divisors $\ell_{\sigma_0,\pm}$ are big so that $\pi^\pm$ are birational and $\mathop{\ord(\omega_S)}\nolimitsverline{M}_\pm$ are irreducible.
\item For any curve $C\subset M_{\sigma_\pm}(\mathbf{v})$, $\ell_{\sigma_0,\pm}.C=0$ if and only if the two objects $\ensuremath{\mathcal E}_c^\pm$ and $\ensuremath{\mathcal E}_{c'}^\pm$ corresponding to two general points $c,c'\in C$ are S-equivalent. In particular, the curves contracted by $\pi^\pm$ are precisely the curves of objects that are S-equivalent with respect to $\sigma_0$.
\end{enumerate}
\end{Thm}
This theorem leads us to the following definition describing a wall $\ensuremath{\mathcal W}$ in terms of the geometry of the induced morphisms $\pi^\pm$.
\mathbf{b}egin{Def}
We call a wall $\ensuremath{\mathcal W}$:
\mathbf{b}egin{enumerate}
\item a \emph{fake wall}, if there are no curves contracted by $\pi^\pm$;
\item a \emph{totally semistable wall}, if $M_{\sigma_0}^s(\mathbf{v})=\mathbf{v}arnothing$;
\item a \emph{flopping wall}, if we can identify $\mathop{\ord(\omega_S)}\nolimitsverline{M}_+=\mathop{\ord(\omega_S)}\nolimitsverline{M}_-$ and the induced map $M_{\sigma_+}(\mathbf{v})\mathbf{d}ashrightarrow M_{\sigma_-}(\mathbf{v})$ induces a flopping contraction;
\item a \emph{divisorial wall}, if the morphisms $\pi^\pm$ are both divisorial contractions;
\item a \emph{$\ensuremath{\mathbb{P}}^1$-wall}, if the morphisms $\pi^\pm$ are both $\ensuremath{\mathbb{P}}^1$-fibrations.
\end{enumerate}
\end{Def}
A non-fake wall $\ensuremath{\mathcal W}$ such that $\mathop{\mathrm{codim}}\nolimits(M_{\sigma_\pm}(\mathbf{v})\mathbf{b}ackslash M^s_{\sigma_0}(\mathbf{v}))\geq 2$ is necessarily a flopping wall by \cite[Thm. 1.4(b)]{BM14a} and \cite[Thm. 11.3]{Nue14b}.
\section{Dimension estimates of substacks of Harder-Narasimhan filtrations}\label{sec:DimensionsOfHarderNarasimhan}
Having finished our review of known tools for studying wall-crossing, we develop here the first tool we will use to classify the behavior induced by crossing a wall. In this section, we will denote by $Y$ any smooth projective variety satisfying openness of stability and boundedness of Bridgeland semistable objects as in \cite[Lemma 3.4]{Tod08}.
For Mukai vectors $\mathbf{v}_1,\mathbf{v}_2,\dots,\mathbf{v}_s$ with the same phase $\phi$ with respect to $\sigma$,
let $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)$ be the stack of filtrations:
for a scheme $T$,
\begin{equation}\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)(T):=\Set{0 \subset \ensuremath{\mathcal F}_1 \subset \cdots \subset \ensuremath{\mathcal F}_s\ | \ \ensuremath{\mathcal F}_i/\ensuremath{\mathcal F}_{i-1} \in \ensuremath{\mathcal M}_{\sigma}(\mathbf{v}_i)(T), 1\leq i\leq s,\ensuremath{\mathcal F}_s\in\ensuremath{\mathcal M}_{\sigma}(\mathbf{v})(T)},
\end{equation}
where $\mathbf{v}=\mathbf{v}_1+\cdots+\mathbf{v}_s$. Then we have the following result.
\begin{Prop}With the notation as above, let $Y$ be a smooth projective variety satisfying boundedness and openness of stability. Then $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)$ is an Artin stack of finite type.
\end{Prop}
\begin{proof}
We prove the proposition by induction on $s$. Assuming that $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})$ is an Artin stack of finite type,
we shall prove that
$\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1},\mathbf{v}_s)$ is also an Artin stack of finite type.
We set $\mathbf{v}:=\sum_{i=1}^s \mathbf{v}_i$.
It is sufficient to show that
\begin{enumerate}
\item\label{enum:MorphismRepresentable} the natural morphism
$\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1},\mathbf{v}_s) \to
\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1}) \times \ensuremath{\mathcal M}_{\sigma}(\mathbf{v})$
is representable by schemes, and
\item\label{enum:DiagonalRepresentable} the diagonal morphism $\Delta:\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)\to\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)\times\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)$ is representable.
\end{enumerate}
Indeed, if we take a smooth surjective morphism $M\to\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})\times\ensuremath{\mathcal M}_\sigma(\mathbf{v})$ from a scheme $M$ of finite type, then we get a smooth surjective morphism $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)\times_{\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})\times\ensuremath{\mathcal M}_\sigma(\mathbf{v})}M\to\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)$, where $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)\times_{\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})\times\ensuremath{\mathcal M}_\sigma(\mathbf{v})}M$ is a finite type scheme by \ref{enum:MorphismRepresentable}. The statement in \ref{enum:DiagonalRepresentable} is simply the other condition in the definition of an Artin stack \cite[Def. 8.1.4]{Ols16}.
We prove \ref{enum:MorphismRepresentable} first. Let $T$ be a scheme and
$T \to \ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1}) \times \ensuremath{\mathcal M}_{\sigma}(\mathbf{v})$
a morphism. Then
we have a family of filtrations
\begin{equation}
0 \subset \ensuremath{\mathcal F}_1 \subset \cdots \subset \ensuremath{\mathcal F}_{s-1}
\end{equation}
on $T \times Y$
such that $\ensuremath{\mathcal F}_i/\ensuremath{\mathcal F}_{i-1}$ are relatively perfect over $T$,
$(\ensuremath{\mathcal F}_i/\ensuremath{\mathcal F}_{i-1})_t \in \ensuremath{\mathcal M}_{\sigma}(\mathbf{v}_i)$ for all $t \in T$
and a family of objects $\ensuremath{\mathcal F}_s$ such that
$\ensuremath{\mathcal F}_s$ is relatively perfect over $T$ and
$(\ensuremath{\mathcal F}_s)_t \in \ensuremath{\mathcal M}_{\sigma}(\mathbf{v})$.
By \cite[Prop. 1.1]{Ina02},
there is a scheme $p:Q \to T$
which represents the functor $\mathcal{Q}:(Sch/T) \to (Sets)$ defined by
\begin{equation}
\mathcal{Q}(U \overset{\varphi}{\to} T)=
\{f \mid f:(\varphi \times 1_Y)^*(\ensuremath{\mathcal F}_{s-1}) \to (\varphi \times 1_Y)^*(\ensuremath{\mathcal F}_s)\}.
\end{equation}
Let
$\xi:(p \times 1_Y)^*(\ensuremath{\mathcal F}_{s-1}) \to (p \times 1_Y)^*(\ensuremath{\mathcal F}_s)$
be the universal family of homomorphisms.
Let $Q^0$ be the subscheme
of $Q$ such that $\mathrm{Cone}(\xi_q)=\mathrm{Cone}(\xi)_q\in\ensuremath{\mathcal A}_\sigma$ for all $q\in Q^0$, which is open by the Open Heart Property \cite[Theorem 3.8]{Tod08},\cite[Theorem 3.3.2]{AP06}. It follows that $\xi_q$ is injective in $\ensuremath{\mathcal A}_{\sigma}$ for all $q \in Q^0$.
Then on $Q^0$ we have a family of filtrations
\begin{equation}
0 \subset \ensuremath{\mathcal F}_1 \subset \cdots \subset \ensuremath{\mathcal F}_s.
\end{equation}
Therefore
\begin{equation}
Q^0 \cong \ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)
\times_{\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1}) \times \ensuremath{\mathcal M}_{\sigma}(\mathbf{v})}
T.
\end{equation}
In particular, the morphism $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)\to\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})\times\ensuremath{\mathcal M}_\sigma(\mathbf{v})$ is representable by schemes, as claimed.
Now let us prove \ref{enum:DiagonalRepresentable}. By \cite[Lem. 8.1.8]{Ols16}, it is equivalent to showing that for every scheme $T$ and two families of filtrations $\ensuremath{\mathcal F},\ensuremath{\mathcal F}'\in\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)(T)$, the sheaf $\mathop{\mathrm{Isom}}\nolimits_{\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)/T}(\ensuremath{\mathcal F},\ensuremath{\mathcal F}')$ is an algebraic space. So let $T$ be a scheme and consider two families
\begin{equation}
\begin{split}
\ensuremath{\mathcal F}: & 0 \subset \ensuremath{\mathcal F}_1 \subset\cdots \subset \ensuremath{\mathcal F}_s\\
\ensuremath{\mathcal F}': & 0 \subset \ensuremath{\mathcal F}_1'\subset \cdots \subset \ensuremath{\mathcal F}_s'
\end{split}
\end{equation}
of relatively perfect filtrations. An isomorphism $\phi:\ensuremath{\mathcal F} \to \ensuremath{\mathcal F}'$ is
an isomorphism $\ensuremath{\mathcal F}_s \to \ensuremath{\mathcal F}_s'$ as families in $\ensuremath{\mathcal M}_\sigma(\mathbf{v})(T)$ which preserves the filtration. But $\phi$ preserves the filtration if and only if the induced maps
$\ensuremath{\mathcal F}_i \to \ensuremath{\mathcal F}_s'/\ensuremath{\mathcal F}_i'$ are the 0-map for all $0<i<s$.
This is a closed condition by \cite[Prop. 1.1]{Ina02}, and if $\phi(\ensuremath{\mathcal F}_i) \subseteq \ensuremath{\mathcal F}_i'$, then we must in fact have equality as they are both families of $\sigma$-semistable
objects with the same Mukai vector. Hence the sheaf $\mathop{\mathrm{Isom}}\nolimits_{\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)/T}(\ensuremath{\mathcal F},\ensuremath{\mathcal F}')$ is parametrized by a closed algebraic subspace of the algebraic space
$\mathop{\mathrm{Isom}}\nolimits_{\ensuremath{\mathcal M}_\sigma(\mathbf{v})/T}(\ensuremath{\mathcal F}_s,\ensuremath{\mathcal F}_s')$, which shows that $\mathop{\mathrm{Isom}}\nolimits_{\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)/T}(\ensuremath{\mathcal F},\ensuremath{\mathcal F}')$ is an algebraic space, as required.
\end{proof}
We have a natural morphism
\begin{equation*}
\begin{matrix}
\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)&\longrightarrow&\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})\times\ensuremath{\mathcal M}_{\sigma}(\mathbf{v}_s)\\
(0\subset\ensuremath{\mathcal F}_1\subset\cdots\subset\ensuremath{\mathcal F}_s)&\longmapsto&((0\subset\ensuremath{\mathcal F}_1\subset\cdots\subset\ensuremath{\mathcal F}_{s-1}),\ensuremath{\mathcal F}_s/\ensuremath{\mathcal F}_{s-1})
\end{matrix},
\end{equation*}
and hence a morphism
$$
\Pi:\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s) \to \prod_{i=1}^s \ensuremath{\mathcal M}_{\sigma}(\mathbf{v}_i).
$$
Let $$\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*:=\Pi^{-1}\left(\prod_{i=1}^s(\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_i)\cap\ensuremath{\mathcal M}_\sigma(\mathbf{v}_i))\right)\subset \ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)$$ be the open substack of
$\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)$ where each $\ensuremath{\mathcal F}_i/\ensuremath{\mathcal F}_{i-1}$ is $\sigma_-$-semistable as well, where $\sigma_-$ is sufficiently close to
$\sigma$. The intersections are taken within the large moduli space $\ensuremath{\mathcal M}$, and as $\ensuremath{\mathcal M}_{\sigma}(\mathbf{v})$ is an open substack of $\ensuremath{\mathcal M}$ for any $\sigma\in\mathop{\mathrm{Stab}}\nolimits(Y)$ by openness of stability, it follows that $\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_i)\cap\ensuremath{\mathcal M}_\sigma(\mathbf{v}_i)$ is open in $\ensuremath{\mathcal M}_\sigma(\mathbf{v}_i)$. Thus $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*$ is indeed well-defined and an open substack of $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)$, as claimed.
While we cannot say much more about the stack of filtrations in general, if we assume that $\mathbf{v}_1,\dots,\mathbf{v}_s$ are the Mukai vectors of the semistable factors of the Harder-Narasimhan
filtration with respect to $\sigma_-$ of an object $E\in\ensuremath{\mathcal M}_{\sigma}(\mathbf{v})$, with $\mathbf{v}=\sum_{i=1}^s\mathbf{v}_i$, then the natural map
\begin{equation}
\begin{matrix}
\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*& \to &\ensuremath{\mathcal M}_{\sigma}(\mathbf{v})\\
(0\subset\ensuremath{\mathcal F}_1\subset\cdots\subset\ensuremath{\mathcal F}_s)&\mapsto&\ensuremath{\mathcal F}_s
\end{matrix}
\end{equation}
is injective with image the substack of $\ensuremath{\mathcal M}_{\sigma}(\mathbf{v})$ parameterizing objects with Harder-Narasimhan filtration factors having Mukai vectors
$\mathbf{v}_1,\dots,\mathbf{v}_s$. In this case, we can say even more and prove the following theorem, whose proof is similar to that of \cite[Prop. 6.2]{Bri12}.
\begin{Thm}\label{Thm:DimensionOfHNFiltrationStack} As above, suppose that $Y$ satisfies boundedness and openness of stability, and let $\mathbf{v}_1,\dots,\mathbf{v}_s$ be the Mukai vectors of the semistable factors of the Harder-Narasimhan filtration with respect to $\sigma_-$ of some object $E\in\ensuremath{\mathcal M}_{\sigma}(\mathbf{v})$, where $\mathbf{v}=\sum_{i=1}^s\mathbf{v}_i$. Suppose further that for any $\sigma\in\mathop{\mathrm{Stab}}\nolimits(Y)$ and $E,E'\in\ensuremath{\mathcal A}_\sigma$ such that $\phi_{\mathop{\mathrm{min}}\nolimits}(E')>\phi_{\max}(E)$ we have $\mathop{\mathrm{Hom}}\nolimits(E,E'[k])=0$ for $2\leq k\leq\mathop{\mathrm{dim}}\nolimits Y$. Then
\begin{equation}\label{eqn:DimensionOfHNFiltraionStack}
\begin{split}
\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*=&
\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})^*+\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_s)+
\langle \mathbf{v}-\mathbf{v}_s,\mathbf{v}_s \rangle\\
=& \sum_{i=1}^s \mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_i)+\sum_{i<j}\langle \mathbf{v}_i,\mathbf{v}_j \rangle.
\end{split}
\end{equation}
\end{Thm}
\begin{proof}
For an atlas $\varphi:T \to \ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})^* \times \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_s)$,
we set
$R:=T \times_{\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})^* \times \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_s)} T$.
Let $0 \subset \ensuremath{\mathcal F}_1 \subset \cdots \subset \ensuremath{\mathcal F}_{s-1}$ and
$\ensuremath{\mathcal E}_s$ be objects on $T \times Y$
corresponding to the morphism $\varphi$.
We note that for all $t\in T$, $\mathop{\mathrm{Hom}}\nolimits((\ensuremath{\mathcal E}_s)_t,(\ensuremath{\mathcal F}_{s-1})_t [k])=0$ for
$k \ne 0,1$. Indeed, for $k\not\in[0,\mathop{\mathrm{dim}}\nolimits Y]$, this is clear as $(\ensuremath{\mathcal E}_s)_t,(\ensuremath{\mathcal F}_{s-1})_t\in\ensuremath{\mathcal A}_{\sigma_-}$, while for $k\in[2,\mathop{\mathrm{dim}}\nolimits Y]$, this follows from the hypothesis of the theorem. We shall stratify $T=\bigcup_i T_i$ by $n(t):=\hom((\ensuremath{\mathcal E}_s)_t,(\ensuremath{\mathcal F}_{s-1})_t)$ so that $n(t)$ is constant on $T_i$ and $n_{|T_i} \ne n_{|T_j}$ for $i \ne j$. It follows that $$\chi((\ensuremath{\mathcal E}_s)_t,(\ensuremath{\mathcal F}_{s-1})_t)=\hom((\ensuremath{\mathcal E}_s)_t,(\ensuremath{\mathcal F}_{s-1})_t)-\hom((\ensuremath{\mathcal E}_s)_t,(\ensuremath{\mathcal F}_{s-1})_t[1])$$ is constant for all $t\in T$, and thus on each $T_i$, both $n(t)$ and $\hom((\ensuremath{\mathcal E}_s)_t,(\ensuremath{\mathcal F}_{s-1})_t[1])$ are constant.
We set $R_{ij}:=R \times_{T \times T} T_i \times T_j$.
Then $R_{ij}=\varnothing $ if $i \ne j$ and we have a stratification
$R=\bigcup_i R_{ii}$. Let $p_i^k:V_i^k \to T_i$ $(k=0,1)$ be the vector bundles associated to
$\mathop{\mathcal Hom}\nolimits_{p_i}((\ensuremath{\mathcal E}_s)_{|T_i},(\ensuremath{\mathcal F}_{s-1})_{|T_i}[k])$,
where $p_i:T_i \times Y \to T_i$ is the projection.
As in \cite[Lemma 6.1]{Bri12}, there is a universal extension $$0\to (p_i^1)^*\ensuremath{\mathcal F}_{s-1}\to \ensuremath{\mathcal F}\to(p_i^1)^*\ensuremath{\mathcal E}_s\to 0$$ over $V_i^1$, and the family $\ensuremath{\mathcal F}$ determines a morphism $q_i:V_i^1\to\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*$ which factors through
$$
V_i^1 \to
T_i \times_{ \ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})^* \times \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_s)}
\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*.
$$
As in \cite[p. 131]{Bri12}, one can show that there is an isomorphism
$$
V_i^1 \times_{\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*} V_i^1 \cong
V_i^0 \times_{R_{ii}} V_i^1
$$
with a commutative diagram
\begin{equation}
\begin{CD}
V_i^0 \times_{R_{ii}} V_i^1 @>>> V_i^1 \times V_i^1 \\
@VVV @VVV\\
R_{ii} @>>> T_i \times T_i.
\end{CD}
\end{equation}
It follows that in this description $\Set{V_i^1\times V_i^1}_i$ provide an atlas for $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*$ with relations given by $\Set{V_i^0\times_{R_{ii}}V_i^1}_i$ just as $\Set{T_i\times T_i}_i$ and $\Set{R_{ii}}_i$, respectively, do for $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})^*\times\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_s)$. Since
\begin{equation}
\begin{split}
& \mathop{\mathrm{dim}}\nolimits V_i^1 \times V_i^1-\mathop{\mathrm{dim}}\nolimits V_i^0 \times_{R_{ii}} V_i^1\\
=& \mathop{\mathrm{dim}}\nolimits T_i \times T_i +2\mathop{\mathrm{rk}} V_i^1-(\mathop{\mathrm{dim}}\nolimits R_{ii}+\mathop{\mathrm{rk}} V_i^1+\mathop{\mathrm{rk}} V_i^0)\\
=& (\mathop{\mathrm{dim}}\nolimits T_i \times T_i-\mathop{\mathrm{dim}}\nolimits R_{ii})+\langle \mathbf{v}-\mathbf{v}_s, \mathbf{v}_s \rangle,
\end{split}
\end{equation}
we get
\begin{equation}
\begin{split}
\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^* =&
\max_i \{\mathop{\mathrm{dim}}\nolimits V_i^1 \times V_i^1-\mathop{\mathrm{dim}}\nolimits V_i^0 \times_{R_{ii}} V_i^1\}\\
=& \max_i \{(\mathop{\mathrm{dim}}\nolimits T_i \times T_i-\mathop{\mathrm{dim}}\nolimits R_{ii})\}+\langle \mathbf{v}-\mathbf{v}_s, \mathbf{v}_s \rangle\\
=&
\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_{s-1})^*+
\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_s)+\langle \mathbf{v}-\mathbf{v}_s, \mathbf{v}_s \rangle.
\end{split}
\end{equation}
This gives the first equation in \eqref{eqn:DimensionOfHNFiltraionStack}, while the second follows by induction.
\end{proof}
We can apply the above theorem to study the locus in $\ensuremath{\mathcal M}_{\sigma_+}(\mathbf{v})$ of strictly $\sigma_0$-semistable objects. Recall our setup: $X$ is an Enriques surface, and for a given $\mathbf{v}\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ with $\mathbf{v}^2>0$, we take a generic stability condition $\sigma_0$ on a wall $\ensuremath{\mathcal W}$ for $\mathbf{v}$ and two generic nearby stability conditions $\sigma_\pm$ in opposite adjacent chambers. By letting $\sigma=\sigma_0$ in the above theorem, we get the following result:
\begin{Prop}\label{Prop:HN codim} Let $X$ be an Enriques surface, and suppose that $\mathbf{v}_1,\dots,\mathbf{v}_s$ are the Mukai vectors of the semistable factors of the Harder-Narasimhan filtration with respect to $\sigma_-$ of an object $E\in\ensuremath{\mathcal M}_{\sigma_+}(\mathbf{v})$, where $\mathbf{v}=\sum_{i=1}^s\mathbf{v}_i$ satisfies $\mathbf{v}^2>0$. Then letting $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^o:=\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*\cap\ensuremath{\mathcal M}_{\sigma_+}(\mathbf{v})$, where the intersection is taken in $\ensuremath{\mathcal M}_{\sigma_0}(\mathbf{v})$, we have
\begin{equation}\label{eqn:HNFiltrationCodim}\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^o
\geq\sum_{i=1}^s \left(\mathbf{v}_i^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_i)\right)+\sum_{i<j}\langle \mathbf{v}_i,\mathbf{v}_j\rangle,
\end{equation}
where the codimension is taken with respect to $\ensuremath{\mathcal M}_{\sigma_+}(\mathbf{v})$.
\end{Prop}
\begin{proof}
In order to apply Theorem \ref{Thm:DimensionOfHNFiltrationStack}, we first observe that the hypothesis of the theorem is met where $\sigma=\sigma_0$ in this case. Indeed, for $E,E'\in\ensuremath{\mathcal A}_{\sigma_0}$ such that $\phi_{\mathop{\mathrm{min}}\nolimits}(E')>\phi_{\max}(E)$, Serre duality gives $$\mathop{\mathrm{Hom}}\nolimits(E,E'[2])=\mathop{\mathrm{Hom}}\nolimits(E',E(K_X))=0,$$ where the last equality follows since $X$ is numerically $K$-trivial so that $\phi_{\max}(E(K_X))=\phi_{\max}(E)$.
Noting that $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^o$ is an open substack of $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*$ by openness of stability, we get that $\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^o\leq\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*$, with equality if and only if the component of $\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*$ of largest dimension contains a $\sigma_+$-semistable object. As $\mathbf{v}^2>0$, $\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_+}(\mathbf{v})=\mathbf{v}^2$ by Proposition \ref{prop:pss}, so
\begin{equation}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^o&=\mathbf{v}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^o\geq\mathbf{v}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal F}(\mathbf{v}_1,\dots,\mathbf{v}_s)^*\\
&=\mathbf{v}^2-\left(\sum_{i=1}^s\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_i)+\sum_{i<j}\langle\mathbf{v}_i,\mathbf{v}_j\rangle\right)\\
&=\sum_{i=1}^s\left(\mathbf{v}_i^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v}_i)\right)+\sum_{i<j}\langle\mathbf{v}_i,\mathbf{v}_j\rangle,
\end{split}
\end{equation}
as claimed.
\end{proof}
While Proposition \ref{Prop:HN codim} is phrased for Enriques surfaces, it applies to any $K$-trivial surface, with \eqref{eqn:HNFiltrationCodim} modified appropriately. In particular, the first author uses it in \cite{Nue18} to study wall-crossing for bielliptic surfaces.
\section{The hyperbolic lattice associated to a wall}\label{sec:HyperbolicLattice}
In order to effectively use the estimates provided by Proposition \ref{Prop:HN codim}, we need to gain some understanding of the Mukai vectors $\mathbf{v}_i$ which appear as Harder-Narasimhan factors of an object $E\in\ensuremath{\mathcal M}_{\sigma_+}(\mathbf{v})$ when we cross a wall $\ensuremath{\mathcal W}$. From their definition, these walls are associated to the existence of another Mukai vector with the same phase as $\mathbf{v}$, so to any wall $\ensuremath{\mathcal W}$ it is natural to consider the set of these ``extra'' classes, as in the following definition. As it turns out, this set will contain all of the Mukai vectors we are interested in.
\begin{PropDef}\label{hyperbolic} To a wall $\ensuremath{\mathcal W}$, let $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}\subset\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ be the set of Mukai vectors $$\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}:=\Set{\mathbf{w}\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})\ |\ \Im\frac{Z(\mathbf{w})}{Z(\mathbf{v})}=0\mbox{ for all }\sigma\in\ensuremath{\mathcal W}}.$$
Then $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ has the following properties:
\begin{enumerate}
\item It is a primitive sublattice of rank two and of signature $(1,-1)$ (with respect to the restriction of the Mukai form).
\item Let $\sigma_+,\sigma_-$ be two sufficiently close and generic stability conditions on opposite sides of the wall $\ensuremath{\mathcal W}$, and consider any $\sigma_+$-stable object $E\in M_{\sigma_+}(\mathbf{v})$. Then any HN-filtration factor $A_i$ of $E$ with respect to $\sigma_-$ satisfies $\mathbf{v}(A_i)\in\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$.
\item If $\sigma_0$ is a generic stability condition on the wall $\ensuremath{\mathcal W}$, the conclusion of the previous claim also holds for any $\sigma_0$-semistable object $E$ of class $\mathbf{v}$.
\item Similarly, let $E$ be any object with $\mathbf{v}(E)\in\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$, and assume that it is $\sigma_0$-semistable for a generic stability condition $\sigma_0\in\ensuremath{\mathcal W}$. Then every Jordan-H\"{o}lder factor of $E$ with respect to $\sigma_0$ will have Mukai vector contained in $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$.
\end{enumerate}
\end{PropDef}
\begin{proof} The proof of \cite[Proposition 5.1]{BM14a} carries over word for word.
\end{proof}
We would like to characterize the type of the wall $\ensuremath{\mathcal W}$, i.e. the type of birational transformation induced by crossing it, in terms of the lattice $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$. We will find it helpful to also go in the opposite direction as in \cite[Definition 5.2]{BM14a}:
\begin{Def} Let $\ensuremath{\mathbb{H}}H\subset\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ be a primitive rank two hyperbolic sublattice containing $\mathbf{v}$. A \emph{potential wall} $\ensuremath{\mathcal W}$ associated to $\ensuremath{\mathbb{H}}H$ is a connected component of the real codimension one submanifold consisting of those stability conditions $\sigma$ such that $Z_\sigma(\ensuremath{\mathbb{H}}H)$ is contained in a line.
\end{Def}
We will also have cause to consider two special convex cones in $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathbb{R}}}$. The first is defined as follows (see \cite[Definition 5.4]{BM14a}):
\begin{Def} Given any hyperbolic lattice $\ensuremath{\mathbb{H}}H\subset \ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ of rank two containing $\mathbf{v}$, denote by $P_{\ensuremath{\mathbb{H}}H}\subset \ensuremath{\mathbb{H}}H_{\ensuremath{\mathbb{R}}}$ the cone generated by classes $\mathbf{u}\in\ensuremath{\mathbb{H}}H$ with $\mathbf{u}^2\geq 0$ and $\langle\mathbf{v},\mathbf{u}\rangle>0$. We call $P_{\ensuremath{\mathbb{H}}H}$ the \emph{positive cone} of $\ensuremath{\mathbb{H}}H$, and a class in $P_{\ensuremath{\mathbb{H}}H}\cap\ensuremath{\mathbb{H}}H$ a \emph{positive class}.
\end{Def}
The next cone, called the \emph{effective cone} and whose integral classes are \emph{effective classes}, is classified by the following proposition (see \cite[Proposition 5.5]{BM14a} for the analogue in the K3 case):
\begin{Prop} Let $\ensuremath{\mathcal W}$ be a potential wall associated to a hyperbolic rank two sublattice $\ensuremath{\mathbb{H}}H\subset\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$. For any $\sigma\in\ensuremath{\mathcal W}$, let $C_{\sigma}\subset\ensuremath{\mathbb{H}}H_{\ensuremath{\mathbb{R}}}$ be the cone generated by classes $\mathbf{u}\in\ensuremath{\mathbb{H}}H$ satisfying the two conditions $$\Re\frac{Z_{\sigma}(\mathbf{u})}{Z_{\sigma}(\mathbf{v})}>0\mbox{ and }\begin{cases}
\mathbf{u}^2\geq -1, & \mbox{or}\\
\mathbf{u}^2=-2, & c_1(\mathbf{u})\equiv D\pmod2,\mbox{ $D$ a nodal cycle}
\end{cases}.$$ Then this cone does not depend on the choice of $\sigma\in\ensuremath{\mathcal W}$, so we may and will denote it by $C_{\ensuremath{\mathcal W}}$. Moreover, it contains $P_{\ensuremath{\mathbb{H}}H}$.
If $\mathbf{u}\in C_{\ensuremath{\mathcal W}}$, then there exists a $\sigma$-semistable object of class $\mathbf{u}$ for every $\sigma\in\ensuremath{\mathcal W}$, and if $\mathbf{u}\notin C_{\ensuremath{\mathcal W}}$, then for a generic $\sigma\in\ensuremath{\mathcal W}$ there does not exist a $\sigma$-semistable object of class $\mathbf{u}$.
\end{Prop}
\begin{proof} The proof is identical to that of \cite[Proposition 5.5]{BM14a} except that for the statements about the existence of semistable objects we must use Theorem \ref{Thm:exist:nodal}. This accounts for the more subtle condition on $\mathbf{u}^2$ compared to the corresponding condition $\mathbf{u}^2\geq -2$ for K3 surfaces.
\end{proof}
We also recall \cite[Remark 5.6]{BM14a}:
\begin{Rem} From the positivity condition on $\Re\frac{Z_{\sigma}(\mathbf{u})}{Z_{\sigma}(\mathbf{v})}$, it is clear that $C_{\ensuremath{\mathcal W}}$ contains no line through the origin, i.e. if $\mathbf{u}\in C_{\ensuremath{\mathcal W}}$ then $-\mathbf{u}\notin C_{\ensuremath{\mathcal W}}$. Thus there are only finitely many classes in $C_{\ensuremath{\mathcal W}}\cap(\mathbf{v}-C_{\ensuremath{\mathcal W}})\cap\ensuremath{\mathbb{H}}H$.
We use this fact to make the following assumption: when we refer to a generic $\sigma_0\in\ensuremath{\mathcal W}$, we mean that $\sigma_0$ is not in any of the other walls associated to the finitely many classes in $C_{\ensuremath{\mathcal W}}\cap(\mathbf{v}-C_{\ensuremath{\mathcal W}})\cap\ensuremath{\mathbb{H}}H$. Likewise, $\sigma_{\pm}$ will refer to stability conditions in adjacent chambers to $\ensuremath{\mathcal W}$ in this more refined wall-and-chamber decomposition.
\end{Rem}
Finally, we single-out two types of primitive hyperbolic lattices as the nature of our arguments differ greatly between them:
\begin{Def} We say that $\ensuremath{\mathcal W}$ is \emph{isotropic} if $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ contains an isotropic class and \emph{non-isotropic} otherwise.
\end{Def}
We begin our investigation by determining precisely the kind of Mukai vectors that can be contained in $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$.
\begin{Prop}\label{Prop:lattice classification}Let $\ensuremath{\mathbb{H}}H$ be the hyperbolic lattice associated to a wall $\ensuremath{\mathcal W}$ and $\sigma_0=(Z,\ensuremath{\mathbb{P}}P_0)\in\ensuremath{\mathcal W}$ generic. Then $\ensuremath{\mathbb{H}}H$ and $\sigma_0$ satisfy one of the following mutually exclusive conditions:
\begin{enumerate}
\item\label{enum:nonegativeclasses} $\ensuremath{\mathbb{H}}H$ contains no effective spherical or exceptional classes.
\item\label{enum:OneNegative} \begin{enumerate}
\item\label{enum:OneSpherical} $\ensuremath{\mathbb{H}}H$ contains precisely one effective spherical class, and there exists a unique $\sigma_0$-stable spherical object $S$ with $\mathbf{v}(S)\in\ensuremath{\mathbb{H}}H$.
\item\label{enum:OneExceptional} $\ensuremath{\mathbb{H}}H$ contains precisely one effective exceptional class, and there exists exactly two $\sigma_0$-stable exceptional objects $E,E(K_X)$ with $\mathbf{v}(E)=\mathbf{v}(E(K_X))\in\ensuremath{\mathbb{H}}H$.
\end{enumerate}
\item\label{enum:TwoNegative} There are infinitely many effective spherical or exceptional classes in $\ensuremath{\mathbb{H}}H$, and either
\begin{enumerate}
\item\label{enum:TwoSpherical} there exist exactly two $\sigma_0$-stable spherical objects $S,T$ whose classes are in $\ensuremath{\mathbb{H}}H$; or
\item\label{enum:TwoExceptional} there exist exactly four $\sigma_0$-stable exceptional objects $E_1,E_1(K_X),E_2,E_2(K_X)$ with $\mathbf{v}(E_1)=\mathbf{v}(E_1(K_X)),\mathbf{v}(E_2)=\mathbf{v}(E_2(K_X))\in\ensuremath{\mathbb{H}}H$; or
\item\label{enum:OneExceptionalOneSpherical} there exists exactly one $\sigma_0$-stable spherical object $S$ and exactly two $\sigma_0$-stable exceptional objects $E,E(K_X)$ with $\mathbf{v}(S),\mathbf{v}(E)=\mathbf{v}(E(K_X))\in\ensuremath{\mathbb{H}}H$.
\end{enumerate}
In case \ref{enum:TwoNegative}, $\ensuremath{\mathbb{H}}H$ is non-isotropic.
\end{enumerate}
\end{Prop}
\begin{proof}
Suppose that $\ensuremath{\mathbb{H}}H$ contains precisely one effective spherical (resp. exceptional) class $\mathbf{w}$. Then by Theorem \ref{Thm:exist:nodal}, there exists a unique $\sigma_+$-stable object $S$ with $\mathbf{v}(S)=\mathbf{w}$ (resp. precisely two $\sigma_+$-stable objects $E$ and $E(K_X)$ with $\mathbf{v}(E)=\mathbf{v}(E(K_X))=\mathbf{w}$), which must then be spherical (resp. exceptional) by \cite[Lemma 4.3]{Yos16b}. Suppose that $S$ (resp. $E$) is strictly $\sigma_0$-semistable. Then by \cite[Lemma 4.6]{Yos16b} every $\sigma_0$-stable factor $F$ of $S$ (resp. $E\oplus E(K_X)$) must satisfy $\mathop{\mathrm{Ext}}\nolimits^1(F,F)=0$. But then $\mathbf{v}(F)^2<0$, so by \cite[Lemma 4.3]{Yos16b} $\mathbf{v}(F)^2=-1$ or $\mathbf{v}(F)^2=-2$ and $c_1(F)\equiv D\pmod 2$ for some nodal cycle $D$, i.e. $\mathbf{v}(F)$ is either an effective spherical or exceptional class. But this is a contradiction to the assumption, so $S$ is (resp. $E,E(K_X)$ are) $\sigma_0$-stable, giving Case \ref{enum:OneNegative}.
It remains to consider Case \ref{enum:TwoNegative}. Let $\phi$ be the phase of $\mathbf{v}$ with respect to $\sigma_0$. It will suffice for our purposes to show that, up to twisting by $K_X$, there cannot be any combination of three stable spherical or exceptional objects $S_1,S_2,S_3$ in $\ensuremath{\mathbb{P}}P_0(\phi)$. Since each $S_i$ is $\sigma_0$-stable of the same phase and distinct up to twisting by $K_X$, we must have $\ensuremath{\mathbb{H}}om(S_i,S_j)=\ensuremath{\mathbb{H}}om(S_j,S_i(K_X))=0$ for each $i\neq j$. Thus if $\mathbf{w}_i=\mathbf{v}(S_i)$, then $\langle \mathbf{w}_i,\mathbf{w}_j\rangle=\mathop{\mathrm{ext}}\nolimits^1(S_i,S_j)\geq 0$.
Now any two of the $\mathbf{w}_i$ must be linearly independent, and we may choose, say, $\mathbf{w}_1$ and $\mathbf{w}_2$ to represent either both spherical or both exceptional $\sigma_0$-stable objects. Denote by $m:=\langle \mathbf{w}_1,\mathbf{w}_2\rangle\geq 0$. Since $\ensuremath{\mathbb{H}}H$ has signature $(1,-1)$, $$\langle \mathbf{w}_1,\mathbf{w}_2\rangle^2> \mathbf{w}_1^2\mathbf{w}_2^2=\begin{cases}
1, & \text{ if }\mathbf{w}_1^2=\mathbf{w}_2^2=-1,\\
4, & \text{ if }\mathbf{w}_1^2=\mathbf{w}_2^2=-2.\\
\end{cases}$$
So $m\geq 2$ in the first case and $m\geq 3$ in the second. We write $\mathbf{w}_3=x\mathbf{w}_1+y\mathbf{w}_2$ with $x,y\in\ensuremath{\mathbb{Q}}$, and from $\langle \mathbf{w}_3,\mathbf{w}_1\rangle,\langle \mathbf{w}_3,\mathbf{w}_2\rangle\geq 0$, we get that \mathbf{b}egin{equation}\label{eq:positivity}\mathbf{b}egin{cases}
\frac{1}{m}\leq \frac{y}{x}\leq m, & \text{ if }\mathbf{w}_1^2=\mathbf{w}_2^2=-1,\\
\frac{2}{m}\leq \frac{y}{x}\leq \frac{m}{2}, & \text{ if }\mathbf{w}_1^2=\mathbf{w}_2^2=-2.\\
\end{cases}\end{equation} But then since $$m-\sqrt{m^2-1}\leq\frac{1}{m}\leq\frac{y}{x}\leq m\leq m+\sqrt{m^2-1}$$ for $m\geq 2$ and $$\frac{m-\sqrt{m^2-4}}{2}\leq\frac{2}{m}\leq\frac{y}{x}\leq\frac{m}{2}\leq\frac{m+\sqrt{m^2-4}}{2}$$ for $m\geq 3$, we see that $$\mathbf{w}_3^2=x^2\mathbf{b}egin{cases}
-1+2m(\frac{y}{x})-(\frac{y}{x})^2, &\text{ if }\mathbf{w}_1^2=\mathbf{w}_2^2=-1,\\
-2+2m(\frac{y}{x})-2(\frac{y}{x})^2, &\text{ if }\mathbf{w}_1^2=\mathbf{w}_2^2=-2,
\end{cases}$$ must be positive, in contradiction to the fact that $\mathbf{w}_3^2=-1$ or $-2$. Thus, we see that, up to tensoring by $K_X$, there can only be at most two $\sigma_0$-stable spherical or exceptional objects with Mukai vectors in $\ensuremath{\mathbb{H}}H$. Notice further that if $\ensuremath{\mathbb{H}}H$ admits any combination of two linearly independent spherical or exceptional classes, then the group generated by the associated spherical and $(-1)$ reflections is infinite, so the orbit of a spherical or exceptional class gives infinitely many Mukai vectors of the same kind.
Furthermore, we see that solving the quadratic equation $$0=\mathbf{u}^2=(x\mathbf{w}_1+y\mathbf{w}_2)^2$$ gives \mathbf{b}egin{equation}\label{eq:isotropic solutions}\frac{y}{x}=\mathbf{b}egin{cases}
m\pm\sqrt{m^2-1}, & \text{ if }\mathbf{w}_1^2=\mathbf{w}_2^2=-1,\\
\frac{m\pm\sqrt{m^2-4}}{2}, & \text{ if }\mathbf{w}_1^2=\mathbf{w}_2^2=-2,
\end{cases}\end{equation} which are irrational. Thus in subcases \ref{enum:TwoSpherical} and \ref{enum:TwoExceptional}, there can be no isotropic classes, as these would give rational solutions in \eqref{eq:isotropic solutions}.
Finally, it only remains to show that in subcase \ref{enum:OneExceptionalOneSpherical} $\ensuremath{\mathbb{H}}H$ is non-isotropic. Similar to the previous subcases, we write an integral isotropic class $\mathbf{u}=x\mathbf{w}_1+y\mathbf{w}_2$ with $x,y\in\ensuremath{\mathbb{Q}}$, where $\mathbf{w}_1=\mathbf{v}(S)$ is a spherical class and $\mathbf{w}_2=\mathbf{v}(E)=\mathbf{v}(E(K_X))$ is an exceptional class. Solving the quadratic equation $\mathbf{u}^2=0$ gives $$\frac{y}{x}=m\pm\sqrt{m^2-2},$$ where $m=\langle \mathbf{w}_1,\mathbf{w}_2\rangle$. But $S$ and $E$ are $\sigma_0$-stable objects of the same phase with classes in a lattice of signature $(1,-1)$, so we must have $m\geq 2$, as in the arguments for the preceding cases. But this gives a contradiction as $m^2-2$ cannot be a square for $m\geq 2$.
\end{proof}
All of our main results are a consequence of the following classification theorem, which essentially says that the birational behavior induced by crossing $\ensuremath{\mathcal W}$ is entirely determined by $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ and $C_\ensuremath{\mathcal W}$.
\mathbf{b}egin{Thm}\label{classification of walls}
Let $\ensuremath{\mathbb{H}}H\subset \ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$ be a primitive hyperbolic rank two sublattice containing $\mathbf{v}$, and let $\ensuremath{\mathcal W}\subset\mathop{\mathrm{Stab}}\nolimitsd(X)$ be a potential wall associated to $\ensuremath{\mathbb{H}}H$.
The set $\ensuremath{\mathcal W}$ is a totally semistable wall if and only if one of the following conditions hold:
\mathbf{b}egin{description}
\item[(TSS1)] there exists a spherical or exceptional class $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$ such that $\langle \mathbf{v},\mathbf{w}\rangle<0$;
\item[(TSS2)] there exists an isotropic class $\mathbf{u}\in\ensuremath{\mathbb{H}}H$ with $\ell(\mathbf{u})=2$ and $\langle \mathbf{v},\mathbf{u}\rangle=1$; or
\item[(TSS3)] there exists a primitive isotropic class $\mathbf{u}\in\ensuremath{\mathbb{H}}H$ such that $\langle \mathbf{v},\mathbf{u}\rangle=\ell(\mathbf{u})$ and $\langle \mathbf{v},\mathbf{w}\rangle=0$ for a spherical $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$; or
\item[(TSS4)] there exists a primitive isotropic class $\mathbf{u}\in\ensuremath{\mathbb{H}}H$ such that $\langle\mathbf{v},\mathbf{u}\rangle=2=\ell(\mathbf{u})$ and $\langle\mathbf{v},\mathbf{w}\rangle=0$ for an exceptional $\mathbf{w}\in C_{\ensuremath{\mathcal W}}\cap\ensuremath{\mathbb{H}}H$.
\end{description}
In addition,
\mathbf{b}egin{enumerate}
\item\label{thm:Classification,Divisorial} The set $\ensuremath{\mathcal W}$ is a wall inducing a divisorial contraction if one of the following conditions hold:
\mathbf{b}egin{description*}
\item[(Brill-Noether)] there exists a spherical class $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$ such that $\langle \mathbf{w},\mathbf{v}\rangle=0$, or there exists an exceptional class $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$ such that $\langle\mathbf{w},\mathbf{v}\rangle=0$ and $\mathbf{v}-2\mathbf{w}\in\ensuremath{\mathbb{D}}elta(X)_{-2}\cap C_{\ensuremath{\mathcal W}}\cap\ensuremath{\mathbb{H}}H$; or
\item[(Hilbert-Chow)] there exists an isotropic class $\mathbf{u}$ with $\langle \mathbf{v},\mathbf{u}\rangle=1$ and $\ell(\mathbf{u})=2$; or
\item[(Li-Gieseker-Uhlenbeck)] there exists a primitive isotropic class $\mathbf{u}\in\ensuremath{\mathbb{H}}H$ with $\langle \mathbf{v},\mathbf{u}\rangle=2=\ell(\mathbf{u})$; or
\item[(induced Li-Gieseker-Uhlenbeck)] there exists an isotropic class $\mathbf{u}\in\ensuremath{\mathbb{H}}H$ with $\langle \mathbf{v},\mathbf{u}\rangle=1=\ell(\mathbf{u})$ and $\mathbf{v}^2\geq 3$.
\end{description*}
\item\label{thm:Classification,Fibration} The set $\ensuremath{\mathcal W}$ is a wall inducing a $\ensuremath{\mathbb{P}}^1$-fibration on $M_{\sigma_+}(\mathbf{v},L)$ if one of the following conditions hold:
\mathbf{b}egin{description*}
\item[(Exceptional case)] there exists a primitive isotropic class $\mathbf{u}$ with $\langle \mathbf{v},\mathbf{u}\rangle=2=\ell(\mathbf{u})$, an exceptional class $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$ with $\langle \mathbf{v},\mathbf{w}\rangle=0$, and $L\equiv K_X\pmod 2$, or
\item[(Spherical case)] there exists an isotropic class $\mathbf{u}$ with $\langle \mathbf{v},\mathbf{u}\rangle=\ell(\mathbf{u})$, a spherical class $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$ with $\langle \mathbf{v},\mathbf{w}\rangle=0$, and $L\equiv D+\frac{\mathop{\mathrm{rk}} \mathbf{v}}{2}K_X\pmod 2$, where $D$ is a nodal cycle.
\end{description*}
\item\label{thm:Classification,Flops}
Otherwise, if $\mathbf{v}$ is primitive and either
\mathbf{b}egin{enumerate}
\item \label{enum:sum2positive}
$\mathbf{v}^2\geq 3$ and $\mathbf{v}$ can be written as the sum
$\mathbf{v} = \mathbf{a}_1 + \mathbf{a}_2$ with $\mathbf{a}_i\in P_\ensuremath{\mathbb{H}}H$ such that $L\equiv \frac{r}{2}K_X\pmod 2$ if for each $i$, $\mathbf{a}_i^2=0$ and $\ell(\mathbf{a}_i)=2$; or
\item\label{enum:exceptional} there exists an exceptional class $\mathbf{w}\in\ensuremath{\mathbb{H}}H$ and either
\mathbf{b}egin{enumerate}
\item\label{enum:exceptionalflop1}
$0< \langle \mathbf{w},\mathbf{v}\rangle\leq\frac{\mathbf{v}^2}{2}$, or
\item\label{enum:exceptionalflop2}
$\langle \mathbf{v},\mathbf{w}\rangle=0$ and $\mathbf{v}^2\geq 3$; or
\end{enumerate}
\item\label{enum:spherical} there exists a spherical class $\mathbf{w}\in \pm (C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H)$ and either
\mathbf{b}egin{enumerate}
\item\label{enum:sphericalflop1}
$0 < \langle \mathbf{w}, \mathbf{v}\rangle < \frac{\mathbf{v}^2}2$, or
\item\label{enum:sphericalflop2}
$\langle \mathbf{w},\mathbf{v}\rangle=\frac{\mathbf{v}^2}{2}$ and $\mathbf{v}-\mathbf{w}$ is a spherical class,
\end{enumerate}
\end{enumerate}
then $\ensuremath{\mathcal W}$ induces a small contraction.
\item In all other cases, $\ensuremath{\mathcal W}$ is either a fake wall or not a wall at all.
\end{enumerate}
\end{Thm}
The proof of the above theorem will occupy us for the next four sections, but before we enter into a more involved and lattice specific analysis of the wall-crossing behavior, we present a general result on the codimension of the strictly $\sigma_0$-semistable locus corresponding to the simplest Harder-Narasimhan filtration as above:
\mathbf{b}egin{Prop}\label{Prop:HN filtration all positive classes}
As above, let $\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o$ be the substack of $\ensuremath{\mathcal M}_{\sigma_+}(\mathbf{v})$ parametrizing objects with $\sigma_-$ Harder-Narasimhan filtration factors of classes $\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n$ (in order of descending phase with respect to $\phi_{\sigma_-}$), and suppose that $\mathbf{a}_i^2>0$ for all $i$. Then $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o\geq 2$. \end{Prop}
\mathbf{b}egin{proof}
By Theorem \ref{Thm:exist:nodal}, the assumption that $\mathbf{a}_i^2>0$ implies that $\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i)=\mathbf{a}_i^2$. Thus by Proposition \ref{Prop:HN codim}, $$\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o\geq\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j\rangle.$$ But as $\mathbf{a}_i^2\geq 1$ and $\ensuremath{\mathbb{H}}H$ has signature $(1,-1)$, we must have $$\langle \mathbf{a}_i,\mathbf{a}_j\rangle>\sqrt{\mathbf{a}_i^2 \mathbf{a}_j^2}\geq 1,$$ for $i<j$. Thus $\langle \mathbf{a}_i,\mathbf{a}_j\rangle\geq 2$. It follows that $$\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o\geq n(n-1)\geq 2,$$ as $n\geq 2$.
\end{proof}
It follows from the proposition that in order for there to be more interesting wall-crossing behavior, $\ensuremath{\mathbb{H}}H$ must contain some class $\mathbf{w}$ with $\mathbf{w}^2\leq 0$. We will begin with the non-isotropic case first in the next section.
\section{Totally semistable non-isotropic walls}\label{Sec:TotallySemistable-non-isotropic}
In this section we describe the criterion for a potential non-isotropic wall $\ensuremath{\mathcal W}$ to be totally semistable, that is, every $E\in M_{\sigma_+}(\mathbf{v})$ is strictly $\sigma_0$-semistable. We will see that by applying an appropriate sequence of spherical or weakly spherical twists, we can always reduce to the case of a non-totally semistable wall to study the birational behavior of crossing $\ensuremath{\mathcal W}$. Let us begin with a sufficient condition for $\ensuremath{\mathcal W}$ to be totally semistable in general.
\mathbf{b}egin{Lem}\label{Lem: condition for totally semistable wall}
Let $\ensuremath{\mathcal W}$ be a potential wall such that $\langle \mathbf{v},\mathbf{w}\rangle<0$ for an effective spherical or exceptional class $\mathbf{w}\in\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$. Then $\ensuremath{\mathcal W}$ is totally semistable.
\end{Lem}
\mathbf{b}egin{proof}
Suppose there were a $\sigma_0$-stable object $E$ of class $\mathbf{v}$. Let $\tilde{E_0}$ be a $\sigma_0$-semistable object with $\mathbf{v}(\tilde{E_0})=\mathbf{w}$. As all stable factors of $\tilde{E_0}$ are spherical or exceptional \cite[Lemma 4.3, Lemma 4.6]{Yos16b}, we may find a $\sigma_0$-stable object $E_0$ such that $\langle \mathbf{v},\mathbf{v}(E_0)\rangle<0$ and $\mathbf{v}(E_0)^2=-1$ or $-2$. As $E$ and $E_0$ (resp. $E$ and $E_0(K_X)$) are non-isomorphic $\sigma_0$-stable objects of the same phase, we must have $\ensuremath{\mathbb{H}}om(E,E_0)=\ensuremath{\mathbb{H}}om(E_0(K_X),E)=0$. But then $0>\langle \mathbf{v},\mathbf{v}(E_0)\rangle=\mathop{\mathrm{ext}}\nolimits^1(E,E_0)\geq 0$, a contradiction.
\end{proof}
For a non-isotropic wall, the condition in Lemma \ref{Lem: condition for totally semistable wall} is actually necessary as we see now.
\mathbf{b}egin{Lem}\label{Lem:non-isotropic no totally semistable wall}Suppose that $\ensuremath{\mathbb{H}}H$ is non-isotropic and $\langle \mathbf{v},\mathbf{w}\rangle\geq 0$ for all spherical or exceptional classes $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$. Then $\ensuremath{\mathcal W}$ cannot be a totally semistable wall, and if $\mathop{\mathrm{codim}}\nolimits(M_{\sigma_+}(\mathbf{v})\mathbf{b}ackslash M^s_{\sigma_0}(\mathbf{v}))=1$ then $\langle \mathbf{v},\mathbf{w}\rangle=0$ for some spherical or exceptional class $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$ or $\langle\mathbf{v},\mathbf{w}\rangle=1=\langle\mathbf{v},\mathbf{v}-\mathbf{w}\rangle$ for spherical classes $\mathbf{w},\mathbf{v}-\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$. Moreover, the generic member of $M_{\sigma_+}(\mathbf{v})\mathbf{b}ackslash M^s_{\sigma_0}(\mathbf{v})$ has HN-filtration factors of classes $\mathbf{w}$ and $\mathbf{v}-\mathbf{w}$ with respect to $\sigma_-$-stability.
\end{Lem}
\mathbf{b}egin{proof}
Consider the stack $\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o$ of Harder-Narasimhan filtrations with respect to $\sigma_-$-stability as in Section \ref{sec:DimensionsOfHarderNarasimhan}. We wish to estimate the codimension of $\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o$.
Suppose that $I=\Set{i\ |\ \mathbf{a}_i^2>0}\neq\mathbf{v}arnothing$ and let $\mathbf{a}:=\sum_{i\in I}\mathbf{a}_i$. Write $\mathbf{b}:=\mathbf{v}-\mathbf{a}$. If $\mathbf{b}^2>0$, then we automatically have $\langle \mathbf{a},\mathbf{b}\rangle>\sqrt{\mathbf{a}^2 \mathbf{b}^2}\geq 1$, so $\mathbf{v}^2=\mathbf{a}^2+2\langle \mathbf{a},\mathbf{b}\rangle+\mathbf{b}^2\geq \mathbf{a}^2+5$. If, instead $\mathbf{b}^2<0$, then note that $\mathbf{b}$ is the sum of effective spherical and/or exceptional classes. Indeed, for $i\notin I$, $\mathbf{a}_i^2<0$ and $M_{\sigma_-}(\mathbf{a}_i)\ne\mathbf{v}arnothing$, so $\mathbf{a}_i$ is the sum of effective spherical and/or exceptional classes by \cite[Lemma 4.3, Lemma 4.6]{Yos16b}, so the same holds for $\mathbf{b}=\sum_{i\notin I}\mathbf{a}_i$. By the assumption that $\mathbf{v}$ pairs non-negatively with all effective spherical and exceptional classes, we see that $\langle\mathbf{v},\mathbf{b}\rangle\geq 0$, so $\mathbf{a}^2=\mathbf{v}^2-2\langle \mathbf{v},\mathbf{b}\rangle+\mathbf{b}^2\leq \mathbf{v}^2-1$.
In any case we have $\mathbf{a}^2<\mathbf{v}^2$, and we write $\mathbf{a}^2=\mathbf{v}^2-k$ with $k\in\ensuremath{\mathbb{Z}}_{\geq 0}$ and $k>0$ if $\mathbf{v}\neq \mathbf{a}$. Expanding the squares on each side gives $$0=\sum_{i\in I^c}\mathbf{a}_i^2+2\sum_{i<j,(i,j)\in (I^2)^c}\langle \mathbf{a}_i,\mathbf{a}_j\rangle-k,$$ and rearranging gives \mathbf{b}egin{equation}\label{non-isotropic estimate}
\sum_{i<j,(i,j)\in (I^2)^c}\langle \mathbf{a}_i,\mathbf{a}_j\rangle =\frac{k}{2} -\frac{1}{2}\sum_{i\in I^c}\mathbf{a}_i^2.\end{equation}
By Proposition \ref{Prop:HN codim},
we get that \mathbf{b}egin{align}\label{codim estimate}
\mathbf{b}egin{split}\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o&\geq\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j\rangle\\
&=\sum_{i\in I^c}(\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j,(i,j)\in I^2}\langle \mathbf{a}_i,\mathbf{a}_j\rangle+\sum_{i<j,(i,j)\in (I^2)^c}\langle \mathbf{a}_i,\mathbf{a}_j\rangle,
\end{split}
\end{align}
since $\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i)=\mathbf{a}_i^2$ for $i\in I$. Using \eqref{non-isotropic estimate} and writing $\mathbf{a}_i=m_i \mathbf{w}_i$ with $\mathbf{w}_i$ primitive for $i\in I^c$, we have
\mathbf{b}egin{align}\label{eqn: non-isotropic codimension}
\mathbf{b}egin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o&\geq\frac{k}{2}+\sum_{i\in I^c}\left(\frac{\mathbf{a}_i^2}{2}-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i)\right)+\sum_{i<j,(i,j)\in I^2}\langle \mathbf{a}_i,\mathbf{a}_j\rangle\\
&=\frac{k}{2}+\sum_{i\in I^c:\mathbf{w}_i^2\equiv m_i\equiv 1\pmod 2}\frac{1}{2}+\sum_{i<j,(i,j)\in I^2}\langle \mathbf{a}_i,\mathbf{a}_j\rangle,
\end{split}
\end{align}
where the final equality follows from Lemma \ref{Lem:dimension negative}. Moreover, for $(i,j)\in I^2$ with $i\neq j$, the signature of $\ensuremath{\mathbb{H}}H$ forces $\langle \mathbf{a}_i,\mathbf{a}_j\rangle\geq 2$. Thus $$\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o \geq \frac{k}{2}+|I|(|I|-1)>0.$$ As this holds true for all possible HN-filtrations of objects in $M_{\sigma_+}(\mathbf{v})$ with respect to $\sigma_-$-stability, $\ensuremath{\mathcal W}$ cannot be totally semistable. Moreover, note that if $|I|=0$, so that $\mathbf{a}=0$, the estimate in \eqref{eqn: non-isotropic codimension} is still valid. In that case, we must have $k=\mathbf{v}^2>0$ from which we see that $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o >0$.
For the second and third claim, we note that if $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{d}ots,\mathbf{a}_n)^o=1$, then from \eqref{eqn: non-isotropic codimension} we see that $|I|=0,1$ and $k=1,2$. If $|I|=1$, then $\mathbf{b}=\mathbf{v}-\mathbf{a}$ must satisfy $\mathbf{b}^2<0$ (otherwise $k\geq 5$ as we saw above), and solving for $\langle \mathbf{v},\mathbf{b}\rangle$ in $\mathbf{v}^2-k=(\mathbf{v}-\mathbf{b})^2$ gives $$0\leq 2\langle \mathbf{v},\mathbf{b}\rangle=\mathbf{b}^2+k\leq \mathbf{b}^2+2<2,$$ so $\langle \mathbf{v},\mathbf{b}\rangle=0$ and $\mathbf{b}^2=-k$. Thus $\mathbf{b}$ is an effective spherical or exceptional class orthogonal to $\mathbf{v}$. Moreover, as $\mathbf{b}=\sum_{i\notin I}\mathbf{a}_i$, it follows from the assumption on $\mathbf{v}$ and $\langle\mathbf{b},\mathbf{v}\rangle=0$ that $n=2$ and $\mathbf{b}=\mathbf{a}_1$ or $\mathbf{a}_2$. Letting $\mathbf{w}=\mathbf{b}$, we get the claim about the HN-filtration of the generic member of $M_{\sigma_+}(\mathbf{v})\mathbf{b}ackslash M_{\sigma_0}^s(\mathbf{v})$.
If instead $|I|=0$, then $k=\mathbf{v}^2=1,2$, and we rearrange the $\mathbf{a}_i$, if necessary, so that $\langle\mathbf{v},\mathbf{w}_i\rangle\leq\langle\mathbf{v},\mathbf{w}_{i+1}\rangle$ for all $i$. Let us assume first that $\mathbf{v}^2=2$, and note that it follows from \eqref{eqn: non-isotropic codimension} that $m_i$ must be even if $\mathbf{w}_i^2=-1$.
If $\langle \mathbf{v},\mathbf{w}_1 \rangle=0$, then as $(\mathbf{v}-\mathbf{w}_1)^2=2+\mathbf{w}_1^2$ and $\ensuremath{\mathbb{H}}H$ is non-isotropic, we must have $\mathbf{w}_1^2=-1$. It is easy to see that then $\ensuremath{\mathbb{H}}H=\ensuremath{\mathbb{Z}} \mathbf{v} \perp \ensuremath{\mathbb{Z}} \mathbf{w}_1$.
For $\mathbf{w}_j$ ($j \ne 1$), we set $\mathbf{w}_j=x_j \mathbf{v}+y_j \mathbf{w}_1$ for $x_j,y_j\in\ensuremath{\mathbb{Z}}$.
Then $0 \leq \langle \mathbf{v},\mathbf{w}_j \rangle=2x_j$.
Since $\mathbf{v}=\sum_{j=1}^n m_j\mathbf{w}_j$, it follows that $1=\sum_{j \ne 1} m_j x_j$, so $n=2$ and
$m_2=x_2=1$. Hence $\mathbf{v}=m_1 \mathbf{w}_1+\mathbf{w}_2$.
Since $2-m_1^2=\mathbf{w}_2^2$, $m_1=2$ and $\mathbf{w}_2^2=-2$.
If $\langle \mathbf{v},\mathbf{w}_i \rangle>0$ for all $i$, then from $$2=\mathbf{v}^2=\sum_{i=1}^n m_i\langle\mathbf{v},\mathbf{w}_i\rangle,$$ we see that $n=2$ and
$\langle \mathbf{v},\mathbf{w}_1 \rangle=\langle \mathbf{v},\mathbf{w}_2 \rangle=m_1=m_2=1$.
Thus $\mathbf{w}_1^2=(\mathbf{v}-\mathbf{w}_2)^2=\mathbf{v}^2-2\langle\mathbf{v},\mathbf{w}_2\rangle+\mathbf{w}_2^2=\mathbf{w}_2^2$.
As the $m_i$ are odd, we must have $\mathbf{w}_i^2=-2$, which implies $\langle \mathbf{w}_1,\mathbf{w}_2 \rangle=3$.
Now assume that $\mathbf{v}^2=1$.
Then from $$1=\mathbf{v}^2=\sum_{i=1}^n m_i\langle\mathbf{v},\mathbf{w}_i\rangle,$$ we see that $n=2$ and $\mathbf{v}=m_1 \mathbf{w}_1+m_2 \mathbf{w}_2$ with $\langle \mathbf{v},\mathbf{w}_1 \rangle=0$ and $\langle \mathbf{v},\mathbf{w}_2 \rangle=m_2=1$.
Since $(\mathbf{v}-\mathbf{w}_1)^2=1+\mathbf{w}_1^2$ and $\ensuremath{\mathbb{H}}H$ is not isotropic, we must have $\mathbf{w}_1^2=-2$.
Then $$\mathbf{v}^2-2m_1^2=(\mathbf{v}-m_1\mathbf{w}_1)^2=\mathbf{w}_2^2$$ implies $\mathbf{w}_2^2=-1$ and $m_1=1$.
Hence $\langle \mathbf{w}_1,\mathbf{w}_2 \rangle=2$.
\end{proof}
The previous lemma justifies singling out Mukai vectors which pair non-negatively with all effective spherical or exceptional classes, and we will spend the remainder of this section using the theory of Pell's equation to reduce to this case. Such a Mukai vector is called \emph{minimal in its $G_{\ensuremath{\mathbb{H}}H}$-orbit}, or simply \emph{minimal} for short, because of the following definition.
\mathbf{b}egin{PropDef}\label{PropDef: minimal vectors}
Let $G_{\ensuremath{\mathbb{H}}H}\subset\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{H}}H)$ be the group generated by spherical and exceptional reflections associated to effective spherical and exceptional classes in $C_{\ensuremath{\mathcal W}}$. For a given positive class $\mathbf{v}\in P_{\ensuremath{\mathbb{H}}H}\cap\ensuremath{\mathbb{H}}H$, the $G_{\ensuremath{\mathbb{H}}H}$-orbit of $\mathbf{v}$ contains a unique class $\mathbf{v}_0$ such that $\langle \mathbf{v}_0,\mathbf{w}\rangle\geq 0$ for all effective spherical and exceptional classes $\mathbf{w}\in C_{\ensuremath{\mathcal W}}$. We call $\mathbf{v}_0$ the minimal class of the orbit $G_{\ensuremath{\mathbb{H}}H}\cdot\mathbf{v}$.
\end{PropDef}
The proof of the existence of $\mathbf{v}_0$ is almost identical to that of \cite[Proposition and Definition 6.6]{BM14b}, so we omit it. In the remainder of this section we will explore the action of $G_\ensuremath{\mathbb{H}}H$ more fully. We will consider explicitly only Cases \ref{enum:TwoExceptional} and \ref{enum:OneExceptionalOneSpherical}, as Case \ref{enum:TwoSpherical} is covered in detail in \cite[Section 6]{BM14b}. Furthermore, our discussion is clearly irrelevant to Case \ref{enum:nonegativeclasses}, and applies in a much simpler but completely analogous way in Case \ref{enum:OneNegative}.
For simplicity, we observe that up to the action of $\mathbf{w}idetilde{\mathop{\mathrm{GL}}\nolimits}_2^+(\ensuremath{\mathbb{R}})$, we may assume that the phase of all objects in $C_\ensuremath{\mathcal W}$ is 1. We make this assumption throughout the rest of this section.
\subsection{Stability}
In order to study the behavior of stability under $G_\ensuremath{\mathbb{H}}H$,
we must study the interplay between rotating a stability function and tilting.
Let $\ensuremath{\mathcal B}$ be an abelian category with a stability function $Z:\ensuremath{\mathbb{D}}b(\ensuremath{\mathcal B}) \to \ensuremath{\mathbb{C}}$ such that $Z(\ensuremath{\mathcal B} \setminus \{0 \})\subset \ensuremath{\mathbb{H}} \cup \ensuremath{\mathbb{R}}_{<0}$. We set $\phi(E):=\frac{1}{\pi}\arg Z(E)$.
\mathbf{b}egin{Def}
$E \in \ensuremath{\mathcal B}$ is $Z$-semistable if $\phi(F) \leq \phi(E)$ for all subobjects $F$ of $E$ in $\ensuremath{\mathcal B}$.
\end{Def}
We assume that $Z$ satisfies the HN-filtration property and that the category $$\Set{E\in\ensuremath{\mathcal B}\ | \ E\mbox{ is $Z$-semistable of phase $\phi$}}$$
has finite length, so that JH-filtrations exist, see \cite[Sections 3 and 4]{Bri08}.
Now we recall the definition of a torsion pair:
\mathbf{b}egin{Def}[{\cite{HRS96}}]\label{defn:TorsionPair}
A torsion pair in an abelian category $\ensuremath{\mathcal B}$ is a pair of full subcategories $(\ensuremath{\mathbb{T}}T, \ensuremath{\mathcal F})$ of $\ensuremath{\mathcal B}$ which satisfy $\ensuremath{\mathbb{H}}om(T,F) = 0$ for $T\in\ensuremath{\mathbb{T}}T$ and $F\in\ensuremath{\mathcal F}$, and such that every object $E\in\ensuremath{\mathcal B}$ fits into a short exact sequence
$$0\to T\to E\to F\to 0$$ for some pair of objects $T\in\ensuremath{\mathbb{T}}T$ and $F\in\ensuremath{\mathcal F}$. We write $T=\ensuremath{\mathbb{T}}T(E)$ and $F=\ensuremath{\mathcal F}(E)$.
\end{Def}
One often constructs torsion pairs via HN-filtrations as in the following definition of the rotation of a stability function $Z$ and the tilt of corresponding torsion pair.
\mathbf{b}egin{Def}\label{defn:B'}
For a real number $\theta \in (-1,1)$,
we set $Z_\theta:=e^{\pi \sqrt{-1} \theta} Z$.
\mathbf{b}egin{enumerate}
\item[(1)]
If $\theta \geq 0$, then
let $(\ensuremath{\mathbb{T}}T',\ensuremath{\mathcal F}')$ be the torsion pair of $\ensuremath{\mathcal B}$
such that
\mathbf{b}egin{enumerate}
\item
$\ensuremath{\mathbb{T}}T'$ is generated by $Z$-stable objects $E \in \ensuremath{\mathcal B}$ with
$\phi(E)+\theta>1$.
\item
$\ensuremath{\mathcal F}'$ is generated by $Z$-stable objects $E \in \ensuremath{\mathcal B}$ with
$\phi(E)+\theta \leq 1$.
\end{enumerate}
We set $\ensuremath{\mathcal B}':=\langle \ensuremath{\mathbb{T}}T'[-1],\ensuremath{\mathcal F}' \rangle$
and $Z':=Z_\theta$. We denote the corresponding phase by $\phi'$.
\item[(2)]
If $\theta \leq 0$, then
let $(\ensuremath{\mathbb{T}}T'',\ensuremath{\mathcal F}'')$ be the torsion pair of $\ensuremath{\mathcal B}$
such that
\begin{enumerate}
\item
$\ensuremath{\mathbb{T}}T''$ is generated by $Z$-stable objects $E \in \ensuremath{\mathcal B}$ with
$\phi(E)+\theta>0$.
\item
$\ensuremath{\mathcal F}''$ is generated by $Z$-stable objects $E \in \ensuremath{\mathcal B}$ with
$\phi(E)+\theta \leq 0$.
\end{enumerate}
We set $\ensuremath{\mathcal B}'':=\langle \ensuremath{\mathbb{T}}T'',\ensuremath{\mathcal F}''[1] \rangle$
and $Z'':=Z_\theta$. We denote the corresponding phase by $\phi''$.
\end{enumerate}
\end{Def}
We determine now precisely when an object in the tilted category is semistable with respect to the rotated stability function, generalizing \cite[Lemma 6.10]{BM14b}.
\begin{Prop}\label{Prop:BB}
\begin{enumerate}
\item\label{enum:B'}
Assume that $E\in \ensuremath{\mathcal B}'$. Then $E$ is $Z'$-semistable if and only if
\begin{enumerate}
\item
$E \in \ensuremath{\mathcal F}'$
and $E$ is $Z$-semistable or
\item
$E[1] \in \ensuremath{\mathbb{T}}T'$ and $E[1]$ is $Z$-semistable.
\end{enumerate}
\item\label{enum:B''}
Assume that $E\in \ensuremath{\mathcal B}''$. Then $E$ is $Z''$-semistable if and only if
\begin{enumerate}
\item
$E[-1] \in \ensuremath{\mathcal F}''$ and $E[-1]$ is $Z$-semistable or
\item
$E \in \ensuremath{\mathbb{T}}T''$ and
$E$ is $Z$-semistable.
\end{enumerate}
\end{enumerate}
\end{Prop}
Although this result might be known to experts, we provide a proof for the sake of convenience.
\begin{proof}
We prove \ref{enum:B'} first. Note that $\phi'(E_1)>\phi'(E_2)$ for all $E_1 \in \ensuremath{\mathcal F}'$ and $E_2 \in \ensuremath{\mathbb{T}}T'[-1]$. Then, if $E\in\ensuremath{\mathcal B}'$ is $Z'$-semistable, it follows from the canonical exact sequence in $\ensuremath{\mathcal B}'$,
\begin{equation}
0 \to \ensuremath{\mathbb{H}}H^0(E) \to E \to \ensuremath{\mathbb{H}}H^1(E)[-1] \to 0,
\end{equation}
that either $\ensuremath{\mathbb{H}}H^0(E)=0$ or $\ensuremath{\mathbb{H}}H^1(E)=0$.
Assume first that $\ensuremath{\mathbb{H}}H^1(E)=0$, that is, $E \in \ensuremath{\mathcal F}'$. Let $E_1$ be a subobject of $E$ in $\ensuremath{\mathcal B}$. By considering the HN-filtration of $E/E_1$, we see that there is a subobject $E_1' \subset E$ in $\ensuremath{\mathcal B}$ such that $E_1 \subset E_1'$, $E_1'/E_1 \in \ensuremath{\mathbb{T}}T'$ and $E/E_1' \in \ensuremath{\mathcal F}'$. Since $E/E_1'$ is a quotient object of $E$ in $\ensuremath{\mathcal B}'$, $Z'$-semistability of $E$ implies $$\phi(E)+\theta=\phi'(E) \leq \phi'(E/E_1')=\phi(E/E_1')+\theta.$$ In particular, $\phi(E)\leq\phi(E/E_1')$. From the definitions of $\ensuremath{\mathbb{T}}T'$ and $\ensuremath{\mathcal F}'$ and our choice of $E_1'$, we see that $\phi(E_1'/E_1) \geq \phi(E/E_1) \geq \phi(E/E_1')$, so it follows that $\phi(E) \leq \phi(E/E_1)$. Therefore $E$ is $Z$-semistable.
Now assume that $\ensuremath{\mathbb{H}}H^0(E)=0$, that is, $E[1] \in \ensuremath{\mathbb{T}}T'$. Let $E_1$ be a subobject of $E[1]$ in $\ensuremath{\mathcal B}$. Then there is a subobject $E_1' \subset E_1$ in $\ensuremath{\mathcal B}$ such that $E_1' \in \ensuremath{\mathbb{T}}T'$ and $E_1/E_1' \in \ensuremath{\mathcal F}'$. Since $E[1]/E_1' \in \ensuremath{\mathbb{T}}T'$, $E_1'$ is a subobject of $E[1]$ in $\ensuremath{\mathcal B}'[1]$. By the $Z'$-semistability of $E$, $$\phi(E_1')+\theta=\phi'(E_1') \leq \phi'(E[1])=\phi(E[1])+\theta,$$ so in particular $\phi(E_1')\leq\phi(E[1])$. Since $\phi(E_1') \geq \phi(E_1) \geq \phi(E_1/E_1')$, from the definition of $\ensuremath{\mathbb{T}}T'$ and $\ensuremath{\mathcal F}'$, we see that $\phi(E_1) \leq \phi(E[1])$. Therefore $E$ is $Z$-semistable.
Next we shall prove the converse direction. Let $E$ be a $Z$-semistable object of $\ensuremath{\mathcal B}$. We first assume that $E \in \ensuremath{\mathcal F}'$. Taking the long exact sequence associated to a given short exact sequence in $\ensuremath{\mathcal B}'$,
\begin{equation}
0 \to E_1 \to E \to E_2 \to 0,
\end{equation}
we get
\begin{equation*}
0 \to \ensuremath{\mathbb{H}}H^0(E_1) \to E \overset{\varphi}{\to} \ensuremath{\mathbb{H}}H^0(E_2)
\to \ensuremath{\mathbb{H}}H^1(E_1) \to 0.
\end{equation*}
Then $\phi(\mathop{\mathrm{im}}\nolimits \varphi) \leq \phi(E_2)$ by $\ensuremath{\mathbb{H}}H^1(E_1)\in \ensuremath{\mathbb{T}}T'$. By the $Z$-semistability of $E$, $\phi(E) \leq \phi(\mathop{\mathrm{im}}\nolimits \varphi)$ so that $$\phi'(E)-\theta=\phi(E) \leq\phi(E_2)=\phi'(E_2)-\theta.$$ Therefore $E$ is $Z'$-semistable.
We next assume that $E \in \ensuremath{\mathbb{T}}T'$. Again we take the long exact sequence associated to a short exact sequence in $\ensuremath{\mathcal B}'$,
\begin{equation}
0 \to E_1 \to E[-1] \to E_2 \to 0,
\end{equation}
and we get the exact sequence
\begin{equation}
0 \to \ensuremath{\mathbb{H}}H^0(E_2) \to \ensuremath{\mathbb{H}}H^1(E_1) \overset{\varphi}{\to} E \to \ensuremath{\mathbb{H}}H^1(E_2) \to 0.
\end{equation}
Then $\phi(\mathop{\mathrm{im}}\nolimits \varphi) \geq \phi(H^1(E_1))$ by $\ensuremath{\mathbb{H}}H^0(E_2) \in \ensuremath{\mathcal F}'$. From the $Z$-semistability of $E$, it follows that $\phi(E) \geq \phi(\mathop{\mathrm{im}}\nolimits \varphi)$, and thus we have $$\phi'(E[-1])-\theta=\phi'(E)-\theta=\phi(E) \geq\phi(H^1(E_1))=\phi(E_1[1])=\phi'(E_1[1])-\theta=\phi'(E_1)-\theta.$$ Therefore $E$ is $Z'$-semistable, as required.
To prove \ref{enum:B''}, we note that for the abelian category $\ensuremath{\mathcal B}[1]$ with
the stability function
$$
Z^*:\ensuremath{\mathcal B}[1] \overset{[-1]}{\to} \ensuremath{\mathcal B} \overset{Z}{\to}\ensuremath{\mathbb{C}},
$$
we have
$(\ensuremath{\mathcal B}[1])'=\ensuremath{\mathcal B}''$, where $Z_{1+\theta}^*=Z_\theta$.
Hence the claim follows from \ref{enum:B'}.
\end{proof}
We will use Proposition \ref{Prop:BB} to study how stability is affected under the action of $G_\ensuremath{\mathbb{H}}H$. To do so, we must investigate Cases \ref{enum:TwoExceptional} and \ref{enum:OneExceptionalOneSpherical} separately.
\subsection{\ref{enum:TwoExceptional}: Exactly two $\sigma_0$-stable exceptional objects up to $-\otimes\ensuremath{\mathcal O}_X(K_X)$}\label{subsec:TwoExceptional}
Recall from Proposition \ref{Prop:lattice classification} that in this case $\ensuremath{\mathbb{H}}H$ contains infinitely many exceptional classes, precisely two of which represent $\sigma_0$-stable objects (up to tensoring with $\ensuremath{\mathcal O}_X(K_X)$). Denote one of these classes by $\mathbf{w}_0$.
We may complete $\mathbf{w}_0$ to a basis so that $\ensuremath{\mathbb{H}}H=\ensuremath{\mathbb{Z}}\mathbf{w}_0+\ensuremath{\mathbb{Z}}\mathbf{z}$,
where $\langle\mathbf{w}_0,\mathbf{z}\rangle=0$ and $D:=\mathbf{z}^2>0$.
Recalling the definition of $\ensuremath{\mathbb{D}}elta(X)_{-1}$ in \eqref{eqn:DefOfEnriquesRootsSep}, we see that $\ensuremath{\mathbb{D}}elta(X)_{-1}\cap\ensuremath{\mathbb{H}}H$ is described by the Pell equation
\begin{equation}\label{eq:Pell}
x^2-Dy^2=1.
\end{equation}
Note that $\sqrt{D}$ must be irrational since $\ensuremath{\mathbb{H}}H$ would be isotropic otherwise, in contradiction to Case \ref{enum:TwoNegative} of Proposition \ref{Prop:lattice classification}.
Recall, for example from \cite[Theorem 8.6]{Lev96}, that the solutions to Pell's equation \eqref{eq:Pell} form a group isomorphic to $\ensuremath{\mathbb{Z}}\oplus\ensuremath{\mathbb{Z}}/2\ensuremath{\mathbb{Z}}$. Indeed, let $(p_1,q_1)$ be the fundamental solution of
\eqref{eq:Pell} with $p_1<0$ and $q_1>0$, and define $p_n, q_n \in\ensuremath{\mathbb{Z}}$ by
\begin{equation}
p_n+q_n \sqrt{D}=
\begin{cases}
-(-p_1-q_1 \sqrt{D})^n, & n > 0\\
(-p_1-q_1 \sqrt{D})^n, & n \leq 0
\end{cases}.
\end{equation}
Then setting $\mathbf{w}_n:=p_n\mathbf{w}_0+q_n\mathbf{z}$, we see that all solutions to \eqref{eq:Pell} are given by
$$
\ensuremath{\mathbb{D}}elta(X)_{-1}\cap\ensuremath{\mathbb{H}}H=\Set{\pm\mathbf{w}_n \ | \ n \in \ensuremath{\mathbb{Z}}}.
$$
Recall that to an exceptional object $E_0\in\ensuremath{\mathbb{D}}b(X)$, we get a weakly spherical reflection $R_{E_0}$ as in
\eqref{eqn:weakly spherical reflection}. We abuse notation by using $R_{\mathbf{v}(E_0)}$ to denote the action on cohomology, which is given by $$R_{\mathbf{v}(E_0)}(\mathbf{v})=\mathbf{v}+2\langle\mathbf{v},\mathbf{v}(E_0)\rangle\mathbf{v}(E_0).$$ Then it is easy to see that
\begin{equation}\label{eqn:ExceptionalVectorsReflections}
\begin{split}
\mathbf{w}_{n+1}=&-R_{\mathbf{w}_n}(\mathbf{w}_{n-1}),\; (n \geq 2)\\
\mathbf{w}_{n-1}=&-R_{\mathbf{w}_n}(\mathbf{w}_{n+1}),\; (n \leq -1)\\
\mathbf{w}_2=&R_{\mathbf{w}_1}(\mathbf{w}_0),\;\; \mathbf{w}_{-1}=R_{\mathbf{w}_0}(\mathbf{w}_1).
\end{split}
\end{equation}
\begin{figure}
\begin{tikzpicture}[scale=1]
\draw [->] (-4,0) -- (4,0);
\draw[->] (0,-3) -- (0,3);
\path [fill=gray!50,opacity=0.2] (-4,3) -- (-4,.8) -- (4,-.8) -- (4,3) -- cycle;
\draw[gray,domain=-4:4] plot (\x,{-.2*\x});
\draw [red,domain=-4:4] plot (\x,{(1/sqrt(2))*\x});
\draw [red,domain=-4:4] plot (\x,{-(1/sqrt(2))*\x});
\draw [blue,domain=-4:-1] plot (\x,{sqrt(.5*(pow(\x,2)-1))});
\draw [blue,domain=1:4] plot (\x,{sqrt(.5*(pow(\x,2)-1))});
\draw [blue,domain=-4:-1] plot (\x,{-sqrt(.5*(pow(\x,2)-1))});
\draw [blue,domain=1:4] plot (\x,{-sqrt(.5*(pow(\x,2)-1))});
\filldraw [gray] (1,0) circle (1.5pt) node [anchor=south west] {$\mathbf{w}_0$};
\filldraw [gray] (-1,0) circle (1.5pt) node [anchor=north east] {$-\mathbf{w}_0$};
\filldraw [gray] (2.00149,-1.21698) circle (1.5pt);
\filldraw [gray] (1.43593,-0.733002) circle (1.5pt) node [anchor=west] {$-\mathbf{w}_1$};
\filldraw [gray] (2.00149,1.21698) circle (1.5pt) node [anchor=west] {$\mathbf{w}_{-2}$};
\filldraw [gray] (-1.43593,0.733002) circle (1.5pt) node [anchor=east] {$\mathbf{w}_1$};
\filldraw [gray] (-2.00149,1.21698) circle (1.5pt) node [anchor=east] {$\mathbf{w}_2$};
\filldraw [gray] (1.43593,0.733002) circle (1.5pt) node [anchor=west] {$\mathbf{w}_{-1}$};
\node[below] at (-3.5,0.75) {$Z^{-1}(0)$};
\node[left] at (0,2.5) {$\ensuremath{\mathbb{R}}e\frac{Z_{\sigma_0}(\underline{\hphantom{A}})}{Z_{\sigma_0}(\mathbf{v})}>0$};
\node[left] at (0,-2.5) {$\ensuremath{\mathbb{R}}e\frac{Z_{\sigma_0}(\underline{\hphantom{A}})}{Z_{\sigma_0}(\mathbf{v})}<0$};
\end{tikzpicture}
\caption{The shaded gray area is the half plane containing $\mathbf{u}$ such that $\ensuremath{\mathbb{R}}e\frac{Z_{\sigma_0}(\mathbf{u})}{Z_{\sigma_0}(\mathbf{v})}>0$, which is bounded by the line $Z^{-1}(0)$. The hyperbola is defined by $\mathbf{u}^2=-1$, and the lines by $\mathbf{u}^2=0$.}
\label{fig:TwoExceptional}
\end{figure}
Since $Z^{-1}(0) \cap \ensuremath{\mathbb{H}}H_\ensuremath{\mathbb{R}}$ is negative definite and $\lim_{n \to \pm \infty}\frac{p_n}{q_n}=\mp \sqrt{D}$, it is not difficult to see that for $\mathbf{w}'=-\mathbf{w}_1$, $Z^{-1}(0) \cap (\ensuremath{\mathbb{R}}_{>0}\mathbf{w}_0+\ensuremath{\mathbb{R}}_{>0}\mathbf{w}') \ne \varnothing$ and $\ensuremath{\mathbb{D}}elta(X)_{-1} \cap (\ensuremath{\mathbb{R}}_{>0}\mathbf{w}_0+\ensuremath{\mathbb{R}}_{>0}\mathbf{w}')= \varnothing$, see Figure \ref{fig:TwoExceptional}. Then $\ensuremath{\mathbb{R}}e\frac{Z_{\sigma_0}(-\mathbf{w}')}{Z_{\sigma_0}(\mathbf{v})}>0$, and since $\mathbf{w}_1=-\mathbf{w}'$ we get
$$
\Set{\mathbf{u} \in \ensuremath{\mathbb{H}}H \ | \ \ensuremath{\mathbb{R}}e\frac{Z_{\sigma_0}(\mathbf{u})}{Z_{\sigma_0}(\mathbf{v})}>0, \mathbf{u}^2 \geq -1}\subset\ensuremath{\mathbb{Q}}_{\geq 0}\mathbf{w}_0+\ensuremath{\mathbb{Q}}_{\geq 0}\mathbf{w}_1.
$$
It follows that $C_\ensuremath{\mathcal W}=\ensuremath{\mathbb{R}}_{>0}\mathbf{w}_0+\ensuremath{\mathbb{R}}_{>0}\mathbf{w}_1$. Furthermore, the positive cone can be described as
$$
P_{\ensuremath{\mathbb{H}}H}=\Set{x\mathbf{w}_0+y\mathbf{z} \in \ensuremath{\mathbb{H}}H \ |\ y^2 D-x^2>0, y \geq 0 }.
$$
It follows that $\mathbf{w}_n\in C_{\ensuremath{\mathcal W}}$ for all $n\in\ensuremath{\mathbb{Z}}$. Let $T_0$ and $T_1$ be $\sigma_0$-semistable objects with $\mathbf{v}(T_i)=\mathbf{w}_i$ for $i=0$ and $1$, respectively. By construction, $T_0$ is $\sigma_0$-stable, and we claim that $T_1$ is as well. Indeed, by \cite[Lemma 4.3, Lemma 4.6]{Yos16b}, any $\sigma_0$-stable factor $\tilde{T}$ of $T_0$ must be exceptional so that $\mathbf{v}(\tilde{T})\in\ensuremath{\mathbb{D}}elta(X)_{-1}\cap C_{\ensuremath{\mathcal W}}$. In particular, by the description in \eqref{eqn:ExceptionalVectorsReflections}, $\mathbf{v}(\tilde{T})=a\mathbf{w}_0+b\mathbf{w}_1$, where $a$ and $b$ are nonnegative integers. This gives a contradiction unless $\mathbf{v}(\tilde{T})=\mathbf{w}_1$ so that $T_1$ is $\sigma_0$-stable, as claimed.
We set
\begin{equation}
\ensuremath{\mathbb{C}}C_n:=\Set{x \mathbf{w}_0+y\mathbf{z} \ |\
\frac{Dq_{n+1}}{p_{n+1}}y<x<\frac{Dq_n}{p_n}y, y>0}.
\end{equation}
Note that for $n<0$, we have
\begin{equation}
\ensuremath{\mathbb{C}}C_n=\Set{\mathbf{u}\in C_\ensuremath{\mathcal W}\ | \ \langle\mathbf{u},\mathbf{w}_{n+1}\rangle<0<\langle\mathbf{u},\mathbf{w}_n\rangle},
\end{equation}
and for $n>0$,
\begin{equation}
\ensuremath{\mathbb{C}}C_n=\Set{\mathbf{u}\in C_\ensuremath{\mathcal W}\ | \ \langle\mathbf{u},\mathbf{w}_{n+1}\rangle>0>\langle\mathbf{u},\mathbf{w}_n\rangle},
\end{equation}
while for $n=0$,
\begin{equation}
\ensuremath{\mathbb{C}}C_n=\Set{\mathbf{u}\in C_\ensuremath{\mathcal W}\ | \ 0<\langle\mathbf{u},\mathbf{w}_n\rangle,\langle\mathbf{u},\mathbf{w}_{n+1}\rangle}.
\end{equation}
Then $\{\ensuremath{\mathbb{C}}C_n \mid n \in \ensuremath{\mathbb{Z}} \}$ is the chamber decomposition
of $P_{\ensuremath{\mathbb{H}}H}$ under the action of $G_\ensuremath{\mathbb{H}}H$.
For $\mathbf{v}_0 \in \ensuremath{\mathbb{C}}C_0$,
we set
\begin{equation}\label{eqn:OrbitOfv0}
\mathbf{v}_n:=
\begin{cases}
R_{\mathbf{w}_n} \circ R_{\mathbf{w}_{n-1}} \circ \cdots \circ R_{\mathbf{w}_1}(\mathbf{v}_0), & n>0\\
R_{\mathbf{w}_{n+1}}^{-1} \circ R_{\mathbf{w}_{n+2}}^{-1}
\circ \cdots \circ R_{\mathbf{w}_0}^{-1}(\mathbf{v}_0), & n < 0.
\end{cases}
\end{equation}
Then for $\mathbf{v} \in \ensuremath{\mathbb{C}}C_n$, there is $\mathbf{v}_0 \in \ensuremath{\mathbb{C}}C_0$ such that
$\mathbf{v}=\mathbf{v}_n$ for this $\mathbf{v}_0$.
\subsubsection{The abelian categories $\ensuremath{\mathcal A}_i$}
Up to reordering, we may assume that $\phi^+(T_1)>\phi^+(T_0)$ (and hence $\phi^-(T_1)<\phi^-(T_0)$), where $\phi^{\pm}$ denotes the phase with respect to $\sigma_{\pm}$, respectively. For $i\in\ensuremath{\mathbb{Z}}$, let $T_i^\pm\in\ensuremath{\mathbb{P}}P_0(1)$ be $\sigma^\pm$-stable objects with
$\mathbf{v}(T_i^\pm)=\mathbf{w}_i$.
Then
\begin{equation}
\phi^+(T_1^+) > \phi^+(T_2^+)>\cdots>\phi^+(E)>
\cdots >\phi^+(T_{-1})>\phi^+(T_0^+)
\end{equation}
for any $\sigma_+$-stable object $E$ with
$\mathbf{v}(E)^2 \geq 0$.
We note that $T_i^+=T_i^-=T_i$ $(i=0,1)$ are $\sigma_0$-stable objects.
We make the following definition which generalizes the approach of \cite[Lemma 6.8]{BM14b}.
\begin{Def}
Assume that $i \geq 0$.
\begin{enumerate}
\item[(1)]
Let $(\ensuremath{\mathbb{T}}T_i,\ensuremath{\mathcal F}_i)$ be the torsion pair of $\ensuremath{\mathbb{P}}P_0(1)$ such that
\begin{enumerate}
\item
$\ensuremath{\mathbb{T}}T_i=\langle T_1^+,T_1^+(K_X),T_2^+,T_2^+ (K_X),...,
T_i^+,T_i^+ (K_X) \rangle$
is the subcategory of $\ensuremath{\mathbb{P}}P_0(1)$ generated by $\sigma_+$-stable objects
$F$ with $\phi^+(F)>\phi^+(T_{i+1}^+)$ and
\item
$\ensuremath{\mathcal F}_i$ is the subcategory of $\ensuremath{\mathbb{P}}P_0(1)$ generated by
$\sigma_+$-stable objects $F$
with $\phi^+(F) \leq \phi^+(T_{i+1}^+)$.
\end{enumerate}
Let $\ensuremath{\mathcal A}_i:=\langle \ensuremath{\mathbb{T}}T_i[-1],\ensuremath{\mathcal F}_i \rangle$ be the tilting.
\item[(2)]
Let $(\ensuremath{\mathbb{T}}T_i^*,\ensuremath{\mathcal F}_i^*)$ be the torsion pair of $\ensuremath{\mathbb{P}}P_0(1)$ such that
\begin{enumerate}
\item
$\ensuremath{\mathbb{T}}T_i^*$ is the subcategory of $\ensuremath{\mathbb{P}}P_0(1)$ generated by
$\sigma_-$-stable objects $F$
with $\phi^-(F) \geq \phi^-(T_{i+1}^-)$.
\item
$\ensuremath{\mathcal F}_i^*=\langle T_1^-,T_1^-(K_X),T_2^-,T_2^-(K_X),...,
T_i^-,T_i^-(K_X) \rangle$
is the subcategory of $\ensuremath{\mathbb{P}}P_0(1)$ generated by $\sigma_-$-stable objects
$F$ with $\phi^-(F)<\phi^-(T_{i+1}^-)$.
\end{enumerate}
Let $\ensuremath{\mathcal A}_i^*:=\langle \ensuremath{\mathbb{T}}T_i^*,\ensuremath{\mathcal F}_i^*[1] \rangle$ be the tilting.
\end{enumerate}
\end{Def}
Since
$\ensuremath{\mathbb{T}}T_0=0$ and $\ensuremath{\mathcal F}_0^*=0$,
we have $\ensuremath{\mathcal A}_0=\ensuremath{\mathcal A}_0^*=\ensuremath{\mathbb{P}}P_0(1)$.
\begin{Rem}\label{rem:simple-objects}
We note that from the definition of $\ensuremath{\mathcal A}_i$ (resp. $\ensuremath{\mathcal A}_i^*$), it follows that:
\begin{enumerate}
\item
$T_{i+1}^+,T_{i+1}^+(K_X), T_i^+[-1],T_i^+(K_X)[-1]$ are irreducible objects of $\ensuremath{\mathcal A}_i$.
\item
$T_{i+1}^-,T_{i+1}^-(K_X), T_i^-[1],T_i^-(K_X)[1]$ are irreducible objects of $\ensuremath{\mathcal A}_i^*$.
\end{enumerate}
\end{Rem}
With these notions in place, we prove the following stronger form of the induction claim in \cite[p. 541]{BM14b}.
\begin{Prop}\label{Prop:equiv1}
For $i\geq 0$, $R_{T_{i+1}^+}$ induces an equivalence
$\ensuremath{\mathcal A}_i \to \ensuremath{\mathcal A}_{i+1}$.
\end{Prop}
\begin{proof}
We set $\ensuremath{\mathbb{P}}hi:=R_{T_{i+1}^+}$ and $\ensuremath{\mathbb{P}}hi^p(E):=\ensuremath{\mathbb{H}}H^p(\ensuremath{\mathbb{P}}hi(E))$ for $E \in \ensuremath{\mathbb{D}}b(X)$.
We first prove that
$R_{T_{i+1}^+}(\ensuremath{\mathcal A}_i) \subset \ensuremath{\mathcal A}_{i+1}$ by showing that $\ensuremath{\mathbb{P}}hi(\ensuremath{\mathcal F}_i)\subset\ensuremath{\mathcal A}_{i+1}$ and $\ensuremath{\mathbb{P}}hi(\ensuremath{\mathbb{T}}T_i)\in\ensuremath{\mathcal A}_{i+1}[1]$. Then the claim follows for a general $E\in\ensuremath{\mathcal A}_i$ from the short exact sequence
\begin{equation}\label{eqn:canonical short exact sequence}
0\to \ensuremath{\mathbb{H}}H^0(E)\to E\to\ensuremath{\mathbb{H}}H^1(E)[-1]\to 0,
\end{equation}
as $\ensuremath{\mathbb{H}}H^0(E)\in\ensuremath{\mathcal F}_i$ and $\ensuremath{\mathbb{H}}H^1(E)\in\ensuremath{\mathbb{T}}T_i$.
Observe first that for $E \in \ensuremath{\mathbb{P}}P_0(1)$,
$\mathop{\mathrm{Ext}}\nolimits^p(T_{i+1}^+,E)=\mathop{\mathrm{Ext}}\nolimits^p(T_{i+1}^+(K_X),E)=0$ for $p \ne 0,1,2$.
Hence, from the definition of $R_{T_{i+1}^+}$ in \eqref{eqn:weakly spherical reflection}, we have an exact sequence
\begin{equation}
\begin{CD}
0 @>>> \ensuremath{\mathbb{P}}hi^{-1}(E) @>>> \ensuremath{\mathbb{H}}om(T_{i+1}^+,E) \otimes T_{i+1}^+ \oplus
\ensuremath{\mathbb{H}}om(T_{i+1}^+(K_X),E) \otimes T_{i+1}^+ (K_X) @>{\varphi}>> E \\
@>>> \ensuremath{\mathbb{P}}hi^0(E) @>>> \mathop{\mathrm{Ext}}\nolimits^1(T_{i+1}^+,E) \otimes T_{i+1}^+ \oplus
\mathop{\mathrm{Ext}}\nolimits^1(T_{i+1}^+(K_X),E) \otimes T_{i+1}^+ (K_X) @>>> 0
\end{CD},
\end{equation}
and also an isomorphism
\begin{equation}
\ensuremath{\mathbb{P}}hi^1(E) \cong \mathop{\mathrm{Ext}}\nolimits^2(T_{i+1}^+,E) \otimes T_{i+1}^+\oplus
\mathop{\mathrm{Ext}}\nolimits^2(T_{i+1}^+(K_X),E) \otimes T_{i+1}^+ (K_X) \in \ensuremath{\mathbb{T}}T_{i+1}.
\end{equation}
Suppose first that $E \in \ensuremath{\mathcal F}_i$ so that
$\phi_{\max}^+(E) \leq \phi^+(T_{i+1}^+)$. Then it follows that
$\varphi$ is injective (so that $\ensuremath{\mathbb{P}}hi^{-1}(E)=0$) and $\mathop{\mathrm{coker}}\nolimits \varphi \in \ensuremath{\mathcal F}_i$. Thus $\ensuremath{\mathbb{P}}hi^0(E) \in \ensuremath{\mathcal F}_i$, but in fact we can say more. Noting that $\ensuremath{\mathbb{P}}hi(T_{i+1}^+(K_X))=T_{i+1}^+[-1]$ (and similarly that $\ensuremath{\mathbb{P}}hi(T_{i+1}^+)=T_{i+1}^+(K_X)[-1]$), we get
\begin{equation}
\begin{split}
\ensuremath{\mathbb{H}}om(T_{i+1}^+,\ensuremath{\mathbb{P}}hi(E))& =\ensuremath{\mathbb{H}}om(\ensuremath{\mathbb{P}}hi(T_{i+1}^+(K_X))[1],\ensuremath{\mathbb{P}}hi(E))
=\ensuremath{\mathbb{H}}om(T_{i+1}^+(K_X),E[-1])=0,\\
\ensuremath{\mathbb{H}}om(T_{i+1}^+(K_X),\ensuremath{\mathbb{P}}hi(E))&=\ensuremath{\mathbb{H}}om(\ensuremath{\mathbb{P}}hi(T_{i+1}^+)[1],\ensuremath{\mathbb{P}}hi(E))
=\ensuremath{\mathbb{H}}om(T_{i+1}^+,E[-1])=0,
\end{split}
\end{equation}
where the final equality follows from $\phi_{\max}^+(E[-1])=\phi_{\max}^+(E)-1<\phi^+(T_{i+1}^+)=\phi^+(T_{i+1}^+(K_X))$. From the triangle $$\ensuremath{\mathbb{P}}hi^0(E)\to\ensuremath{\mathbb{P}}hi(E)\to\ensuremath{\mathbb{P}}hi^{\geq 1}(E)[-1]\to\ensuremath{\mathbb{P}}hi^0(E)[1]$$ we see that this implies $\ensuremath{\mathbb{H}}om(T_{i+1}^+,\ensuremath{\mathbb{P}}hi^0(E))=\ensuremath{\mathbb{H}}om(T_{i+1}^+(K_X),\ensuremath{\mathbb{P}}hi^0(E))=0$ so that $\ensuremath{\mathbb{P}}hi^0(E) \in \ensuremath{\mathcal F}_{i+1}$. Therefore $\ensuremath{\mathbb{P}}hi(E) \in \ensuremath{\mathcal A}_{i+1}$, as claimed.
Now we assume that $E \in \ensuremath{\mathbb{T}}T_i$, from which it follows
$\mathop{\mathrm{coker}}\nolimits \varphi \in \ensuremath{\mathbb{T}}T_i \subset \ensuremath{\mathbb{T}}T_{i+1}$.
Since $T_{i+1}^+, T_{i+1}^+ (K_X)\in \ensuremath{\mathbb{T}}T_{i+1}$,
we get $\ensuremath{\mathbb{P}}hi^0(E) \in \ensuremath{\mathbb{T}}T_{i+1}$.
By $T_{i+1}^+,T_{i+1}^+(K_X) \in \ensuremath{\mathcal F}_i$,
we get $\ensuremath{\mathbb{P}}hi^{-1}(E) \in \ensuremath{\mathcal F}_i$.
Since
\begin{equation}
\begin{split}
\ensuremath{\mathbb{H}}om(T_{i+1}^+,\ensuremath{\mathbb{P}}hi(E)[-1])& =\ensuremath{\mathbb{H}}om(\ensuremath{\mathbb{P}}hi(T_{i+1}^+(K_X)),\ensuremath{\mathbb{P}}hi(E)[-2])
=\ensuremath{\mathbb{H}}om(T_{i+1}^+(K_X),E[-2])=0,\\
\ensuremath{\mathbb{H}}om(T_{i+1}^+(K_X),\ensuremath{\mathbb{P}}hi(E)[-1])&=\ensuremath{\mathbb{H}}om(\ensuremath{\mathbb{P}}hi(T_{i+1}^+),\ensuremath{\mathbb{P}}hi(E)[-2])
=\ensuremath{\mathbb{H}}om(T_{i+1}^+,E[-2])=0,
\end{split}
\end{equation}
for the same reasons as above, we see from the triangle $$\ensuremath{\mathbb{P}}hi^{-1}(E)\to\ensuremath{\mathbb{P}}hi(E)\to\ensuremath{\mathbb{P}}hi^{\geq 0}(E)\to\ensuremath{\mathbb{P}}hi^{-1}(E)[1]$$ that $\ensuremath{\mathbb{H}}om(T_{i+1}^+,\ensuremath{\mathbb{P}}hi^{-1}(E))=\ensuremath{\mathbb{H}}om(T_{i+1}^+(K_X),\ensuremath{\mathbb{P}}hi^{-1}(E))=0$. Thus we get $\ensuremath{\mathbb{P}}hi^{-1}(E) \in \ensuremath{\mathcal F}_{i+1}$. Finally, by Serre duality we have
\begin{equation*}
\begin{split}
\mathop{\mathrm{Ext}}\nolimits^2(T_{i+1}^+,E)^\vee\cong\ensuremath{\mathbb{H}}om(E,T_{i+1}^+(K_X))=0,\\
\mathop{\mathrm{Ext}}\nolimits^2(T_{i+1}^+(K_X),E)^\vee\cong\ensuremath{\mathbb{H}}om(E,T_{i+1}^+)=0,
\end{split}
\end{equation*} since $E\in\ensuremath{\mathbb{T}}T_i$ implies that $\phi^+_{\mathop{\mathrm{min}}\nolimits}(E)>\phi^+(T_{i+1}^+)=\phi^+(T_{i+1}^+(K_X))$. Therefore, $\ensuremath{\mathbb{P}}hi^1(E)=0$, so $\ensuremath{\mathbb{P}}hi(E) \in \ensuremath{\mathcal A}_{i+1}[1]$, as required.
We next claim that $R_{T_{i+1}^+}^{-1}(\ensuremath{\mathcal A}_{i+1}) \subset \ensuremath{\mathcal A}_i$.
Let $\ensuremath{\mathbb{P}}si$ be the inverse of $\ensuremath{\mathbb{P}}hi$, and set
$\ensuremath{\mathbb{P}}si^p(E):=\ensuremath{\mathbb{H}}H^p(\ensuremath{\mathbb{P}}si(E))$ for any $E\in\ensuremath{\mathbb{D}}b(X)$. Recall that for any $E\in\ensuremath{\mathbb{D}}b(X)$ we have a distinguished triangle
\begin{equation}
\ensuremath{\mathbb{P}}si(E)\to E\to T_{i+1}^+[2]\otimes\ensuremath{\mathbb{R}}Hom(T_{i+1}^+,E)\oplus T_{i+1}^+(K_X)[2]\otimes\ensuremath{\mathbb{R}}Hom(T_{i+1}^+(K_X),E)\to\ensuremath{\mathbb{P}}si(E)[1].
\end{equation}
Then
\begin{equation}\label{eq:Psi-1}
\ensuremath{\mathbb{H}}om(T_{i+1}^+,E(K_X)) \otimes T_{i+1}^+ \oplus
\ensuremath{\mathbb{H}}om(T_{i+1}^+,E) \otimes T_{i+1}^+(K_X) \cong \ensuremath{\mathbb{P}}si^{-1}(E)
\end{equation}
and we have an exact sequence
\begin{equation}\label{eqn:Psi-2}
\begin{CD}
0 @>>> \mathop{\mathrm{Ext}}\nolimits^1(T_{i+1}^+,E(K_X)) \otimes T_{i+1}^+ \oplus
\mathop{\mathrm{Ext}}\nolimits^1(T_{i+1}^+,E) \otimes T_{i+1}^+(K_X) @>>> \ensuremath{\mathbb{P}}si^0(E) @>>>E \\
@>{\psi}>> \mathop{\mathrm{Ext}}\nolimits^2(T_{i+1}^+,E(K_X)) \otimes T_{i+1}^+ \oplus
\mathop{\mathrm{Ext}}\nolimits^2(T_{i+1}^+,E) \otimes T_{i+1}^+(K_X) @>>> \ensuremath{\mathbb{P}}si^1(E) @>>>0.
\end{CD}
\end{equation}
We prove the claim by showing that $\Psi(\ensuremath{\mathcal F}_{i+1})\subset\ensuremath{\mathcal A}_i$ and $\Psi(\ensuremath{\mathcal T}_{i+1})\subset\ensuremath{\mathcal A}_i[1]$, which suffices by considering the exact sequence \eqref{eqn:canonical short exact sequence}.
First assume that
$E \in \ensuremath{\mathcal F}_{i+1}$.
Then $\Psi^{-1}(E)=0$ by \eqref{eq:Psi-1}. From \eqref{eqn:Psi-2}, we get $\Psi^0(E) \in \ensuremath{\mathcal F}_i$ and $\Psi^1(E) \in \ensuremath{\mathcal T}_{i+1}$.
Since
\begin{equation}\label{eq:T_{i+1}}
\begin{split}
\mathop{\mathrm{Hom}}\nolimits(\Psi(E),T_{i+1}^+[p])& =\mathop{\mathrm{Hom}}\nolimits(E,\Phi(T_{i+1}^+)[p])=
\mathop{\mathrm{Hom}}\nolimits(E, T_{i+1}^+(K_X)[p-1])=0,\\
\mathop{\mathrm{Hom}}\nolimits(\Psi(E),T_{i+1}^+(K_X)[p])& =\mathop{\mathrm{Hom}}\nolimits(E,\Phi(T_{i+1}^+(K_X))[p])=
\mathop{\mathrm{Hom}}\nolimits(E, T_{i+1}^+[p-1])=0
\end{split}
\end{equation}
for $p \leq 0$,
$\Psi^1(E) \in \ensuremath{\mathcal T}_i$.
Therefore $\Psi(E) \in \ensuremath{\mathcal A}_i$.
Now assume that $E \in \ensuremath{\mathcal T}_{i+1}$. Then $\Psi^{-1}(E)\in\ensuremath{\mathcal F}_i$ by \eqref{eq:Psi-1}. We must show that $\Psi^0(E)\in\ensuremath{\mathcal T}_i$ and $\Psi^{1}(E)=\mathop{\mathrm{coker}}\nolimits \psi=0$. As a quotient of an object generated by
$T_{i+1}^+$ and $T_{i+1}^+(K_X)$, $\Psi^1(E)=\mathop{\mathrm{coker}}\nolimits \psi \in \ensuremath{\mathcal T}_{i+1}$.
Moreover, for the same reason we note that $\mathop{\mathrm{im}}\nolimits \psi \in \ensuremath{\mathcal T}_{i+1}$ as $E \in \ensuremath{\mathcal T}_{i+1}$. As a subobject of an object generated by
$T_{i+1}^+$ and $T_{i+1}^+(K_X)$, we also have $\mathop{\mathrm{im}}\nolimits \psi \in \ensuremath{\mathcal F}_i$.
Hence $\phi^+(\mathop{\mathrm{im}}\nolimits \psi)=\phi^+(T_{i+1}^+)$. Similarly, $\mathop{\mathrm{coker}}\nolimits \psi$ is a $\sigma_+$-semistable object of phase $\phi^+(\mathop{\mathrm{coker}}\nolimits
\psi)=\phi^+(T_{i+1}^+)$.
It follows that $\mathop{\mathrm{coker}}\nolimits \psi$ and $\mathop{\mathrm{im}}\nolimits \psi$ are direct sums of $T_{i+1}^+$ and
$T_{i+1}^+(K_X)$. Now by using \eqref{eq:T_{i+1}} we get
$$0=\mathop{\mathrm{Hom}}\nolimits(\Psi(E),T_{i+1}^+(D)[-1])=\mathop{\mathrm{Hom}}\nolimits(\Psi^1(E),T_{i+1}^+(D)),$$
for $D=0,K_X$. Thus we have $\Psi^1(E)=\mathop{\mathrm{coker}}\nolimits \psi=0$.
Writing $\ensuremath{\mathcal T}_i(E)$ and $\ensuremath{\mathcal F}_i(E)$ for the components of $E$ in the torsion pair $(\ensuremath{\mathcal T}_i,\ensuremath{\mathcal F}_i)$, it follows from $\mathop{\mathrm{im}}\nolimits\psi\in\ensuremath{\mathcal F}_i$ that $\ensuremath{\mathcal T}_i(E)\subset\ker\psi$, and it is easy to see that then $\ensuremath{\mathcal T}_i(\ker\psi)=\ensuremath{\mathcal T}_i(E)$. Applying $\mathop{\mathrm{Hom}}\nolimits(-,T_{i+1}^+(D))$ $(D=0,K_X)$ to the short exact sequence $$0\to\ensuremath{\mathcal T}_i(E)\to E\to\ensuremath{\mathcal F}_i(E)\to 0,$$ we see that $\hom(E,T_{i+1}^+(D))=\hom(\ensuremath{\mathcal F}_i(E),T_{i+1}^+(D))$ for $D=0,K_X$. As
\begin{equation*}
\begin{split}
\mathop{\mathrm{ext}}\nolimits^2(T_{i+1}^+,E)=\hom(E,T_{i+1}^+(K_X)),\\
\mathop{\mathrm{ext}}\nolimits^2(T_{i+1}^+,E(K_X))=\hom(E,T_{i+1}^+),
\end{split}
\end{equation*} it follows from the short exact sequence \begin{equation}\label{eqn:ses for ker psi}0\to\ker\psi/\ensuremath{\mathcal T}_i(E)\to\ensuremath{\mathcal F}_i(E)\to\mathop{\mathrm{Ext}}\nolimits^2(T_{i+1}^+,E(K_X)) \otimes T_{i+1}^+ \oplus
\mathop{\mathrm{Ext}}\nolimits^2(T_{i+1}^+,E) \otimes T_{i+1}^+(K_X)\to 0\end{equation} that $\mathop{\mathrm{Hom}}\nolimits(\ker\psi/\ensuremath{\mathcal T}_i(E),T_{i+1}^+(D))=0$ by the same reasoning. But this forces $\ker\psi/\ensuremath{\mathcal T}_i(E)=0$. Indeed, as $E\in\ensuremath{\mathcal T}_{i+1}$, we have $\ensuremath{\mathcal F}_i(E)\in\ensuremath{\mathcal F}_i\cap\ensuremath{\mathcal T}_{i+1}$, so $\ensuremath{\mathcal F}_i(E)$ is a direct sum of copies of $T_{i+1}^+$ and $T_{i+1}^+(K_X)$, and thus so is $\ker\psi/\ensuremath{\mathcal T}_i(E)$ from \eqref{eqn:ses for ker psi}. Thus we have $\ker\psi=\ensuremath{\mathcal T}_i(E)\in\ensuremath{\mathcal T}_i$ so that $\Psi^0(E)\in\ensuremath{\mathcal T}_{i+1}$. Then \eqref{eq:T_{i+1}} implies $\Psi^0(E) \in \ensuremath{\mathcal T}_i$. Therefore $\Psi(E)\in \ensuremath{\mathcal A}_i[1]$, as required.
\end{proof}
As $\ensuremath{\mathcal A}_i^*$ deals in parallel with objects in $\ensuremath{\mathcal P}_0(1)$ considered with respect to $\sigma_-$-stability, we have the following result:
\begin{Prop}\label{Prop:equiv2}
$R_{T_{i+1}^-}^{-1}$ induces an equivalence
$\ensuremath{\mathcal A}_i^* \to \ensuremath{\mathcal A}_{i+1}^*$.
\end{Prop}
\begin{proof}
We set $\Phi:=R_{T_{i+1}^-}$ and $\Psi:=R_{T_{i+1}^-}^{-1}$.
We only show that
$\Phi(\ensuremath{\mathcal A}_{i+1}^*) \subset \ensuremath{\mathcal A}_i^*$.
We note that
\begin{equation}\label{eq:Phi2}
\begin{split}
\mathop{\mathrm{Hom}}\nolimits(T_{i+1}^-,\Phi(E)[p])& =\mathop{\mathrm{Hom}}\nolimits(\Phi(T_{i+1}^-(K_X))[1],\Phi(E)[p])
=\mathop{\mathrm{Hom}}\nolimits(T_{i+1}^-(K_X),E[p-1])=0,\\
\mathop{\mathrm{Hom}}\nolimits(T_{i+1}^-(K_X),\Phi(E)[p])&=\mathop{\mathrm{Hom}}\nolimits(\Phi(T_{i+1}^-)[1],\Phi(E)[p])
=\mathop{\mathrm{Hom}}\nolimits(T_{i+1}^-,E[p-1])=0
\end{split}
\end{equation}
for $E \in \ensuremath{\mathcal P}_0(1)$ and $p \leq 0$.
Assume that $E \in \ensuremath{\mathcal F}_{i+1}^*$.
For the morphism
\begin{equation}
\varphi:\mathop{\mathrm{Hom}}\nolimits(T_{i+1}^-,E) \otimes T_{i+1}^- \oplus
\mathop{\mathrm{Hom}}\nolimits(T_{i+1}^-(K_X),E) \otimes T_{i+1}^- (K_X)
\to E,
\end{equation}
$\ker\varphi$ and $\mathop{\mathrm{im}}\nolimits \varphi$ are generated by
$T_{i+1}^-,T_{i+1}^- (K_X)$.
By \eqref{eq:Phi2}, $\Phi^{-1}(E)=0$ and $\Phi^0(E) \in \ensuremath{\mathcal F}_i^*$.
Since $\Phi^1(E)$ is generated by
$T_{i+1}^-,T_{i+1}^- (K_X)$,
$\Phi^1(E) \in \ensuremath{\mathcal T}_i^*$.
Therefore $\Phi(E[1]) \in \ensuremath{\mathcal A}_i^*$.
Assume that $E \in \ensuremath{\mathcal T}_{i+1}^*$.
Then $\Phi^1(E)=0$.
Since $\Phi^{-1}(E) \in \ensuremath{\mathcal F}_{i+1}^*$, \eqref{eq:Phi2} implies
$\Phi^{-1}(E) \in \ensuremath{\mathcal F}_i^*$.
Since $\mathop{\mathrm{coker}}\nolimits \varphi, T_{i+1}^-, T_{i+1}^- (K_X) \in \ensuremath{\mathcal T}_i^*$,
$\Phi^0(E) \in \ensuremath{\mathcal T}_i^*$.
Therefore $\Phi(E) \in \ensuremath{\mathcal A}_i^*$.
\end{proof}
For negative $i$, we must make the following definition.
\begin{Def}
Assume that $i \leq 0$.
\begin{enumerate}
\item[(1)]
Let $(\ensuremath{\mathcal T}_i^*,\ensuremath{\mathcal F}_i^*)$ be the torsion pair of $\ensuremath{\mathcal P}_0(1)$ such that
\begin{enumerate}
\item
$\ensuremath{\mathcal T}_i^*$ is generated by $\sigma_+$-stable objects $E$ with
$\phi^+(E) \geq \phi^+(T_i^+)$.
\item
$\ensuremath{\mathcal F}_i^*:=\langle T_0^+,T_0^+ (K_X),\dots,T_{i+1}^+, T_{i+1}^+ (K_X) \rangle$.
\end{enumerate}
Let $\ensuremath{\mathcal A}_i^*=\langle \ensuremath{\mathcal T}_i^*,\ensuremath{\mathcal F}_i^*[1] \rangle$ be the tilting.
\item[(2)]
Let $(\ensuremath{\mathcal T}_i,\ensuremath{\mathcal F}_i)$ be a torsion pair of $\ensuremath{\mathcal P}_0(1)$ such that
\begin{enumerate}
\item
$\ensuremath{\mathcal T}_i:=\langle T_0^-,T_0^- (K_X),\dots,T_{i+1}^-, T_{i+1}^- (K_X) \rangle$.
\item
$\ensuremath{\mathcal F}_i$ is generated by $\sigma_-$-stable objects $E$ with
$\phi^-(E) \leq \phi^-(T_i^-)$.
\end{enumerate}
Let $\ensuremath{\mathcal A}_i=\langle \ensuremath{\mathcal T}_i[-1],\ensuremath{\mathcal F}_i\rangle$ be the tilting.
\end{enumerate}
\end{Def}
Since $\ensuremath{\mathcal F}_0^*=\ensuremath{\mathcal T}_0=0$,
we have $\ensuremath{\mathcal A}_0^*=\ensuremath{\mathcal A}_0=\ensuremath{\mathcal P}_0(1)$.
We also have a similar result to Remark \ref{rem:simple-objects}.
Moreover, we also have the following claims, whose proofs are similar to those of Proposition \ref{Prop:equiv1} and Proposition \ref{Prop:equiv2}.
\begin{Prop}\label{Prop:equiv3}
Assume that $i \leq 0$.
\begin{enumerate}
\item
We have an equivalence
$R_{T_i^+}^{-1}:\ensuremath{\mathcal A}_i^* \to \ensuremath{\mathcal A}_{i-1}^*$.
\item
We have an equivalence
$R_{T_i^-}:\ensuremath{\mathcal A}_i \to \ensuremath{\mathcal A}_{i-1}$.
\end{enumerate}
\end{Prop}
\subsubsection{Preservation of stability}
Having defined the abelian categories $\ensuremath{\mathcal A}_i$ and $\ensuremath{\mathcal A}_i^*$, we will relate a certain stability on them to $\sigma_\pm$-stability.
\begin{Ex}\label{ex:BB}
Recall Definition \ref{defn:B'} and Proposition \ref{Prop:BB}, and let $\ensuremath{\mathcal B}=\ensuremath{\mathcal P}_0(1)$.
\begin{enumerate}
\item[(1)]
We take an orientation preserving injective homomorphism $Z:\ensuremath{\mathcal H} \to \ensuremath{\mathbb{C}}$ such that
$$Z(\mathbf{w}_0),Z(\mathbf{w}_1) \in \ensuremath{\mathbb{H}} \cup \ensuremath{\mathbb{R}}_{<0},\mbox{ and }Z(\mathbf{w}_1)/Z(\mathbf{w}_0) \in \ensuremath{\mathbb{H}}.$$
The second condition means that $\phi_Z(\mathbf{w}_1)>\phi_Z(\mathbf{w}_0)$. Then $E \in \ensuremath{\mathcal P}_0(1)$ is $\sigma_+$-semistable if and only if $E$ is $Z$-semistable. In this case, $\ensuremath{\mathcal A}_i$ $(i \geq 0)$ is an example of $\ensuremath{\mathcal B}'$ for some $\theta \geq 0$, and $\ensuremath{\mathcal A}_i^*$ $(i \leq 0)$ is an example of $\ensuremath{\mathcal B}''$ for some $\theta \leq 0$.
\item[(2)]
We take an orientation reversing injective homomorphism $Z:\ensuremath{\mathcal H} \to \ensuremath{\mathbb{C}}$ such that
$$Z(\mathbf{w}_0),Z(\mathbf{w}_1) \in \ensuremath{\mathbb{H}} \cup \ensuremath{\mathbb{R}}_{<0},\mbox{ and }Z(\mathbf{w}_0)/Z(\mathbf{w}_1) \in \ensuremath{\mathbb{H}}.$$
Then $E \in \ensuremath{\mathcal P}_0(1)$ is $\sigma_-$-semistable if and only if $E$ is $Z$-semistable. In this case, $\ensuremath{\mathcal A}_i$ $(i \leq 0)$ is an example of $\ensuremath{\mathcal B}'$, and $\ensuremath{\mathcal A}_i^*$ $(i \geq 0)$ is an example of $\ensuremath{\mathcal B}''$.
\end{enumerate}
\end{Ex}
Now we can finally prove a sequence of comparison results that allow us to reduce our analysis to the case of minimal Mukai vectors.
\begin{Prop}\label{Prop:isom-pm}
\begin{enumerate}
\item
\begin{enumerate}
\item
$R_{T_1^+}:\ensuremath{\mathcal A}_0 \to \ensuremath{\mathcal A}_1$ induces an isomorphism
$M_{\sigma_-}(\mathbf{v}) \to M_{\sigma_+}(\mathbf{v}')$, where $\mathbf{v}'=R_{\mathbf{w}_1}(\mathbf{v})$.
\item
$R_{T_1^-}^{-1}:\ensuremath{\mathcal A}_0^* \to \ensuremath{\mathcal A}_1^*$ induces an isomorphism
$M_{\sigma_+}(\mathbf{v}) \to M_{\sigma_-}(\mathbf{v}')$, where $\mathbf{v}'=R_{\mathbf{w}_1}^{-1}(\mathbf{v})$.
\end{enumerate}
\item
\begin{enumerate}
\item
$R_{T_0^-}:\ensuremath{\mathcal A}_0 \to \ensuremath{\mathcal A}_{-1}$ induces an isomorphism
$M_{\sigma_+}(\mathbf{v}) \to M_{\sigma_-}(\mathbf{v}')$, where $\mathbf{v}'=R_{\mathbf{w}_0}(\mathbf{v})$.
\item
$R_{T_0^+}^{-1}:\ensuremath{\mathcal A}_0^* \to \ensuremath{\mathcal A}_{-1}^*$ induces an isomorphism
$M_{\sigma_-}(\mathbf{v}) \to M_{\sigma_+}(\mathbf{v}')$, where $\mathbf{v}'=R_{\mathbf{w}_0}^{-1}(\mathbf{v})$.
\end{enumerate}
\end{enumerate}
\end{Prop}
\begin{proof}
By Example \ref{ex:BB}, we can apply Proposition \ref{Prop:BB}
to compare the stabilities on $\ensuremath{\mathcal A}_i$ and $\ensuremath{\mathcal A}_i^*$ to
$\sigma_\pm$-stability on $\ensuremath{\mathcal A}_0=\ensuremath{\mathcal A}_0^*$.
Since the orientation of $\ensuremath{\mathcal H}$ is reversed under the reflection,
the claims follow from Proposition \ref{Prop:BB} and
Propositions \ref{Prop:equiv1},
\ref{Prop:equiv2}, \ref{Prop:equiv3}.
\end{proof}
This result has the following significant corollary:
\begin{Cor}\label{Cor:OrthogonalIsomorphism 1}
If $\langle \mathbf{v},\mathbf{w}_1 \rangle=0$,
then $R_{T_1}$ induces an isomorphism
$M_{\sigma_-}(\mathbf{v}) \to M_{\sigma_+}(\mathbf{v})$.
If $\langle \mathbf{v},\mathbf{w}_0 \rangle=0$,
then $R_{T_0}$ induces an isomorphism
$M_{\sigma_+}(\mathbf{v})\to M_{\sigma_-}(\mathbf{v})$.
\end{Cor}
\begin{proof}
We apply Proposition \ref{Prop:isom-pm} and observe that for $i=0,1$ we get $R_{\mathbf{w}_i}(\mathbf{v})=\mathbf{v}+2\langle\mathbf{v},\mathbf{w}_i\rangle\mathbf{w}_i=\mathbf{v}$.
\end{proof}
In order to use the sequence of weakly spherical reflections to reduce any $\mathbf{v}$ to the minimal Mukai vector in its orbit, as in \eqref{eqn:OrbitOfv0}, we consider the next step in the program beyond that in Proposition \ref{Prop:isom-pm}.
\begin{Prop}\label{Prop:isom-2}
\begin{enumerate}
\item\label{enum:iPositive}
Assume that $i>0$.
\begin{enumerate}
\item
$R_{T_{i+1}^+} \circ R_{T_i^+}:\ensuremath{\mathcal A}_{i-1} \to \ensuremath{\mathcal A}_{i+1}$
induces an isomorphism
$M_{\sigma_+}(\mathbf{v}) \to M_{\sigma_+}(\mathbf{v}')$, where
$\mathbf{v}'=R_{\mathbf{w}_{i+1}} \circ R_{\mathbf{w}_i}(\mathbf{v})$.
\item
$R_{T_{i+1}^-}^{-1} \circ R_{T_i^-}^{-1}:\ensuremath{\mathcal A}_{i-1}^* \to \ensuremath{\mathcal A}_{i+1}^*$
induces an isomorphism
$M_{\sigma_-}(\mathbf{v}) \to M_{\sigma_-}(\mathbf{v}')$, where
$\mathbf{v}'=R_{\mathbf{w}_{i+1}}^{-1} \circ R_{\mathbf{w}_i}^{-1}(\mathbf{v})$.
\end{enumerate}
\item\label{enum:iNegative}
Assume that $i < 0$.
\begin{enumerate}
\item
$R_{T_{i}^-} \circ R_{T_{i+1}^-}:\ensuremath{\mathcal A}_{i+1} \to \ensuremath{\mathcal A}_{i-1}$
induces an isomorphism
$M_{\sigma_-}(\mathbf{v}) \to M_{\sigma_-}(\mathbf{v}')$, where
$\mathbf{v}'=R_{\mathbf{w}_{i}} \circ R_{\mathbf{w}_{i+1}}(\mathbf{v})$.
\item
$R_{T_{i}^+}^{-1} \circ R_{T_{i+1}^+}^{-1}:\ensuremath{\mathcal A}_{i+1}^* \to \ensuremath{\mathcal A}_{i-1}^*$
induces an isomorphism
$M_{\sigma_+}(\mathbf{v}) \to M_{\sigma_+}(\mathbf{v}')$, where
$\mathbf{v}'=R_{\mathbf{w}_{i}}^{-1} \circ R_{\mathbf{w}_{i+1}}^{-1}(\mathbf{v})$.
\end{enumerate}
\end{enumerate}
\end{Prop}
\begin{proof}
\ref{enum:iPositive}
Since $R_{T_{i+1}^+} \circ R_{T_i^+}$ and
$R_{T_{i+1}^-}^{-1} \circ R_{T_i^-}^{-1}$
preserve the orientation,
we get the claims by a similar argument as in Proposition \ref{Prop:isom-pm}.
The proof of \ref{enum:iNegative} is similar.
\end{proof}
It turns out that the composition of two consecutive weakly spherical reflections as in the previous proposition is independent of $i$, which is the content of the following lemma.
\begin{Lem}
\begin{enumerate}
\item\label{enum:ReflectionsOfExceptionals}
$R_{T_i^+}(T_{i-1}^+) =
\begin{cases}
T_{i+1}^+[1],T_{i+1}^+(K_X)[1], & i \ne 0,1,\\
T_{i+1}^+, T_{i+1}^+(K_X), & i =0,1.
\end{cases}$
\item\label{enum:CompositionConsecutiveReflections}
We have $R_{T_i^+} \circ R_{T_{i-1}^+}=R_{T_{i+1}^+} \circ R_{T_i^+}$. In particular,
$R_{T_{i+1}^+} \circ R_{T_i^+} =R_{T_1} \circ R_{T_0}$ for all $i$.
\end{enumerate}
\end{Lem}
\begin{proof}
\ref{enum:ReflectionsOfExceptionals}
Assume that $i \geq 2$.
By Proposition \ref{Prop:isom-2} \ref{enum:iPositive},
$R_{T_{i}^+}\circ R_{T_{i-1}^+}:\ensuremath{\mathcal A}_{i-2} \to \ensuremath{\mathcal A}_{i}$
induces an isomorphism
$$
M_{\sigma_+}(\mathbf{w}_{i-1}) \to M_{\sigma_+}(\mathbf{w}_{i+1}).
$$
Hence $R_{T_{i}^+}\circ R_{T_{i-1}^+}(T_{i-1}^+(K_X))
=R_{T_{i}^+}(T_{i-1}^+[-1])$
is a $\sigma_+$-stable object with Mukai vector $\mathbf{w}_{i+1}$.
Then we get
$R_{T_{i}^+}(T_{i-1}^+[-1])=T_{i+1}^+,T_{i+1}^+(K_X)$.
If $i=0,1$, then Proposition \ref{Prop:isom-pm}
implies $R_{T_i^+}(T_{i-1}^+)=T_{i+1}^+,T_{i+1}^+(K_X)$.
Assume that $i<0$.
Then Proposition \ref{Prop:isom-2} \ref{enum:iNegative} implies
$R_{T_{i}^+}\circ R_{T_{i-1}^+}:\ensuremath{\mathcal A}_{i-2}^* \to \ensuremath{\mathcal A}_{i}^*$
induces an isomorphism
$$
M_{\sigma_+}(-\mathbf{w}_{i-1}) \to M_{\sigma_+}(-\mathbf{w}_{i+1}).
$$
Hence we get
$R_{T_{i}^+}(T_{i-1}^+)=T_{i+1}^+[1],T_{i+1}^+(K_X)[1]$.
\ref{enum:CompositionConsecutiveReflections}
Since $R_{T_i^+} \circ R_{T_{i-1}^+} \circ R_{T_i^+}^{-1}=
R_{R_{T_i^+}(T_{i-1}^+)}$ by \cite[Lemma 8.21]{HL10}, the claim follows from \ref{enum:ReflectionsOfExceptionals}.
\end{proof}
In the same way, we also see that
$R_{T_i^-} \circ R_{T_{i+1}^-} =R_{T_0} \circ R_{T_1}$ for all $i$. This leads us to make the following definition.
\begin{Def}
We set $R_+:=R_{T_1} \circ R_{T_0}$ and
$R_-:=R_{T_0} \circ R_{T_1}$.
\end{Def}
We have finally studied the action of $G_{\ensuremath{\mathcal H}}$ enough to prove our main reduction result.
\begin{Prop}\label{Prop:NonMinimalIsomorphism} Let $\mathbf{v}_n\in\ensuremath{\mathcal C}_n$ be defined as in \eqref{eqn:OrbitOfv0}. That is, $\mathbf{v}_n$ is in the orbit of $\mathbf{v}_0\in\ensuremath{\mathcal C}_0$.
\begin{enumerate}
\item\label{enum:NonMinimalIsomorphism n even}
If $n$ is even, then $R_+^{\frac{n}{2}} \circ R_-^{\frac{n}{2}}$ induces a birational map
\begin{equation}
M_{\sigma_-}(\mathbf{v}_n) \cong M_{\sigma_-}(\mathbf{v}_0) \dashrightarrow M_{\sigma_+}(\mathbf{v}_0)
\cong M_{\sigma_+}(\mathbf{v}_n),
\end{equation}
which is an isomorphism in codimension one.
\item\label{enum:NonMinimalIsomorphism n odd}
If $n$ is odd, then $R_+^{\frac{n-1}{2}} \circ R_{T_1} \circ R_{T_1} \circ R_-^{\frac{n-1}{2}}$ induces a birational map
\begin{equation}
M_{\sigma_-}(\mathbf{v}_n) \cong M_{\sigma_-}(\mathbf{v}_1) \cong M_{\sigma_+}(\mathbf{v}_0)
\dashrightarrow M_{\sigma_-}(\mathbf{v}_0) \cong M_{\sigma_+}(\mathbf{v}_1)
\cong M_{\sigma_+}(\mathbf{v}_n),
\end{equation}
which is an isomorphism in codimension one.
\end{enumerate}
\end{Prop}
\begin{proof}
\ref{enum:NonMinimalIsomorphism n even}
Using $n/2$ applications of Proposition \ref{Prop:isom-2}, we get that $R_-^{\frac{n}{2}}$ induces an isomorphism $M_{\sigma_-}(\mathbf{v}_n)\mor[\sim] M_{\sigma_-}(\mathbf{v}_0)$, and by Lemma \ref{Lem:non-isotropic no totally semistable wall} the open subset of $\sigma_0$-stable objects $M_{\sigma_0}^s(\mathbf{v}_0)$ provides a birational map $$M_{\sigma_-}(\mathbf{v}_0)\dashrightarrow M_{\sigma_+}(\mathbf{v}_0).$$ Moreover, the complement of $M_{\sigma_0}^s(\mathbf{v}_0)$ in $M_{\sigma_\pm}(\mathbf{v}_0)$ has codimension at least 2. Using Proposition \ref{Prop:isom-2} again, we get that $R_+^{\frac{n}{2}}$ gives an isomorphism $M_{\sigma_+}(\mathbf{v}_0)\mor[\sim] M_{\sigma_+}(\mathbf{v}_n)$, which gives the result.
The proof of \ref{enum:NonMinimalIsomorphism n odd} follows similarly by using Proposition \ref{Prop:isom-pm} twice in the middle.
\end{proof}
The complementary result for $\mathbf{v}$ on the boundary of some $\ensuremath{\mathcal C}_n$, which generalizes Corollary \ref{Cor:OrthogonalIsomorphism 1}, is the following.
\begin{Prop}\label{Prop:OrthgonalIsomorphism 2}
Suppose that $\mathbf{v}\in C_\ensuremath{\mathcal W}$ satisfies $\langle\mathbf{v},\mathbf{w}_n\rangle=0$ for some $n$. Then $M_{\sigma_+}(\mathbf{v})\cong M_{\sigma_-}(\mathbf{v})$.
\end{Prop}
\begin{proof}
First let us suppose that $n$ is odd. Then we take $\mathbf{v}_1 =R_-^{\frac{n-1}{2}}(\mathbf{v})$, which satisfies $\langle\mathbf{v}_1,\mathbf{w}_1\rangle=0$.
In this case, by Propositions \ref{Prop:isom-pm} and \ref{Prop:isom-2}, the composition
$R_+^{\frac{n-1}{2}} \circ R_{T_1^+}
\circ R_-^{\frac{n-1}{2}}$ induces
an isomorphism
\begin{equation}\label{eq:FM-birat3}
M_{\sigma_-}(\mathbf{v}) \cong M_{\sigma_-}(\mathbf{v}_1) \cong M_{\sigma_+}(\mathbf{v}_1)
\cong M_{\sigma_+}(\mathbf{v}).
\end{equation}
If instead $n$ is even, then we take $\mathbf{v}_0=R_-^{\frac{n}{2}}(\mathbf{v})$, which satisfies $\langle\mathbf{v}_0,\mathbf{w}_0\rangle=0$. In this case, $R_+^{\frac{n}{2}} \circ R_{T_0^+}
\circ R_-^{\frac{n}{2}}$ induces
an isomorphism
\begin{equation}\label{eq:FM-birat4}
M_{\sigma_-}(\mathbf{v}) \cong M_{\sigma_-}(\mathbf{v}_0) \cong M_{\sigma_+}(\mathbf{v}_0)
\cong M_{\sigma_+}(\mathbf{v}).
\end{equation}
\end{proof}
We can conclude from Propositions \ref{Prop:NonMinimalIsomorphism} and \ref{Prop:OrthgonalIsomorphism 2} that, in the case of a non-isotropic wall of type \ref{enum:TwoExceptional}, the two moduli spaces $M_{\sigma_+}(\mathbf{v})$ and $M_{\sigma_-}(\mathbf{v})$ are birational. As identical statements to these propositions hold for non-isotropic walls of type \ref{enum:TwoSpherical} (see \cite[Prop. 6.8 and Lem. 7.5]{BM14b}), the same conclusion holds in that case. Now we move on to the final non-isotropic case we need to deal with.
\subsection{\cref{enum:OneExceptionalOneSpherical}: Exactly one $\sigma_0$-stable spherical and exceptional object, respectively, up to $-\otimes\ensuremath{\mathcal O}_X(K_X)$}\label{subsec:OneSphericalOneExceptional}
We shall briefly explain the case where $\ensuremath{\mathcal H}$ contains
effective exceptional and spherical vectors. Denote the Mukai vector of the unique $\sigma_0$-stable exceptional object (up to $-\otimes\ensuremath{\mathcal O}_X(K_X)$) by $\mathbf{w}_0$, and complete it to a basis of $\ensuremath{\mathcal H}$ such that $\ensuremath{\mathcal H}=\ensuremath{\mathbb{Z}}\mathbf{w}_0 +\ensuremath{\mathbb{Z}}\mathbf{z}$, where $\mathbf{z}$ satisfies $\langle \mathbf{w}_0,\mathbf{z} \rangle=0$ and $D:=\mathbf{z}^2>0$.
Furthermore, we know there is an effective $\mathbf{w} \in C_\ensuremath{\mathcal W}\cap \ensuremath{\mathcal H}$ with $\mathbf{w}^2=-2$. Then $x^2-D y^2=2$ has an integral solution, which implies $\sqrt{D}$ is irrational. Let $(s,t)$ be a solution of $x^2-D y^2=2$, and set $\alpha:=s+t \sqrt{D}$. Then $-\alpha^2/2=(1-s^2)-s t \sqrt{D}$, and $(x,y)=(1-s^2,-s t)$ is a solution of $x^2-y^2D=1$. Let $(x_1,y_1)$ be a minimal solution of $x^2-y^2D=1$ such that $x_1<0$ and $y_1>0$, and set $\beta:=x_1+y_1 \sqrt{D}$. Then $-1<\beta<0$ (see \cite[Thm. 11.3.1]{AW04}) and $\alpha^2/2=\pm\beta^n$ for some $n$.
We claim that $n$ is odd. If not, then $u+v\sqrt{D}:=\alpha/\beta^{\frac{n}{2}}$
satisfies $(u+v\sqrt{D})^2=2$. But then $\sqrt{D}=\frac{2-u^2-Dv^2}{2uv}$, which contradicts the irrationality of $\sqrt{D}$. Hence $n$ is odd and $\alpha^2/2=-\beta^n$. Set $n=2k+1$. Then $s_1+t_1 \sqrt{D}=\alpha/\beta^{k}$ satisfies $(s_1+t_1 \sqrt{D})^2=-2\beta$ and $s_1^2-t_1^2 D=2$. So replacing $(s,t)$ by $(s_1,t_1)$ (and possibly taking a conjugate), we may assume that $\alpha:=s_1 +t_1 \sqrt{D}$ satisfies $N(\alpha)=2$, $\beta=-\alpha^2/2$, and $s_1<0$ and $t_1>0$. Moreover, the same argument applied to any other solution $\gamma$ of the equation $x^2-Dy^2=2$ shows that $\gamma=\pm\frac{\alpha^{2n+1}}{2^n}$, where $\gamma^2/2=-\beta^{2n+1}$.
We define $(s_n,t_n) \in \ensuremath{\mathbb{Z}} \times \ensuremath{\mathbb{Z}}$ by
\begin{equation}
\begin{split}
s_{2n}+t_{2n} \sqrt{D}:=& (-1)^{n+1}\beta^n=-\alpha^{2n}/2^n,\; (n \geq 1)\\
s_{2n-1}+t_{2n-1}\sqrt{D}:=& \alpha^{2n-1}/2^{n-1},\; (n \geq 1)\\
s_0+t_0\sqrt{D}:=& 1,\\
s_{-2n}+t_{-2n} \sqrt{D}:=&-s_{2n}+t_{2n}\sqrt{D}=(-\beta)^{-n},\; (n \geq 1)\\
s_{-2n+1}+t_{-2n+1} \sqrt{D}:=& -s_{2n-1}+t_{2n-1}\sqrt{D}
=2^{n} (-\alpha)^{-(2n-1)},\; (n \geq 1).\\
\end{split}
\end{equation}
For $n \in \ensuremath{\mathbb{Z}}$, we set $\mathbf{w}_n:=s_n \mathbf{w}_0+t_n \mathbf{z}$.
Then
$$
\mathbf{w}_n^2=\begin{cases}
-1,& 2 \mid n,\\
-2, & 2 \nmid n.
\end{cases}
$$
It is easy to see that
\begin{equation}\label{eqn:SphericalAndExceptionalVectorsReflections}
\begin{split}
\mathbf{w}_{n+1}=& -R_{\mathbf{w}_n}(\mathbf{w}_{n-1}),\;(n \geq 2)\\
\mathbf{w}_{n-1}=& -R_{\mathbf{w}_n}(\mathbf{w}_{n+1}),\;(n \leq -1)\\
\mathbf{w}_2=& R_{\mathbf{w}_1}(\mathbf{w}_0),\;\mathbf{w}_{-1}=R_{\mathbf{w}_0}(\mathbf{w}_1),
\end{split}
\end{equation}
where $R_{\mathbf{w}_n}$ is, by abuse of notation, the action on $H^*_{\mathrm{alg}}(X,\ensuremath{\mathbb{Z}})$ induced by the spherical/weakly spherical reflection through $\mathbf{w}_n$. To be concrete, we have
\begin{equation*}
\begin{split}
R_{\mathbf{w}_n}(\mathbf{u})&=\mathbf{u}+2\langle\mathbf{u},\mathbf{w}_n\rangle\mathbf{w}_n,\;2\mid n,\\
R_{\mathbf{w}_n}(\mathbf{u})&=\mathbf{u}+\langle\mathbf{u},\mathbf{w}_n\rangle\mathbf{w}_n,\;2\nmid n.
\end{split}
\end{equation*}
From this description it is clear that
\begin{equation}\label{eqn:RootsInH-1}
\Delta(X)\cap\ensuremath{\mathcal H}\subseteq\Set{\pm \mathbf{w}_n\ | \ n\in \ensuremath{\mathbb{Z}} },
\end{equation}
where $\Delta(X)$ was defined in \eqref{eqn:DefOfEnriquesRoots}. By the assumption on $\ensuremath{\mathcal H}$ and $\ensuremath{\mathcal W}$, there exists a $\sigma_0$-stable spherical object $T$, so we must have $\mathbf{v}(T)=\pm\mathbf{w}_{2n+1}$ for some $n\in\ensuremath{\mathbb{Z}}$. In particular, $c_1(\mathbf{w}_{2n+1})\equiv Z\pmod 2$, where $Z$ is a nodal cycle by Theorem \ref{Thm:exist:nodal}. From \eqref{eqn:SphericalAndExceptionalVectorsReflections}, we see that
\begin{equation}\label{eqn:RootsAreSpherical}
c_1(\mathbf{w}_{2j+1})\equiv c_1(\mathbf{w}_1)\pmod 2,\mbox{ for all }j\in\ensuremath{\mathbb{Z}},
\end{equation} so the inclusion in \eqref{eqn:RootsInH-1} is an equality:
\begin{equation}\label{eqn:RootsInH-2}
\Delta(X)\cap\ensuremath{\mathcal H}=\Set{\pm\mathbf{w}_n\ |\ n\in\ensuremath{\mathbb{Z}}}.
\end{equation}
As in \cref{subsec:TwoExceptional}, it is not difficult to see that
$$
\Set{\mathbf{u} \in \ensuremath{\mathcal H} \ | \ \frac{Z_{\sigma_0}(\mathbf{u})}{Z_{\sigma_0}(\mathbf{v})}>0,\mathbf{u}^2\geq -2 } \subset
\ensuremath{\mathbb{Q}}_{\geq 0}\mathbf{w}_0+\ensuremath{\mathbb{Q}}_{\geq 0}\mathbf{w}_1,
$$
and thus $C_\ensuremath{\mathcal W}=\ensuremath{\mathbb{R}}_{>0}\mathbf{w}_0+\ensuremath{\mathbb{R}}_{>0}\mathbf{w}_1$. It follows from this and \eqref{eqn:RootsInH-2} that there are $\sigma_0$-stable objects, $T_0$ and $T_1$, of classes $\mathbf{w}_0$ and $\mathbf{w}_1$, respectively. Moreover, as we then have $$\Delta(X)\cap C_\ensuremath{\mathcal W}=\Set{\mathbf{w}_n \ |\ n \in \ensuremath{\mathbb{Z}} },$$ it follows from \eqref{eqn:RootsAreSpherical} and Theorem \ref{Thm:exist:nodal} that there are $\sigma_\pm$-stable objects $T_n^\pm$ with
$\mathbf{v}(T_n^\pm)=\mathbf{w}_n$. Note that $T_i^+=T_i^-=T_i$ for $i=0,1$.
Let $\ensuremath{\mathcal C}_n \subset C_\ensuremath{\mathcal W}$ be the region between
$\mathbf{w}_n^\perp$ and $\mathbf{w}_{n+1}^\perp$ as in \cref{subsec:TwoExceptional}.
Then $\ensuremath{\mathcal C}_0$ is again the fundamental domain of the Weyl group $G_{\ensuremath{\mathcal H}}$ associated to
$\Delta(X)\cap\ensuremath{\mathcal H}$. Up to reordering, we may assume that
\begin{equation}
\phi^+(T_1^+) > \phi^+(T_2^+)>\cdots>\phi^+(E)>
\cdots >\phi^+(T_{-1}^+)>\phi^+(T_0^+)
\end{equation}
for any $\sigma_+$-stable object $E$ with
$\mathbf{v}(E)^2 \geq 0$
and
\begin{equation}
\phi^-(T_1^-) < \phi^-(T_2^-)<\cdots<\phi^-(E)<
\cdots <\phi^-(T_{-1}^-)<\phi^-(T_0^-)
\end{equation}
for any $\sigma_-$-stable object $E$ with
$\mathbf{v}(E)^2 \geq 0$.
We note that $T_n^\pm (K_X)=T_n^\pm$ if $n$ is odd and
$T_n^\pm (K_X) \not \cong T_n^\pm$ if $n$ is even.
As in \cref{subsec:TwoExceptional}, we see that
\begin{equation}
R_+:=R_{T_1} \circ R_{T_0} =R_{T_{i+1}^+} \circ R_{T_i^+}
\end{equation}
is an equivalence which preserves $\sigma_+$-semistability
and
\begin{equation}
R_-:=R_{T_0} \circ R_{T_1}=R_{T_{i-1}^-} \circ R_{T_i^-}
\end{equation}
is an equivalence which preserves $\sigma_-$-semistability. Here, $R_{T_0}$ and $R_{T_1}$ are the weakly-spherical and spherical reflections associated to the exceptional and spherical objects $T_0$ and $T_1$, respectively. Then we have the following results.
\begin{Prop}\label{Prop:CompositionSphericalExceptional}
Let $\mathbf{v}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathcal H}$.
\begin{enumerate}
\item Suppose that $\mathbf{v}\in\ensuremath{\mathcal C}_n$.
\begin{enumerate}
\item
If $n$ is even, then $R_+^{\frac{n}{2}} \circ R_-^{\frac{n}{2}}$ induces a birational map
\begin{equation}
M_{\sigma_-}(\mathbf{v}) \cong M_{\sigma_-}(\mathbf{v}_0)\dashrightarrow M_{\sigma_+}(\mathbf{v}_0)
\cong M_{\sigma_+}(\mathbf{v}),
\end{equation}
which is an isomorphism in codimension one, where $\mathbf{v}_0=R_-^{\frac{n}{2}}(\mathbf{v})$.
\item
If $n$ is odd, then $R_+^{\frac{n-1}{2}} \circ R_{T_1}^2 \circ R_-^{\frac{n-1}{2}}$ induces a birational map
\begin{equation}
M_{\sigma_-}(\mathbf{v}) \cong M_{\sigma_-}(\mathbf{v}_1) \cong M_{\sigma_+}(\mathbf{v}_0)
\dashrightarrow M_{\sigma_-}(\mathbf{v}_0) \cong M_{\sigma_+}(\mathbf{v}_1)
\cong M_{\sigma_+}(\mathbf{v}),
\end{equation}
which is an isomorphism in codimension one, where $\mathbf{v}_1=R_-^{\frac{n-1}{2}}(\mathbf{v})$ and $\mathbf{v}_0=R_{T_1}(R_-^{\frac{n-1}{2}}(\mathbf{v}))$.
\end{enumerate}
\item
Suppose that $\langle \mathbf{v}, \mathbf{w}_n \rangle=0$.
\begin{enumerate}
\item
If $n$ is even, then $R_+^{\frac{n}{2}} \circ R_{T_0^+}
\circ R_-^{\frac{n}{2}}$ induces
an isomorphism
\begin{equation}
M_{\sigma_-}(\mathbf{v}) \cong M_{\sigma_-}(\mathbf{v}_0) \cong M_{\sigma_+}(\mathbf{v}_0)
\cong M_{\sigma_+}(\mathbf{v}),
\end{equation}
where $\mathbf{v}_0=R_-^{\frac{n}{2}}(\mathbf{v})$.
\item
If $n$ is odd, then $R_+^{\frac{n-1}{2}} \circ R_{T_1^+}\circ R_-^{\frac{n-1}{2}}$ induces an isomorphism
\begin{equation}
M_{\sigma_-}(\mathbf{v}) \cong M_{\sigma_-}(\mathbf{v}_1) \cong M_{\sigma_+}(\mathbf{v}_1)
\cong M_{\sigma_+}(\mathbf{v}),
\end{equation}
where $\mathbf{v}_1=R_-^{\frac{n-1}{2}}(\mathbf{v})$.
\end{enumerate}
\end{enumerate}
\end{Prop}
Proposition \ref{Prop:CompositionSphericalExceptional} is proven by defining $\ensuremath{\mathcal A}_i$ and $\ensuremath{\mathcal A}_i^*$ analogously to \cref{subsec:TwoExceptional} and by showing that $R_{T_i^{\pm}}$ induces isomorphisms on moduli with identical proofs, except for the minor adjustments when $i$ is odd so that $T_i^{\pm}$ is spherical. To avoid this word-for-word repetition, we omit these proofs. Note that the birational map $M_{\sigma_+}(\mathbf{v}_0)\dashrightarrow M_{\sigma_-}(\mathbf{v}_0)$ follows again from Lemma \ref{Lem:non-isotropic no totally semistable wall}.
We have seen in this section that, regardless of its type, a non-isotropic wall $\ensuremath{\mathcal W}$ induces a birational map $M_{\sigma_+}(\mathbf{v})\dashrightarrow M_{\sigma_-}(\mathbf{v})$, and this map is an isomorphism outside of a subvariety of codimension at least 2 unless $\langle\mathbf{v},\mathbf{w}\rangle=0$ for some $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\Delta(X)$. We finish off the section by explaining how this birational map relates to the contraction maps $$\pi^\pm:M_{\sigma_\pm}(\mathbf{v})\to\overline{M}_\pm$$ induced by $\ell_{\sigma_0,\pm}$, as in Theorem \ref{Thm:WallContraction}. Recall that the curves contracted by $\pi^\pm$ are precisely those curves of $\sigma_+$-semistable objects that become S-equivalent with respect to $\sigma_0$.
Let $E$ be a $\sigma_0$-stable object of class $\mathbf{v}_0\in\overline{\ensuremath{\mathcal C}_0}$. Then
it is an irreducible object of $\ensuremath{\mathcal A}_0$.
By Propositions \ref{Prop:equiv1}, \ref{Prop:equiv2}, and \ref{Prop:equiv3}, and their analogues in the other subcases of Proposition \ref{Prop:lattice classification}\ref{enum:TwoNegative}, it follows that for a non-negative integer $n$, $E_n^+:=R_{T_n^+} \circ R_{T_{n-1}^+} \circ \cdots \circ R_{T_1^+}(E)$
is an irreducible object of $\ensuremath{\mathcal A}_n$
and $E_n^-:=R_{T_n^-}^{-1} \circ
R_{T_{n-1}^-}^{-1} \circ \cdots \circ R_{T_1^-}^{-1}(E)$
is an irreducible object of $\ensuremath{\mathcal A}_n^*$.
By Remark \ref{rem:simple-objects} and
the definition of $R_{T_i^\pm}$,
$E_n^\pm$ are successive extensions of $E$ by $T_i^\pm,T_i^\pm(K_X)$.
In particular, $E_n^\pm$ is S-equivalent to
\begin{equation}\label{eqn:Sequivalenceunderreflections}
\begin{cases}
E \oplus T_0^{\oplus k_0} \oplus T_1^{\oplus k_1}, &\mbox{ in Case \ref{enum:TwoSpherical}}\\
E\oplus \left(T_0\oplus T_0(K_X)\right)^{\oplus k_0}\oplus \left(T_1\oplus T_1(K_X)\right)^{\oplus k_1}, &\mbox{ in Case \ref{enum:TwoExceptional}}\\
E\oplus \left(T_0\oplus T_0(K_X)\right)^{\oplus k_0}\oplus T_1^{\oplus k_1}, &\mbox{ in Case \ref{enum:OneExceptionalOneSpherical}}
\end{cases}\end{equation}
with respect to $\sigma_0$, where
\mathbf{b}egin{equation}\label{eqn:Sequivalenceunderreflections-classes}
\mathbf{v}(E_n^\pm)=\mathbf{b}egin{cases}
\mathbf{v}(E)+k_0 \mathbf{w}_0+k_1 \mathbf{w}_1, &\mbox{ in Case \ref{enum:TwoSpherical}}\\
\mathbf{v}(E)+2k_0\mathbf{w}_0+2k_1\mathbf{w}_1, &\mbox{ in Case \ref{enum:TwoExceptional}}\\
\mathbf{v}(E)+2k_0\mathbf{w}_0+k_1\mathbf{w}_1, &\mbox{ in Case \ref{enum:OneExceptionalOneSpherical}}
\end{cases}.\end{equation}
Thus if $\mathbf{v}=R_{\mathbf{w}_n} \circ R_{\mathbf{w}_{n-1}} \circ \cdots \circ R_{\mathbf{w}_1}(\mathbf{v}_0)$ for $\mathbf{v}_0\in\ensuremath{\mathbb{C}}C_0$, then the contraction map $\pi^\pm:M_{\sigma_\pm}\to\mathop{\ord(\omega_S)}\nolimitsverline{M}_\pm$ is injective on the image of $M_{\sigma_0}^s(\mathbf{v}_0)$ under the isomorphism $M_{\sigma_\pm}(\mathbf{v}_0)\mor[\sim] M_{\sigma_\pm}(\mathbf{v})$ given by the above composition of Fourier-Mukai functors.\footnote{As written, this isomorphism exists for $n$ even. For $n$ odd, the composition of Fourier-Mukai functors would give $M_{\sigma_\mp}(\mathbf{v}_0)\mor[\sim] M_{\sigma_\pm}(\mathbf{v})$.} In particular, $\pi^\pm$ is birational. Moreover, the S-equivalence in \eqref{eqn:Sequivalenceunderreflections} remains true even if $E$ is strictly $\sigma_0$-semistable, so the curves contracted by $\pi^\pm:M_{\sigma_\pm}(\mathbf{v})\to\mathop{\ord(\omega_S)}\nolimitsverline{M}_\pm$ are in direct correspondence with the curves of S-equivalent objects contracted by $\pi_0^\pm:M_{\sigma_\pm}(\mathbf{v}_0)\to\mathop{\ord(\omega_S)}\nolimitsverline{M}_{0,\pm}$, where this is the analogous morphism for $\mathbf{v}_0$.
When $\mathbf{v}_0\in\ensuremath{\mathbb{C}}C_0$ is proper, that is, $\langle\mathbf{v},\mathbf{w}\rangle>0$ for all $\mathbf{w}\in\ensuremath{\mathbb{D}}elta(X)\cap C_\ensuremath{\mathcal W}$, the codimension of the exceptional locus of $\pi_0^\pm$ is at least two, so the same remains true of $\pi^\pm$. When $\mathbf{v}\in \mathbf{w}_n^\perp$, however, this exceptional locus has codimension one and the two moduli spaces $M_{\sigma_\pm}(\mathbf{v})$ are isomorphic. We explore in the next section when this divisor is contracted upon crossing the wall $\ensuremath{\mathcal W}$.
\section{Divisorial contractions in the non-isotropic case}
In this section we aim to prove the following result.
\mathbf{b}egin{Prop}\label{Prop:NonisotropicDivisorialContraction}
Assume that the potential wall $\ensuremath{\mathcal W}$ is non-isotropic. Then $\ensuremath{\mathcal W}$ induces a divisorial contraction on $M_{\sigma_+}(\mathbf{v},L)$ if and only if either there exists a spherical class $\mathbf{w}\in\ensuremath{\mathbb{D}}elta(X)_{-2}\cap C_\ensuremath{\mathcal W}$ such that $\langle\mathbf{v},\mathbf{w}\rangle=0$ or there exists an exceptional class $\mathbf{w}\in\ensuremath{\mathbb{D}}elta(X)_{-1}\cap C_{\ensuremath{\mathcal W}}$ such that $\langle\mathbf{v},\mathbf{w}\rangle=0$, $\mathbf{v}-2\mathbf{w}$ is spherical, and $L\equiv D+\frac{\mathop{\mathrm{rk}}\mathbf{v}}{2}K_X\pmod 2$ for a nodal cycle $D$. If $T$ is a $\sigma_{\pm}$-stable spherical object of class $\mathbf{w}$ (or $\mathbf{v}-2\mathbf{w}$ in the second case), then the contracted divisor can be described as a Brill-Noether divisor for $T$: it is given either by the condition $\ensuremath{\mathbb{H}}om(T,\mathbf{u}nderline{\hphantom{A}})\neq 0$ or by $\ensuremath{\mathbb{H}}om(\mathbf{u}nderline{\hphantom{A}},T)\neq 0$.
\end{Prop}
By Lemma \ref{Lem:non-isotropic no totally semistable wall}, we know that the locus of strictly $\sigma_0$-semistable objects has codimension one if and only if $\langle\mathbf{v},\mathbf{w}\rangle=0$ for some class $\mathbf{w}\in \ensuremath{\mathbb{D}}elta(X)\cap C_\ensuremath{\mathcal W}$. In particular, this condition must be met if $\ensuremath{\mathcal W}$ induces a divisorial contraction. In the next two lemmas, we prove that only when $\mathbf{w}$ is spherical does this divisor get contracted.
\mathbf{b}egin{Lem}\label{Lem:NonisotropicDivisorialContraction}
Suppose that $\ensuremath{\mathbb{H}}H$ is non-isotropic and $\ensuremath{\mathcal W}$ is a potential wall associated to $\ensuremath{\mathbb{H}}H$. If there exists an effective spherical class $\mathbf{w}$ with $\langle \mathbf{v},\mathbf{w}\rangle=0$, then $\ensuremath{\mathcal W}$ induces a divisorial contraction.
A generic element $E$ in the contracted divisor $D$ admits a short exact sequence $$0\to T\to E\to F\to 0\mbox{ or }0\to F\to E\to T\to 0,$$ where $T\in M^s_{\sigma_+}(\mathbf{w})$ and $F\in M^s_{\sigma_+}(\mathbf{v}-\mathbf{w})$, such that the inclusion $T\ensuremath{\hookrightarrow} E$ or $F\ensuremath{\hookrightarrow} E$ is one of the filtration steps in a JH-filtration for $E$ with respect to $\sigma_0$.
\end{Lem}
\mathbf{b}egin{proof}
By the discussion at the end of Section \ref{Sec:TotallySemistable-non-isotropic}, we can use a composition of spherical or weakly-spherical reflections, as in \cite[Corollary 7.3, Lemma 7.5]{BM14b} (in Case \ref{enum:TwoSpherical}) and Proposition \ref{Prop:CompositionSphericalExceptional} (in Case \ref{enum:OneExceptionalOneSpherical}), to reduce the discussion to the case of a minimal Mukai vector. We assume this to be the case. Then the spherical class must be $\mathbf{w}_0$ or $\mathbf{w}_1$ (in Case \ref{enum:TwoSpherical}) or $\mathbf{w}_1$ (in Case \ref{enum:OneExceptionalOneSpherical}), and we assume it is $\mathbf{w}_1$ with the other case being dealt with similarly. As in \cite[Lemma 7.4]{BM14b}, we first prove that $\mathbf{v}-\mathbf{w}_1$ is also minimal.
Let us start by assuming that $\mathbf{v}^2\geq 2$. We note that we must in fact have $\mathbf{v}^2\geq3$, since $\mathbf{v}^2=2$ gives $(\mathbf{v}-\mathbf{w}_1)^2=0$, contrary to the assumption that $\ensuremath{\mathbb{H}}H$ is non-isotropic. Write $\mathbf{v}=x\mathbf{w}_0+y\mathbf{w}_1$ with $x,y\in\ensuremath{\mathbb{Q}}$. Then $\langle\mathbf{w}_1,\mathbf{v}\rangle=0$ gives $y=\frac{m}{2}x$, where recall that $m=\langle\mathbf{w}_0,\mathbf{w}_1\rangle$. As $\langle\mathbf{w}_1,\mathbf{v}-\mathbf{w}_1\rangle=2$, to show that $\mathbf{v}-\mathbf{w}_1$ is minimal it suffices to check that
$$
0\leq \langle \mathbf{w}_0,\mathbf{v}-\mathbf{w}_1\rangle=(x\mathbf{w}_0^2+ym)-m=my\left(\frac{2\mathbf{w}_0^2}{m^2}+1\right)-m=m \left(y \left(1+\frac{2\mathbf{w}_0^2}{m^2}\right)-1 \right).
$$
We now consider the Cases \ref{enum:TwoSpherical} and \ref{enum:OneExceptionalOneSpherical} separately.
First suppose we are in Case \ref{enum:TwoSpherical}. Then $\mathbf{w}_0^2=-2$, so $\mathbf{v}^2\geq 3$ implies that $\frac{3}{2}\leq y^2(1-\frac{4}{m^2})$, and as in the proof of Proposition \ref{Prop:lattice classification}, we have $m\geq 3$. If $m=3$, then it is easy to show that the equations $\mathbf{v}^2=3$ and $\langle \mathbf{v},\mathbf{w}_1\rangle=0$ have no rational solutions, so we may assume that $\mathbf{v}^2\geq 4$. But then $2\leq y^2(1-\frac{4}{m^2})$, so $$y^2 \left(1-\frac{4}{m^2}\right)^2=
y^2 \left(1-\frac{4}{m^2}\right)
\frac{5}{9}\geq\frac{10}{9}>1.$$ Taking square-roots, we see that $$y\left(1+\frac{2\mathbf{w}_0^2}{m^2}\right)>1,$$ and therefore we have $\langle\mathbf{w}_0,\mathbf{v}-\mathbf{w}_1\rangle>0$. If, instead, $m\geq 4$, then we get $(1-\frac{4}{m^2})\geq\frac{3}{4}$, from which it follows that
$$y^2 \left(1-\frac{4}{m^2}\right)^2\geq
\frac{9}{8}>1,$$ so indeed $\langle \mathbf{w}_0,\mathbf{v}-\mathbf{w}_1\rangle>0$.
Now suppose we are in \cref{enum:OneExceptionalOneSpherical}. Then $\mathbf{w}_0^2=-1$, so $\mathbf{v}^2\geq 3$ is equivalent to $\frac{3}{2}\leq y^2(1-\frac{2}{m^2})$, and now $m\geq 2$. If $m=2$, then again one can easily check that the equations $\mathbf{v}^2=3$ and $\langle \mathbf{v},\mathbf{w}_1\rangle =0$ have no rational solutions, so $\mathbf{v}^2\geq 4$, i.e. $y^2(1-\frac{2}{m^2})\geq 2$. Thus
$$y^2 \left(1-\frac{2}{m^2}\right)^2=
y^2 \left(1-\frac{2}{m^2}\right)\frac{1}{2}\geq 1,$$ so $\langle \mathbf{w}_0,\mathbf{v}-\mathbf{w}_1\rangle\geq 0$. If, instead, $m\geq 3$, then $\left(1-\frac{2}{m^2}\right)\geq\frac{7}{9}$ and thus $$y^2 \left(1-\frac{2}{m^2}\right)^2\geq y^2 \left(1-\frac{2}{m^2}\right)\frac{7}{9}\geq\frac{7}{6}>1,$$ so we get $\langle \mathbf{w}_0,\mathbf{v}-\mathbf{w}_1\rangle>0$.
As we have shown that $\mathbf{v}-\mathbf{w}_1$ is minimal, Lemma \ref{Lem:non-isotropic no totally semistable wall} guarantees that the generic element $F\in M_{\sigma_+}(\mathbf{v}-\mathbf{w}_1)$ is also $\sigma_0$-stable. But then for the unique $\sigma_0$-stable spherical object $T_1$ with $\mathbf{v}(T_1)=\mathbf{w}_1$ we have $\mathop{\mathrm{ext}}\nolimits^2(F,T_1)=\hom(T_1(K_X),F)=\hom(T_1,F)=0=\hom(F,T_1)$ by stability. Thus $\mathop{\mathrm{ext}}\nolimits^1(F,T_1)=\langle \mathbf{v}-\mathbf{w}_1,\mathbf{w}_1\rangle=2$, so there is a family of extensions $$0\to F\to E_p\to T_1\to 0,$$ parametrized by $p\in\ensuremath{\mathbb{P}}^1=\ensuremath{\mathbb{P}}(\mathop{\mathrm{Ext}}\nolimits^1(T_1,F))$, which are all S-equivalent with respect to $\sigma_0$. By \cite[Lemma 6.9]{BM14b}, they are $\sigma_+$-stable. Thus $\pi^+$ contracts this rational curve. Varying $F\in M_{\sigma_0}^s(\mathbf{v}-\mathbf{w}_1)$ sweeps out a family of $\sigma_+$-stable objects in $M_{\sigma_+}(\mathbf{v})$ of dimension $1+(\mathbf{v}-\mathbf{w}_1)^2+1=\mathbf{v}^2=\mathop{\mathrm{dim}}\nolimits M_{\sigma_+}(\mathbf{v})-1$. Thus we get a divisor contracted by $\pi^+$, which must then have relative Picard rank one, so this is the only component contracted by $\pi^+$.
Finally, suppose that $\mathbf{v}^2=1$. Then $(\mathbf{v}-\mathbf{w}_1)^2=-1$, so we must be in Case \ref{enum:OneExceptionalOneSpherical} with $\mathbf{v}-\mathbf{w}_1=\pm\mathbf{w}_n$ for some $n\in\ensuremath{\mathbb{Z}}$. As $\mathbf{w}_n\in\ensuremath{\mathbb{Z}}\mathbf{w}_0+\ensuremath{\mathbb{Z}}\mathbf{w}_1$, it follows that $\mathbf{v}=x\mathbf{w}_0+y\mathbf{w}_1$ with $x,y\in\ensuremath{\mathbb{Z}}$. But then $\langle\mathbf{v},\mathbf{w}_1\rangle=0$ is equivalent to $x=\frac{2}{m}y$ so that $\mathbf{v}^2=1$ is equivalent to $$1=x^2\left(\frac{m^2}{2}-1\right)=\frac{x^2(m^2-2)}{2},$$ whose only solution in the positive integers is $x=1$ and $m=2$. Thus we have $\mathbf{v}-\mathbf{w}_1=\mathbf{w}_0$, so we are, as above, guaranteed that the unique member of $M_{\sigma_+}(\mathbf{v}-\mathbf{w}_1)$ is $\sigma_0$-stable. The same argument gives a family of extensions
$$0\to T_0\to E_p\to T_1\to 0,$$ parametrized by $p\in\ensuremath{\mathbb{P}}^1=\ensuremath{\mathbb{P}}(\mathop{\mathrm{Ext}}\nolimits^1(T_1,T_0))$, which are S-equivalent with respect to $\sigma_0$ but are $\sigma_+$-stable. This curve is contracted by $\pi^+$ and is a divisor in the two-dimensional moduli space $M_{\sigma_+}(\mathbf{v})$, so $\pi^+$ again has relative Picard rank one. These extensions thus give the unique curve contracted by $\pi^+$, as required.
\end{proof}
Having confirmed that when $\mathbf{w}$ is spherical we do get a divisorial contraction, we prove that, with one exception, this is not the case when $\mathbf{w}$ is exceptional.
\mathbf{b}egin{Lem}\label{Lem:ExceptionalDivisorialNonContraction}
Suppose that $\ensuremath{\mathbb{H}}H$ is non-isotropic and $\ensuremath{\mathcal W}$ is a potential wall associated to $\ensuremath{\mathbb{H}}H$. If there exists an effective exceptional class $\mathbf{w}$ with $\langle \mathbf{v},\mathbf{w}\rangle=0$, then $\ensuremath{\mathcal W}$ only induces a divisorial contraction on $M_{\sigma_+}(\mathbf{v},L)$ if $\mathbf{v}^2=2$, $L\equiv D+\frac{\mathop{\mathrm{rk}}\mathbf{v}}{2}K_X\pmod 2$ for a nodal cycle $D$, and $\ensuremath{\mathbb{H}}H$ falls into \cref{enum:OneExceptionalOneSpherical} of \cref{Prop:lattice classification}.
In this case, for $E$ in the contracted divisor, there is a short exact sequence $$0\to T\mathop{\ord(\omega_S)}\nolimitsplus T(K_X)\to E\to S\to 0\mbox{ or }0\to S\to E\to T\mathop{\ord(\omega_S)}\nolimitsplus T(K_X)\to 0,$$ where $T\in M^s_{\sigma_+}(\mathbf{w})$ and $S\in M^s_{\sigma_+}(\mathbf{v}-2\mathbf{w})$, such that the inclusion $T\ensuremath{\hookrightarrow} E$ or $S\ensuremath{\hookrightarrow} E$ is one of the filtration steps in a JH-filtration for $E$ with respect to $\sigma_0$, while the generic $E\in M_{\sigma_+}(\mathbf{v})$ satisfies $\ensuremath{\mathbb{H}}om(T,E)=\ensuremath{\mathbb{H}}om(E,T)=0$.
In general, there is nevertheless a divisor $D_{\sigma_+}(\mathbf{v})$ whose generic element $E$ admits a short exact sequence $$0\to T\to E\to F\to 0\mbox{ or }0\to F\to E\to T\to 0,$$ where $T\in M^s_{\sigma_+}(\mathbf{w})$ and $F\in M^s_{\sigma_+}(\mathbf{v}-\mathbf{w})$, such that the inclusion $T\ensuremath{\hookrightarrow} E$ or $F\ensuremath{\hookrightarrow} E$ is one of the filtration steps in a JH-filtration for $E$ with respect to $\sigma_0$, while the generic $E\in M_{\sigma_+}(\mathbf{v})$ satisfies $\ensuremath{\mathbb{H}}om(T,E)=\ensuremath{\mathbb{H}}om(E,T)=0$. Moreover, when $\mathbf{v}$ is minimal, this divisor is precisely the locus of strictly $\sigma_0$-semistable objects.
\end{Lem}
\mathbf{b}egin{proof}
As before, we may assume that $\mathbf{v}$ is minimal. Then in terms of Proposition \ref{Prop:lattice classification}, $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ must fall into cases \ref{enum:OneExceptional}, \ref{enum:TwoExceptional}, or \ref{enum:OneExceptionalOneSpherical}. By minimality of $\mathbf{v}$, $\mathbf{w}$ must be $\mathbf{w}_0$ or $\mathbf{w}_1$ in Case \ref{enum:TwoExceptional} or $\mathbf{w}_0$ in Case \ref{enum:OneExceptionalOneSpherical}, and we assume it is $\mathbf{w}_0$ with the other case being dealt with similarly. Furthermore, observe that there cannot exist any $n\in\ensuremath{\mathbb{Z}}$ such that $\mathbf{v}^2=n^2$ as then $(\mathbf{v}-n\mathbf{w}_0)^2=0$, contrary to the hypothesis that $\ensuremath{\mathcal W}$ is non-isotropic. So, in particular, $\mathbf{v}^2\geq 2$, from which it follows that $(\mathbf{v}-\mathbf{w}_0)^2>0$.
Let us first show that $\mathbf{v}-\mathbf{w}_0$ is minimal. As $\langle \mathbf{v}-\mathbf{w}_0,\mathbf{w}_0\rangle=1>0$, this is clear in \cref{enum:OneExceptional}, so we can restrict ourselves to Cases \ref{enum:TwoExceptional} and \ref{enum:OneExceptionalOneSpherical}. Thus it remains to show that $0\leq\langle \mathbf{v}-\mathbf{w}_0,\mathbf{w}_1\rangle$. Writing $\mathbf{v}=x\mathbf{w}_0+y\mathbf{w}_1$, the conditions $\langle \mathbf{v},\mathbf{w}_0\rangle=0, \mathbf{v}^2\geq 2$, and $\langle \mathbf{v}-\mathbf{w}_0,\mathbf{w}_1\rangle\geq 0$ become $$y=\frac{x}{m},x^2\left(1+\frac{\mathbf{w}_1^2}{m^2}\right)\geq 2,\mbox{ and }m\left[x\left(1+\frac{\mathbf{w}_1^2}{m^2}\right)-1\right]\geq 0,$$ respectively. As $m\geq 2$ in either case, we get $$\left(1+\frac{\mathbf{w}_1^2}{m^2}\right)=\mathbf{b}egin{cases}
1-\frac{1}{m^2}, &\text{if }\mathbf{w}_1^2=-1,\\
1-\frac{2}{m^2},&\text{if }\mathbf{w}_1^2=-2
\end{cases}\geq\mathbf{b}egin{cases}
\frac{3}{4}, &\text{if }\mathbf{w}_1^2=-1,\\
\frac{1}{2}, &\text{if }\mathbf{w}_1^2=-2,
\end{cases}\geq\frac{1}{2},$$ in either case.
Thus $$x^2\left(1+\frac{\mathbf{w}_1^2}{m^2}\right)^2\geq x^2\left(1+\frac{\mathbf{w}_1^2}{m^2}\right)\left(\frac{1}{2}\right)\geq 2\left(\frac{1}{2}\right)\geq 1.$$ Taking square-roots gives that indeed $$x\left(1+\frac{\mathbf{w}_1^2}{m^2}\right)-1\geq 0,$$ as required.
We consider the case when $\mathbf{v}^2=2$, $L\equiv D+\frac{\mathop{\mathrm{rk}}\mathbf{v}}{2}K_X\pmod 2$, and $\ensuremath{\mathbb{H}}H$ falls into \cref{enum:OneExceptionalOneSpherical} of \cref{Prop:lattice classification}. As $\langle\mathbf{v}-\mathbf{w}_0,\mathbf{v}-2\mathbf{w}_0\rangle=0$ and $(\mathbf{v}-2\mathbf{w}_0)^2=-2$, it follows from the minimality of $\mathbf{v}-\mathbf{w}_0$ that $\mathbf{v}-2\mathbf{w}_0=\mathbf{w}_1=\mathbf{v}(S)$, for the unique $\sigma_0$-stable spherical object $S$. We denote by $T_0$ the unique $\sigma_0$-stable object of class $\mathbf{w}_0$ (up to $-\mathop{\ord(\omega_S)}\nolimitstimes\ensuremath{\mathcal O}_X(K_X)$). By stability, we have $$\hom(S,T_0(D))=\mathop{\mathrm{ext}}\nolimits^2(S,T_0(D))=\hom(T_0(D),S)=0,$$ for $D=0,K_X$. Thus $\mathop{\mathrm{ext}}\nolimits^1(S,T_0\mathop{\ord(\omega_S)}\nolimitsplus T_0(K_X))=\langle\mathbf{v}-2\mathbf{w}_0,2\mathbf{w}_0\rangle=4$, so by \cite[Lemma 6.1-6.3]{CH15} there is a $\ensuremath{\mathbb{P}}^1\times \ensuremath{\mathbb{P}}^1$ worth of non-isomorphic $\sigma_+$-stable $E$ fitting into a short exact sequence $$0\to T_0\mathop{\ord(\omega_S)}\nolimitsplus T_0(K_X)\to E\to S\to 0.$$ As this gives a contracted divisor, it must be the only contracted divisor, as claimed.
We now treat the general case, that is, either $\mathbf{v}^2>2$ or $\mathbf{v}^2=2$ and either $L\not\equiv D+\frac{\mathop{\mathrm{rk}}\mathbf{v}}{2}K_X\pmod 2$ or $\ensuremath{\mathbb{H}}H$ does not fall into \cref{enum:OneExceptionalOneSpherical} of \cref{Prop:lattice classification}. As $\mathbf{v}-\mathbf{w}_0$ is minimal, it follows from Lemma \ref{Lem:non-isotropic no totally semistable wall} that there exists a $\sigma_0$-stable object $F$ of class $\mathbf{v}-\mathbf{w}_0$. By stability, $\hom(F,T_0)=\mathop{\mathrm{ext}}\nolimits^2(F,T_0)=\hom(F,T_0(K_X))=0$, so $\mathop{\mathrm{ext}}\nolimits^1(F,T_0)=\langle \mathbf{v}-\mathbf{w}_0,\mathbf{w}_0\rangle=1$, and there exists a unique non-trivial extension $$0\to T_0\to E\to F\to 0,$$ which is $\sigma_+$-stable by \cite[Lemma 6.9]{BM14b}. By a dimension count, upon varying $F\in M_{\sigma_0}^s(\mathbf{v}-\mathbf{w}_0)$ these extensions sweep out a divisor of strictly $\sigma_0$-semistable objects which does not get contracted by $\pi^+$. Moreover, from the proof of \cref{Lem:non-isotropic no totally semistable wall}, it follows that this is precisely the locus of strictly $\sigma_0$-semistable objects.
\end{proof}
\mathbf{b}egin{Rem}
We will see in Section \ref{Sec:FloppingWalls} that, if $\mathbf{v}^2\geq 3$, then in the setup of Lemma \ref{Lem:ExceptionalDivisorialNonContraction}, $\ensuremath{\mathcal W}$ induces a small contraction, contracting a $\ensuremath{\mathbb{P}}^1\times\ensuremath{\mathbb{P}}^1$. As the weakly-spherical reflection $R_{T}$ induces an isomorphism $M_{\sigma_+}(\mathbf{v})\mor[\sim] M_{\sigma_-}(\mathbf{v})$ that acts as the identity on $M_{\sigma_+}(\mathbf{v})\mathbf{b}ackslash D_{\sigma_+}(\mathbf{v})$, where $\ensuremath{\mathbb{H}}om(T,E)=\ensuremath{\mathbb{H}}om(E,T(K_X))=0$, we see that $M_{\sigma_-}(\mathbf{v})$ cannot be the flop of $\pi^+$. It is unclear whether examples such as this show that there are minimal models of $M_{\sigma_+}(\mathbf{v})$ which cannot be obtained by Bridgeland wall-crossing. On the other hand, it may be possible to reach this minimal model by crossing a different wall bounding the chamber containing $\sigma_+$.
\end{Rem}
\mathbf{b}egin{proof}[Proof of Proposition \ref{Prop:NonisotropicDivisorialContraction}]
This follows directly from Lemma \ref{Lem:non-isotropic no totally semistable wall}, Lemma \ref{Lem:NonisotropicDivisorialContraction}, and Lemma \ref{Lem:ExceptionalDivisorialNonContraction}.
\end{proof}
\mathbf{b}egin{Rem}\label{Rem:NonIsotropicDeterminantIrrelevant}
It is important to note that with the exception of \cref{Lem:ExceptionalDivisorialNonContraction}, everything we have said thus far in the non-isotropic case applies to each component $M_{\sigma_+}(\mathbf{v},L),M_{\sigma_+}(\mathbf{v},L+K_X)$, where $c_1(\mathbf{v})=[L\mathop{\mathrm{mod}}\nolimits K_X]$. In particular, by taking $F\in M^s_{\sigma_0}(\mathbf{v}-\mathbf{w},L'),M_{\sigma_0}^s(\mathbf{v}-\mathbf{w},L'+K_X)$ in Lemmas \ref{Lem:NonisotropicDivisorialContraction} and \ref{Lem:ExceptionalDivisorialNonContraction}, we get divisors with the described properties in each component $M_{\sigma_+}(\mathbf{v},L),M_{\sigma_+}(\mathbf{v},L+K_X)$. We will see in the next section that we must take great care to treat the determinants differently as the wall-crossing behavior is often radically different in each component, in a similar way to the case $\mathbf{v}^2=2$, $L\equiv D+\frac{\mathop{\mathrm{rk}}\mathbf{v}}{2}K_X\pmod2$, and $\mathbf{v}-2\mathbf{w}$ is spherical in \cref{Lem:ExceptionalDivisorialNonContraction}.
\end{Rem}
\section{Isotropic walls}\label{Sec:Isotropic walls}
We finally treat the case of isotropic walls. We divide our discussion in two. We will first discuss the case that $\ensuremath{\mathbb{H}}H$ contains a primitive isotropic vector $\mathbf{u}$ with $\ell(\mathbf{u})=2$, in which case the wall $\ensuremath{\mathcal W}$ corresponds, after a Fourier-Mukai transform, to the contraction to the Uhlenbeck compactification, see \cite{Li97,Lo12}. We will consider separately the case that $\ensuremath{\mathbb{H}}H$ only contains primitive isotropic vectors $\mathbf{u}$ with $\ell(\mathbf{u})=1$. In both cases, we again use the stack of Harder-Narasimhan filtrations, as in Section \ref{sec:DimensionsOfHarderNarasimhan}, to study the wall-crossing behavior. We begin by studying in more detail the isotropic lattice $\ensuremath{\mathbb{H}}H$, its isotropic vectors, and the associated moduli spaces.
\subsection{Preliminaries}
We state here a result that summarizes the facts we will need for a more detailed study of wall-crossing in the isotropic case.
\mathbf{b}egin{Prop}\label{Prop:isotropic lattice} Assume that there exists an isotropic class $\mathbf{u}\in\ensuremath{\mathbb{H}}H$. Then there are two effective, primitive, isotropic classes $\mathbf{u}_1$ and $\mathbf{u}_2$ in $\ensuremath{\mathbb{H}}H$, which satisfy $P_{\ensuremath{\mathbb{H}}H}=\ensuremath{\mathbb{R}}_{\geq 0}\mathbf{u}_1+\ensuremath{\mathbb{R}}_{\geq 0}\mathbf{u}_2$ and $\langle\mathbf{v}',\mathbf{u}_i\rangle\geq 0$ for $i=1,2$ and any $\mathbf{v}'\in P_{\ensuremath{\mathbb{H}}H}$. Moreover, one of the following mutually exclusive conditions holds:
\mathbf{b}egin{enumerate}
\item\label{enum:IsotropicLatticeNoEffectiveNegatives} $C_\ensuremath{\mathcal W}=P_\ensuremath{\mathbb{H}}H$ and $\ell(\mathbf{u}_1)\geq\ell(\mathbf{u}_2)$.
In this case, $M^s_{\sigma_0}(\mathbf{u}_i)=M_{\sigma_0}(\mathbf{u}_i)$ for each $i=1,2$ and a generic $\sigma_0\in\ensuremath{\mathcal W}$; or
\item\label{enum:IsotropicLatticeEffectiveExceptional} There exists an exceptional class $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$. In this case $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)$, $\ensuremath{\mathbb{H}}H=\ensuremath{\mathbb{Z}}\mathbf{w}+\ensuremath{\mathbb{Z}}\mathbf{u}_1$ and $\mathbf{u}_2=\mathbf{u}_1+2\langle\mathbf{u}_1,\mathbf{w}\rangle\mathbf{w}$. Consequently, $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=2\langle\mathbf{u}_1,\mathbf{w}\rangle^2$ and $C_\ensuremath{\mathcal W}=\ensuremath{\mathbb{R}}_{\geq 0}\mathbf{u}_1+\ensuremath{\mathbb{R}}_{\geq 0}\mathbf{w}$. Finally, in this case $M_{\sigma_0}^s(\mathbf{u}_1)=M_{\sigma_0}(\mathbf{u}_1)$ for a generic $\sigma_0\in\ensuremath{\mathcal W}$, while $\ensuremath{\mathcal W}$ is a totally semistable wall for $\mathbf{u}_2$; or
\item\label{enum:IsotropicLatticeEffectiveSpherical} There exists a spherical class $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H$. In this case, we again have $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)$, $\ensuremath{\mathbb{H}}H=\ensuremath{\mathbb{Z}}\mathbf{w}+\ensuremath{\mathbb{Z}}\mathbf{u}_1$, and $\mathbf{u}_2=\mathbf{u}_1+\langle\mathbf{u}_1,\mathbf{w}\rangle\mathbf{w}$. Consequently, $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=\langle\mathbf{u}_1,\mathbf{w}\rangle^2$ and $\ensuremath{\mathbb{H}}H$ is even in this case.\footnote{All of these conclusions continue to hold in Case \ref{enum:IsotropicLatticeNoEffectiveNegatives} if $\ensuremath{\mathbb{H}}H$ admits a class $\mathbf{w}\notin C_\ensuremath{\mathcal W}$ with $\mathbf{w}^2=-2$.} Finally, $C_\ensuremath{\mathcal W}=\ensuremath{\mathbb{R}}_{\geq 0}\mathbf{u}_1+\ensuremath{\mathbb{R}}_{\geq 0}\mathbf{w}$ and $M_{\sigma_0}^s(\mathbf{u}_1)=M_{\sigma_0}(\mathbf{u}_1)$ for a generic $\sigma_0\in\ensuremath{\mathcal W}$, while $\ensuremath{\mathcal W}$ is a totally semistable wall for $\mathbf{u}_2$.
\end{enumerate}
\end{Prop}
\mathbf{b}egin{proof}
If $\mathbf{u}\in\ensuremath{\mathbb{H}}H$ is a primitive isotropic class, then up to replacing $\mathbf{u}$ by $-\mathbf{u}$, we may assume that $\mathbf{u}$ is effective, so we set $\mathbf{u}_1=\mathbf{u}$. Completing $\mathbf{u}_1$ to a basis $\ensuremath{\mathbb{H}}H=\ensuremath{\mathbb{Z}}\mathbf{u}_1+\ensuremath{\mathbb{Z}}\mathbf{v}'$, we see that \mathbf{b}egin{equation}\label{eqn:OtherIsotropic}0=(x\mathbf{u}_1+y\mathbf{v}')^2=2xy\langle \mathbf{u}_1,\mathbf{v}'\rangle+y^2 (\mathbf{v}')^2\end{equation} has a second integral solution, since we can assume $y\neq 0$ and $\langle \mathbf{u}_1,\mathbf{v}'\rangle\neq 0$ from the signature of $\ensuremath{\mathbb{H}}H$. Taking the unique effective primitive class on the corresponding line, we get $\mathbf{u}_2$. Clearly $P_{\ensuremath{\mathbb{H}}H}$ is as claimed, and the inequality $\langle \mathbf{v}',\mathbf{u}_i\rangle\geq 0$ follows accordingly.
If $C_\ensuremath{\mathcal W}=P_\ensuremath{\mathbb{H}}H$, then the claim about moduli spaces in Case \ref{enum:IsotropicLatticeNoEffectiveNegatives} follows from the fact that $\mathbf{u}_1$ and $\mathbf{u}_2$ are primitive vectors on extremal rays of $C_\ensuremath{\mathcal W}$. Moreover, up to renumbering, we assume that $\ell(\mathbf{u}_1)\geq \ell(\mathbf{u}_2)$ and, in case of equality, $\langle\mathbf{v},\mathbf{u}_1\rangle\geq\langle\mathbf{v},\mathbf{u}_2\rangle$.
Suppose that there exists a class $\mathbf{w}\in C_\ensuremath{\mathcal W}$ that is not in $P_\ensuremath{\mathbb{H}}H$. Then either $\mathbf{w}$ is exceptional or spherical as in Case \ref{enum:IsotropicLatticeEffectiveExceptional} or \ref{enum:IsotropicLatticeEffectiveSpherical}, respectively.
Let us consider first Case \ref{enum:IsotropicLatticeEffectiveExceptional}, and write $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{D}}elta(X)_{-1}$ as $\mathbf{w}=x\mathbf{u}_1+y\mathbf{v}'$. Then
$$-1=\mathbf{w}^2=y(y \mathbf{v}'^2+2x \langle \mathbf{v}',\mathbf{u}_1 \rangle)$$ implies
$y=\pm 1$. Replacing $\mathbf{v}'$ by $\mathbf{w}$, we see that $\ensuremath{\mathbb{H}}H=\ensuremath{\mathbb{Z}}\mathbf{u}_1+\ensuremath{\mathbb{Z}}\mathbf{w}$. Then it is easy to see that the other primitive effective isotropic vector must satisfy \mathbf{b}egin{equation}\label{eqn:U2Exceptional}\mathbf{u}_2=\mathbf{u}_1+2 \langle \mathbf{u}_1,\mathbf{w} \rangle \mathbf{w}.\end{equation} Pairing this equality with $\mathbf{u}_1$, it is clear that $\langle \mathbf{u}_1,\mathbf{u}_2\rangle=2\langle\mathbf{u}_1,\mathbf{w}\rangle^2$. Moreover, we see that $c_1(\mathbf{u}_2)\equiv c_1(\mathbf{u}_1)\pmod 2$ from \eqref{eqn:U2Exceptional}, so we get the last statement that $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)$.
Now consider Case \ref{enum:IsotropicLatticeEffectiveSpherical}, and write $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{D}}elta(X)_{-2}$ as $\mathbf{w}=x\mathbf{u}_1+y\mathbf{v}'$. Then $$-2=\mathbf{w}^2=y(y\mathbf{v}'^2+2x\langle\mathbf{u}_1,\mathbf{v}'\rangle)$$ implies that $y=\pm1,\pm2$. If $y=\pm2$, then $$\mp1=\pm 2{\mathbf{v}'}^2+2x\langle\mathbf{u}_1,\mathbf{v}'\rangle=2(\pm\mathbf{v}'+x\langle\mathbf{u}_1,\mathbf{v}'\rangle),$$ which is absurd. Thus $y=\pm1$, so that replacing $\mathbf{v}'$ by $\mathbf{w}$ we have $\ensuremath{\mathbb{H}}H=\ensuremath{\mathbb{Z}}\mathbf{u}_1+\ensuremath{\mathbb{Z}}\mathbf{w}$. In this case, the other primitive effective isotropic vector must satisfy \mathbf{b}egin{equation}\label{eqn:U2Spherical}
\mathbf{u}_2=\mathbf{u}_1+\langle\mathbf{u}_1,\mathbf{w}\rangle\mathbf{w}.
\end{equation}
Pairing \eqref{eqn:U2Spherical} with $\mathbf{u}_1$ gives $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=\langle\mathbf{u}_1,\mathbf{w}\rangle^2$. To see that $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)$, observe that if $\ell(\mathbf{u}_i)=2$ for, say, $i=1$, then $2\mid c_1(\mathbf{u}_1)$ implies that $2\mid\langle\mathbf{u}_1,\mathbf{w}\rangle$ as $\mathop{\mathrm{rk}}\mathbf{w}\equiv\mathop{\mathrm{rk}}\mathbf{u}_1\equiv 0\pmod 2$. Thus $\mathbf{u}_1\equiv\mathbf{u}_2\pmod 2$ so that $\ell(\mathbf{u}_2)=2$ as well. Otherwise, $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)=1$, and we have equality again. Finally, note that for any $\mathbf{v}=x\mathbf{u}_1+y\mathbf{w}$ with $x,y\in\ensuremath{\mathbb{Z}}$, we have that $$\mathbf{v}^2=2xy\langle\mathbf{u}_1,\mathbf{w}\rangle+y^2\mathbf{w}^2=2xy\langle\mathbf{u}_1,\mathbf{w}\rangle-2y^2$$ is even, as claimed.
Observe that Cases \ref{enum:IsotropicLatticeEffectiveExceptional} and \ref{enum:IsotropicLatticeEffectiveSpherical} are indeed mutually exclusive since $\ensuremath{\mathbb{H}}H$ is an odd lattice in the first case and an even lattice in the latter.
For the statements about $C_\ensuremath{\mathcal W}$ and moduli spaces in Cases \ref{enum:IsotropicLatticeEffectiveExceptional} and \ref{enum:IsotropicLatticeEffectiveSpherical}, observe that $C_\ensuremath{\mathcal W}=P_\ensuremath{\mathbb{H}}H+\ensuremath{\mathbb{R}}_{\geq 0}\mathbf{w}$ and $\langle\mathbf{u}_2,\mathbf{w}\rangle=-\langle\mathbf{u}_1,\mathbf{w}\rangle$ in either case. So, up to reordering, we may suppose that $\langle\mathbf{u}_1,\mathbf{w}\rangle>0$ and $\langle\mathbf{u}_2,\mathbf{w}\rangle<0$. In particular, $\mathbf{u}_1$ is an extremal ray of $C_\ensuremath{\mathcal W}$ (see Figure \ref{fig:IsotropicWithNegative}), and $\ensuremath{\mathcal W}$ is totally semistable for $\mathbf{u}_2$ by Lemma \ref{Lem: condition for totally semistable wall}. Thus $M^s_{\sigma_0}(\mathbf{u}_1)=M_{\sigma_0}(\mathbf{u}_1)$, as claimed.
\end{proof}
\begin{figure}
\begin{tikzpicture}[scale=1]
\draw [->] (-4,0) -- (4,0);
\draw[->] (0,-1) -- (0,4);
\path [fill=gray!50,opacity=0.2] (0,4) -- (0,0) -- (4,0) -- (4,4) -- cycle;
\path [fill=gray!60,opacity=0.4] (0,0) -- (4,0) -- (4,4) -- (4/3,4) -- cycle;
\draw [red,domain=-4:4] plot (\x,{0});
\draw [red,domain=-1/3:4/3] plot (\x,{3*\x});
\filldraw [black] (0,3) circle (1.5pt) node [anchor=north east] {$\mathbf{w}$};
\filldraw [black] (1,0) circle (1.5pt) node [anchor=north east] {$\mathbf{u}_1$};
\filldraw [black] (1,3) circle (1.5pt) node [anchor=north east] {$\mathbf{u}_2$};
\node[below] at (2,2) {$P_{\ensuremath{\mathcal H}}$};
\end{tikzpicture}
\caption{The red lines are defined by $\mathbf{u}^2=0$. The dark gray region is the positive cone $P_{\ensuremath{\mathcal H}}$, while the first quadrant is the effective cone $C_\ensuremath{\mathcal W}$.}
\label{fig:IsotropicWithNegative}
\end{figure}
\begin{Rem}\label{Rem:Odd lattice}
It is worth noting that if $\ensuremath{\mathcal H}$ contains a vector $\mathbf{v}$ such that $\mathbf{v}^2$ is odd, then $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)$. Indeed, as above, we may complete $\mathbf{u}_1$ to a basis so that $\ensuremath{\mathcal H}=\ensuremath{\mathbb{Z}}\mathbf{u}_1+\ensuremath{\mathbb{Z}}\mathbf{v}'$, and as $\mathbf{v}^2$ is odd, we must have $\mathbf{v}'^2$ is odd as well. From \eqref{eqn:OtherIsotropic}, we see immediately that in writing $\mathbf{u}_2=x\mathbf{u}_1+y\mathbf{v}'$ with $\gcd(x,y)=1$, we must have $y$ even and $x$ odd. But then $c_1(\mathbf{u}_1)\equiv c_1(\mathbf{u}_2)\pmod 2$ so that $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)$, as claimed.
\end{Rem}
\begin{Rem}\label{Rem:Even and Odd pairings}
We note that $\ell(\mathbf{u}_1)=2$ and $2\mid\mathop{\mathrm{rk}}(\mathbf{u}_2)$ force $\langle\mathbf{u}_1,\mathbf{u}_2\rangle$ to be even. Let us be more specific in each case.
In Case \ref{enum:IsotropicLatticeNoEffectiveNegatives}, we must have $\langle\mathbf{u}_1,\mathbf{u}_2\rangle\geq 4$ if $\ell(\mathbf{u}_2)=2$. Indeed, if $\ell(\mathbf{u}_2)=2$, then $\mathbf{u}_1-\mathbf{u}_2$ is divisible by 2 in $H^*_{\mathrm{alg}}(X,\ensuremath{\mathbb{Z}})$, and thus in the saturated sublattice $\ensuremath{\mathcal H}$ as well, so $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=2$ would imply that $\ensuremath{\mathcal H}$ contains the exceptional class $\frac{\mathbf{u}_1-\mathbf{u}_2}{2}$, an impossibility. If, instead, $\ell(\mathbf{u}_2)=1$, then $\ensuremath{\mathcal H}$ must be even by Remark \ref{Rem:Odd lattice}.
In Case \ref{enum:IsotropicLatticeEffectiveSpherical}, when there exists an effective spherical class, we also have $\langle\mathbf{u}_1,\mathbf{u}_2\rangle\geq 4$ since then $\langle\mathbf{u}_1,\mathbf{w}\rangle$ is even so that $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=\langle\mathbf{u}_1,\mathbf{w}\rangle^2$ is divisible by 4.
In \cref{enum:IsotropicLatticeEffectiveExceptional}, we have $\langle\mathbf{u}_1,\mathbf{w}\rangle$ is odd so that $4\nmid\langle\mathbf{u}_1,\mathbf{u}_2\rangle=2\langle\mathbf{u}_1,\mathbf{w}\rangle^2$. Indeed, writing $\mathbf{u}_1=(2r_1,2c_1,s_1)$ and $\mathbf{w}=(r,c,\frac{s}{2})$ with $r,s$ odd integers, we observe that $$\langle\mathbf{u}_1,\mathbf{w}\rangle=2c_1\cdot c-rs_1-r_1s\equiv -(r_1+s_1)\equiv1\pmod2$$ by \cref{primitive}.
\end{Rem}
\begin{Rem}\label{Rem:IsotropicOrientation}
Up to relabeling $\sigma_+$ and $\sigma_-$, we may assume that the orientation on $\ensuremath{\mathcal H}_\ensuremath{\mathbb{R}}$ is as in Figure \ref{fig:IsotropicWithNegative}, even in Case \ref{enum:IsotropicLatticeNoEffectiveNegatives}. That is, we will assume for the remainder of this section that $\phi^+(\mathbf{u}_1)<\phi^+(\mathbf{u}_2)<\phi^+(\mathbf{w})$.
\end{Rem}
Our main result about isotropic walls is the following classification:
\begin{Prop}\label{Prop:isotropic-classification}
Assume that $\ensuremath{\mathcal H}_{\ensuremath{\mathcal W}}$ is isotropic and $\mathbf{v}\in\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$ with $\mathbf{v}^2>0$. Set $r:=\mathop{\mathrm{rk}} \mathbf{v}$.
\begin{enumerate}
\item
If $\ensuremath{\mathcal W}$ is totally semistable, that is, $M_{\sigma_0}^s(\mathbf{v},L)=\emptyset$, then
\begin{enumerate}
\item\label{enum:IostropicTSS-NonMinimal} $\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$ contains an effective exceptional or spherical class $\mathbf{w}$ such that $\langle\mathbf{v},\mathbf{w}\rangle<0$; or
\item\label{enum:IsotropicTSS-HC} $\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$ contains a primitive isotropic $\mathbf{u}$ such that $\ell(\mathbf{u})=2$ and $\langle \mathbf{v},\mathbf{u} \rangle=1$; or
\item\label{enum:IsotropicTSS-P1Fibration Spherical l=1} $\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$ contains a primitive isotropic $\mathbf{u}$ and an effective spherical class $\mathbf{w}$ such that $\langle \mathbf{v},\mathbf{u} \rangle=\ell(\mathbf{u})=2$, $\langle \mathbf{v},\mathbf{w} \rangle=0$, $L\equiv D+\frac{r}{2}K_X \pmod 2$,
where $D$ is a nodal cycle; or
\item\label{enum:IsotropicTSS-P1Fibration Exceptional} $\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$ contains a primitive isotropic $\mathbf{u}$ and an effective exceptional class $\mathbf{w}$ such that
$\langle \mathbf{v},\mathbf{u} \rangle=\ell(\mathbf{u})=2$, $\langle \mathbf{v},\mathbf{w} \rangle=0$, and $L \equiv K_X \pmod 2$;
or
\item\label{enum:IsotropicTSS-P1Fibration Spherical l=2}$\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$ contains a primitive isotropic $\mathbf{u}$ and an effective spherical class $\mathbf{w}$ such that $\langle \mathbf{v},\mathbf{u} \rangle=1=\ell(\mathbf{u})$, $\langle\mathbf{v},\mathbf{w}\rangle=0$, and $L \equiv D+\frac{r}{2}K_X \pmod 2$,
where $D$ is a nodal cycle.
\end{enumerate}
\item
$\ensuremath{\mathcal W}$ induces a divisorial contraction if and only if
\begin{enumerate}
\item\label{enum:IsotropicDivisorialContraction-HC} $\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$ contains a primitive isotropic $\mathbf{u}$ and an effective exceptional class $\mathbf{w}$ such that $\langle\mathbf{v},\mathbf{u}\rangle=1$, $\ell(\mathbf{u})=2$, and $\langle\mathbf{v},\mathbf{w}\rangle\neq0$; or
\item\label{enum:IsotropicDivisorialContraction-<v.w>=0} $\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$ contains a primitive isotropic $\mathbf{u}$ and an effective spherical class $\mathbf{w}$ such that $\langle\mathbf{v},\mathbf{w}\rangle=0$ and $\langle\mathbf{v},\mathbf{u}\rangle>\ell(\mathbf{u})$; or
\item\label{enum:IsotropicDivisorialContraction-<v.u>=l(u)} $\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$ contains a primitive isotropic $\mathbf{u}$ and an effective exceptional or spherical class $\mathbf{w}$ such that $\langle\mathbf{v},\mathbf{u}\rangle=\ell(\mathbf{u})$ and $\langle\mathbf{v},\mathbf{w}\rangle\neq0$; or
\item\label{enum:IsotropicDivisorialContraction-<v.u>=l(u) no negative} $\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$ satisfies $C_\ensuremath{\mathcal W}=P_{\ensuremath{\mathcal H}}$, contains a primitive isotropic $\mathbf{u}$ such that $\langle\mathbf{v},\mathbf{u}\rangle=\ell(\mathbf{u})$, and $\mathbf{v}^2\geq \ell(\mathbf{u})+2$.
\end{enumerate}
\end{enumerate}
\end{Prop}
\begin{Rem}Let us make a few comments about irreducible components and determinants.
\begin{enumerate}
\item
In \cref{enum:IsotropicTSS-P1Fibration Spherical l=1}, we have
$\mathbf{v}=\mathbf{w}+\mathbf{u}$ and $\mathbf{v}^2=2$.
If $L \equiv D+\frac{r}{2}K_X+K_X \pmod 2$, then $M_{\sigma_0}^s(\mathbf{v},L)=M_{\sigma_0}(\mathbf{v},L)$. In \cref{enum:IsotropicTSS-P1Fibration Exceptional}, $\mathbf{v}=2(\mathbf{w}+\mathbf{u})$. If $L \equiv K_X \pmod 2$, then
a connected component of $M_{\sigma_+}(\mathbf{v},L)$ has two irreducible
components (Proposition \ref{prop:connected}) and $\ensuremath{\mathcal W}$ is totally semistable for one of them.
If $L \equiv 0 \pmod 2$, then $M_{\sigma_+}(\mathbf{v},L)\setminus M_{\sigma_0}^s(\mathbf{v},L)$ is a divisor. In \cref{enum:IsotropicTSS-P1Fibration Spherical l=2},
we also see that
$\mathbf{v}=\mathbf{w}+2\mathbf{u}$. By Proposition \ref{prop:irred-comp:v^2=2},
$M_\sigma (\mathbf{v})$ has two irreducible components and
each component becomes totally semistable at walls
$\ensuremath{\mathcal W}$ (of type \ref{enum:IsotropicTSS-P1Fibration Spherical l=1}) and $\ensuremath{\mathcal W}'$ (of type \ref{enum:IsotropicTSS-P1Fibration Spherical l=2}).
\item
In \cref{enum:IsotropicDivisorialContraction-<v.u>=l(u) no negative} with $\ell(\mathbf{u})=1$,
assume that $\mathbf{u}=\mathbf{u}_1$. Then we can show
$\mathbf{v}=\frac{\mathbf{v}^2}{2}\mathbf{u}_1+\mathbf{u}_2$ with $\langle \mathbf{u}_1,\mathbf{u}_2 \rangle=1$. In particular $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)=1$.
\end{enumerate}
\end{Rem}
We will prove \cref{Prop:isotropic-classification} in the next two subsections. For clarity of discussion, in the next section we will first tackle the case that $\mathbf{v}$ is minimal, that is, $\langle\mathbf{v},\mathbf{w}\rangle\geq0$ for the unique exceptional/spherical class $\mathbf{w}\in\ensuremath{\mathcal H}_\ensuremath{\mathcal W}$. Then we will show in \cref{subsec:non-minimal case} that if $\langle\mathbf{v},\mathbf{w}\rangle<0$, we may apply the spherical or weakly-spherical twist associated to $T$ of class $\mathbf{w}$ to reduce the wall-crossing behavior to that of a minimal $\mathbf{v}$.
\subsection{Minimal Mukai vectors}
In this section we assume that $\mathbf{v}$ is \emph{minimal}, i.e. that $\langle \mathbf{v},\mathbf{w}\rangle\geq 0$ for the (unique) spherical or exceptional effective class $\mathbf{w}$, if it exists. When such $\mathbf{w}$ exists, let us denote by $T$ the spherical or exceptional $\sigma_0$-stable object of class $\mathbf{w}$. We divide our analysis into two cases based on whether $\ell(\mathbf{u}_i)=2$ for some $i=1,2$ or not. We begin with the case where $\ell(\mathbf{u}_i)=2$ for some $i$.
\subsubsection{$\ell(\mathbf{u}_i)=2$ for some $i$}
By Proposition \ref{Prop:isotropic lattice} above, we may assume that $i=1$ so that $M_{\sigma_0}^s(\mathbf{u}_1)=M_{\sigma_0}(\mathbf{u}_1)$ and $\ell(\mathbf{u}_1)=2$ implies that $M_{\sigma_0}(\mathbf{u}_1)\cong X$ by \cite[Lemma 9.3]{Nue14b}. Using the Fourier-Mukai transform with kernel given by the universal family of $M_{\sigma_0}(\mathbf{u}_1)$, \[\Phi:\mathrm{D}^{b}(X)\cong\mathrm{D}^{b}(X),\] we get $\Phi(\mathbf{u}_1)=(0,0,1)$. By construction of $\mathop{\mathrm{Stab}}\nolimits^{\dagger}(X)$, skyscraper sheaves of points on $X$ are $\Phi(\sigma_0)$-stable. By Bridgeland's Theorem \ref{thm:GeometricStabilityConditions}, there exist divisor classes $\omega,\beta\in\mathrm{NS}(X)_{\ensuremath{\mathbb{Q}}}$, with $\omega$ ample, such that up to the $\mathop{\mathrm{GL}}\nolimits_2(\ensuremath{\mathbb{R}})$-action, $\Phi(\sigma_0)=\sigma_{\omega,\beta}$. In particular, the category $\ensuremath{\mathcal P}_{\omega,\beta}(1)$ is the extension-closure of skyscraper sheaves of points and the shifts, $F[1]$, of $\mu_{\omega}$-stable torsion-free sheaves $F$ with slope $\mu_{\omega}(F) =\omega\cdot\beta$. Since $\sigma_0$ by assumption does not lie on any other wall with respect to $\mathbf{v}$, the divisor $\omega$ is generic with respect to $\Phi(\mathbf{v})$. Under these identifications, we have the following result whose proof is identical to that of \cite[Theorem 3.2.7]{MYY14b}, \cite[Proposition 8.2]{BM14b}, and \cite[Section 5]{LQ14}.
\begin{Prop}\label{Prop:Uhlenbeck morphism}
An object $E$ of class $\mathbf{v}$ is $\sigma_+$-stable if and only if $\Phi(E)$ is the shift $F[1]$ of a $(\beta,\omega)$-Gieseker stable sheaf $F$ on $X$; therefore $[-1]\circ\Phi$ induces the following identification of moduli spaces: \[M_{\sigma_+}(\mathbf{v}) = M_{\omega}^{\beta}(-\Phi(\mathbf{v})).\]
Moreover, the contraction morphism $\pi^+$ induced by the wall $\ensuremath{\mathcal W}$ is the Li-Gieseker-Uhlenbeck (LGU) morphism to the Uhlenbeck compactification.
Similarly, an object $E$ of class $\mathbf{v}$ is $\sigma_-$-stable if and only if $\Phi(E)$ is the shift $F^\vee[1]$ of the derived dual of a $(-\beta,\omega)$-Gieseker stable sheaf $F$ on $X$.
\end{Prop}
It follows from the above description that a $\sigma_+$-stable object $E$ becomes $\sigma_0$-semistable if and only if $F=\Phi(E)[-1]$ is not locally free or if $F$ is not $\mu$-stable, as these are the sheaves contracted by the Uhlenbeck contraction.
\begin{Prop}\label{Prop:LGU walls of low codimension}
Assume that $\ensuremath{\mathcal H}_{\ensuremath{\mathcal W}}$ contains a primitive isotropic vector $\mathbf{u}$ with $\ell(\mathbf{u})=2$. Suppose that $\mathbf{v}\in\ensuremath{\mathcal H}_{\ensuremath{\mathcal W}}$ satisfies $\mathbf{v}^2>0$ and $\langle \mathbf{v},\mathbf{w}\rangle\geq 0$ for the (unique) effective spherical or exceptional class $\mathbf{w}$ (if it exists).
\begin{enumerate}
\item\label{enum:IsotropicTotallySemistable} If $\ensuremath{\mathcal W}$ is totally semistable for $M_{\sigma_+}(\mathbf{v})$, then either
\begin{enumerate}
\item\label{enum:IsotropicTotallySemistable-HC} $\langle\mathbf{v},\mathbf{u}\rangle=1$ for primitive isotropic $\mathbf{u}\in\ensuremath{\mathcal H}$ with $\ell(\mathbf{u})=2$; or
\item\label{enum:IsotropicTotallySemistable-Exceptional/Spherical} $\langle\mathbf{v},\mathbf{u}\rangle=2$ for primitive isotropic $\mathbf{u}\in\ensuremath{\mathcal H}$ with $\ell(\mathbf{u})=2$ and $\langle\mathbf{v},\mathbf{w}\rangle=0$ for the unique spherical or exceptional class $\mathbf{w}$.
\end{enumerate}
\item\label{enum:IsotropicCodimOne} If $\mathop{\mathrm{codim}}\nolimits(M_{\sigma_+}(\mathbf{v})\setminus M^s_{\sigma_0}(\mathbf{v}))=1$, then either
\begin{enumerate}
\item\label{enum:IsotropicCodimOne <v.u>=2=l(u)}$\langle\mathbf{v},\mathbf{u}\rangle=2=\ell(\mathbf{u})$ for primitive isotropic $\mathbf{u}\in\ensuremath{\mathcal H}$; or
\item\label{enum:IsotropicCodimOne <v.w>=0}$\langle\mathbf{v},\mathbf{w}\rangle=0$ for the unique spherical or exceptional class $\mathbf{w}$.
\end{enumerate}
\item In all other cases, $\mathop{\mathrm{codim}}\nolimits(M_{\sigma_+}(\mathbf{v})\setminus M_{\sigma_0}^s(\mathbf{v}))\geq 2$.
\end{enumerate}
\end{Prop}
\begin{proof}
We assume that $\mathbf{u}_1$, $\mathbf{u}_2$, and $\mathbf{w}$ are labelled and oriented in accordance with \cref{Rem:IsotropicOrientation} and the discussion preceding \cref{Prop:Uhlenbeck morphism}. In particular, we assume that $\ell(\mathbf{u}_1)=2$ and $\langle\mathbf{u}_1,\mathbf{w}\rangle>0$.
For a given $E\in M_{\sigma_+}(\mathbf{v})$, let the Harder-Narasimhan filtration of $E$ with respect to $\sigma_-$ correspond to a decomposition $\mathbf{v}=\sum_i \mathbf{a}_i$. Using Proposition \ref{Prop:HN codim}, we shall estimate the codimension of the sublocus $\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o$ of destabilized objects, which is equal to
\begin{equation}
\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle.
\end{equation}
(I) We first assume that one of the $\mathbf{a}_i$ satisfies $\mathbf{a}_i^2<0$, say $\mathbf{a}_0=b_0 \mathbf{w}$ for an effective spherical or exceptional class $\mathbf{w}$. Then we are in Case \ref{enum:IsotropicLatticeEffectiveExceptional} or \ref{enum:IsotropicLatticeEffectiveSpherical} of Proposition \ref{Prop:isotropic lattice}, so that $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)=2$.
Assume that $\mathbf{a}_1$ and $\mathbf{a}_2$ are isotropic. We may set $\mathbf{a}_1=b_1 \mathbf{u}_1$ and $\mathbf{a}_2=b_2 \mathbf{u}_2$. Then
\begin{equation}\label{eq:l=2, case I}
\begin{split}
& \sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle\\
\geq & (\mathbf{a}_0^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0))+\sum_{i \geq 1}b_0 \langle \mathbf{w},\mathbf{a}_i \rangle
-b_1-b_2+b_1 b_2 \langle \mathbf{u}_1,\mathbf{u}_2 \rangle\\
= & -\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+b_0 \langle \mathbf{w},\mathbf{v} \rangle-b_1-b_2+b_1 b_2 \langle \mathbf{u}_1,\mathbf{u}_2 \rangle\\
\geq & -\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)-b_1-b_2+b_1 b_2 \langle \mathbf{u}_1,\mathbf{u}_2 \rangle,
\end{split}
\end{equation}
where the first inequality follows from Proposition \ref{prop:isotropic} and the second inequality follows from the assumption that $\langle\mathbf{v},\mathbf{w}\rangle\geq 0$.
First suppose that $\mathbf{w}^2=-2$. Then we note that $\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)=-b_0^2$ and $\langle\mathbf{u}_1,\mathbf{w}\rangle$ is even because $\ell(\mathbf{u}_1)=2$ and $\mathbf{w}^2=-2$ is even. From Proposition \ref{prop:isotropic} we also have $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=\langle\mathbf{u}_1,\mathbf{w}\rangle^2\geq 4$. Thus
\begin{equation}\label{eq:l=2, case I spherical}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o &\geq b_0^2+4b_1 b_2-b_1-b_2\geq 1+2b_1b_2+b_1(b_2-1)+b_2(b_1-1)\geq 3.
\end{split}
\end{equation}
So we must have $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\dots,\mathbf{a}_n)^o\geq 3$ in this case.
If instead $\mathbf{w}^2=-1$, then $\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)=\left\lfloor-\frac{b_0^2}{2}\right\rfloor$. Moreover, by \cref{Rem:Even and Odd pairings} $\langle\mathbf{u}_1,\mathbf{w}\rangle$ and $\frac{\langle\mathbf{u}_1,\mathbf{u}_2\rangle}{2}$ are odd. Thus
\begin{equation}\label{eq:l=2, case I exceptional}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o &\geq \frac{b_0^2}{2}+2b_1b_2-b_1-b_2\geq\frac{1}{2}+b_1(b_2-1)+b_2(b_1-1)\geq\frac{1}{2}.
\end{split}
\end{equation}
Thus $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\dots,\mathbf{a}_n)^o\geq 1$ with equality only if $b_0=b_1=b_2=1$. But then $\mathbf{v}=\mathbf{w}+\mathbf{u}_1+\mathbf{u}_2$ so that $\langle\mathbf{v},\mathbf{w}\rangle=\mathbf{w}^2=-1<0$, contrary to the assumption. Thus $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\dots,\mathbf{a}_n)^o\geq 2$ in this case.
Now assume that $\mathbf{a}_1=b_1 \mathbf{u}_j$ and $\mathbf{a}_i^2>0$ for $i>1$. Then
\begin{equation}
\begin{split}
\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle
&\geq \mathbf{a}_0^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+\sum_{i \geq 1}b_0 \langle \mathbf{w},\mathbf{a}_i \rangle
-b_1+\sum_{i \geq 2} b_1 \langle \mathbf{u}_j,\mathbf{a}_i \rangle\\
&\geq -\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+b_0 \langle \mathbf{w},\mathbf{v} \rangle
-b_1+b_1 \langle \mathbf{u}_j,\mathbf{a}_2 \rangle\\
&\geq -\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+b_1(\langle\mathbf{u}_j,\mathbf{a}_2\rangle-1).
\end{split}
\end{equation}
If $\mathbf{w}^2=-2$, then $\ensuremath{\mathcal H}$ is even by Proposition \ref{Prop:isotropic lattice}, so $\ell(\mathbf{u}_j)=2$ implies that $2\mid\langle\mathbf{u}_j,\mathbf{a}_2\rangle$. Thus \[\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\dots,\mathbf{a}_n)^o\geq-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+b_1=b_0^2+b_1\geq2.\]
If $\mathbf{w}^2=-1$, then \[\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o\geq-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)=-\left\lfloor-\frac{b_0^2}{2}\right\rfloor\geq 1,\] with equality in the last inequality only if $b_0=1$, $\langle\mathbf{v},\mathbf{w}\rangle=0$ and $\langle\mathbf{u}_j,\mathbf{a}_2\rangle=1$. But \[1\leq\langle\mathbf{v},\mathbf{u}_j\rangle=\langle\mathbf{w},\mathbf{u}_j\rangle+\langle\mathbf{a}_2,\mathbf{u}_j\rangle=\langle\mathbf{w},\mathbf{u}_j\rangle+1,\] which forces $j=1$. Writing $\mathbf{a}_2=x\mathbf{u}_1+y\mathbf{w}$, we see that $1=\langle\mathbf{u}_1,\mathbf{a}_2\rangle$ forces $y=1$ and $\langle\mathbf{u}_1,\mathbf{w}\rangle=1$. Thus, in addition to $\langle\mathbf{v},\mathbf{w}\rangle=0$, we also have $\langle\mathbf{v},\mathbf{u}_1\rangle=2$. So $\mathbf{v}$ falls into both types \ref{enum:IsotropicCodimOne <v.u>=2=l(u)} and \ref{enum:IsotropicCodimOne <v.w>=0}. Otherwise, we get $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\dots,\mathbf{a}_n)^o\geq 2$.
We can now assume that there are no positive classes in the Harder-Narasimhan factors, i.e. $\mathbf{v}=b_0 \mathbf{w}+b_1 \mathbf{u}_j$. But $\mathbf{v}^2>0$ forces $j=1$ (see Figure \ref{fig:IsotropicWithNegative}), so we may assume this outright. Then $0 \leq \langle \mathbf{v},\mathbf{w} \rangle=b_0 \mathbf{w}^2+b_1 \langle \mathbf{u}_1,\mathbf{w} \rangle$, so our estimate becomes
\begin{equation}\label{eq:l=2, case I no positive}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o=&\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle\\
= & b_0^2 \mathbf{w}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)-b_1+b_0b_1\langle \mathbf{w},\mathbf{u}_1\rangle\\
\geq & b_0^2 \mathbf{w}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+\frac{b_0b_1\langle \mathbf{w},\mathbf{u}_1\rangle}{2}+\frac{b_1}{2}\left(b_0\langle \mathbf{w},\mathbf{u}_1\rangle-2\right)\\
\geq & b_0^2 \mathbf{w}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+\frac{b_0b_1\langle \mathbf{w},\mathbf{u}_1\rangle}{2}+\frac{b_1}{2}\left(\langle\mathbf{v},\mathbf{u}_1\rangle-2\right).
\end{split}
\end{equation}
If $\mathbf{w}^2=-2$, then $b_0^2\mathbf{w}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)=-b_0^2$, and $\ensuremath{\mathcal H}$ is even so that again $\ell(\mathbf{u}_1)=2$ implies that $2\mid\langle\mathbf{v},\mathbf{u}_1\rangle$. The last line of \eqref{eq:l=2, case I no positive} then becomes \[\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o\geq-b_0^2+\frac{b_0 b_1\langle \mathbf{w},\mathbf{u}_1\rangle}{2}+\frac{b_1}{2}(\langle \mathbf{v},\mathbf{u}_1\rangle-2)=\frac{b_0}{2}\langle \mathbf{v},\mathbf{w}\rangle+\frac{b_1}{2}(\langle \mathbf{v},\mathbf{u}_1\rangle-2)\geq 0,\] with equality only if $\langle \mathbf{v},\mathbf{w}\rangle=0$ and $\langle \mathbf{v},\mathbf{u}_1\rangle=2$, as in Case \ref{enum:IsotropicTotallySemistable-Exceptional/Spherical}.
Moreover, $\mathop{\mathrm{codim}}\nolimits \ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o\geq 2$ unless $\langle \mathbf{w},\mathbf{u}_1\rangle=2$ and $\mathbf{v}=\mathbf{w}+2\mathbf{u}_1$. In this case we have $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o=1$ as in Case \ref{enum:IsotropicCodimOne <v.u>=2=l(u)}.
If $\mathbf{w}^2=-1$, then $b_0^2\mathbf{w}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)=\left\lceil-\frac{b_0^2}{2}\right\rceil$, so the last line of \eqref{eq:l=2, case I no positive} gives \begin{equation}\label{eqn:l=2 case I no positive exceptional}\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o\geq-\frac{b_0^2}{2}+\frac{b_0 b_1\langle \mathbf{w},\mathbf{u}_1\rangle}{2}+\frac{b_1}{2}(\langle \mathbf{v},\mathbf{u}_1\rangle-2)=\frac{b_0}{2}\langle \mathbf{v},\mathbf{w}\rangle+\frac{b_1}{2}(\langle \mathbf{v},\mathbf{u}_1\rangle-2)\geq0\end{equation}
unless $\langle \mathbf{v},\mathbf{u}_1\rangle=1$, in which case $b_0=1=\langle\mathbf{w},\mathbf{u}_1\rangle$. But then $b_0^2\mathbf{w}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)=0$, so \[\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o\geq \frac{b_1}{2}-\frac{b_1}{2}=0,\] and indeed $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o=0$ when $\langle\mathbf{v},\mathbf{u}_1\rangle=1$ as in Case \ref{enum:IsotropicTotallySemistable-HC}. We get equality in \eqref{eqn:l=2 case I no positive exceptional} only if $\langle\mathbf{v},\mathbf{w}\rangle=0$ and $\langle\mathbf{v},\mathbf{u}_1\rangle=2$. We can derive from these two equations that $\langle\mathbf{w},\mathbf{u}_1\rangle=1$ and $\mathbf{v}=2\mathbf{w}+2\mathbf{u}_1$, in which case indeed $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o=0$ as in Case \ref{enum:IsotropicTotallySemistable-Exceptional/Spherical}. Moreover, $\mathop{\mathrm{codim}}\nolimits \ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o\geq 2$ unless $\frac{b_0}{2}\langle\mathbf{v},\mathbf{w}\rangle=1$ and $\langle \mathbf{v},\mathbf{u}_1\rangle=2$; or $\langle\mathbf{v},\mathbf{w}\rangle=0$ and $\frac{b_1}{2}(\langle\mathbf{v},\mathbf{u}_1\rangle-2)=1$; or $\frac{b_0}{2}\langle\mathbf{v},\mathbf{w}\rangle=\frac{1}{2}$ and $\langle\mathbf{v},\mathbf{u}_1\rangle=2$. In the latter two cases, however, we would have $\langle\mathbf{w},\mathbf{u}_1\rangle=2$, which is impossible, as noted in \cref{Rem:Even and Odd pairings}. Thus we are left with the first case, in which $\mathbf{v}=2\mathbf{w}+3\mathbf{u}_1$ and $\langle\mathbf{w},\mathbf{u}_1\rangle=1$. Notice that $\langle\mathbf{v},\mathbf{u}_1\rangle=2$ so that we are in \cref{enum:IsotropicCodimOne <v.u>=2=l(u)}.
Finally, assume that other than $\mathbf{a}_0=b_0 \mathbf{w}$, $\mathbf{a}_i^2>0$ for all $i>0$. Then the estimate becomes
\begin{equation}\label{eq:l=2 case I no isotropic}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o=&\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle\\
= & -\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+b_0\langle \mathbf{w},\mathbf{v}\rangle+\sum_{0<i<j}\langle \mathbf{a}_i,\mathbf{a}_j\rangle\geq \frac{b_0^2}{2}>0.
\end{split}
\end{equation}
Moreover, $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o\geq 2$ unless $\langle \mathbf{w},\mathbf{v}\rangle=0$ and $\mathbf{v}=\mathbf{w}+\mathbf{a}_1$, in which case $\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o$ has codimension one as in \cref{enum:IsotropicCodimOne <v.w>=0}. Note that in this case we require $0<\mathbf{a}_1^2=(\mathbf{v}-\mathbf{w})^2=\mathbf{v}^2+\mathbf{w}^2$, so $\mathbf{v}^2>2$ or $\mathbf{v}^2>1$ if $\mathbf{w}^2=-2$ or $\mathbf{w}^2=-1$, respectively.
(II) We next assume that $\mathbf{a}_i^2 \geq 0$ for all $i$.
Suppose first that $\mathbf{a}_1=b_1 \mathbf{u}_1$ and $\mathbf{a}_2=b_2 \mathbf{u}_2$. Then we can be in any case of Proposition \ref{Prop:isotropic lattice}, and our estimate now becomes
\begin{equation}\label{eq:l=2 case II}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o=& \sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle\\
\geq &
-b_1-\left\lfloor\frac{b_2\ell(\mathbf{u}_2)}{2}\right\rfloor+b_1 b_2 \langle \mathbf{u}_1,\mathbf{u}_2 \rangle\\
\geq &b_1(b_2-1)+b_2(b_1-1)\geq0,
\end{split}
\end{equation}
with equality only if $b_1=b_2=1$, $\ell(\mathbf{u}_2)=2$, and $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=2$, so that $\ensuremath{\mathcal H}$ falls into \cref{enum:IsotropicLatticeEffectiveExceptional} of \cref{Prop:isotropic lattice}. In particular, we have $\langle\mathbf{v},\mathbf{u}_1\rangle=2$ and $\langle\mathbf{v},\mathbf{w}\rangle=\langle\mathbf{u}_1,\mathbf{w}\rangle+\langle\mathbf{u}_2,\mathbf{w}\rangle=0$, as in \cref{enum:IsotropicTotallySemistable-Exceptional/Spherical}. Furthermore, by \cref{Rem:Even and Odd pairings} we have $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o=1$ only if $\ensuremath{\mathcal H}$ falls into Case \ref{enum:IsotropicLatticeEffectiveExceptional} of Proposition \ref{Prop:isotropic lattice} and $\mathbf{v}=2\mathbf{u}_1+\mathbf{u}_2$ or $\mathbf{u}_1+2\mathbf{u}_2$ with $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=2$, so that $\langle\mathbf{v},\mathbf{u}\rangle=2=\ell(\mathbf{u})$ as in \cref{enum:IsotropicCodimOne <v.u>=2=l(u)}, or $\ensuremath{\mathcal H}$ falls into Case \ref{enum:IsotropicLatticeNoEffectiveNegatives} of \cref{Prop:isotropic lattice} and $\mathbf{v}=\mathbf{u}_1+\mathbf{u}_2$ with $\ell(\mathbf{u}_2)=1$ and $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=2$, so that $\langle\mathbf{v},\mathbf{u}_1\rangle=2$, as in \cref{enum:IsotropicCodimOne <v.u>=2=l(u)}. Otherwise, $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o\geq 2$.
Now we assume that $\mathbf{a}_1=b_1 \mathbf{u}_j$ and $\mathbf{a}_i^2>0$ for $i \geq 2$.
In this case, we also see that
\begin{equation}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o=&\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle\\
=&-\left\lfloor\frac{b_1\ell(\mathbf{u}_j)}{2}\right\rfloor+\sum_{i>1}b_1\langle \mathbf{u}_j,\mathbf{a}_i\rangle+\sum_{1<i<k}\langle \mathbf{a}_i,\mathbf{a}_k\rangle\\
\geq&b_1(\langle \mathbf{v},\mathbf{u}_j\rangle-1)+\sum_{1<i<k}\langle \mathbf{a}_i,\mathbf{a}_k\rangle\\
\geq&b_1(\langle \mathbf{v},\mathbf{u}_j\rangle-1)\geq 0.
\end{split}
\end{equation}
Thus $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o=0$ only if $\langle\mathbf{v},\mathbf{u}_j\rangle=1$, $\ell(\mathbf{u}_j)=2$, and $\mathbf{v}=b_1\mathbf{u}_j+\mathbf{a}_2$, as in \cref{enum:IsotropicTotallySemistable-HC}. Similarly, $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o=1$ only if either $\mathbf{v}=\mathbf{u}_j+\mathbf{a}_2$ with $\langle\mathbf{v},\mathbf{u}_j\rangle=2$ and $\ell(\mathbf{u}_j)=2$ as in \cref{enum:IsotropicCodimOne <v.u>=2=l(u)}, or $\mathbf{v}=\mathbf{u}_2+\mathbf{a}_2$, $2\mathbf{u}_2+\mathbf{a}_2$ with $\langle\mathbf{v},\mathbf{u}_2\rangle=1=\ell(\mathbf{u}_2)$. But this latter case is impossible. Indeed, $\ensuremath{\mathcal H}$ must be even by \cref{Rem:Odd lattice} and $\ell(\mathbf{u}_1)\neq\ell(\mathbf{u}_2)$. But then we see that we can write $\mathbf{u}_1=-\frac{\mathbf{a}_2^2}{2}\mathbf{u}_2+\mathbf{a}_2$ so that $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=1$, which is impossible as $\ell(\mathbf{u}_1)=2$.
Finally, if $\mathbf{a}_i^2>0$ for all $i$, then $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o\geq 2$ by Proposition \ref{Prop:HN filtration all positive classes}.
\end{proof}
\begin{Rem}
Proposition \ref{Prop:LGU walls of low codimension} can be proven by using the Fourier-Mukai transform $\Phi$ to translate the problem to the equivalent problem of determining the codimensions of the strictly $\mu$-semistable locus and the non-locally free locus. One could then use the estimates in \cite{Yos16a} to get the result. However, there is a small error in Case C there, which misses the spherical case of \cref{enum:IsotropicTotallySemistable-Exceptional/Spherical}, so we use the method above. We explore this example more fully in \cref{prop:irred-comp:v^2=2}.
\end{Rem}
\begin{Rem}
The reader may notice that when $\langle \mathbf{v},\mathbf{w}\rangle=0$ and $\langle \mathbf{v},\mathbf{u}\rangle=2=\ell(\mathbf{u})$, we simultaneously claim that $\mathop{\mathrm{codim}}\nolimits(M_{\sigma_+}(\mathbf{v})\backslash M^s_{\sigma_0}(\mathbf{v}))$ is both zero and one. Indeed, we shall prove in \cref{App: exceptional case} that for one choice of the determinant $L$, $M_{\sigma_+}(\mathbf{v},L)$ contains a connected component with two irreducible components, $M_0$ and $M_1$. For $M_1$, $\ensuremath{\mathcal W}$ is a totally semistable wall inducing a $\ensuremath{\mathbb{P}}^1$-fibration over the singular locus, where it meets $M_0$. The strictly $\sigma_0$-semistable locus on $M_0$ is this singular locus, which is a divisor. When $\mathbf{w}^2=-2$, this describes all of $M_{\sigma_+}(\mathbf{v},L)$ as it is connected. For the other determinant, $L+K_X$, the strictly $\sigma_0$-semistable locus is a divisor, if non-empty.
\end{Rem}
Now we demonstrate the converse of \cref{Prop:LGU walls of low codimension} in the following sequence of lemmas. Furthermore, we determine precisely when the divisor in \cref{enum:IsotropicCodimOne} gets contracted. We make free use of the Fourier-Mukai transform $\Phi$ to translate the problem to that of moduli of sheaves.
\begin{Lem}\label{Lem: Hilbert-Chow}
Assume that $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ contains a primitive isotropic vector $\mathbf{u}$ such that $\ell(\mathbf{u})=2$ and that $\langle \mathbf{v},\mathbf{u}\rangle=1$ for $\mathbf{v}$ minimal. Then $\mathbf{v}^2$ is odd and $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ contains an exceptional class. Moreover, $\ensuremath{\mathcal W}$ is totally semistable and, if $\mathbf{v}^2>1$, induces a divisorial contraction.
\end{Lem}
\begin{proof}
Let us first prove that $\mathbf{v}^2$ is odd and $\ensuremath{\mathbb{H}}H$ contains an exceptional class. Write $\mathbf{v}=(r,c,\frac{s}{2})$ and $\mathbf{u}=(2r',2c',s')$, with $r'+s'$ odd, as $\ell(\mathbf{u})=2$. Then as $r\equiv s\pmod 2$, it follows that $$1=\langle\mathbf{v},\mathbf{u}\rangle=2c\cdot c'-r's-rs'\equiv-r(r'+s')\equiv r\pmod2,$$ from which we see that $r$ is odd, or equivalently $\mathbf{v}^2$ is odd. Thus $\mathbf{v}-\frac{\mathbf{v}^2+1}{2}\mathbf{u}\in\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ is an exceptional class.
As $\ensuremath{\mathbb{H}}H$ falls into \cref{enum:IsotropicLatticeEffectiveExceptional} of \cref{Prop:isotropic lattice}, we see that $\ell(\mathbf{u}_2)=2$ and \begin{equation}\label{eqn:u2 and u1}
\mathbf{u}_2=\mathbf{u}_1+2\langle\mathbf{u}_1,\mathbf{w}\rangle\mathbf{w}.
\end{equation}
We observe from \eqref{eqn:u2 and u1} that we may assume that $\mathbf{u}=\mathbf{u}_1$. Indeed, if $\langle\mathbf{v},\mathbf{u}_2\rangle=1$, then pairing \eqref{eqn:u2 and u1} with $\mathbf{v}$, we get $$1=\langle \mathbf{v},\mathbf{u}_2\rangle=\langle\mathbf{v},\mathbf{u}_1\rangle+2\langle\mathbf{u}_1,\mathbf{w}\rangle\langle\mathbf{v},\mathbf{w}\rangle\geq \langle\mathbf{v},\mathbf{u}_1\rangle>0,$$ so that $\langle\mathbf{v},\mathbf{u}_1\rangle=1$ as well. It follows that $M_{\sigma_+}(\mathbf{v})\cong M_{\omega}^{\beta}(-\Phi(\mathbf{v}))$ is isomorphic to the Hilbert scheme of points. Finally, $\ensuremath{\mathcal W}$ is the Hilbert-Chow wall inducing the Hilbert-Chow morphism $\mathop{\mathrm{Hilb}}\nolimits^n(X)\to\mathop{\mathrm{Sym}}\nolimits^n(X)$, which is a divisorial contraction for $1<n=\frac{\mathbf{v}^2+1}{2}$, and every ideal sheaf is strictly semistable as in \cite[Proposition 13.1]{Nue14b}.
\end{proof}
Although the behavior in \cref{Lem: Hilbert-Chow} is analogous with the corresponding case on K3 surfaces, we see some new behavior in the next two lemmas. We begin with the exceptional case of \cref{enum:IsotropicTotallySemistable-Exceptional/Spherical} in \cref{Prop:LGU walls of low codimension}.
\begin{Lem}\label{Lem:P1FibrationExceptional}
Suppose that $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ contains a primitive isotropic $\mathbf{u}$ such that $\langle\mathbf{v},\mathbf{u}\rangle=2=\ell(\mathbf{u})$ and $\langle \mathbf{v},\mathbf{w}\rangle=0$ for an exceptional class $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$. Then $\ensuremath{\mathcal W}$ is totally semistable for, and induces a $\ensuremath{\mathbb{P}}^1$-fibration on, precisely one component of $M_{\sigma_+}(\mathbf{v},2L'+K_X)$.
\end{Lem}
\begin{proof}
As $\langle\mathbf{v},\mathbf{w}\rangle=0$, we see from \eqref{eqn:u2 and u1} that $\langle\mathbf{v},\mathbf{u}_2\rangle=\langle\mathbf{v},\mathbf{u}_1\rangle$, so we may assume that $\mathbf{u}=\mathbf{u}_1$. After applying $[-1]\circ\Phi$, and possibly tensoring by a line bundle, we may assume that $\mathbf{v}=(2,0,-1)$, $\mathbf{w}=(1,0,\frac{1}{2})$, and $M_{\sigma_+}(\mathbf{v})$ is isomorphic to $M_H(-\Phi(\mathbf{v}))$, the moduli space of $H$-Gieseker semistable sheaves of Mukai vector $-\Phi(\mathbf{v})$ with respect to a generic polarization $H$. The contraction $\pi^+$ is then the LGU-contraction morphism as in Proposition \ref{Prop:Uhlenbeck morphism}. As mentioned in \cite[Remark 2.3]{Yos16a} and proven in Section \ref{App: exceptional case} (see \cref{prop:connected}), there is precisely one component of $M_{\sigma_+}(\mathbf{v},K_X)$ consisting of stable non-locally free sheaves $E$ fitting into the short exact sequence $$0\to E\to F\mor[(\phi_1,\phi_2)]\ensuremath{\mathbb{C}}_p\oplus\ensuremath{\mathbb{C}}_q\to 0,$$ where $F:=\ensuremath{\mathcal O}_X\oplus\ensuremath{\mathcal O}_X(K_X)$ and $\phi_1:\ensuremath{\mathcal O}_X\to\ensuremath{\mathbb{C}}_p\oplus\ensuremath{\mathbb{C}}_q$ and $\phi_2:\ensuremath{\mathcal O}_X(K_X)\to\ensuremath{\mathbb{C}}_p\oplus\ensuremath{\mathbb{C}}_q$ are both surjective.
The polystable object in the same S-equivalence class as $E$ with respect to $\sigma_0$ is $(\ensuremath{\mathbb{C}}_p\oplus\ensuremath{\mathbb{C}}_q)[-1]\oplus F$, and the set of distinct $\sigma_+$-stable objects in the same S-equivalence class is parametrized by $\ensuremath{\mathbb{P}}\mathop{\mathrm{Hom}}\nolimits(F,\ensuremath{\mathbb{C}}_p)\times\ensuremath{\mathbb{P}}\mathop{\mathrm{Hom}}\nolimits(F,\ensuremath{\mathbb{C}}_q)/(\mathop{\mathrm{Aut}}\nolimits(F)/\ensuremath{\mathbb{C}}^*)$, which is a curve birational to $\ensuremath{\mathbb{P}}^1$. Thus $\ensuremath{\mathcal W}$ is totally semistable for this component and induces a $\ensuremath{\mathbb{P}}^1$-fibration.
\end{proof}
We get similar behavior in the spherical case:
\begin{Lem}\label{Lem:P1FibrationSpherical}
Suppose that $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ contains a primitive isotropic $\mathbf{u}$ such that $\langle \mathbf{v},\mathbf{u}\rangle=2=\ell(\mathbf{u})$ and $\langle \mathbf{v},\mathbf{w}\rangle=0$ for a spherical class $\mathbf{w}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$.
Then $\ensuremath{\mathcal W}$ is totally semistable for, and induces a $\ensuremath{\mathbb{P}}^1$-fibration on, precisely one component of $M_{\sigma_+}(\mathbf{v},L)$ with $L \equiv Z+\frac{\mathop{\mathrm{rk}}(\mathbf{v})}{2}K_X \pmod 2$,
where $Z$ is a nodal cycle. Moreover
$M_{\sigma_+}(\mathbf{v},L+K_X)=M_{\sigma_0}^s(\mathbf{v},L+K_X)$.
\end{Lem}
\begin{proof}
As in the proof of \cref{Lem:P1FibrationExceptional}, $\langle\mathbf{v},\mathbf{w}\rangle=0$ means that $\langle\mathbf{v},\mathbf{u}_2\rangle=\langle\mathbf{v},\mathbf{u}_1\rangle$, so we may assume that $\mathbf{u}=\mathbf{u}_1$, and applying $[-1]\circ\Phi$, we may assume that $\mathbf{v}=(2,D,s)$ with $\gcd(2,D)=1$ and $s\in\ensuremath{\mathbb{Z}}$ and $\mathbf{w}=(2,D,\frac{D^2+2}{4})$, where $4\mid D^2+2$ since $D=c_1(\mathbf{v})\equiv Z\pmod 2$ for a nodal cycle $Z$. Then $M_{\sigma_+}(\mathbf{v})$ is isomorphic to
$M_H(2,D,s)$. Let $T$ be the stable spherical bundle with $\mathbf{v}(T)=\mathbf{w}$. Then we have a family of non-locally free sheaves $E$ parameterized by a $\ensuremath{\mathbb{P}}^1$-bundle over $X$:
$$
0 \to E \to T \to \ensuremath{\mathbb{C}}_p \to 0.
$$
This $\ensuremath{\mathbb{P}}^1$-bundle is one component of $M_{\sigma_+}(\mathbf{v},L)$, and each such $E$ is strictly $\sigma_0$-semistable, S-equivalent to $T\oplus\ensuremath{\mathbb{C}}_p[-1]$. As $\det(T)\equiv Z+\frac{\mathop{\mathrm{rk}}(\mathbf{v})}{2}K_X\pmod 2$ and $\det(\ensuremath{\mathbb{C}}_p)=0$ so that $\det(E)\equiv Z+\frac{\mathop{\mathrm{rk}}(\mathbf{v})}{2}K_X\pmod2$, we get the first claim.
For the other determinant, notice that as $\gcd(2,D)=1$, all stable sheaves are $\mu$-stable, so for any non-locally free $E\in M_H((2,D,s),L+K_X)$, $E^{\vee\vee}$ would be a $\mu$-stable locally free sheaf in $M_H((2,D,\frac{D^2+2}{4}),L+K_X)=\emptyset$. Thus every $E\in M_H((2,D,s),L+K_X)$ is a $\mu$-stable locally free sheaf so that $M_{\sigma_+}(\mathbf{v},L+K_X)=M_{\sigma_0}^s(\mathbf{v},L+K_X)$, as claimed.
\end{proof}
Having considered the behavior of totally semistable walls, we move on to determining when the codimension one strictly $\sigma_0$-semistable locus gets contracted. We begin with \cref{enum:IsotropicCodimOne <v.u>=2=l(u)}:
\begin{Lem}\label{Lem: isotropic divisorial l=2 1}
Suppose that $\langle \mathbf{v},\mathbf{u}_1\rangle=2=\ell(\mathbf{u}_1)$ for $\mathbf{v}$ minimal. Assume further that
\begin{equation}\label{eqn:RestrictionsOnv^2}
\begin{cases}
\mathbf{v}^2\geq 4,\;\;\;&\mbox{in \cref{enum:IsotropicLatticeNoEffectiveNegatives} of \cref{Prop:isotropic lattice}};\\
\mathbf{v}^2\geq 3,\mathbf{v}^2\neq 4,\;\;\;&\mbox{in \cref{enum:IsotropicLatticeEffectiveExceptional} of \cref{Prop:isotropic lattice}};\\ \mathbf{v}^2>2,\;\;\;&\mbox{ in \cref{enum:IsotropicLatticeEffectiveSpherical} of \cref{Prop:isotropic lattice}}.
\end{cases}
\end{equation}
Then $\ensuremath{\mathcal W}$ is not a totally semistable wall and induces a divisorial contraction on $M_{\sigma_+}(\mathbf{v},L)$.
\end{Lem}
\begin{proof}
In all cases, we will show that our assumptions imply that $\ensuremath{\mathcal W}$ is not totally semistable for $M_{\sigma_+}(\mathbf{v}-\mathbf{u}_1,L'),M_{\sigma_+}(\mathbf{v}-\mathbf{u}_1,L'+K_X)$. Assuming we have done this, then taking $F\in M_{\sigma_0}^s(\mathbf{v}-\mathbf{u}_1,L')$ (or $M_{\sigma_0}^s(\mathbf{v}-\mathbf{u}_1,L'+K_X)$) and $G\in M_{\sigma_0}(\mathbf{u}_1)$, we get a $\ensuremath{\mathbb{P}}^1$ worth of distinct extensions $$0\to G\to E\to F\to 0$$ of objects in $M_{\sigma_+}(\mathbf{v},L)$ (or $M_{\sigma_+}(\mathbf{v},L+K_X)$, respectively) that are S-equivalent with respect to $\sigma_0$. A quick dimension count shows that these sweep out a divisor.
Now we show that $\ensuremath{\mathcal W}$ is not totally semistable for $M_{\sigma_+}(\mathbf{v}-\mathbf{u}_1,L'),M_{\sigma_+}(\mathbf{v}-\mathbf{u}_1,L'+K_X)$. Under the assumptions in \eqref{eqn:RestrictionsOnv^2}, we may have $(\mathbf{v}-\mathbf{u}_1)^2=-1$ in \cref{enum:IsotropicLatticeEffectiveExceptional} of \cref{Prop:isotropic lattice}. But then $\mathbf{v}-\mathbf{u}_1=\mathbf{w}$ and $\langle\mathbf{u}_1,\mathbf{w}\rangle=2$, which is impossible by \cref{Rem:Even and Odd pairings}. Otherwise, $(\mathbf{v}-\mathbf{u}_1)^2=\mathbf{v}^2-4\geq 0$, with equality only if $\mathbf{v}=\mathbf{u}_1+k\mathbf{u}_2$. In this case, $\langle\mathbf{v},\mathbf{u}_1\rangle=2$ implies that $k=1$, $\langle\mathbf{u}_1,\mathbf{u}_2\rangle=2$, $\ell(\mathbf{u}_2)=1$, and $\ensuremath{\mathbb{H}}H$ falls into \cref{enum:IsotropicLatticeNoEffectiveNegatives} by \cref{Rem:Even and Odd pairings} and \eqref{eqn:RestrictionsOnv^2}. So $\mathbf{v}-\mathbf{u}_1=\mathbf{u}_2$, and $M_{\sigma_0}^s(\mathbf{u}_2)=M_{\sigma_0}(\mathbf{u}_2)$ so that $\ensuremath{\mathcal W}$ is not totally semistable for $M_{\sigma_+}(\mathbf{u}_2)$ (or a wall at all).
It remains to show that $\ensuremath{\mathcal W}$ is not totally semistable when $(\mathbf{v}-\mathbf{u}_1)^2>0$. As both conditions \ref{enum:IsotropicTotallySemistable-HC} and \ref{enum:IsotropicTotallySemistable-Exceptional/Spherical} of \cref{Prop:LGU walls of low codimension} for $\ensuremath{\mathcal W}$ being totally semistable require the existence of a spherical/exceptional class (see \cref{Lem: Hilbert-Chow} for \cref{enum:IsotropicTotallySemistable-HC}), $\ensuremath{\mathcal W}$ is automatically not totally semistable in \cref{enum:IsotropicLatticeNoEffectiveNegatives} of \cref{Prop:isotropic lattice}. In \cref{enum:IsotropicLatticeEffectiveExceptional,enum:IsotropicLatticeEffectiveSpherical}, if we write $\mathbf{v}=x\mathbf{u}_1+y\mathbf{w}$ with $x,y\in\ensuremath{\mathbb{Z}}_{\geq 0}$, then by \cref{Rem:Even and Odd pairings}, $\langle\mathbf{v},\mathbf{u}_1\rangle=2$ and $\mathbf{v}^2>4$ is equivalent to $x>2$, $y=2$ and $\langle\mathbf{u}_1,\mathbf{w}\rangle=1$ in \cref{enum:IsotropicLatticeEffectiveExceptional}, while in \cref{enum:IsotropicLatticeEffectiveSpherical}, it is equivalent to $x>1$, $y=1$ and $\langle\mathbf{u}_1,\mathbf{w}\rangle=2$. Thus $\langle\mathbf{v}-\mathbf{u}_1,\mathbf{w}\rangle\geq 0$, and in case of equality, we may choose $F$ above to be in the component of $M_{\sigma_+}(\mathbf{v}-\mathbf{u}_1,L')$ (or $M_{\sigma_+}(\mathbf{v}-\mathbf{u}_1,L'+K_X)$) that contains $\sigma_0$-stable objects, as guaranteed by \cref{Lem:P1FibrationExceptional,Lem:P1FibrationSpherical}. If $\langle\mathbf{v}-\mathbf{u}_1,\mathbf{w}\rangle>0$, then $\langle\mathbf{v}-\mathbf{u}_1,\mathbf{u}_2\rangle>\langle\mathbf{v}-\mathbf{u}_1,\mathbf{u}_1\rangle=2$, so $\ensuremath{\mathcal W}$ is not totally semistable for $M_{\sigma_+}(\mathbf{v}-\mathbf{u}_1)$ by \cref{Prop:LGU walls of low codimension}, as claimed.
\end{proof}
It is worth noting that the possibilities excluded by the condition \eqref{eqn:RestrictionsOnv^2} have either been dealt with already, or are irrelevant. Indeed, suppose $\langle\mathbf{v},\mathbf{u}_1\rangle=2$ and $0<\mathbf{v}^2<4$. If $\mathbf{v}^2=3$, then $(\mathbf{v}-\mathbf{u}_1)^2=-1$, so we must be in \cref{enum:IsotropicLatticeEffectiveExceptional} of \cref{Prop:isotropic lattice}, which was included in \eqref{eqn:RestrictionsOnv^2}. If $\mathbf{v}^2=2$, then $(\mathbf{v}-\mathbf{u}_1)^2=-2$, so in \cref{enum:IsotropicLatticeEffectiveSpherical} we have already seen (and will prove in \cref{App: exceptional case}) that the divisorial component of the strictly $\sigma_0$-semistable locus is not contracted, while in \cref{enum:IsotropicLatticeNoEffectiveNegatives} we see that $\ensuremath{\mathcal W}$ is not a wall for $\mathbf{v}$. This is also the case if $\mathbf{v}^2=1$. Finally, if $\mathbf{v}^2=4$ in \cref{enum:IsotropicLatticeEffectiveExceptional}, then again we have already seen (and will prove in \cref{App: exceptional case}) that the divisorial component of the strictly $\sigma_0$-semistable locus is not contracted.
Now we consider \cref{enum:IsotropicCodimOne <v.w>=0} of \cref{Prop:LGU walls of low codimension}:
\begin{Lem}\label{Lem: isotropic divisorial l=2 2}
Suppose that $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ contains a primitive isotropic vector $\mathbf{u}$ with $\ell(\mathbf{u})=2$ and $\mathbf{v}^2>2$. If $\langle \mathbf{v},\mathbf{w}\rangle=0$ for an effective spherical class $\mathbf{w}$, then $\ensuremath{\mathcal W}$ induces a divisorial contraction.
\end{Lem}
\begin{proof}
Let $T$ be the unique $\sigma_0$-stable spherical object of class $\mathbf{w}$. Consider $\mathbf{a}:=\mathbf{v}-\mathbf{w}$. Then $\mathbf{a}^2=\mathbf{v}^2-2>0$ and $\langle \mathbf{a},\mathbf{w}\rangle=2$. By \cref{Prop:isotropic lattice} we see that $\ensuremath{\mathbb{H}}H$ is even, and from $\ell(\mathbf{u})=2$ it follows that $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)=2$. Thus $\langle\mathbf{a},\mathbf{u}_i\rangle$ is even and at least 2, by \cref{Rem:Even and Odd pairings}. By \cref{Prop:LGU walls of low codimension}, $M_{\sigma_0}^s(\mathbf{a})\neq\varnothing$, so letting $A$ vary in $M^s_{\sigma_0}(\mathbf{a})$, we see that the $\ensuremath{\mathbb{P}}^1$'s of S-equivalent extensions $$0\to A\to E\to T\to 0$$ sweep out a contracted divisor in $M_{\sigma_+}(\mathbf{v})$.
\end{proof}
With the exact same proof, one can show that the analogous situation for $\mathbf{w}$ exceptional results in a divisor of strictly $\sigma_0$-semistable objects that does not get contracted.
\begin{Lem}\label{Lem:IsotropicNoncontractedDivisor l=2}
Suppose that $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ contains a primitive isotropic vector $\mathbf{u}$ with $\ell(\mathbf{u})=2$ and $\mathbf{v}^2\neq 1,4$. If $\langle\mathbf{v},\mathbf{w}\rangle=0$ for an effective exceptional class $\mathbf{w}$, then the locus $M_{\sigma_+}(\mathbf{v})\backslash M_{\sigma_0}^s(\mathbf{v})$ is a divisor that is not contracted.
\end{Lem}
\begin{Lem}\label{Lem: isotropic divisorial l=2 3}
Let $\ensuremath{\mathcal W}$ be a potential wall and $\mathbf{v}$ minimal, and suppose that $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ contains a primitive isotropic class $\mathbf{u}$ such that $\ell(\mathbf{u})=2$. Assume that either $\langle \mathbf{v},\mathbf{u}\rangle=1$, or $\langle\mathbf{v},\mathbf{u}\rangle=2$ and
\begin{equation}\label{eqn:RestrictionsOnv^2-2}
\begin{cases}
\mathbf{v}^2\geq 4,\;\;\;&\mbox{in \cref{enum:IsotropicLatticeNoEffectiveNegatives} of \cref{Prop:isotropic lattice}};\\
\mathbf{v}^2\geq 3,\mathbf{v}^2\neq 4,\;\;\;&\mbox{in \cref{enum:IsotropicLatticeEffectiveExceptional} of \cref{Prop:isotropic lattice}};\\ \mathbf{v}^2>2,\;\;\;&\mbox{ in \cref{enum:IsotropicLatticeEffectiveSpherical} of \cref{Prop:isotropic lattice}}.
\end{cases}
\end{equation}
Then $\ensuremath{\mathcal W}$ induces a divisorial contraction on $M_{\sigma_+}(\mathbf{v},L)$.
\end{Lem}
\begin{proof}
The class $\mathbf{u}$ is automatically effective. By \cref{Lem: Hilbert-Chow,Lem: isotropic divisorial l=2 1}, the only remaining case is $\mathbf{u}=\mathbf{u}_2$ and $\langle\mathbf{v},\mathbf{u}_2\rangle=2$.
First, suppose that $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ admits an effective exceptional or spherical class $\mathbf{w}$. Then from the minimality of $\mathbf{v}$ and the assumptions in \eqref{eqn:RestrictionsOnv^2-2}, we have $\langle \mathbf{v},\mathbf{w}\rangle> 0$. Indeed, we may write $\mathbf{v}=x\mathbf{u}_2+y\mathbf{w}$ with $x,y\in\ensuremath{\mathbb{Z}}$ by \cref{Prop:isotropic lattice}, so the conditions $\langle\mathbf{v},\mathbf{u}_2\rangle=2$ and $\langle\mathbf{v},\mathbf{w}\rangle=0$ imply that $\mathbf{v}^2=4$ or $2$ if $\mathbf{w}$ is exceptional or spherical, respectively, contrary to \eqref{eqn:RestrictionsOnv^2-2}. Thus by \eqref{eqn:u2 and u1} (and the analogue for spherical $\mathbf{w}$), we must have $0<\langle \mathbf{u}_1,\mathbf{v}\rangle<\langle\mathbf{v},\mathbf{u}_2\rangle=2$, so $\langle\mathbf{v},\mathbf{u}_1\rangle=1$ and the result follows from \cref{Lem: Hilbert-Chow}.
Now suppose that $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ admits no effective spherical or exceptional classes. Then the proof of \cref{Lem: isotropic divisorial l=2 1} applies with $\mathbf{u}_2$ instead, giving the result.
\end{proof}
\subsubsection{$\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)=1$} We begin again by determining necessary conditions for a potential wall $\ensuremath{\mathcal W}$ to be totally semistable and for $\mathop{\mathrm{codim}}\nolimits (M_{\sigma_+}(\mathbf{v})\backslash M_{\sigma_0}^s(\mathbf{v}))=1$.
\begin{Prop}\label{Prop: 1-1 case totally semistable and codim 1}
Let $\ensuremath{\mathcal W}$ be a potential wall for positive and minimal $\mathbf{v}$ such that $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ is isotropic with $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)=1$.
\begin{enumerate}
\item\label{enum:IsotropicTotallySemistable l=1} If $\ensuremath{\mathcal W}$ is totally semistable, then $\langle\mathbf{v},\mathbf{w}\rangle=0$ and $\langle \mathbf{v},\mathbf{u}_1\rangle=1$, where $\mathbf{w}$ is an effective spherical class in $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$.
\item If $\mathop{\mathrm{codim}}\nolimits(M_{\sigma_0}(\mathbf{v})\backslash M^s_{\sigma_0}(\mathbf{v}))=1$, then
\begin{enumerate}
\item\label{enum:IsotropicCodimOne-<v.u>=1 l=1} $\langle \mathbf{v},\mathbf{u}\rangle=1$ for a primitive isotropic $\mathbf{u}\in\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$, or
\item\label{enum:IsotropicCodimOne-<v.w>=0 l=1} $\langle \mathbf{v},\mathbf{w}\rangle=0$ for a spherical or exceptional class $\mathbf{w}$.
\end{enumerate}
\end{enumerate}
\end{Prop}
\begin{proof}Again we assume that $\mathbf{u}_1$, $\mathbf{u}_2$, and $\mathbf{w}$ are labelled and oriented in accordance with \cref{Rem:Even and Odd pairings} and the discussion preceding \cref{Prop:LGU walls of low codimension} so that $\langle\mathbf{u}_1,\mathbf{w}\rangle>0$.
For a given $E\in M_{\sigma_+}(\mathbf{v})$, let the Harder-Narasimhan filtration of $E$ with respect to $\sigma_-$ correspond to a decomposition $\mathbf{v}=\sum_i \mathbf{a}_i$. We shall estimate the codimension of the sublocus $\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o$ of destabilized objects, which is equal to
\begin{equation}
\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle.
\end{equation}
We will divide the discussion into two cases as we did for \cref{Prop:LGU walls of low codimension}, depending on whether or not one of the classes has negative square.
(I) We first assume that one of the $\mathbf{a}_i$ satisfies $\mathbf{a}_i^2<0$, say $\mathbf{a}_0=b_0 \mathbf{w}$ for an effective spherical or exceptional class $\mathbf{w}$.
Assume that $\mathbf{a}_1=b_1\mathbf{u}_1$ and $\mathbf{a}_2=b_2\mathbf{u}_2$ are isotropic. Then
\begin{equation}\label{eq: 1,1 case I}
\begin{split}
& \sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle\\
\geq & (\mathbf{a}_0^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0))+\sum_{i \geq 1}b_0 \langle \mathbf{w},\mathbf{a}_i \rangle
-\left\lfloor\frac{b_1}{2}\right\rfloor-\left\lfloor\frac{b_2}{2}\right\rfloor+b_1 b_2 \langle \mathbf{u}_1,\mathbf{u}_2 \rangle\\
\geq & -\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+b_0 \langle \mathbf{w},\mathbf{v} \rangle-\left\lfloor\frac{b_1}{2}\right\rfloor-\left\lfloor\frac{b_2}{2}\right\rfloor+b_1 b_2 \langle \mathbf{u}_1,\mathbf{u}_2 \rangle\\
\geq & -\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)-\left\lfloor\frac{b_1}{2}\right\rfloor-\left\lfloor\frac{b_2}{2}\right\rfloor+b_1 b_2 \langle \mathbf{u}_1,\mathbf{u}_2 \rangle,
\end{split}
\end{equation}
where $b_0\langle\mathbf{v},\mathbf{w}\rangle\geq 0$ from the minimality of $\mathbf{v}$.
First suppose that $\mathbf{w}^2=-2$, so $\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)=-b_0^2$ and thus
\begin{equation}\label{eq: 1,1 case I spherical}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o &\geq b_0^2+b_1 b_2-\frac{b_1}{2}-\frac{b_2}{2}\geq 1+b_1 b_2-\frac{b_1}{2}-\frac{b_2}{2}\\
&=1+\frac{b_2(b_1-1)+b_1(b_2-1)}{2}\geq 1.
\end{split}
\end{equation}
Thus if $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\dots,\mathbf{a}_n)^o=1$ then we must have $\langle \mathbf{v}, \mathbf{w}\rangle=0$, $\langle \mathbf{u}_1,\mathbf{u}_2\rangle=1$, and $\mathbf{v}=\mathbf{w}+\mathbf{u}_1+\mathbf{u}_2$. But then $\langle \mathbf{v},\mathbf{w}\rangle=-2<0$, contrary to assumption. So we must have $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\dots,\mathbf{a}_n)^o\geq 2$ in this case.
If instead $\mathbf{w}^2=-1$, then $\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)=\left\lfloor-\frac{b_0^2}{2}\right\rfloor$ and $2\mid\langle \mathbf{u}_1,\mathbf{u}_2\rangle$ by part \ref{enum:IsotropicLatticeEffectiveExceptional} of \cref{Prop:isotropic lattice}, so
\begin{equation}\label{eq: 1,1 case I exceptional}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o &\geq \frac{b_0^2}{2}+2b_1b_2-\frac{b_1}{2}-\frac{b_2}{2}\geq\frac{1}{2}+2b_1b_2-\frac{b_1}{2}-\frac{b_2}{2}\\
&=\frac{1}{2}+\frac{b_1(2b_2-1)+b_2(2b_1-1)}{2}\geq\frac{3}{2},
\end{split}
\end{equation}
so $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o\geq 2$ in this case.
Now assume that $\mathbf{a}_1=b_1 \mathbf{u}_j$ and $\mathbf{a}_i^2>0$ for $i>1$. Then
\begin{equation}
\begin{split}
\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle
&\geq \mathbf{a}_0^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+\sum_{i \geq 1}b_0 \langle \mathbf{w},\mathbf{a}_i \rangle
-\left\lfloor\frac{b_1}{2}\right\rfloor+\sum_{i \geq 2} b_1 \langle \mathbf{u}_j,\mathbf{a}_i \rangle\\
&\geq -\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+b_0 \langle \mathbf{w},\mathbf{v} \rangle
-\left\lfloor\frac{b_1}{2}\right\rfloor+b_1 \langle \mathbf{u}_j,\mathbf{a}_2 \rangle\\
&\geq -\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+\frac{b_1}{2}.
\end{split}
\end{equation}
If $\mathbf{w}^2=-2$ then $$-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+\frac{b_1}{2}=b_0^2+\frac{b_1}{2}\geq\frac{3}{2},$$ while if $\mathbf{w}^2=-1$, then $$\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o\geq-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+\frac{b_1}{2}\geq \frac{b_0^2}{2}+\frac{b_1}{2}\geq 1,$$ with equality in the last inequality only if $b_1=b_0=1$, in which case the first inequality is strict. So we always have $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\dots,\mathbf{a}_n)^o\geq 2$ in this case.
We can now assume that there are no positive classes amongst the Harder-Narasimhan factors, i.e. $\mathbf{v}=b_0 \mathbf{w}+b_1 \mathbf{u}_j$. But $\mathbf{v}^2>0$ forces $j=1$, so we may assume this outright. Then $0 \leq \langle \mathbf{v},\mathbf{w} \rangle=b_0 \mathbf{w}^2+b_1 \langle \mathbf{u}_1,\mathbf{w} \rangle$, so our estimate becomes
\begin{equation}\label{eq: 1,1 case I no positive}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o=&\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle\\
= & b_0^2 \mathbf{w}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)-\left\lfloor\frac{b_1}{2}\right\rfloor+b_0b_1\langle \mathbf{w},\mathbf{u}_1\rangle\\
\geq & b_0^2 \mathbf{w}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+\frac{b_0b_1\langle \mathbf{w},\mathbf{u}_1\rangle}{2}+\frac{b_1}{2}\left(b_0\langle \mathbf{w},\mathbf{u}_1\rangle-1\right).
\end{split}
\end{equation}
If $\mathbf{w}^2=-2$, then $b_0^2\mathbf{w}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)=-b_0^2$, so the last line of \eqref{eq: 1,1 case I no positive} becomes $$-b_0^2+\frac{b_0 b_1\langle \mathbf{w},\mathbf{u}_1\rangle}{2}+\frac{b_1}{2}(b_0\langle \mathbf{w},\mathbf{u}_1\rangle-1)=\frac{b_0}{2}\langle \mathbf{v},\mathbf{w}\rangle+\frac{b_1}{2}(\langle \mathbf{v},\mathbf{u}_1\rangle-1)>0$$
unless $\langle \mathbf{v},\mathbf{w}\rangle=0$ and $\langle \mathbf{v},\mathbf{u}_1\rangle=1$. But then $\mathbf{v}=\mathbf{w}+2\mathbf{u}_1$ and $\langle \mathbf{w},\mathbf{u}_1\rangle=1$, in which case indeed $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o=0$ as in \cref{enum:IsotropicTotallySemistable l=1}. Moreover, $\mathop{\mathrm{codim}}\nolimits \ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o\geq 2$ unless $\langle \mathbf{w},\mathbf{u}_1\rangle=2$ and $\mathbf{v}=\mathbf{w}+\mathbf{u}_1$, as in both \cref{enum:IsotropicCodimOne-<v.w>=0 l=1,enum:IsotropicCodimOne-<v.u>=1 l=1}, or $\langle \mathbf{w},\mathbf{u}_1\rangle=1$ and $\mathbf{v}=\mathbf{w}+3\mathbf{u}_1$ or $\mathbf{w}+4\mathbf{u}_1$, as in \cref{enum:IsotropicCodimOne-<v.u>=1 l=1}. In each of these cases $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o=1$.
If $\mathbf{w}^2=-1$, then $b_0^2\mathbf{w}^2-\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)=\left\lceil-\frac{b_0^2}{2}\right\rceil$, so the last line of \eqref{eq: 1,1 case I no positive} gives $$\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o\geq-\frac{b_0^2}{2}+\frac{b_0 b_1\langle \mathbf{w},\mathbf{u}_1\rangle}{2}+\frac{b_1}{2}(b_0\langle \mathbf{w},\mathbf{u}_1\rangle-1)=\frac{b_0}{2}\langle \mathbf{v},\mathbf{w}\rangle+\frac{b_1}{2}(\langle \mathbf{v},\mathbf{u}_1\rangle-1)>0$$
unless again $\langle \mathbf{v},\mathbf{w}\rangle=0$ and $\langle \mathbf{v},\mathbf{u}_1\rangle=1$, in which case $\mathbf{v}=\mathbf{w}+\mathbf{u}_1$ and $\langle \mathbf{w},\mathbf{u}_1\rangle=1$. In this case, however, $\mathop{\mathrm{codim}}\nolimits \ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o=1$ as in both \cref{enum:IsotropicCodimOne-<v.w>=0 l=1} and \cref{enum:IsotropicCodimOne-<v.u>=1 l=1}. Moreover, $\mathop{\mathrm{codim}}\nolimits \ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o\geq 2$ unless $\langle \mathbf{w},\mathbf{u}_1\rangle=1$ and $\mathbf{v}=\mathbf{w}+2\mathbf{u}_1,2(\mathbf{w}+\mathbf{u}_1)$, in which case $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o=1$.
Finally, assume that other than $\mathbf{a}_0=b_0 \mathbf{w}$, $\mathbf{a}_i^2>0$ for all $i>0$. Then the estimate becomes
\begin{equation}\label{eq: 1,1 case I no isotropic}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o=&\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle\\
= & -\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_0)+b_0\langle \mathbf{w},\mathbf{v}\rangle+\sum_{0<i<j}\langle \mathbf{a}_i,\mathbf{a}_j\rangle\geq \frac{b_0^2}{2}>0.
\end{split}
\end{equation}
Moreover, $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\ldots,\mathbf{a}_n)^o\geq 2$ unless $\langle \mathbf{w},\mathbf{v}\rangle=0$ and $\mathbf{v}=\mathbf{w}+\mathbf{a}_1$, as in \cref{enum:IsotropicCodimOne-<v.w>=0 l=1}. In this case we have $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_0,\mathbf{a}_1)^o=1$, and we require that $$0<\mathbf{a}_1^2=(\mathbf{v}-\mathbf{w})^2=\mathbf{v}^2+\mathbf{w}^2,$$ so $\mathbf{v}^2>2$ or $\mathbf{v}^2>1$ if $\mathbf{w}^2=-2$ or $\mathbf{w}^2=-1$, respectively.
(II) We next assume that $\mathbf{a}_i^2 \geq 0$ for all $i$.
We assume $\mathbf{a}_1=b_1 \mathbf{u}_1$ and $\mathbf{a}_2=b_2 \mathbf{u}_2$.
Then
\begin{equation}\label{eq: spherical 1,1 case II,a}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o=& \sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<j}\langle \mathbf{a}_i,\mathbf{a}_j \rangle\\
\geq &
-\left\lfloor\frac{b_1}{2}\right\rfloor-\left\lfloor\frac{b_2}{2}\right\rfloor+b_1 b_2 \langle \mathbf{u}_1,\mathbf{u}_2 \rangle\\
\geq &\frac{b_1(b_2-1)+b_2(b_1-1)}{2}>0,
\end{split}
\end{equation}
unless $\mathbf{v}=\mathbf{u}_1+\mathbf{u}_2$ and $\langle \mathbf{u}_1,\mathbf{u}_2\rangle=1$, in which case $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{a}_2)^o=1$, as in \cref{enum:IsotropicCodimOne-<v.u>=1 l=1}. If, say, $b_1=1$ and $b_2\geq 2$, then we have $$\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{a}_2)^o\geq-\left\lfloor\frac{1}{2}\right\rfloor-\left\lfloor\frac{b_2}{2}\right\rfloor+b_2\langle \mathbf{u}_1,\mathbf{u}_2\rangle\geq \frac{b_2}{2}\geq 1,$$ with equality only if $b_2=2$ and $\langle \mathbf{u}_1,\mathbf{u}_2\rangle=1$. Thus $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o$ is always positive in this case. Moreover, $\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o$ has codimension one only if $\mathbf{v}=\mathbf{u}_1+\mathbf{u}_2,2\mathbf{u}_1+\mathbf{u}_2,\mathbf{u}_1+2\mathbf{u}_2$ with $\langle \mathbf{u}_1,\mathbf{u}_2\rangle=1$, as in \cref{enum:IsotropicCodimOne-<v.u>=1 l=1}. Otherwise, $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o\geq 2$.
Now we assume that $\mathbf{a}_1=b_1 \mathbf{u}_j$ and $\mathbf{a}_i^2>0$ for $i \geq 2$.
In this case, we also see that
\begin{equation}
\begin{split}
\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o=&\sum_i (\mathbf{a}_i^2-\mathop{\mathrm{dim}}\nolimits \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{a}_i))+\sum_{i<k}\langle \mathbf{a}_i,\mathbf{a}_k \rangle\\
=&-\left\lfloor\frac{b_1}{2}\right\rfloor+\sum_{i>1}b_1\langle \mathbf{u}_j,\mathbf{a}_i\rangle+\sum_{1<i<k}\langle \mathbf{a}_i,\mathbf{a}_k\rangle\\
\geq&b_1(\langle \mathbf{v},\mathbf{u}_j\rangle-\frac{1}{2})+\sum_{1<i<k}\langle \mathbf{a}_i,\mathbf{a}_k\rangle\\
\geq&b_1(\langle \mathbf{v},\mathbf{u}_j\rangle-\frac{1}{2})>1,
\end{split}
\end{equation}
unless $\langle \mathbf{v},\mathbf{u}_j\rangle=1$, and $\mathbf{v}=b_1\mathbf{u}_j+\mathbf{a}_2$ with $b_1=1,2$, in which case $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\mathbf{a}_2)^o=1$, as in \cref{enum:IsotropicCodimOne-<v.u>=1 l=1}.
Finally, if $\mathbf{a}_i^2>0$ for all $i$, then $\mathop{\mathrm{codim}}\nolimits\ensuremath{\mathcal F}(\mathbf{a}_1,\dots,\mathbf{a}_n)^o\geq 2$ by Proposition \ref{Prop:HN filtration all positive classes}.
\end{proof}
We prove the converse to Proposition \ref{Prop: 1-1 case totally semistable and codim 1} in the following lemmas. We begin with the case of a totally semistable wall as in \cref{enum:IsotropicTotallySemistable l=1} of \cref{Prop: 1-1 case totally semistable and codim 1}.
\begin{Lem}\label{Lem:isotropic totally semistable divisorial contraction l=1}
Suppose that $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ contains an effective spherical class $\mathbf{w}$ and an isotropic class $\mathbf{u}$ such that $\langle \mathbf{v},\mathbf{u}\rangle=1=\ell(\mathbf{u})$ and $\langle\mathbf{v},\mathbf{w}\rangle=0$. Then $\ensuremath{\mathcal W}$ is totally semistable and induces a $\ensuremath{\mathbb{P}}^1$-fibration on $M_{\sigma_+}(\mathbf{v},L)$ for $L\equiv D+\frac{\mathop{\mathrm{rk}} \mathbf{v}}{2}K_X\pmod 2$, where $D$ is a nodal cycle. For the other determinant, $M_{\sigma_+}(\mathbf{v},L+K_X)\setminus M_{\sigma_0}^s(\mathbf{v},L+K_X)$ is a divisor which does not get contracted.
\end{Lem}
\begin{proof}
We first observe that since $\langle\mathbf{v},\mathbf{w}\rangle=0$, it follows from the analogue of \eqref{eqn:u2 and u1} in the spherical case that $\langle\mathbf{v},\mathbf{u}_1\rangle=\langle\mathbf{v},\mathbf{u}_2\rangle$. As $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)$, we may assume that $\mathbf{u}=\mathbf{u}_1$. By \cref{Prop:isotropic lattice} we may write $\mathbf{v}=x\mathbf{w}+y\mathbf{u}_1$ with $x,y\in\ensuremath{\mathbb{Z}}$. Then $1=\langle \mathbf{v},\mathbf{u}_1\rangle=x\langle \mathbf{w},\mathbf{u}_1\rangle$ which implies that $x=1=\langle \mathbf{w},\mathbf{u}_1\rangle$. But then $\langle\mathbf{v},\mathbf{w}\rangle=0$ forces $y=2$. Since $\ell(\mathbf{u}_1)=1$, $M^s_{\sigma_0}(2\mathbf{u}_1,2L')$ is non-empty and two-dimensional by \cref{prop:isotropic}. Moreover, for the unique $\sigma_0$-stable spherical object $T$ of class $\mathbf{w}$ and any $A\in M^s_{\sigma_0}(2\mathbf{u}_1,2L')$, stability ensures that $\mathop{\mathrm{ext}}\nolimits^1(T,A)=\langle 2\mathbf{u}_1,\mathbf{w}\rangle=2$. Then the $\ensuremath{\mathbb{P}}^1$ worth of extensions $$0\to A\to E\to T\to 0$$ gets contracted by crossing $\ensuremath{\mathcal W}$, and varying $A$ in $M_{\sigma_0}^s(2\mathbf{u}_1,2L')$ generically sweeps out an entire irreducible component of $M_{\sigma_+}(\mathbf{v},L)$, where $$L=2L'+\mathbf{d}et(T)\equiv \mathbf{d}et(T)\equiv D+\frac{\mathop{\mathrm{rk}} \mathbf{v}}{2}K_X\pmod 2.$$
For the other determinant, observe that the only decompositions of $\mathbf{v}$ into effective classes are $\mathbf{v}=\mathbf{w}+2\mathbf{u}_1=\mathbf{u}_1+\mathbf{u}_2$, and from the proof of Proposition \ref{Prop: 1-1 case totally semistable and codim 1} only the former decomposition corresponds to a totally semistable wall. Moreover, in the case of the decomposition $\mathbf{v}=\mathbf{w}+2\mathbf{u}_1$ for the determinant $L+K_X$, the strictly $\sigma_0$-semistable locus has codimension 1, so $\ensuremath{\mathcal W}$ is not totally semistable for $M_{\sigma_+}(\mathbf{v},L+K_X)$. Indeed, if $E\in M_{\sigma_+}(\mathbf{v},L+K_X)$ has this decomposition for its Harder-Narasimhan filtration with respect to $\sigma_-$, then the kernel $A$ of the surjection $E\twoheadrightarrow T$ would be in $M_{\sigma_-}(2\mathbf{u}_1,2L'+K_X)$. But $M_{\sigma_-}^s(2\mathbf{u}_1,2L'+K_X)=\varnothing$ by \cref{prop:isotropic}, so $A$ would have to be strictly $\sigma_-$-semistable, and from determinant considerations its Jordan-H\"{o}lder factors would have to be $A_1\in M_{\sigma_-}(\mathbf{u}_1,L')$ and $A_2\in M_{\sigma_-}(\mathbf{u}_1,L'+K_X)$. But then $A_1\ncong A_2,A_2(K_X)$, as $\det(A_1)\neq\det(A_2)=\det(A_2(K_X))$, so $$\mathop{\mathrm{ext}}\nolimits^1(A_1,A_2)=\langle \mathbf{u}_1,\mathbf{u}_1\rangle+\hom(A_1,A_2)+\mathop{\mathrm{ext}}\nolimits^2(A_1,A_2)=\hom(A_2,A_1(K_X))=0,$$ from which it follows that $A=A_1\oplus A_2$. Using \cite[Lemmas 6.1--6.3]{CH15}, we thus get a unique $\sigma_+$-stable extension $$0\to A_1\oplus A_2\to E\to T\to 0,$$ which is unique in its S-equivalence class with respect to $\sigma_0$, and varying the $A_i$ spans a non-contracted divisor of strictly $\sigma_0$-semistable objects with the prescribed Harder-Narasimhan filtration for $\sigma_-$.
Now consider the other decomposition, $\mathbf{v}=\mathbf{u}_1+\mathbf{u}_2$, and take $A_1\in M_{\sigma_+}(\mathbf{u}_1,L')$ and $A_2\in M_{\sigma_+}(\mathbf{u}_2)$. Then any nontrivial extension \begin{equation}\label{eqn:Other decomposition}
0\to A_1\to E\to A_2\to 0
\end{equation} is $\sigma_+$-stable by \cite[Lemma 9.3]{BM14b}, as the parallelogram spanned by $\mathbf{u}_1$ and $\mathbf{u}_2$ has no lattice points other than its vertices. In order for $\det(E)=L+K_X$ we must have $$\det(A_2)+L'=L+K_X=2L'+\det(T)+K_X,$$ so that $\det(A_2)=L'+\det(T)+K_X$. As $M_{\sigma_0}^s(\mathbf{u}_2)=\varnothing$, $A_2$ must be strictly $\sigma_0$-semistable with stable factors $T$ and $A_1'\in M_{\sigma_+}(\mathbf{u}_1)$. It follows that $$L'+\det(T)+K_X=\det(A_2)=\det(T)+\det(A_1'),$$ so $A_1'\in M_{\sigma_+}(\mathbf{u}_1,L'+K_X)$. In particular, $\det(A_1)\neq\det(A_1')$, and thus $A_1\ncong A_1',A_1'(K_X)$. Hence by stability, $$\mathop{\mathrm{Hom}}\nolimits(T,A_1)=\mathop{\mathrm{Hom}}\nolimits(A_1,T)=\mathop{\mathrm{Hom}}\nolimits(A_1',A_1)=\mathop{\mathrm{Hom}}\nolimits(A_1,A_1'(K_X))=0,$$ from which we see that $\mathop{\mathrm{Hom}}\nolimits(A_2,A_1)=0=\mathop{\mathrm{Ext}}\nolimits^2(A_2,A_1)$, by applying $\mathop{\mathrm{Hom}}\nolimits(\underline{\hphantom{A}},A_1)$ to the short exact sequence \eqref{eqn:Other decomposition}. Thus $\mathop{\mathrm{ext}}\nolimits^1(A_2,A_1)=1$, and there exists a unique $\sigma_+$-stable extension $E$, which is also unique in its S-equivalence class with respect to $\sigma_0$. Letting the $A_i$ vary, we again sweep out a divisor that is not contracted by $\ensuremath{\mathcal W}$, as claimed.
\end{proof}
Now we move on to \cref{enum:IsotropicCodimOne-<v.u>=1 l=1}.
\begin{Lem}\label{Lem:isotropic divisorial l=1 1}
Assume that $\mathbf{v}$ is minimal in $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$, which contains primitive isotropic classes $\mathbf{u}_1$ and $\mathbf{u}_2$ such that $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)=1$, and suppose that $\langle \mathbf{v},\mathbf{u}\rangle=1$ for a primitive isotropic $\mathbf{u}\in\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ with $\ell(\mathbf{u})=1$.
\begin{enumerate}
\item\label{enum:IsotropicDivisorialContraction l=1 v^2>=3} If $\mathbf{v}^2\geq 3$, then $\ensuremath{\mathcal W}$ induces a divisorial contraction on $M_{\sigma_+}(\mathbf{v},L)$.
\item If either
\begin{enumerate}
\item\label{enum:IsotropicDivisorialNonContraction l=1 v^2=1} $\mathbf{v}^2=1$ or
\item\label{enum:IsotropicDivisorialNonContraction l=1 v^2=2} $\mathbf{v}^2=2$, $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ contains a spherical class, and $L \equiv D+(\frac{\mathop{\mathrm{rk}} \mathbf{v}}{2}+1)K_X \pmod 2$, where $D$ is a nodal cycle,
\end{enumerate}
then $M_{\sigma_+}(\mathbf{v},L)\setminus M^s_{\sigma_0}(\mathbf{v},L)$ is a divisor which is not contracted by $\ensuremath{\mathcal W}$.
\end{enumerate}
\end{Lem}
\begin{proof}
Suppose first that $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ contains an effective spherical or exceptional class $\mathbf{w}$ and $\mathbf{v}^2\geq 3$. Then by minimality of $\mathbf{v}$ and \eqref{eqn:u2 and u1} (and its analogue in the spherical case), $\langle\mathbf{v},\mathbf{u}_2\rangle\geq \langle\mathbf{v},\mathbf{u}_1\rangle$ with equality only if $\langle\mathbf{v},\mathbf{w}\rangle=0$. But if $\langle\mathbf{v},\mathbf{w}\rangle=0$, then $\mathbf{u}$ could be either $\mathbf{u}_1$ or $\mathbf{u}_2$, and writing $\mathbf{v}=x\mathbf{w}+y\mathbf{u}$ with $x,y\in\ensuremath{\mathbb{Z}}$, we see that $\langle\mathbf{v},\mathbf{u}\rangle=1$ and $\langle\mathbf{v},\mathbf{w}\rangle=0$ imply that $\mathbf{v}=\mathbf{w}+(-\mathbf{w}^2)\mathbf{u}$, where $\langle \mathbf{u},\mathbf{w}\rangle=1$. It follows that $\mathbf{v}^2=-\mathbf{w}^2$, which were explicitly excluded. Thus $\langle\mathbf{v},\mathbf{w}\rangle>0$ and we see that $\mathbf{u}=\mathbf{u}_1$. Moreover, we see by the same reasoning that $\langle\mathbf{v},\mathbf{u}_1\rangle=1$ implies that $\mathbf{v}=\mathbf{w}+y\mathbf{u}_1$ and $\langle\mathbf{w},\mathbf{u}_1\rangle=1$ so that $\mathbf{v}^2=\mathbf{w}^2+2y\equiv\mathbf{w}^2\pmod 2$.
We will break the proof up into different parts based on the decomposition of $\mathbf{v}$ we will use.
Suppose first that $\mathbf{v}^2\geq 3$ if $\mathbf{w}$ is exceptional and $\mathbf{v}^2\geq 8$ if $\mathbf{w}$ is spherical. Setting $\mathbf{v}'=\mathbf{v}-2\mathbf{u}_1$, it follows that $\mathbf{v}'^2=\mathbf{v}^2-4\geq-1$ or $\mathbf{v}'^2\geq 4$ if $\mathbf{w}$ is exceptional or spherical, respectively. As $\langle \mathbf{v}',\mathbf{u}_1\rangle=1$, we have $\mathbf{v}'\in C_{\ensuremath{\mathcal W}}$, and from $\langle\mathbf{v}',\mathbf{w}\rangle=\frac{\mathbf{v}^2+\mathbf{w}^2-4}{2}$, which is positive if $\mathbf{w}$ is spherical, we have $M^s_{\sigma_0}(\mathbf{v}')\neq\varnothing$ by Proposition \ref{Prop: 1-1 case totally semistable and codim 1}. Then for $E_1\in M^s_{\sigma_0}(2\mathbf{u}_1)$ and $E_2\in M^s_{\sigma_0}(\mathbf{v}')$, we have $\mathop{\mathrm{Hom}}\nolimits(E_2,E_1)=\mathop{\mathrm{Hom}}\nolimits(E_1,E_2(K_X))=0$ by stability. It follows that $\mathop{\mathrm{ext}}\nolimits^1(E_2,E_1)=\langle\mathbf{v}',2\mathbf{u}_1\rangle=2$, so there is a $\ensuremath{\mathbb{P}}^1$ worth of extensions of the form $$0\to E_1\to E\to E_2\to 0,$$ which gets contracted by $\ensuremath{\mathcal W}$. Varying $E_1$ and $E_2$ in their moduli sweeps out a divisor in $M_{\sigma_+}(\mathbf{v})$. Moreover, as $\det(E_1)=2L'$ by \cref{prop:isotropic}, we may choose $E_2\in M_{\sigma_0}^s(\mathbf{v}')$ to have either determinant $L''$ or $L''+K_X$, where $[L'' \mathop{\mathrm{mod}}\nolimits K_X]=c_1(\mathbf{v}')$, to get a divisorial contraction on each $M_{\sigma_+}(\mathbf{v},2L'+L'')$ and $M_{\sigma_+}(\mathbf{v},2L'+L''+K_X)$. We have proven the first claim of the lemma for $\mathbf{w}$ exceptional, and to complete the proof of this claim for $\mathbf{w}$ spherical we must consider when $\mathbf{v}^2=6,4$.
If $\mathbf{v}^2=6$ and $L\equiv D+(\frac{\mathop{\mathrm{rk}} \mathbf{v}}{2}+1)K_X\pmod 2$, then the same argument gives a divisorial contraction on $M_{\sigma_+}(\mathbf{v},L)$, as then $\mathbf{v}'^2=2$ (i.e. $\langle\mathbf{v}',\mathbf{w}\rangle=0$) and $M_{\sigma_0}^s(\mathbf{v}',L-2L')\neq\varnothing$ by Lemma \ref{Lem:isotropic totally semistable divisorial contraction l=1}. If instead $L\equiv D+\frac{\mathop{\mathrm{rk}} \mathbf{v}}{2}K_X\pmod 2$, then we will use a different decomposition of $\mathbf{v}$. The conditions $\langle\mathbf{v},\mathbf{u}_1\rangle=1$ and $\mathbf{v}^2=6$ force $\mathbf{v}=\mathbf{w}+4\mathbf{u}_1$ with $\langle\mathbf{u}_1,\mathbf{w}\rangle=1$, so instead of the above decomposition ($\mathbf{v}=(\mathbf{v}-2\mathbf{u}_1)+2\mathbf{u}_1$), we use a different one, $\mathbf{v}=\mathbf{w}+\mathbf{v}''$ where $\mathbf{v}'':=2\mathbf{u}_1+2\mathbf{u}_1$. Indeed, take the unique $\sigma_0$-stable object $T$ of class $\mathbf{w}$, and two non-isomorphic objects $E_1,E_2\in M^s_{\sigma_0}(2\mathbf{u}_1,2L')$. Then by \cite[Lemmas 6.1--6.3]{CH15} the extensions of the form $$0\to E_1\oplus E_2\to E\to T\to 0$$ are $\sigma_+$-stable and move in a two-dimensional family contracted to the same point by $\ensuremath{\mathcal W}$. Varying $(E_1,E_2)\in (M^s_{\sigma_0}(2\mathbf{u}_1,2L')\times M^s_{\sigma_0}(2\mathbf{u}_1,2L'))\setminus\Delta$, where $\Delta$ is the diagonal, sweeps out a contracted divisor in $M_{\sigma_+}(\mathbf{v},L)$.
If $\mathbf{v}^2=4$, then the condition $\langle\mathbf{v},\mathbf{u}_1\rangle=1$ forces $\mathbf{v}=\mathbf{w}+3\mathbf{u}_1$. Take the unique $\sigma_0$-stable object $T$ of class $\mathbf{w}$, let $E_1\in M^s_{\sigma_0}(2\mathbf{u}_1,2L')$, and $E_2\in M^s_{\sigma_0}(\mathbf{u}_1)$. We consider extensions of the form $$0\to E_1\oplus E_2\to E\to T \to 0.$$ These extensions move in a one-dimensional family by \cite[Lemma 6.3]{CH15} and are $\sigma_+$-stable by \cite[Lemma 6.1]{CH15}. For fixed $E_i$, this curve of extensions is contracted by $\ensuremath{\mathcal W}$, and varying the $E_i$ sweeps out a divisor. As $\det(E_2)$ can be either $L'$ or $L'+K_X$, we get a divisorial contraction in each component as before.
This concludes the proof of \cref{enum:IsotropicDivisorialContraction l=1 v^2>=3} for $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ that falls into \cref{enum:IsotropicLatticeEffectiveExceptional,enum:IsotropicLatticeEffectiveSpherical} of \cref{Prop:isotropic lattice}.
For the second claim of the lemma for $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ falling into \cref{enum:IsotropicLatticeEffectiveExceptional,enum:IsotropicLatticeEffectiveSpherical} of \cref{Prop:isotropic lattice}, we must consider $\mathbf{v}^2=1$ and $\mathbf{v}^2=2$, which occur when $\mathbf{w}$ is exceptional and spherical, respectively. In the first case, we must have $\mathbf{v}=\mathbf{w}+\mathbf{u}_1$. Letting $F\in M_{\sigma_0}^s(\mathbf{u}_1)$ and $T$ be the unique (up to ${-}\otimes\ensuremath{\mathcal O}_X(K_X)$) $\sigma_0$-stable exceptional object of class $\mathbf{w}$, we consider the unique non-trivial extension, $$0\to F\to E\to T\to 0.$$ Varying $F
\in M_{\sigma_0}^s(\mathbf{u}_1)$, we sweep out a non-contracted divisor in each of $M_{\sigma_+}(\mathbf{v},L)$ and $M_{\sigma_+}(\mathbf{v},L+K_X)$, giving \cref{enum:IsotropicDivisorialNonContraction l=1 v^2=1}. The second case has been dealt with in the second statement of Lemma \ref{Lem:isotropic totally semistable divisorial contraction l=1}, giving \cref{enum:IsotropicDivisorialNonContraction l=1 v^2=2}.
Finally, we suppose that $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}}$ contains no effective spherical or exceptional class. We note that since $\langle \mathbf{v},\mathbf{u}\rangle=1$, $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ must be even. Indeed, it follows from $\langle\mathbf{v},\mathbf{u}\rangle=1$ that $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}=\ensuremath{\mathbb{Z}}\mathbf{u}+\ensuremath{\mathbb{Z}}\mathbf{v}$, and if $\mathbf{v}^2$ were odd, then $\frac{\mathbf{v}^2+1}{2}\mathbf{u}-\mathbf{v}$ would be an exceptional class, so $\mathbf{v}^2$ must be even. But then $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ must be even as well.
We first prove \cref{enum:IsotropicDivisorialContraction l=1 v^2>=3} in this case. So suppose that $\mathbf{v}^2\geq 3$. Then $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ being even gives $\mathbf{v}^2\geq 4$, and by \cref{Rem:IsotropicOrientation} we may write $\mathbf{v}=\mathbf{v}'+2\mathbf{u}_1$ as above to get a divisor swept out by contracted $\ensuremath{\mathbb{P}}^1$'s of extensions $$0\to E_1\to E\to E_2\to 0$$ with $E_1\in M_{\sigma_0}^s(2\mathbf{u}_1,2L')$ and $E_2\in M_{\sigma_0}^s(\mathbf{v}')$. Note that if $\mathbf{v}^2>4$, so that $\mathbf{v}'^2>0$, $M_{\sigma_0}^s(\mathbf{v}')$ is non-empty by Proposition \ref{Prop: 1-1 case totally semistable and codim 1}, while if $\mathbf{v}^2=4$, so that $\mathbf{v}'^2=0$, then $\mathbf{v}'=\mathbf{u}_2$ and $M_{\sigma_0}^s(\mathbf{u}_2)\neq\varnothing$ because $C_{\ensuremath{\mathcal W}}=P_{\ensuremath{\mathbb{H}}H}$ by \cref{Prop:isotropic lattice}.
Either way, we may choose $E_2$ to have the appropriate determinant to give a divisorial contraction in each $M_{\sigma_+}(\mathbf{v},L)$. This completes the proof of \cref{enum:IsotropicDivisorialContraction l=1 v^2>=3} of the Lemma.
The final option to consider is $\mathbf{v}^2=2$, in which case the only possibility for a destabilizing exact sequence is $$0\to E_1\to E\to E_2\to 0$$ for $E_i\in M^s_{\sigma_0}(\mathbf{u}_i)$, which span a divisor which is not contracted as $\mathop{\mathrm{ext}}\nolimits^1(E_2,E_1)=1$.
\end{proof}
Now we prove the converse to \cref{enum:IsotropicCodimOne-<v.w>=0 l=1} of \cref{Prop: 1-1 case totally semistable and codim 1}.
\begin{Lem}\label{Lem:isotropic divisorial 1-1 2}
Suppose that $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ is isotropic with $\ell(\mathbf{u}_1)=\ell(\mathbf{u}_2)=1$ and $\langle \mathbf{v},\mathbf{w}\rangle=0$ for an effective spherical or exceptional class $\mathbf{w}\in\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$.
\begin{enumerate}
\item\label{enum:IsotropicDivisorialContraction l=1 <v.w>=0} If $\mathbf{w}^2=-2$ and either
\begin{enumerate}
\item $\mathbf{v}^2>2$, or
\item $\mathbf{v}^2=2$ and $\langle\mathbf{v},\mathbf{u}_1\rangle>1$,
\end{enumerate} then $\ensuremath{\mathcal W}$ induces a divisorial contraction on $M_{\sigma_+}(\mathbf{v},L)$.
\item If either
\begin{enumerate}
\item\label{enum:IsotropicDivisorialNonContraction l=1 <v.w>=0 v^2=2}$\mathbf{w}^2=-2$, $\mathbf{v}^2=2$, $\langle\mathbf{v},\mathbf{u}_1\rangle=1$, and $L \equiv D+(\frac{\mathop{\mathrm{rk}} \mathbf{v}}{2}+1)K_X \pmod2$, or
\item\label{enum:IsotropicDivisorialNonContraction l=1 <v.w>=0 w^2=-1} $\mathbf{w}^2=-1$,
\end{enumerate}
then $\mathop{\mathrm{codim}}\nolimits(M_{\sigma_+}(\mathbf{v},L)\setminus M^s_{\sigma_0}(\mathbf{v},L))=1$ but this divisor is not contracted.
\end{enumerate}
\end{Lem}
\begin{proof}
Consider the Mukai vector $\mathbf{a}:=\mathbf{v}-\mathbf{w}$. Then $$\mathbf{a}^2=\mathbf{v}^2+\mathbf{w}^2>\mathbf{w}^2, \langle \mathbf{a},\mathbf{v} \rangle=\mathbf{v}^2>0,
\mbox{ and }\langle \mathbf{a},\mathbf{w}\rangle=-\mathbf{w}^2.$$ If $\mathbf{a}^2>0$, then since $\langle \mathbf{a},\mathbf{w}\rangle>0$, $M^s_{\sigma_0}(\mathbf{a})\neq\varnothing$ by Proposition \ref{Prop: 1-1 case totally semistable and codim 1}, and we consider the $\sigma_+$-stable extensions $$0\to F\to E\to G\to 0,$$ where $F\in M^s_{\sigma_0}(\mathbf{a})$ and $G\in M_{\sigma_0}^s(\mathbf{w})$. If $\mathbf{w}^2=-2$, then $G$ is the unique $\sigma_0$-stable spherical object $T$ of class $\mathbf{w}$, and by stability $\mathop{\mathrm{ext}}\nolimits^1(T,F)=\langle\mathbf{w},\mathbf{a}\rangle=2$, so these extensions span a $\ensuremath{\mathbb{P}}^1$ which gets contracted by $\ensuremath{\mathcal W}$. If $\mathbf{w}^2=-1$, then $G$ is $T$ or $T(K_X)$, and each choice of $G$ gives a unique such non-trivial extension, as $\mathop{\mathrm{ext}}\nolimits^1(G,F)=\langle\mathbf{a},\mathbf{w}\rangle=1$ by stability. A dimension count immediately gives that varying $F$ in $M_{\sigma_0}^s(\mathbf{a})$ spans a divisor which gets contracted if $\mathbf{w}^2=-2$ but does not if $\mathbf{w}^2=-1$. Note that we may choose $F$ to have either determinant appropriately to give the claimed behavior for each $M_{\sigma_+}(\mathbf{v},L)$.
It remains to consider when $\mathbf{a}^2=0$, i.e.\ $\mathbf{v}^2=-\mathbf{w}^2$. The fact that $\mathbf{a}$ is an effective isotropic class such that $\langle \mathbf{a},\mathbf{w}\rangle>0$ implies that $\mathbf{a}=k\mathbf{u}_1$. If $\mathbf{w}^2=-1$, then $k=\langle \mathbf{u}_1,\mathbf{w}\rangle=1$, in which case $\mathbf{v}=\mathbf{w}+\mathbf{u}_1$ and the claim follows from \cref{enum:IsotropicDivisorialNonContraction l=1 v^2=1} of Lemma \ref{Lem:isotropic divisorial l=1 1}. On the other hand, if $\mathbf{w}^2=-2$, then $$2=-\mathbf{w}^2=\langle \mathbf{a},\mathbf{w}\rangle=\langle k\mathbf{u}_1,\mathbf{w}\rangle=k\langle \mathbf{u}_1,\mathbf{w}\rangle,$$ so either $\mathbf{v}=\mathbf{w}+2\mathbf{u}_1$ and $\langle \mathbf{u}_1,\mathbf{w}\rangle=1$ or $\mathbf{v}=\mathbf{w}+\mathbf{u}_1$ and $\langle \mathbf{u}_1,\mathbf{w}\rangle=2$. In the first case, $\mathbf{v}^2=2$ and $\langle\mathbf{v},\mathbf{u}_1\rangle=1$, as in \cref{enum:IsotropicDivisorialNonContraction l=1 <v.w>=0 v^2=2}, so the claim follows from \cref{Lem:isotropic totally semistable divisorial contraction l=1}. In the second case, we consider the $\ensuremath{\mathbb{P}}^1$ worth of extensions $$0\to F\to E\to T\to 0$$ with $F\in M^s_{\sigma_0}(\mathbf{u}_1)$ and $T\in M^s_{\sigma_0}(\mathbf{w})$ the unique $\sigma_0$-stable spherical object of class $\mathbf{w}$. Varying $F\in M_{\sigma_0}^s(\mathbf{u}_1,L')$ (or $M_{\sigma_0}^s(\mathbf{u}_1,L'+K_X)$), these extensions span a contracted divisor in each $M_{\sigma_+}(\mathbf{v},L)$.
\end{proof}
\subsection{Non-minimal case}\label{subsec:non-minimal case}
Finally, we consider the case that $\langle\mathbf{v},\mathbf{w}\rangle<0$. As usual, we follow the orientation described in \cref{Rem:IsotropicOrientation} so that $\phi^+(\mathbf{w})>\phi^+(\mathbf{v})$, and hence
$\phi^-(\mathbf{w})<\phi^-(\mathbf{v})$, and denote by $T$ the unique (up to ${-}\otimes\ensuremath{\mathcal O}_X(K_X)$) $\sigma_0$-stable object of class $\mathbf{w}$.
We set
$\ensuremath{\mathcal T}_1:=\langle T,T(K_X) \rangle$ if $\mathbf{w}^2=-1$ (resp. $\ensuremath{\mathcal T}_1:=\langle T\rangle$ if $\mathbf{w}^2=-2$) and
$\ensuremath{\mathcal F}_1$ is the full subcategory of
$\ensuremath{\mathcal P}(1)$ generated by $\sigma_0$-stable objects $E$
with $\phi^+(E)<\phi^+(T)$.
We also let
$\ensuremath{\mathcal T}_1^*$ be the full subcategory of
$\ensuremath{\mathcal P}(1)$ generated by $\sigma_0$-stable objects $E$
with $\phi^-(E)>\phi^-(T)$
and
$\ensuremath{\mathcal F}_1^*:=\langle T,T(K_X) \rangle$ if $\mathbf{w}^2=-1$ (resp. $\ensuremath{\mathcal F}_1^*:=\langle T\rangle$ if $\mathbf{w}^2=-2$).
We set $\ensuremath{\mathcal A}_0=\ensuremath{\mathcal P}(1)$,
$\ensuremath{\mathcal A}_1=\langle \ensuremath{\mathcal T}_1[-1],\ensuremath{\mathcal F}_1 \rangle$ and
$\ensuremath{\mathcal A}_1^*:=\langle \ensuremath{\mathcal T}_1^*,\ensuremath{\mathcal F}_1^*[1] \rangle$.
Let $\Phi\colon \mathrm{D}^{b}(X) \to \mathrm{D}^{b}(X)$ be the equivalence
defined by $T$ as in \cref{eqn:spherical reflection,eqn:weakly spherical reflection}.
Then we have equivalences
\begin{equation}
\begin{split}
\Phi:& \ensuremath{\mathcal A}_0 \mor[\sim] \ensuremath{\mathcal A}_1\\
\Phi^{-1}:&\ensuremath{\mathcal A}_0 \mor[\sim] \ensuremath{\mathcal A}_1^*.
\end{split}
\end{equation}
The proof is identical to that for the non-isotropic case.
Write $n:=-2\langle \mathbf{v},\mathbf{w} \rangle$ if $\mathbf{w}^2=-1$ (resp. $n:=-\langle \mathbf{v},\mathbf{w}\rangle$ if $\mathbf{w}^2=-2$), and assume that $n>0$.
Then we have isomorphisms
\begin{equation}\label{eq:iso:Phi}
\begin{matrix}
\Phi:& M_{\sigma_+}(\mathbf{v})& \mor[\sim]& M_{\sigma_-}(\mathbf{v}-n\mathbf{w}),\\
\Phi:& M_{\sigma_+}(\mathbf{v}-n\mathbf{w})& \mor[\sim]& M_{\sigma_-}(\mathbf{v}),\\
\end{matrix}
\end{equation}
where $\mathbf{v}-n\mathbf{w}$ is minimal and $\langle\mathbf{v}-n\mathbf{w},\mathbf{w}\rangle>0$. By \cref{Prop:LGU walls of low codimension,Prop: 1-1 case totally semistable and codim 1} and the fact that $\langle\mathbf{v},\mathbf{w}\rangle\neq0$, we have a birational map
\begin{equation}\label{eqn:birational map for v-nw}M_{\sigma_-}(\mathbf{v}-n\mathbf{w}) \dashrightarrow M_{\sigma_+}(\mathbf{v}-n\mathbf{w})
\end{equation}
unless $\langle\mathbf{v}-n\mathbf{w},\mathbf{u}\rangle=1$ and $\ell(\mathbf{u})=2$. In this case, $\ensuremath{\mathcal W}$ is totally semistable for $\mathbf{v}-n\mathbf{w}$ as well and induces a divisorial contraction. We note that the S-equivalence class of $E\in M_{\sigma_+}(\mathbf{v})$ is determined by that of $\Phi(E)\in M_{\sigma_+}(\mathbf{v}-n\mathbf{w})$, as $E$ is an extension of $\Phi(E)$ and $(T\oplus T(K_X))^{\oplus n}$ if $\mathbf{w}^2=-1$ (resp. $T^{\oplus n}$ if $\mathbf{w}^2=-2$), so it follows that $\ensuremath{\mathcal W}$ induces a divisorial contraction for $M_{\sigma_+}(\mathbf{v})$ as well.
Otherwise, precomposing and postcomposing the birational map in \eqref{eqn:birational map for v-nw} with the isomorphisms from \eqref{eq:iso:Phi}, we get a birational map
\begin{equation}\label{eqn:PrePostComposition}
\Phi \circ \Phi:M_{\sigma_+}(\mathbf{v}) \dashrightarrow M_{\sigma_-}(\mathbf{v}).
\end{equation}
As the birational map in \eqref{eqn:birational map for v-nw} is isomorphic in codimension one unless
\begin{enumerate}
\item$\langle\mathbf{v}-n\mathbf{w},\mathbf{u}\rangle=2=\ell(\mathbf{u})$; or
\item $\langle\mathbf{v}-n\mathbf{w},\mathbf{u}\rangle=1=\ell(\mathbf{u}),$
\end{enumerate}
the same holds true for the birational map in \eqref{eqn:PrePostComposition}. In either of these cases, crossing $\ensuremath{\mathcal W}$ induces a divisorial contraction on $M_{\sigma_+}(\mathbf{v}-n\mathbf{w})$ by \cref{Lem:isotropic divisorial l=1 1,Lem: isotropic divisorial l=2 3}, so the same holds true for $M_{\sigma_+}(\mathbf{v})$ by the above discussion regarding S-equivalence in the case $\langle\mathbf{v}-n\mathbf{w},\mathbf{u}\rangle=1$, $\ell(\mathbf{u})=2$.
\begin{Rem}\label{Rem:OrthogonalIsomorphism}As in the non-isotropic case, if $\langle \mathbf{v}, \mathbf{w} \rangle=0$, then
$\Phi$ again induces an isomorphism
$$\Phi:M_{\sigma_+}(\mathbf{v}) \to M_{\sigma_-}(\mathbf{v}).$$
\end{Rem}
\begin{proof}[Proof of \cref{Prop:isotropic-classification}]
The proposition follows from the above discussion and \cref{Lem:P1FibrationExceptional,Lem:P1FibrationSpherical,Lem: isotropic divisorial l=2 2,Lem:IsotropicNoncontractedDivisor l=2,Lem: isotropic divisorial l=2 3,Lem:isotropic totally semistable divisorial contraction l=1,Lem:isotropic divisorial l=1 1,Lem:isotropic divisorial 1-1 2}. The only point that needs mentioning is that the restrictions on $\mathbf{v}^2$ in these lemmas can be equivalently rephrased in terms of either the pairing with $\mathbf{w}$ or $\mathbf{u}$ as in the statement of \cref{Prop:isotropic-classification}.
\end{proof}
\begin{Ex}\label{Ex:ConfusingSmallContraction}
An important example arises in the context of \cref{Rem:OrthogonalIsomorphism}. Suppose that $\langle\mathbf{v},\mathbf{w}\rangle=0$ for the effective exceptional class $\mathbf{w}$ in the isotropic lattice $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$. Then we have seen that if $\langle\mathbf{v},\mathbf{u}\rangle>\ell(\mathbf{u})$ for all primitive isotropic $\mathbf{u}\in C_\ensuremath{\mathcal W}\cap\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$, then $M_{\sigma_+}(\mathbf{v},L)\setminus M_{\sigma_0}^s(\mathbf{v},L)$ is a divisor that does not get contracted. We will see in the next section (specifically \cref{prop:flops}) that $\ensuremath{\mathcal W}$ nevertheless induces a small contraction if $\mathbf{v}$ is primitive.
This is an interesting situation. On the one hand, we know from \cref{Rem:OrthogonalIsomorphism} that $\Phi$ induces an isomorphism between $M_{\sigma_+}(\mathbf{v})$ and $M_{\sigma_-}(\mathbf{v})$. Moreover, from its definition, $\Phi$ restricts to the identity on the common open subset $M_{\sigma_0}^s(\mathbf{v})\subset M_{\sigma_+}(\mathbf{v})\cap M_{\sigma_-}(\mathbf{v})$. On the other hand, as $\pi^+$ is a small contraction, we may flop it to get another minimal model $\tilde{M}$ of $M_{\sigma_+}(\mathbf{v})$, but $\tilde{M}\ncong M_{\sigma_-}(\mathbf{v})$.
This phenomenon, which is present on any Enriques surface, leads one to wonder whether or not the minimal model $\tilde{M}$ can nevertheless be obtained by Bridgeland wall-crossing. It may be possible to reach $\tilde{M}$ by crossing a different wall bounding the chamber containing $\sigma_+$, or this may be a counter-example to the conjecture that all minimal models of $M_\sigma(\mathbf{v})$, for $\sigma\in\mathop{\mathrm{Stab}}\nolimits^{\dagger}(X)$ generic, are isomorphic to $M_\tau(\mathbf{v})$ for some $\tau\in\mathop{\mathrm{Stab}}\nolimits^{\dagger}(X)$.
\end{Ex}
\section{Flopping walls}\label{Sec:FloppingWalls}
In \crefrange{Sec:TotallySemistable-non-isotropic}{Sec:Isotropic walls} we have given necessary and sufficient criteria for the wall $\ensuremath{\mathcal W}$ to be totally semistable, to induce a $\ensuremath{\mathbb{P}}^1$ fibration, and to induce a divisorial contraction. In this section, we discuss the remaining possibility for the contraction morphism $\pi^+$. That is, if $\ensuremath{\mathcal W}$ does not induce a fibration or a divisorial contraction, then it must either induce a small contraction, that is, the exceptional locus of $\pi^+$ must have codimension at least two, or $\ensuremath{\mathcal W}$ is a fake wall so that $\pi^+$ does not contract any curves. In the next result, we give precise criteria for when $\ensuremath{\mathcal W}$ is a genuine wall inducing a small contraction, at least for $\mathbf{v}$ primitive. It is the only result in our work so far that has assumed that $\mathbf{v}$ is primitive.
\mathbf{b}egin{Prop} \label{prop:flops}
Assume that $\mathbf{v}$ is primitive and that $\ensuremath{\mathcal W}$ induces neither a divisorial contraction nor a $\ensuremath{\mathbb{P}}^1$-fibration. If either
\mathbf{b}egin{enumerate}
\item \label{enum-prop:sum2positive}
$\mathbf{v}^2\geq 3$ and $\mathbf{v}$ can be written as a sum
$\mathbf{v} = \mathbf{a}_1 +\mathbf{a}_2$ with $\mathbf{a}_i\in P_\ensuremath{\mathbb{H}}H$ such that $L\equiv \frac{r}{2}K_X\pmod 2$ if for each $i$, $\mathbf{a}_i^2=0$ and $\ell(\mathbf{a}_i)=2$; or
\item\label{enum-prop:exceptional} there exists an exceptional class $\mathbf{w}$ and either
\mathbf{b}egin{enumerate}
\item\label{enum-prop:exceptionalflop1}
$0< \langle \mathbf{w},\mathbf{v}\rangle\leq\frac{\mathbf{v}^2}{2}$, or
\item\label{enum-prop:exceptionalflop2}
$\langle \mathbf{v},\mathbf{w}\rangle=0$ and $\mathbf{v}^2\geq 3$; or
\end{enumerate}
\item\label{enum-prop:spherical} there exists a spherical class $\mathbf{w}$ and either
\mathbf{b}egin{enumerate}
\item\label{enum-prop:sphericalflop1}
$0 < \langle \mathbf{w}, \mathbf{v}\rangle < \frac{\mathbf{v}^2}2$, or
\item\label{enum-prop:sphericalflop2}
$\langle \mathbf{w},\mathbf{v}\rangle=\frac{\mathbf{v}^2}{2}$ and $\mathbf{v}-\mathbf{w}$ is a spherical class,
\end{enumerate}
\end{enumerate}
then $\ensuremath{\mathcal W}$ induces a small contraction on $M_{\sigma_+}(\mathbf{v},L)$.
\end{Prop}
\mathbf{b}egin{proof}
Note that it suffices to show that some positive dimensional subvariety of $\sigma_+$-stable objects becomes S-equivalent with respect to $\sigma_0$ and thus gets contracted by $\pi^+$.
We consider \cref{enum-prop:sum2positive} first, so $\mathbf{v}=\mathbf{a}_1+\mathbf{a}_2$ with $\mathbf{a}_i\in P_{\ensuremath{\mathbb{H}}H}$. Using \cite[Lemma 9.2]{BM14b}, we may assume that the parallelogram with vertices $0,\mathbf{a}_1,\mathbf{v},\mathbf{a}_2$ does not contain any lattice point other than its vertices. In particular, the $\mathbf{a}_i$ are primitive, and without loss of generality, we may assume that $\phi^+(\mathbf{a}_1)<\phi^+(\mathbf{a}_2)$. By Theorem \ref{Thm:exist:nodal}, there exist $\sigma_+$-stable objects $A_i$ with $\mathbf{v}(A_i)=\mathbf{a}_i$. If $\mathbf{a}_i^2>0$ for each $i$, then the signature of $\ensuremath{\mathbb{H}}H$ forces $\langle \mathbf{a}_1,\mathbf{a}_2\rangle\geq 2$ so that $\mathop{\mathrm{ext}}\nolimits^1(A_2,A_1)\geq 2$. If, say, $\mathbf{a}_1^2=0$, then by part \ref{thm:Classification,Divisorial} of Theorem \ref{classification of walls} and the assumptions that $\mathbf{v}^2\geq 3$ and that $\ensuremath{\mathcal W}$ does not induce a divisorial contraction, we must have either $\ell(\mathbf{a}_1)=2$ and $\langle \mathbf{v},\mathbf{a}_1\rangle \geq 3$ or $\ell(\mathbf{a}_1)=1$ and $\langle \mathbf{v},\mathbf{a}_1\rangle\geq 2$. So either way $\langle \mathbf{a}_2,\mathbf{a}_1\rangle\geq 2$ and again $\mathop{\mathrm{ext}}\nolimits^1(A_2,A_1)\geq 2$. By \cite[Lemma 9.3]{BM14b}, any nontrivial extension $$0\to A_1\to E\to A_2\to 0$$ is $\sigma_+$-stable of class $\mathbf{v}$. All such extensions are non-isomorphic but S-equivalent with respect to $\sigma_0$, giving a projective space of positive dimension contracted by $\pi^+$. Moreover, in all of the above cases, we may choose $A_1$ or $A_2$ to have the appropriate determinant so that $E$ can have either of the two possible determinants for $\mathbf{v}$, except when $\mathbf{a}_1^2=\mathbf{a}_2^2=0$ and $\ell(\mathbf{a}_1)=\ell(\mathbf{a}_2)=2$.
When $\mathbf{a}_1^2=\mathbf{a}_2^2=0$ and $\ell(\mathbf{a}_1)=\ell(\mathbf{a}_2)=2$, then the argument above produces a projective space of positive dimension contracted by $\pi^+$ if $$L=\mathbf{d}et(A_1)+\mathbf{d}et(A_2)\equiv\frac{\mathop{\mathrm{rk}}(A_1)}{2}K_X+\frac{\mathop{\mathrm{rk}}(A_2)}{2}K_X=\frac{r}{2}K_X\pmod 2,$$ as claimed. Observe further that if $\ensuremath{\mathbb{H}}H$ contains an exceptional or spherical class $\mathbf{w}$, then $\mathbf{a}_2=\mathbf{a}_1-2\frac{\langle\mathbf{a}_1,\mathbf{w}\rangle}{\mathbf{w}^2}\mathbf{w}$, where we note that if $\mathbf{w}^2=-2$ then $\langle\mathbf{a}_1,\mathbf{w}\rangle$ is even by \cref{Rem:Even and Odd pairings}, so $$\mathbf{v}=\mathbf{a}_1+\mathbf{a}_2=2\mathbf{a}_1-2\frac{\langle\mathbf{a}_1,\mathbf{w}\rangle}{\mathbf{w}^2}\mathbf{w}=2\left(\mathbf{a}_1-\frac{\langle\mathbf{a}_1,\mathbf{w}\rangle}{\mathbf{w}^2}\mathbf{w}\right)$$ is not primitive, contrary to our assumptions. Thus this possibility only occurs in \cref{enum:nonegativeclasses} of \cref{Prop:lattice classification}.
We move on to \cref{enum-prop:exceptionalflop1}. Assume first that $\mathbf{v}$ is minimal. Then $\langle \mathbf{v},\mathbf{w} \rangle>0$ means
$\mathbf{w}$ is effective. Since $(\mathbf{v}-\mathbf{w})^2 \geq -1$ and $\langle\mathbf{v},\mathbf{w}\rangle\leq\frac{\mathbf{v}^2}{2}<\mathbf{v}^2$, we see that $\langle\mathbf{v},\mathbf{v}-\mathbf{w}\rangle>0$ so that $\mathbf{v}-\mathbf{w}$ must be effective. From the assumptions, we observe that $\langle \mathbf{w}, \mathbf{v}-\mathbf{w}\rangle=\langle \mathbf{w},\mathbf{v}\rangle+1\geq 2$. As in the proof of \cite[Proposition 9.1]{BM14b}, we consider the parallelogram $\mathbf{P}$ with vertices $0,\mathbf{w},\mathbf{v},\mathbf{v}-\mathbf{w}$ and the function $f(\mathbf{a})=\mathbf{a}^2$ on $\mathbf{P}$, and the same argument as given there shows that $f(\mathbf{a})>-1$ unless $\mathbf{a}\in\{\mathbf{w},\mathbf{v}-\mathbf{w}\}$. It follows that if $\mathbf{P}$ contains any lattice point $\mathbf{a}$ other than its vertices, then both $\mathbf{a}^2\geq 0$ and $(\mathbf{v}-\mathbf{a})^2\geq 0$. So $\mathbf{v}$ is the sum of two positive classes, and we are in \cref{enum-prop:sum2positive}. It is easy to see that indeed $\mathbf{v}$ satisfies the extra condition $\mathbf{v}^2\geq 3$ if such an $\mathbf{a}$ is isotropic. We may therefore assume that no such lattice points exist. Let $T$ be a $\sigma_+$-stable object of class $\mathbf{w}$ and $F$ be any $\sigma_+$-stable object of class $\mathbf{v}-\mathbf{w}$. Then, assuming $\phi^+(\mathbf{w})<\phi^+(\mathbf{v}-\mathbf{w})$ without loss of generality, we get $\mathop{\mathrm{ext}}\nolimits^1(F,T)\geq2$, so there is a positive dimensional projective space worth of $\sigma_+$-stable extensions that are S-equivalent with respect to $\sigma_0$ and thus get contracted by $\pi^+$.
Now assume that $\mathbf{v}$ is not minimal. Consider the composition of spherical and weakly-spherical twists as in Proposition \ref{Prop:NonMinimalIsomorphism} or Proposition \ref{Prop:CompositionSphericalExceptional}, and denote it by $\ensuremath{\mathbb{P}}hi$. Then $\ensuremath{\mathbb{P}}hi_*(\mathbf{v})$ is minimal, and as $\langle \ensuremath{\mathbb{P}}hi_*(\mathbf{w}),\ensuremath{\mathbb{P}}hi_*(\mathbf{v})\rangle=\langle \mathbf{w},\mathbf{v}\rangle$, we see that $\ensuremath{\mathbb{P}}hi_*(\mathbf{w})$ is an effective exceptional class and satisfies the same inequality in the hypothesis of \ref{enum-prop:exceptionalflop1} for $\ensuremath{\mathbb{P}}hi_*(\mathbf{v})$ instead of $\mathbf{v}$. Thus $\ensuremath{\mathbb{P}}hi_*(\mathbf{v}-\mathbf{w})$ is effective and satisfies $\ensuremath{\mathbb{P}}hi_*(\mathbf{v}-\mathbf{w})^2\geq -1$. Moreover, depending on the parity of the index of the chamber occupied by $\mathbf{v}$, we either get $M_{\sigma_\pm}(\mathbf{v})\cong M_{\sigma_\pm}(\ensuremath{\mathbb{P}}hi_*(\mathbf{v}))$ or $M_{\sigma_{\pm}}(\mathbf{v})\cong M_{\sigma_\mp}(\ensuremath{\mathbb{P}}hi_*(\mathbf{v}))$, and the S-equivalence class of $E\in M_{\sigma_+}(\mathbf{v})$ is determined by that of $\ensuremath{\mathbb{P}}hi(E)\in M_{\sigma_\pm}(\ensuremath{\mathbb{P}}hi_*(\mathbf{v}))$, respectively. Thus the result follows from the work of the previous paragraph.
Now let us consider \cref{enum-prop:exceptionalflop2}. We first assume that $\mathbf{v}$ is minimal. As $\mathbf{v}^2\geq3$ by assumption, $(\mathbf{v}-2\mathbf{w})^2\geq-1$, and since $\langle \mathbf{v},\mathbf{w}\rangle=0$ and $\langle\mathbf{v},\mathbf{v}-2\mathbf{w}\rangle=\mathbf{v}^2>0$, we may assume that both $\mathbf{w}$ and $\mathbf{v}-2\mathbf{w}$ are effective. Note also that $\langle \mathbf{w},\mathbf{v}-2\mathbf{w}\rangle=2$. Let $T,T(K_X)$ be the two $\sigma_+$-stable exceptional objects of class $\mathbf{w}$ and let $F$ be a $\sigma_+$-stable object of class $\mathbf{v}-2\mathbf{w}$. As in the previous cases, we may assume that the parallelogram with vertices $0,\mathbf{w},\mathbf{v},\mathbf{v}-\mathbf{w}$ has no additional lattice points, and therefore the parallelogram with vertices $0,2\mathbf{w},\mathbf{v},\mathbf{v}-2\mathbf{w}$ has no lattice points other than $0,\mathbf{w},2\mathbf{w},\mathbf{v},\mathbf{v}-\mathbf{w},\mathbf{v}-2\mathbf{w}$. Without loss of generality we may assume that $\phi^+(\mathbf{w})>\phi^+(\mathbf{v})$. Then for any extension $$0\to F\to E\to T\mathop{\ord(\omega_S)}\nolimitsplus T(K_X)\to 0$$ corresponding to non-zero extensions in each of $\mathop{\mathrm{Ext}}\nolimits^1(T,F)$ and $\mathop{\mathrm{Ext}}\nolimits^1(T(K_X),F)$, $E$ satisfies $$\ensuremath{\mathbb{H}}om(T,E)=\ensuremath{\mathbb{H}}om(T(K_X),E)=0.$$ It follows that $E$ is $\sigma_+$-stable of class $\mathbf{v}$. Indeed, if not, then the class of the maximal destabilizing subobject $A$ would satisfy $\phi^+(\mathbf{v}(A))>\phi^+(\mathbf{v})$ and thus must either be $\mathbf{w}$ or $2\mathbf{w}$. But then we would get $\ensuremath{\mathbb{H}}om(T,E)\neq 0$ or $\ensuremath{\mathbb{H}}om(T(K_X),E)\neq 0$, a contradiction. Thus we get a $\ensuremath{\mathbb{P}}^1\times\ensuremath{\mathbb{P}}^1$ worth of non-isomorphic $\sigma_+$-stable objects of class $\mathbf{v}$ that gets contracted by $\pi^+$. 
If $\mathbf{v}$ is not minimal, then as in \cref{enum-prop:exceptionalflop1}, we may apply the composition of spherical and weakly-spherical twists $\ensuremath{\mathbb{P}}hi$ to reduce to the minimal case.
Finally, we move on to \cref{enum-prop:spherical} and deal with both subcases at the same time. The proof proceeds exactly as in \cite[Lemma 9.1, case (b)]{BM14b} pursuant to the following remark: if there is a lattice point $\mathbf{a}$ in the parallelogram with vertices $0,\mathbf{w},\mathbf{v},\mathbf{v}-\mathbf{w}$ that satisfies $\mathbf{a}^2=-1$, then we are in \cref{enum-prop:exceptional}, which we have covered already. The argument of \cite[Lemma 9.1, case (b)]{BM14b} then carries through without change.
\end{proof}
Now we prove the converse to Proposition \ref{prop:flops}, namely that if $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ does not fall into any of the above mentioned cases, then $\ensuremath{\mathcal W}$ is not a genuine wall.
\mathbf{b}egin{Prop}\label{prop: fake or non-walls}
Assume that $\mathbf{v}$ is primitive and that $\ensuremath{\mathcal W}$ induces neither a divisorial contraction nor a $\ensuremath{\mathbb{P}}^1$-fibration. Assume further that we are not in \crefrange{enum-prop:sum2positive}{enum-prop:spherical} of Proposition \ref{prop:flops}. Then $\ensuremath{\mathcal W}$ is either a fake wall, or not a wall at all.
\end{Prop}
\mathbf{b}egin{proof}
We consider first the case that $\mathbf{v}$ is minimal in its $G_{\ensuremath{\mathbb{H}}H}$-orbit. Furthermore, we assume for now that $\mathbf{v}^2\geq 3$ and prove that in this case every $\sigma_+$-stable object $E$ of class $\mathbf{v}$ is $\sigma_0$-stable. If not, then some such $E$ is strictly $\sigma_0$-semistable, and thus $\sigma_-$-unstable. Let $\mathbf{a}_1,\ldots,\mathbf{a}_n$ be the Mukai vectors of the HN-filtration factors of $E$ with respect to $\sigma_-$. By assumption on the failure of condition \ref{enum-prop:sum2positive}, the $\mathbf{a}_i$ cannot all be in $P_{\ensuremath{\mathbb{H}}H}$, so $E$ must have a destabilizing spherical or exceptional subobject or quotient $T$ with $\mathbf{v}(T)=\mathbf{w}$.
If there is only one $\sigma_0$-stable spherical or exceptional object (in the latter case, uniqueness is only up to $-\mathop{\ord(\omega_S)}\nolimitstimes\ensuremath{\mathcal O}_X(K_X)$ of course), then clearly $\mathbf{v}-\mathbf{w}\in P_{\ensuremath{\mathbb{H}}H}$, so $\mathbf{v}^2-2\langle \mathbf{w},\mathbf{v}\rangle+\mathbf{w}^2\geq 0$, contradicting the assumption about the failure of conditions \ref{enum-prop:exceptional} and \ref{enum-prop:spherical} of Proposition \ref{prop:flops}.
Now suppose instead that there are two $\sigma_0$-stable spherical/exceptional objects with Mukai vectors $\mathbf{w}_0,\mathbf{w}_1$. We must have $\mathbf{v}-\mathbf{w}\in C_{\ensuremath{\mathcal W}}$, and moreover, by \cite[Lemma 4.6]{Yos16b} any stable factor of $T$ must also be spherical or exceptional, so $\mathbf{v}-\mathbf{w}_0$ or $\mathbf{v}-\mathbf{w}_1$ must be effective as well. The assumption about the failure of conditions \ref{enum-prop:exceptional} and \ref{enum-prop:spherical} in addition to the minimality assumption on $\mathbf{v}$ force $\langle \mathbf{v},\mathbf{w}_i\rangle>\frac{\mathbf{v}^2}{2}$, and thus that $(\mathbf{v}-\mathbf{w}_i)^2<\mathbf{w}_i^2$, for $i=0,1$. But then $\mathbf{v}$ must lie above the concave up (region of the) hyperbola $(\mathbf{v}-\mathbf{w}_1)^2=\mathbf{w}_1^2$ and below the concave down hyperbola $(\mathbf{v}-\mathbf{w}_0)^2=\mathbf{w}_0^2$. In case $\mathbf{w}_0^2=\mathbf{w}_1^2$, these two hyperbolas intersect at 0 and $\mathbf{w}_0+\mathbf{w}_1$, as pictured in \cref{fig:RegionBetweenHyperbolas}, while if, say, $-1=\mathbf{w}_0^2\neq \mathbf{w}_1^2=-2$, then we must have $(\mathbf{v}-\mathbf{w}_0)^2\leq -2$ and $(\mathbf{v}-\mathbf{w}_1)^2<-2$. Similarly to the previous case, $\mathbf{v}$ must lie above or on the hyperbola $(\mathbf{v}-\mathbf{w}_0)^2=-2$ and below the concave down hyperbola $(\mathbf{v}-\mathbf{w}_1)^2=-2$. One can easily check that, writing $\mathbf{v}_i=x_i\mathbf{w}_0+y_i\mathbf{w}_1$, $i=1,2$, these hyperbolas intersect at two points $\mathbf{v}_1$ and $\mathbf{v}_2$, both of which satisfy $0<x_i,y_i<1$. In either case, it follows that $\mathbf{v}$ must be located in the interior of the parallelogram with vertices $0,\mathbf{w}_0,\mathbf{w}_1,\mathbf{w}_0+\mathbf{w}_1$. But then neither $\mathbf{v}-\mathbf{w}_0$ nor $\mathbf{v}-\mathbf{w}_1$ can be effective, a contradiction. Thus $\ensuremath{\mathcal W}$ is not a wall at all as every $E\in M_{\sigma_+}(\mathbf{v})$ is $\sigma_0$-stable.
\mathbf{b}egin{figure}
\mathbf{b}egin{tikzpicture}[scale=1.5]
\mathbf{d}raw [->] (-1.5,0) -- (2,0);
\mathbf{d}raw[->] (0,-.2) -- (0,2);
\mathbf{d}raw [blue,domain=-1.5:0] plot (\x,{sqrt(.5*(pow(\x-1,2)-1))});
\mathbf{d}raw [blue,domain=-.43593:0] plot (\x,{.733002-sqrt(.5*(pow(\x+1.43593,2)-1))});
\mathbf{d}raw [blue,domain=-.43593:.5] plot (\x,{.733002+sqrt(.5*(pow(\x+1.43593,2)-1))});
\mathbf{d}raw [red,domain=-1.5:2] plot (\x,{sqrt(.5*(pow(\x,2)+.25))});
\filldraw [gray] (-.2,.38079) circle (1pt) node [anchor=north east] {$\mathbf{v}_0$};
\filldraw [gray] (1,0) circle (1pt) node [anchor=south west] {$\mathbf{w}_0$};
\filldraw [gray] (-1.43593,0.733002) circle (1pt) node [anchor=east] {$\mathbf{w}_1$};
\node[above] at (-1,1.5) {$(\mathbf{v}-\mathbf{w}_0)^2=-1$};
\node[above right] at (.3,1.5) {$(\mathbf{v}-\mathbf{w}_1)^2=-1$};
\node[left] at (2,1) {$\mathbf{v}^2=\mathbf{v}_0^2$};
\end{tikzpicture}
\caption{The region between the two hyperbolas when $\mathbf{w}_0^2=\mathbf{w}_1^2=-1$.}
\label{fig:RegionBetweenHyperbolas}
\end{figure}
Now let us suppose that $\mathbf{v}$ is still minimal but $\mathbf{v}^2=1,2$. We will show that though there may be some strictly $\sigma_0$-semistable object $E$, there are no curves of such objects that are $\sigma_+$-stable. Take a strictly $\sigma_0$-semistable object $E$ and consider its Jordan-H\"{o}lder filtration with respect to $\sigma_0$ with $\sigma_0$-stable factors $E_i$ with $\mathbf{v}(E_i)=\mathbf{a}_i$. Then we may write $\mathbf{v}=\sum_{i=1}^n\mathbf{a}_i$, and the minimality of $\mathbf{v}$ forces $\langle\mathbf{v},\mathbf{a}_i\rangle\geq 0$ for all $i$, so we may order the $\mathbf{a}_i$ such that $$0\leq\langle\mathbf{v},\mathbf{a}_1\rangle\leq\langle\mathbf{v},\mathbf{a}_2\rangle\leq\mathbf{d}ots\leq\langle\mathbf{v},\mathbf{a}_n\rangle.$$
Let us begin with $\mathbf{v}^2=1$, and we observe that for $E$ to be strictly $\sigma_0$-semistable we must have $\langle\mathbf{v},\mathbf{a}_1\rangle=0$, since otherwise $$1=\mathbf{v}^2=\sum_{i=1}^n\langle\mathbf{v},\mathbf{a}_i\rangle\geq n.$$ But then the signature of $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ forces $\mathbf{a}_1^2<0$, so that $\mathbf{a}_1^2=-2$ or $-1$ and $E_1$ is spherical or exceptional, respectively. We claim that actually $E_1$ must be exceptional. Indeed, if $E_1$ were spherical then $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ would be non-isotropic by \cref{Prop:isotropic lattice}, so $\ensuremath{\mathcal W}$ would induce a divisorial contraction for each choice of $L$ by \cref{Lem:NonisotropicDivisorialContraction}, contrary to assumption. Thus $E_1$ must be exceptional. It follows that $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ is isotropic as $(\mathbf{v}-\mathbf{a}_1)^2=0$, and $\langle\mathbf{v}-\mathbf{a}_1,\mathbf{a}_1\rangle=1$ implies that $\mathbf{v}=\mathbf{a}_1+\mathbf{u}_1$, which is the only decomposition of $\mathbf{v}$ into effective classes. If $\ell(\mathbf{u}_1)=2$, then by \cref{Lem: Hilbert-Chow} $\ensuremath{\mathcal W}$ is totally semistable but contracts no curves. If $\ell(\mathbf{u}_1)=1$, then by \cref{Lem:isotropic divisorial l=1 1} $M_{\sigma_+}(\mathbf{v},L)\mathbf{b}ackslash M_{\sigma_0}^s(\mathbf{v},L)$ is a divisor containing no contracted curves for each choice of $L$, as required.
Now we consider $\mathbf{v}^2=2$. If $\langle\mathbf{v},\mathbf{a}_1\rangle>0$, then we must have $n=2$ and $\langle\mathbf{v},\mathbf{a}_1\rangle=\langle\mathbf{v},\mathbf{a}_2\rangle=1$. Moreover, $$\mathbf{a}_1^2=(\mathbf{v}-\mathbf{a}_2)^2=\mathbf{v}^2-2\langle\mathbf{v},\mathbf{a}_2\rangle+\mathbf{a}_2^2=\mathbf{a}_2^2.$$ Thus if $\mathbf{a}_1^2\geq 0$, then $\mathbf{a}_2^2\geq 0$, so from $\mathbf{v}^2=2$, we see that the only decomposition with $\mathbf{a}_i\in P_\ensuremath{\mathbb{H}}H$ occurs when $\mathbf{a}_i^2=0$ and $\langle\mathbf{a}_1,\mathbf{a}_2\rangle=1$. It follows that $\ell(\mathbf{a}_1)=\ell(\mathbf{a}_2)=1$, so we get that $M_{\sigma_+}(\mathbf{v},L)\mathbf{b}ackslash M_{\sigma_0}^s(\mathbf{v},L)$ is a divisor containing no contracted curves by the end of the proof of \cref{Lem:isotropic divisorial l=1 1}. Otherwise, $\mathbf{a}_i^2<0$, and we have $\langle\mathbf{v},\mathbf{a}_i\rangle=\frac{\mathbf{v}^2}{2}$ as in \cref{enum-prop:exceptionalflop1,enum-prop:sphericalflop2} of \cref{prop:flops}, contrary to our assumption.
Thus we must consider $\langle\mathbf{v},\mathbf{a}_1\rangle=0$, and from the signature of $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ we must have $\mathbf{a}_1^2<0$ so that $\mathbf{a}_1^2=-2$ or $-1$ and $E_1$ is spherical or exceptional, respectively.
If $E_1$ is spherical, then $(\mathbf{v}-\mathbf{a}_1)^2=0$, so $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ is isotropic, and we either have $\mathbf{v}-\mathbf{a}_1=\mathbf{u}_1$ with $\langle\mathbf{u}_1,\mathbf{a}_1\rangle=2$ or $\mathbf{v}-\mathbf{a}_1=2\mathbf{u}_1$ with $\langle\mathbf{u}_1,\mathbf{a}_1\rangle=1$ (in which case $\ell(\mathbf{u}_1)=1$). If $\ell(\mathbf{u}_1)=2$, then we must have $\mathbf{v}=\mathbf{a}_1+\mathbf{u}_1$ with $\langle\mathbf{a}_1,\mathbf{u}_1\rangle=2$, and this is the only decomposition of $\mathbf{v}$. But then by \cref{Lem:P1FibrationSpherical} $\ensuremath{\mathcal W}$ is not a wall at all since by assumption $\ensuremath{\mathcal W}$ does not induce a $\ensuremath{\mathbb{P}}^1$-fibration. If $\ell(\mathbf{u}_1)=1$, then from the end of the proof of \cref{Lem:isotropic divisorial 1-1 2}, we cannot have the decomposition $\mathbf{v}=\mathbf{a}_1+\mathbf{u}_1$ as this would lead to a divisorial contraction for each choice of $L$, contrary to assumption. Thus we must have $\mathbf{v}=\mathbf{a}_1+2\mathbf{u}_1$ with $\langle\mathbf{a}_1,\mathbf{u}_1\rangle=1$. But then as $\ensuremath{\mathcal W}$ does not induce a $\ensuremath{\mathbb{P}}^1$-fibration, we must have $L\equiv D+(\frac{\mathop{\mathrm{rk}}\mathbf{v}}{2}+1)K_X\pmod 2$ and $M_{\sigma_+}(\mathbf{v},L)\mathbf{b}ackslash M_{\sigma_0}^s(\mathbf{v},L)$ is a divisor that contains no contracted curves by \cref{Lem:isotropic totally semistable divisorial contraction l=1}, as required.
If $E_1$ is exceptional, then $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ must be non-isotropic. Indeed, if $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ were isotropic, then we could write $\mathbf{v}=x\mathbf{u}_1+y\mathbf{a}_1$ with $x,y\in\ensuremath{\mathbb{Z}}$, and the two conditions $\mathbf{v}^2=2$ and $\langle\mathbf{v},\mathbf{a}_1\rangle=0$ would force $2=y^2$, a contradiction. If $L\equiv D+\frac{\mathop{\mathrm{rk}}\mathbf{v}}{2}K_X\pmod2$ and $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ falls into \cref{enum:OneExceptionalOneSpherical} of \cref{Prop:lattice classification}, then from \cref{Lem:ExceptionalDivisorialNonContraction}, $\ensuremath{\mathcal W}$ would induce a divisorial contraction, contrary to our assumptions. So either $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ falls into \cref{enum:OneExceptionalOneSpherical} of \cref{Prop:lattice classification} and $L\not\equiv D+\frac{\mathop{\mathrm{rk}}\mathbf{v}}{2}K_X\pmod 2$ or $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ does not fall into \cref{enum:OneExceptionalOneSpherical} of \cref{Prop:lattice classification}. But then \cref{Lem:ExceptionalDivisorialNonContraction} shows that no curves get contracted, as required.
If $\mathbf{v}$ is not minimal, then $\mathbf{v}\in\ensuremath{\mathbb{C}}C_n$ for some $0\neq n\in\ensuremath{\mathbb{Z}}$ and there exists a minimal class $\mathbf{v}_0$ in the same orbit. By \cref{Prop:NonMinimalIsomorphism,Prop:CompositionSphericalExceptional}, if $n$ is even then $M_{\sigma_+}(\mathbf{v})\cong M_{\sigma_+}(\mathbf{v}_0)$, induced by a sequence of spherical/exceptional twists, and if $n$ is odd, then $M_{\sigma_+}(\mathbf{v})\cong M_{\sigma_+}(\mathbf{v}_1)\cong M_{\sigma_-}(\mathbf{v}_0)$. Since the assumptions of the proposition are invariant under $G_{\ensuremath{\mathbb{H}}H}$, the same assumptions apply to $\mathbf{v}_0$, so every $\sigma_\pm$-stable object of class $\mathbf{v}_0$ is $\sigma_0$-stable. If $\ensuremath{\mathbb{P}}hi$ is the sequence of spherical/exceptional twists used in this isomorphism, then from the definition of a spherical/exceptional twist, it is easy to see that the $S$-equivalence class of $\ensuremath{\mathbb{P}}hi(E_0)$ is determined by that of $E_0$. Since this equivalence is trivial on $M_{\sigma_{\pm}}(\mathbf{v}_0)$, it must be trivial on $M_{\sigma_+}(\mathbf{v})$ as well, implying that $\pi^+$ is an isomorphism. Thus $\ensuremath{\mathcal W}$ is a fake wall, as claimed.
\end{proof}
Finally we can prove \cref{classification of walls}:
\mathbf{b}egin{proof}[Proof of \cref{classification of walls}]
The theorem follows from \cref{prop: fake or non-walls,prop:flops,Prop:isotropic-classification,Lem: condition for totally semistable wall,Prop:NonisotropicDivisorialContraction}.
\end{proof}
We will end this section by observing that \cref{classification of walls,Prop:OrthgonalIsomorphism 2,Prop:CompositionSphericalExceptional} prove part \ref{enum:MT1-two moduli are birational} of \cref{Thm:MainTheorem1}. Indeed, connect $\sigma$ and $\tau$ by a path, $\sigma(t)$, with $0\leq t\leq 1$, $\sigma(0)=\sigma$, and $\sigma(1)=\tau$. Observe that as the set of walls is locally finite, $\sigma(t)$ only crosses finitely many of them, and by perturbing $\sigma(t)$ slightly, we may assume that $\sigma(t)$ only crosses one wall at a time (that is, if $\sigma(t_0)\in\ensuremath{\mathcal W}$ then $\sigma(t_0)$ is a generic point of $\ensuremath{\mathcal W}$). If $\sigma(t)$ does not cross any totally semistable walls, then $M_\sigma(\mathbf{v})$ and $M_\tau(\mathbf{v})$ are clearly birational. Otherwise, it suffices to consider that $\sigma=\sigma_+$ and $\tau=\sigma_-$ are two sufficiently close stability conditions separated by a single totally semistable wall $\ensuremath{\mathcal W}$. If there exists a spherical/exceptional class $\mathbf{w}\in\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ such that $\langle\mathbf{v},\mathbf{w}\rangle=0$, then $M_{\sigma_+}(\mathbf{v})\cong M_{\sigma_-}(\mathbf{v})$ by \cref{Prop:OrthgonalIsomorphism 2,Prop:CompositionSphericalExceptional}. Otherwise, by \cref{classification of walls,Prop:NonMinimalIsomorphism,Prop:CompositionSphericalExceptional}, we may assume that $\mathbf{v}$ is minimal so that we must have $\langle\mathbf{v},\mathbf{u}\rangle=1$ for an isotropic $\mathbf{u}\in\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ such that $\ell(\mathbf{u})=2$. But then we have seen that $\pi^+$ is a divisorial contraction, so we again have that $M_{\sigma_+}(\mathbf{v})$ and $M_{\sigma_-}(\mathbf{v})$ are birational, as claimed.
In \cref{sec:Main Theorems} we will prove part \ref{enum:MT1-birational map given by FM transform} of \cref{Thm:MainTheorem1} and the rest of our main results. In particular, we will show that, under generic conditions, the birational map between $M_{\sigma_+}(\mathbf{v})$ and $M_{\sigma_-}(\mathbf{v})$ is induced by a Fourier-Mukai transform. This is obvious when $\ensuremath{\mathcal W}$ induces a small contraction, but is more subtle in the case of divisorial contractions. But while the behavior of divisorial contractions for Bridgeland moduli on Enriques surfaces is analogous to the K3 case for divisorial contractions of Brill-Noether, Hilbert-Chow, and Li-Gieseker-Uhlenbeck type, there is an additional type of divisorial contraction that requires extra care. We will turn to this subject in the next section.
\section{LGU on the covering K3 surface}
As its name suggests, the induced Li-Gieseker-Uhlenbeck (iLGU) type contraction, which occurs when the hyperbolic lattice $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ contains an isotropic $\mathbf{u}$ such that $\langle\mathbf{v},\mathbf{u}\rangle=1=\ell(\mathbf{u})$, is induced from the covering K3. The associated HN-filtration factors are objects $F\in M_{\sigma_0}^s(2\mathbf{u})$. But while this stable locus has dimension two, it is not proper as $M_{\sigma_0}(2\mathbf{u})\mathbf{b}ackslash M_{\sigma_0}^s(2\mathbf{u})\neq\mathbf{v}arnothing$. So we cannot use the usual machinery as in the regular LGU case where $\ell(\mathbf{u})=2$. There we use the universal family associated to the proper two-dimensional moduli space to obtain the requisite Fourier-Mukai transform as in \cite[Corollary 2.8]{BM01}. Instead, we show that we may induce a Fourier-Mukai transform on $X$ by considering the corresponding LGU type contraction on its K3 cover $\mathbf{w}idetilde{X}$. So we turn now to considering LGU contractions more carefully in the K3 case.
\subsection{Fourier-Mukai transform associated to the Li-Gieseker-Uhlenbeck contraction}\label{Sec:FM transform associated to LGU}
Let $X_1$ be a K3 surface and $\mathbf{u}_1$
be a primitive isotropic Mukai vector on $X_1$.
For a general stability condition
$\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(X_1)$, we consider $X_2:=M_\sigma(\mathbf{u}_1)$, which is another K3 surface.
Let $\ensuremath{\mathcal E} \in \ensuremath{\mathbb{D}}b(X_1 \times X_2,1_{X_1} \times \mathbf{a}lpha)$ be a
universal object for $M_\sigma(\mathbf{u}_1)$, considered as a twisted object where
$\mathbf{a}lpha$ is a 2-cocycle of $\ensuremath{\mathcal O}_{X_2}^{\times}$.
We set
\mathbf{b}egin{equation}
\ensuremath{\mathbb{P}}hi:=\ensuremath{\mathbb{P}}hi_{X_1 \to X_2}^{\ensuremath{\mathcal E}^{\mathbf{v}ee}},\;
\ensuremath{\mathbb{P}}si:=\ensuremath{\mathbb{P}}hi_{X_2 \to X_1}^{\ensuremath{\mathcal E}}.
\end{equation}
For a smooth variety $Y$,
we also set
\mathbf{b}egin{equation}
D_Y(E):=E^{\mathbf{v}ee}=\ensuremath{\mathbb{R}}lHom_{\ensuremath{\mathcal O}_Y}(E,\ensuremath{\mathcal O}_Y),\; E \in\ensuremath{\mathbb{D}}b(Y).
\end{equation}
Let $\mathbf{v}_1$ be a Mukai vector on $X_1$ such that $\langle \mathbf{u}_1,\mathbf{v}_1 \rangle=2$. It is well known that, using the Fourier-Mukai transform $\ensuremath{\mathbb{P}}hi$, the moduli space $M_{\sigma}(\mathbf{v}_1)$ is isomorphic to a moduli space of rank two Gieseker stable sheaves of Mukai vector $\mathbf{v}_2=\mathbf{v}(\ensuremath{\mathbb{P}}hi(\mathbf{v}_1))$ on $X_2$. In the next result, we translate a well-known concrete result about crossing the Uhlenbeck wall for $\mathbf{v}_2$ on $X_2$ to an intrinsic analogous statement for $X_1$.
\mathbf{b}egin{Prop}\label{prop:uhl}
There is an object $\ensuremath{\mathcal F} \in \ensuremath{\mathbb{D}}b(X_1 \times X_1)$
which induces a Fourier-Mukai transform
$\ensuremath{\mathbb{P}}hi_{X_1 \to X_1}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}}:\ensuremath{\mathbb{D}}b(X_1) \to \ensuremath{\mathbb{D}}b(X_1)$
such that
\mathbf{b}egin{enumerate}
\item\label{enum:PreservationOfStabd}
$\ensuremath{\mathbb{P}}hi_{X_1 \to X_1}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}}$ preserves $\mathop{\mathrm{Stab}}\nolimits^\mathbf{d}agger(X_1)$.
\item\label{enum:FormulaForLGUFMTransform} The induced action on $H^*(X_1,\ensuremath{\mathbb{Z}})$ is given by the formula
\mathbf{b}egin{equation}
D_{X_1} \circ \ensuremath{\mathbb{P}}hi_{X_1 \to X_1}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}}(\mathbf{v})
=-(\mathbf{v}+\tfrac{\mathbf{v}_1^2}{2} \langle \mathbf{v},\mathbf{u}_1 \rangle \mathbf{u}_1-\langle \mathbf{v},\mathbf{v}_1 \rangle \mathbf{u}_1-
\langle \mathbf{v},\mathbf{u}_1 \rangle \mathbf{v}_1),
\end{equation} for any $\mathbf{v}\in H^*(X_1,\ensuremath{\mathbb{Z}})$.
\end{enumerate}
\end{Prop}
\mathbf{b}egin{proof}
For $F \in \ensuremath{\mathbb{D}}b(X_1)$ with $\mathbf{v}(F)=\mathbf{v}_1$,
we set
\mathbf{b}egin{equation}
N:=\mathbf{d}et \ensuremath{\mathbb{P}}hi(F),\mbox{ and }\mathbf{v}_2:=\mathbf{v}(\ensuremath{\mathbb{P}}hi(F))=(2,\xi,a).
\end{equation}
As mentioned above, $\ensuremath{\mathbb{P}}hi$ induces an isomorphism between $M_{\sigma}(\mathbf{v}_1)$ and the moduli space $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v}_2,\mathbf{a}lpha)$ of $\mathop{\ord(\omega_S)}\nolimitsmega$-Gieseker stable $\mathbf{a}lpha$-twisted sheaves of Mukai vector $\mathbf{v}_2$ on $X_2$. It is well known that crossing the LGU wall of the Gieseker chamber for $\mathbf{v}_2$ is induced by the Fourier-Mukai transform $(\mathbf{u}nderline{\hphantom{A}})^\mathbf{v}ee\mathop{\ord(\omega_S)}\nolimitstimes N=(\mathop{\ord(\omega_S)}\nolimitstimes N)\circ D_{X_2}$. Pulling this back to $X_1$, we have a sequence of equivalences:
\mathbf{b}egin{equation}
\mathbf{b}egin{CD}
\ensuremath{\mathbb{D}}b(X_1) @>{\ensuremath{\mathbb{P}}hi}>> \ensuremath{\mathbb{D}}b(X_2,\mathbf{a}lpha^{-1}) @>{D_{X_2}}>>
\ensuremath{\mathbb{D}}b(X_2,\mathbf{a}lpha)
@>{\mathop{\ord(\omega_S)}\nolimitstimes N}>>\ensuremath{\mathbb{D}}b(X_2,\mathbf{a}lpha^{-1})@>{\ensuremath{\mathbb{P}}si}>> \ensuremath{\mathbb{D}}b(X_1),
\end{CD}
\end{equation}
and we denote their composition by
\mathbf{b}egin{equation}\label{eq:Xi}
\ensuremath{\mathbf{w}idetilde{X}}i:=\ensuremath{\mathbb{P}}si \circ(\mathop{\ord(\omega_S)}\nolimitstimes N) \circ D_{X_2} \circ \ensuremath{\mathbb{P}}hi,
\end{equation}
which is a (contravariant)-equivalence $\ensuremath{\mathbb{D}}b(X_1) \to \ensuremath{\mathbb{D}}b(X_1)$.
Thus
$D_{X_1} \circ \ensuremath{\mathbf{w}idetilde{X}}i$ is an autoequivalence of $\ensuremath{\mathbb{D}}b(X_1)$.
Since
$[2] \circ \ensuremath{\mathbf{w}idetilde{X}}i=
D_{X_1} \circ \ensuremath{\mathbb{P}}hi_{X_2 \to X_1}^{\ensuremath{\mathcal E}^{\mathbf{v}ee}} \circ(\mathop{\ord(\omega_S)}\nolimitstimes N^{\mathbf{v}ee}) \circ \ensuremath{\mathbb{P}}hi$
(by Grothendieck-Serre duality),
$D_{X_1} \circ \ensuremath{\mathbf{w}idetilde{X}}i$ defines a Fourier-Mukai transform
$\ensuremath{\mathbb{P}}hi_{X_1 \to X_1}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}}:\ensuremath{\mathbb{D}}b(X_1) \to \ensuremath{\mathbb{D}}b(X_1)$.
To see that $\ensuremath{\mathbb{P}}hi_{X_1\to X_1}^{\ensuremath{\mathcal F}^\mathbf{v}ee}$ preserves $\mathop{\mathrm{Stab}}\nolimitsd(X_1)$, note first that $\ensuremath{\mathbb{P}}hi(\sigma)$ is geometric as $\ensuremath{\mathbb{P}}hi$ identifies the objects of $M_{\sigma}(\mathbf{u}_1)$ with the points of $X_2$ considered as $\ensuremath{\mathbb{P}}hi(\sigma)$-stable objects of class $(0,0,1)$. Thus
$\ensuremath{\mathbb{P}}hi$ induces an isomorphism $\mathop{\mathrm{Stab}}\nolimitsd(X_1) \cong
\mathop{\mathrm{Stab}}\nolimitsd(X_2)$. Since $\mathop{\ord(\omega_S)}\nolimitstimes N^{\mathbf{v}ee}$ also preserves the space of full numerical stability conditions, $\ensuremath{\mathbb{P}}hi_{X_1 \to X_1}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}}$
preserves $\mathop{\mathrm{Stab}}\nolimitsd(X_1)$, as claimed in \ref{enum:PreservationOfStabd}.
Now we prove \ref{enum:FormulaForLGUFMTransform}. Since $\mathbf{v}_2=(2,\xi,a)$, it follows that
$$
(\ensuremath{\mathbb{Q}} \mathbf{v}_2+\ensuremath{\mathbb{Q}} \mathbf{v}arrho_{X_2})^\perp=\Set{e^{\xi/2}(0,D,0) \ | \ D\in H^2(X_2,\ensuremath{\mathbb{Q}})},
$$
where the orthogonal complement is taken inside $H^*(X_2,\ensuremath{\mathbb{Q}})$. Moreover, under $(\mathop{\ord(\omega_S)}\nolimitstimes N) \circ D_{X_2}$, we have
\mathbf{b}egin{equation}
\mathbf{b}egin{matrix}
\mathbf{v}_2 & \mapsto & \mathbf{v}_2\\
\mathbf{v}arrho_{X_2} & \mapsto & \mathbf{v}arrho_{X_2}\\
e^{\xi/2}(0,D,0) & \mapsto & -e^{\xi/2}(0,D,0).
\end{matrix}
\end{equation}
Hence
\mathbf{b}egin{equation}
\ensuremath{\mathbf{w}idetilde{X}}i_{|(\ensuremath{\mathbb{Q}} \mathbf{u}_1+\ensuremath{\mathbb{Q}} \mathbf{v}_1)}=1_{(\ensuremath{\mathbb{Q}} \mathbf{u}_1+\ensuremath{\mathbb{Q}} \mathbf{v}_1)},\;
\ensuremath{\mathbf{w}idetilde{X}}i_{|(\ensuremath{\mathbb{Q}} \mathbf{u}_1+\ensuremath{\mathbb{Q}} \mathbf{v}_1)^\perp}=-1_{(\ensuremath{\mathbb{Q}} \mathbf{u}_1+\ensuremath{\mathbb{Q}} \mathbf{v}_1)^\perp}.
\end{equation}
Then it is easy to see that for any $\mathbf{v}\in H^*(X_1,\ensuremath{\mathbb{Z}})$ we have
\mathbf{b}egin{equation}
\ensuremath{\mathbf{w}idetilde{X}}i(\mathbf{v})=-(\mathbf{v}+\tfrac{\mathbf{v}_1^2}{2} \langle \mathbf{v},\mathbf{u}_1 \rangle \mathbf{u}_1-
\langle \mathbf{v},\mathbf{v}_1 \rangle \mathbf{u}_1-
\langle \mathbf{v},\mathbf{u}_1 \rangle \mathbf{v}_1).
\end{equation}
\end{proof}
\subsection{The induced Li-Gieseker-Uhlenbeck wall and its Fourier-Mukai transform on an Enriques surface}
Having delved more deeply into the FM transform associated to a wall of LGU type on K3 surfaces, we explore the corresponding picture on the Enriques quotient. So let $X$ be an Enriques surface with K3 cover $\mathbf{w}idetilde{X}$.
\mathbf{b}egin{Prop}\label{prop:Enriques-refl}
Assume that $\ensuremath{\mathbb{P}}ic(\mathbf{w}idetilde{X})=\mathbf{v}arpi^*\ensuremath{\mathbb{P}}ic(X)$.
Let $\mathbf{u}_0$ and $\mathbf{v}_0$ be Mukai vectors such that
$\mathbf{u}_0$ is primitive and isotropic,
$\langle \mathbf{u}_0,\mathbf{v}_0 \rangle=1$ and $\ell(\mathbf{u}_0)=1$.
We set
$$
\mathbf{v}':=-(\mathbf{v}arrho_X+2\mathbf{v}_0^2 \langle \mathbf{v}arrho_X,\mathbf{u}_0 \rangle \mathbf{u}_0
-2\langle \mathbf{v}arrho_X,\mathbf{v}_0 \rangle \mathbf{u}_0-2
\langle \mathbf{v}arrho_X,\mathbf{u}_0 \rangle \mathbf{v}_0).
$$
Then there is an autoequivalence
$\ensuremath{\mathbb{P}}hi_{X \to X}^{\ensuremath{\mathcal E}^{\mathbf{v}ee}}:\ensuremath{\mathbb{D}}b(X) \to \ensuremath{\mathbb{D}}b(X)$
such that
\mathbf{b}egin{enumerate}
\item\label{enum:FormulaForLGUFM-Enriques} For any $\mathbf{v}\in\ensuremath{\mathbb{H}}al(X,\ensuremath{\mathbb{Z}})$,
\mathbf{b}egin{equation}
D_X \circ \ensuremath{\mathbb{P}}hi_{X \to X}^{\ensuremath{\mathcal E}^{\mathbf{v}ee}}(\mathbf{v})=
-(\mathbf{v}+2\mathbf{v}_0^2 \langle \mathbf{v},\mathbf{u}_0 \rangle \mathbf{u}_0-2\langle \mathbf{v},\mathbf{v}_0 \rangle \mathbf{u}_0-
2\langle \mathbf{v},\mathbf{u}_0 \rangle \mathbf{v}_0).
\end{equation}
\item\label{enum:ClassOfRestriction}
$\mathbf{v}(\ensuremath{\mathcal E}|_{X \times \{ x\}})=\mathbf{v}'$.
\item
$\mathbf{v}arpi^* \circ D_X \circ \ensuremath{\mathbb{P}}hi_{X \to X}^{\ensuremath{\mathcal E}^{\mathbf{v}ee}}=\ensuremath{\mathbf{w}idetilde{X}}i \circ \mathbf{v}arpi^*$
for $\ensuremath{\mathbf{w}idetilde{X}}i$ in \eqref{eq:Xi}.
\end{enumerate}
\end{Prop}
\mathbf{b}egin{proof}
We note that $\mathbf{v}'$ is a primitive and isotropic Mukai vector with
$\ell(\mathbf{v}')=2$.
We set $\mathbf{v}_1:=\mathbf{v}arpi^*(\mathbf{v}_0)$ and $\mathbf{u}_1:=\mathbf{v}arpi^*(\mathbf{u}_0)$.
By \cref{prop:uhl}, we have an object
$\ensuremath{\mathcal F} \in \ensuremath{\mathbb{D}}b(\mathbf{w}idetilde{X} \times \mathbf{w}idetilde{X})$
which defines a Fourier-Mukai transform satisfying \ref{enum:PreservationOfStabd} and \ref{enum:FormulaForLGUFMTransform} of \cref{prop:uhl}. We will show that $\ensuremath{\mathcal F}$ descends, in an appropriate sense, to an object $\ensuremath{\mathcal E}\in\ensuremath{\mathbb{D}}b(X\times X)$ that defines the desired autoequivalence of $\ensuremath{\mathbb{D}}b(X)$.
Since $\ensuremath{\mathbb{P}}hi_{\mathbf{w}idetilde{X} \to \mathbf{w}idetilde{X}}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}}$ preserves
$\mathop{\mathrm{Stab}}\nolimits^\mathbf{d}agger(\mathbf{w}idetilde{X})$, which is isomorphic to $\mathop{\mathrm{Stab}}\nolimits^\mathbf{d}agger(X)$ from the assumption $\ensuremath{\mathbb{P}}ic(\mathbf{w}idetilde{X})=\mathbf{v}arpi^*\ensuremath{\mathbb{P}}ic(X)$ by \cite[Theorem 1.2]{MMS09}, there is an $\iota$-invariant stability condition
$\sigma' \in \mathop{\mathrm{Stab}}\nolimits^\mathbf{d}agger(\mathbf{w}idetilde{X})$ such that
$\ensuremath{\mathcal F}$ is a family of $\sigma'$-stable objects.
By the $\iota$-invariance of $\sigma'$,
$\iota^*(\ensuremath{\mathcal F}|_{\mathbf{w}idetilde{X} \times \{ x \}})$ is also $\sigma'$-stable for each $x \in \mathbf{w}idetilde{X}$, and moreover, $2\mathbf{v}(\ensuremath{\mathcal F}|_{\mathbf{w}idetilde{X} \times \{ x \}})=\mathbf{v}arpi^*(\mathbf{v}')$. Hence
we have
an isomorphism $\tau:\mathbf{w}idetilde{X} \to \mathbf{w}idetilde{X}$
such that
\mathbf{b}egin{equation}
(\iota \times 1_{\mathbf{w}idetilde{X}})^*(\ensuremath{\mathcal F}) \cong
(1_{\mathbf{w}idetilde{X}} \times \tau)^*(\ensuremath{\mathcal F}) \mathop{\ord(\omega_S)}\nolimitstimes L,
\end{equation}
where $L$ is a line bundle on $\mathbf{w}idetilde{X}$.
Then we have a commutative diagram
\mathbf{b}egin{equation}
\mathbf{b}egin{CD}
\ensuremath{\mathbb{D}}b(\mathbf{w}idetilde{X}) @>{\ensuremath{\mathbb{P}}hi_{\mathbf{w}idetilde{X} \to
\mathbf{w}idetilde{X}}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}}}>> \ensuremath{\mathbb{D}}b(\mathbf{w}idetilde{X})\\
@V{\iota^*}VV @VV{(\mathop{\ord(\omega_S)}\nolimitstimes L^{\mathbf{v}ee}) \circ \tau^*}V \\
\ensuremath{\mathbb{D}}b(\mathbf{w}idetilde{X}) @>>{\ensuremath{\mathbb{P}}hi_{\mathbf{w}idetilde{X} \to
\mathbf{w}idetilde{X}}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}}}>\ensuremath{\mathbb{D}}b(\mathbf{w}idetilde{X}).\\
\end{CD}
\end{equation}
By \cref{prop:uhl}, we see that
\mathbf{b}egin{equation}
\iota^* \circ \ensuremath{\mathbb{P}}hi_{\mathbf{w}idetilde{X} \to \mathbf{w}idetilde{X}}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}}(\mathbf{v})
=\ensuremath{\mathbb{P}}hi_{\mathbf{w}idetilde{X} \to \mathbf{w}idetilde{X}}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}} \circ \iota^*(\mathbf{v}),\;
\mathbf{v} \in H^*(\mathbf{w}idetilde{X},\ensuremath{\mathbb{Z}}).
\end{equation}
Hence we get
\mathbf{b}egin{equation}
\iota^*(\mathbf{v})=(\mathop{\ord(\omega_S)}\nolimitstimes L^{\mathbf{v}ee}) \circ \tau^*(\mathbf{v}),\;
\mathbf{v} \in H^*(\mathbf{w}idetilde{X},\ensuremath{\mathbb{Z}}),
\end{equation}
which implies $L=\ensuremath{\mathcal O}_{\mathbf{w}idetilde{X}}$
and $\iota^*=\tau^*$
on $H^*(\mathbf{w}idetilde{X},\ensuremath{\mathbb{Z}})$ (by substituting $\mathbf{v}=\mathbf{v}(\ensuremath{\mathcal O}_{\mathbf{w}idetilde{X}})$).
By the Torelli theorem,
$\iota=\tau$. Therefore $\ensuremath{\mathbb{P}}hi_{\mathbf{w}idetilde{X} \to \mathbf{w}idetilde{X}}^{\ensuremath{\mathcal F}^{\mathbf{v}ee}}$ and
$\iota$ commute.
Then as in the proof of \cite[Theorem 4.5]{BM98}, there is $\ensuremath{\mathcal E} \in \ensuremath{\mathbb{D}}b(X \times X)$ such that
\mathbf{b}egin{equation}
(\mathbf{v}arpi \times 1_{\mathbf{w}idetilde{X}})_*(\ensuremath{\mathcal F})
\cong (1_X \times \mathbf{v}arpi )^*(\ensuremath{\mathcal E}).
\end{equation}
It follows that $\ensuremath{\mathbb{P}}hi_{X \to X}^{\ensuremath{\mathcal E}^{\mathbf{v}ee}}$ defines the desired
Fourier-Mukai transform.
\end{proof}
\mathbf{b}egin{Rem}
The conditions (1) and (2) are cohomological, and
\cite[Theorem 1.2 (ii)]{MMS09} shows that there are autoequivalences of $\ensuremath{\mathbb{D}}b(X)$
satisfying (1) and (2)
without the assumption $\ensuremath{\mathbb{P}}ic(\mathbf{w}idetilde{X})=\mathbf{v}arpi^*\ensuremath{\mathbb{P}}ic(X)$.
Since we need the property (3) for the proof of Proposition \ref{prop:LGUK3} below, we have given a proof here.
\end{Rem}
Now we can relate the induced Fourier-Mukai transform $\ensuremath{\mathbb{P}}hi_{X\to X}^{\ensuremath{\mathcal E}^\mathbf{v}ee}$ to the birational behavior at an iLGU wall.
\mathbf{b}egin{Prop}\label{prop:LGUK3}
Assume that $\ensuremath{\mathbb{P}}ic(\mathbf{w}idetilde{X})=\mathbf{v}arpi^*\ensuremath{\mathbb{P}}ic(X)$.
Let $\ensuremath{\mathcal W}$ be a wall for $\mathbf{v}$ defined by an isotropic Mukai vector
$\mathbf{u}$ such that $\langle \mathbf{u},\mathbf{v} \rangle=1$ and $\ell(\mathbf{u})=1$.
Let $\sigma_0 \in \ensuremath{\mathcal W}$ be a generic stability condition.
Assume that $\sigma_\pm$ are sufficiently close stability conditions in opposite, adjacent chambers separated by $\ensuremath{\mathcal W}$.
Then
$D_X \circ \ensuremath{\mathbb{P}}hi_{X \to X}^{\ensuremath{\mathcal E}^{\mathbf{v}ee}}$ induces an isomorphism
$M_{\sigma_+}(\mathbf{v}) \cong M_{\sigma_-}(\mathbf{v})$.
\end{Prop}
\mathbf{b}egin{proof}
There are two important consequences of our assumption that $\ensuremath{\mathbb{P}}ic(\mathbf{w}idetilde{X})=\mathbf{v}arpi^*\ensuremath{\mathbb{P}}ic(X)$. The first is that $H^*_{\mathrm{alg}}(\mathbf{w}idetilde{X},{\mathbb Z})=\ensuremath{\mathbb{N}}S(X)_{tf}(2) \mathop{\ord(\omega_S)}\nolimitsplus \langle 2 \rangle
\mathop{\ord(\omega_S)}\nolimitsplus \langle -2 \rangle$, where we recall that $\ensuremath{\mathbb{N}}S(X)_{tf}(2)$ is the same underlying lattice as the torsion free lattice $\ensuremath{\mathbb{N}}S(X)_{tf}$ but with pairing multiplied by 2; and the second is that $\mathop{\mathrm{Stab}}\nolimitsd(\mathbf{w}idetilde{X})=\mathop{\mathrm{Stab}}\nolimitsd(X)$. In particular, it follows from the first consequence that setting $X_1:=\mathbf{w}idetilde{X}$,
$\mathbf{v}_1:=\mathbf{v}arpi^*(\mathbf{v})$, and $\mathbf{u}_1:=\mathbf{v}arpi^*(\mathbf{u})$, there is no $\mathbf{w}\in\ensuremath{\mathbb{H}}al(X_1,\ensuremath{\mathbb{Z}})$ with $\langle \mathbf{w},\mathbf{u}_1 \rangle=1$.
Thus $\mathbf{a}lpha$ as in \cref{Sec:FM transform associated to LGU} defines a non-trivial Brauer class on $X_2:=M_{\mathbf{v}arpi^*(\sigma)}(\mathbf{u}_1)$, and there are no $\mathbf{a}lpha$-twisted sheaves on $X_2$ of rank 1. From the second consequence it follows that $\mathbf{v}arpi^*(\sigma_\pm)$ remains generic with respect to $\mathbf{v}arpi^*(\mathbf{v})$.
Considering the autoequivalence $\ensuremath{\mathbb{P}}hi$ constructed in the proof of \cref{prop:uhl}, we see that for $F \in \ensuremath{\mathcal M}_{\sigma_+}(\mathbf{v})$, $\ensuremath{\mathbb{P}}hi(\mathbf{v}arpi^*(F))$ is a $\mu$-stable $\mathbf{a}lpha^{-1}$-twisted sheaf of rank two, as there are no twisted sheaves of rank one. Since $\mathbf{v}arpi^*(\sigma_\pm)$ remains generic, $\mu$-stability is taken with respect to a generic polarization, in which case $\mu$-stability is independent of the choice of the B-field. Thus $\mathbf{v}arpi^*(\sigma_+)$ and $\mathbf{v}arpi^*(\sigma_-)$ are separated by the single wall associated to the LGU contraction. The construction of $\ensuremath{\mathbf{w}idetilde{X}}i$ in \cref{prop:uhl} shows that $\ensuremath{\mathbf{w}idetilde{X}}i$ switches the chambers containing $\mathbf{v}arpi^*(\sigma_\pm)$, so it follows that $\ensuremath{\mathbf{w}idetilde{X}}i(\mathbf{v}arpi^*(\sigma_+))$ and $\mathbf{v}arpi^*(\sigma_-)$ belong to the same chamber. Therefore $\ensuremath{\mathbf{w}idetilde{X}}i$ induces an isomorphism
$\ensuremath{\mathcal M}_{\mathbf{v}arpi^*(\sigma_+)}(\mathbf{v}_1) \to \ensuremath{\mathcal M}_{\mathbf{v}arpi^*(\sigma_-)}(\mathbf{v}_1)$.
By \cref{prop:Enriques-refl} (3), it follows that
$D_X \circ \ensuremath{\mathbb{P}}hi_{X \to X}^{\ensuremath{\mathcal E}^{\mathbf{v}ee}}$ induces
an isomorphism $\ensuremath{\mathcal M}_{\sigma_+}(\mathbf{v}) \to \ensuremath{\mathcal M}_{\sigma_-}(\mathbf{v})$, as claimed.
\end{proof}
\subsection{The case of odd rank}\label{subsec:iLGU odd rank}
For the construction of the equivalence $\ensuremath{\mathbb{P}}hi_{X \to X}^{\ensuremath{\mathcal E}^{\mathbf{v}ee}}$,
we had to assume that $\ensuremath{\mathbb{P}}ic(\mathbf{w}idetilde{X})=\mathbf{v}arpi^*\ensuremath{\mathbb{P}}ic(X)$.
Then by using the equivalence, we constructed the isomorphism in Proposition \ref{prop:LGUK3}. In this subsection, we shall construct
an isomorphism $M_{\sigma_+}(\mathbf{v},L) \to M_{\sigma_-}(\mathbf{v},L)$ without
the construction of $\ensuremath{\mathbb{P}}hi_{X \to X}^{\ensuremath{\mathcal E}^{\mathbf{v}ee}}$, and thus without the assumption $\ensuremath{\mathbb{P}}ic(\mathbf{w}idetilde{X})=\mathbf{v}arpi^*\ensuremath{\mathbb{P}}ic(X)$, if $\mathop{\mathrm{rk}} \mathbf{v}$ is odd.
For a stability condition $\sigma \in \mathop{\mathrm{Stab}}\nolimits^\mathbf{d}agger(X)$,
we set $\sigma':=\mathbf{v}arpi^*(\sigma)$.
Then we have a morphism
$\psi:\ensuremath{\mathcal M}_\sigma(\mathbf{v}) \to \ensuremath{\mathcal M}_{\sigma'}(\mathbf{v}')$
by sending $E \in \ensuremath{\mathcal M}_\sigma(\mathbf{v})$ to
$\mathbf{v}arpi^*(E)$, where $\mathbf{v}'=\mathbf{v}arpi^*(\mathbf{v})$. We need to study when the restriction of $\psi$ to the stable locus gives $\sigma'$-stable objects, which is the content of the next lemma.
\mathbf{b}egin{Lem}[{\cite[Theorem 8.1]{Nue14b}}]\label{Lem:pullback remains stable}
For a $\sigma$-stable object $E$,
$\mathbf{v}arpi^*(E)$ is properly $\sigma'$-semistable if and only if
$E=\mathbf{v}arpi_*(F)$, where $F$ is a $\sigma'$-stable object
with $F \not \cong \iota^*(F)$.
In particular $\mathop{\mathrm{rk}} E$ is even.
\end{Lem}
\mathbf{b}egin{Lem}\label{Lem:odd rank embedding}
If $\mathop{\mathrm{rk}}\mathbf{v}$ is odd, then
$\psi:\ensuremath{\mathcal M}_\sigma^s(\mathbf{v},L) \to \ensuremath{\mathcal M}_{\sigma'}(\mathbf{v}')$ is an embedding and
the image is contained in $\ensuremath{\mathcal M}_{\sigma'}^s(\mathbf{v}')$, where $[L \mathop{\mathrm{mod}}\nolimits K_X]=c_1(\mathbf{v})$.
\end{Lem}
\mathbf{b}egin{proof}
Since $\mathop{\mathrm{rk}}\mathbf{v}$ is odd, the image is contained in $\ensuremath{\mathcal M}_{\sigma'}^s(\mathbf{v}')$ by \cref{Lem:pullback remains stable}.
Combining with \cite[Proposition 7.2]{Nue14b}, we get the claim.
\end{proof}
In particular, if $\mathbf{v}$ has odd rank and $\sigma$ is generic, then \cref{Lem:odd rank embedding} implies that $\psi$ embeds all of $M_\sigma(\mathbf{v},L)$ into $M_{\sigma'}^s(\mathbf{v}',L)$. Moreover, for any $\sigma''$ in a sufficiently small neighborhood of $\sigma'$, we also have an embedding
\mathbf{b}egin{equation}\label{eq:psi}
M_\sigma^s(\mathbf{v},L) \hookrightarrow M_{\sigma'}^s(\mathbf{v}') \hookrightarrow M_{\sigma''}(\mathbf{v}')
\end{equation}
by openness of stability.
For the rest of the section, we assume that $\mathop{\mathrm{rk}} \mathbf{v}$ is odd. Let $\ensuremath{\mathcal W}\subset\mathop{\mathrm{Stab}}\nolimitsd(X)$ be a wall defined by a primitive isotropic $\mathbf{u}$ with $\langle\mathbf{v},\mathbf{u}\rangle=1=\ell(\mathbf{u})$ and $\sigma_0$ a generic stability condition in $\ensuremath{\mathcal W}$. We denote by $\sigma_0'=\mathbf{v}arpi^*(\sigma_0)$, and we continue to denote by $\mathbf{v}'$ and $\mathbf{u}'$ the pull-backs $\mathbf{v}arpi^*(\mathbf{v})$ and $\mathbf{v}arpi^*(\mathbf{u})$, respectively. Similarly, let $\ensuremath{\mathcal W}'$ be the wall for $\mathbf{v}'$ in $\mathop{\mathrm{Stab}}\nolimitsd(\mathbf{w}idetilde{X})$ defined by $\mathbf{u}'$. Take $\sigma'' \in \ensuremath{\mathcal W}'$ in a neighborhood of $\sigma_0'$ such that $\ensuremath{\mathcal W}'$ is the unique wall for $\mathbf{v}'$ in a small neighborhood $U_{\sigma''}$ of $\sigma''$ and such that $\sigma''$ is generic with respect to $\mathbf{u}'$.
We also take $\sigma_\pm'' \in U_{\sigma''}$ in opposite and adjacent chambers for $\mathbf{v}'$, separated by the single wall $\ensuremath{\mathcal W}'$.
We may assume that $\mathbf{v}arpi^*(\sigma_+)$ and $\mathbf{v}arpi^*(\sigma_-)$ belong to the same chambers as $\sigma_+''$ and $\sigma_-''$, respectively.
Let $\psi_\pm:M_{\sigma_\pm}(\mathbf{v},L) \to M_{\sigma_\pm''}(\mathbf{v}')$ be the embeddings for
$\sigma_{\pm}$
in \eqref{eq:psi}.
We will show in a moment that there is an isomorphism $\mathbf{v}arphi:M_{\sigma_+}(\mathbf{v},L)\to M_{\sigma_-}(\mathbf{v},L)$ despite $\mathbf{v}arphi$ (possibly) not being induced by an autoequivalence of $\ensuremath{\mathbb{D}}b(X)$. But first we start with a cohomological lemma.
\mathbf{b}egin{Lem}\label{Lem:u and v}
Let $\mathbf{v}'=\mathbf{v}arpi^*(\mathbf{v})$ and $\mathbf{u}'=\mathbf{v}arpi^*(\mathbf{u})$ where $\langle\mathbf{v},\mathbf{u}\rangle=1=\ell(\mathbf{u})$ for the primitive isotropic vector $\mathbf{u}$. Then the sublattice $\ensuremath{\mathbb{Z}} \mathbf{u}'+\ensuremath{\mathbb{Z}} \mathbf{v}'$ is saturated in $H^*(\mathbf{w}idetilde{X},\ensuremath{\mathbb{Z}})$.
In particular, if $\ensuremath{\mathcal W}\subset\mathop{\mathrm{Stab}}\nolimitsd(X)$ is the wall for $\mathbf{v}$ defined by $\mathbf{u}$ and $\ensuremath{\mathcal W}'\subset\mathop{\mathrm{Stab}}\nolimitsd(\mathbf{w}idetilde{X})$ is the wall for $\mathbf{v}'$ defined by $\mathbf{u}'$, then we have $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}'}=\ensuremath{\mathbb{Z}}\mathbf{v}'+\ensuremath{\mathbb{Z}}\mathbf{u}'=\mathbf{v}arpi^*\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}$ so that $\langle\mathbf{v}_1,\mathbf{v}_2\rangle\in 2\ensuremath{\mathbb{Z}}$ for any $\mathbf{v}_1,\mathbf{v}_2\in\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}'}$.
\end{Lem}
\mathbf{b}egin{proof}
Let $L$ be the saturation of $\ensuremath{\mathbb{Z}}\mathbf{u}'+\ensuremath{\mathbb{Z}}\mathbf{v}'$ in $H^*(\mathbf{w}idetilde{X},\ensuremath{\mathbb{Z}})$. Since $\mathbf{u}'$ is primitive, there is $\mathbf{w} \in L$ such that $L=\ensuremath{\mathbb{Z}} \mathbf{u}'+\ensuremath{\mathbb{Z}} \mathbf{w}$. We set $\mathbf{v}'=a \mathbf{u}'+b \mathbf{w}$ ($a,b \in \ensuremath{\mathbb{Z}}$). Then $2=\langle \mathbf{v}',\mathbf{u}' \rangle=b \langle \mathbf{w},\mathbf{u}' \rangle$. Hence $b=\pm 1,\pm 2$. If $b=\pm 2$, then $\mathop{\mathrm{rk}} \mathbf{v}'=a \mathop{\mathrm{rk}} \mathbf{u}'\pm 2 \mathop{\mathrm{rk}} \mathbf{w} \in 2\ensuremath{\mathbb{Z}}$, which is impossible if $\mathop{\mathrm{rk}}\mathbf{v}'=\mathop{\mathrm{rk}}\mathbf{v}$ is odd. If $\mathop{\mathrm{rk}}\mathbf{v}$ is even, then let us write $\mathbf{v}=(2n,D_1,s)$ and $\mathbf{u}=(2m,D_2,t)$ so that $\mathbf{v}'=(2n,\mathbf{v}arpi^*D_1,2s)$ and $\mathbf{u}'=(2m,\mathbf{v}arpi^*D_2,2t)$. Then $\langle\mathbf{v}',\mathbf{u}'\rangle=2$ implies that $$2(D_1,D_2)=(\mathbf{v}arpi^*D_1,\mathbf{v}arpi^*D_2)\equiv 2\pmod 4,$$ so $(D_1,D_2)$ is odd. On the other hand, $\mathbf{v}'=\pm 2\mathbf{w}+a\mathbf{u}'$ gives $\mathbf{v}arpi^*D_1=\pm2D_3+a\mathbf{v}arpi^*D_2$, where $D_3=c_1(\mathbf{w})$. But as $\mathbf{v}arpi^*:\ensuremath{\mathbb{P}}ic(X)/\langle K_X\rangle\ensuremath{\hookrightarrow}\ensuremath{\mathbb{P}}ic(\mathbf{w}idetilde{X})$ is a primitive embedding, we must have $D_3=\mathbf{v}arpi^*D_3'$ for some $D_3'\in\ensuremath{\mathbb{P}}ic(X)$. It follows that $D_1=\pm2D_3'+aD_2$ and thus $$(D_1,D_2)=\pm2(D_3',D_2)+a(D_2)^2\equiv0\pmod 2,$$ a contradiction. Thus, in either case, $b=\pm 1$ so that $L=\ensuremath{\mathbb{Z}} \mathbf{u}'+\ensuremath{\mathbb{Z}} \mathbf{v}'$, as required.
The first equality of the last statement follows because $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}'}\subset\ensuremath{\mathbb{H}}al(\mathbf{w}idetilde{X},\ensuremath{\mathbb{Z}})$ is a saturated hyperbolic sublattice of rank two containing $\ensuremath{\mathbb{Z}}\mathbf{v}'+\ensuremath{\mathbb{Z}}\mathbf{u}'$, and the second equality follows from $\langle\mathbf{v},\mathbf{u}\rangle=1$ so that $\ensuremath{\mathbb{H}}H_\ensuremath{\mathcal W}=\ensuremath{\mathbb{Z}}\mathbf{v}+\ensuremath{\mathbb{Z}}\mathbf{u}$.
\end{proof}
Now we are in position to prove the following result.
\mathbf{b}egin{Prop}\label{Prop:phi isomorphism odd rank}
We have an isomorphism $\mathbf{v}arphi:M_{\sigma_+}(\mathbf{v},L) \to M_{\sigma_-}(\mathbf{v},L)$
with a commutative diagram
\mathbf{b}egin{equation}\label{eq:varphi=Xi}
\mathbf{b}egin{CD}
M_{\sigma_+}(\mathbf{v},L) @>{\mathbf{v}arphi}>> M_{\sigma_-}(\mathbf{v},L)\\
@V{\psi_+}VV @VV{\psi_-}V\\
M_{\sigma_+''}(\mathbf{v}') @>>{\ensuremath{\mathbf{w}idetilde{X}}i}> M_{\sigma_-''}(\mathbf{v}')\\
\end{CD}
\end{equation}
In particular, $\mathbf{v}arphi$ is the identity on $M_{\sigma_0}^s(\mathbf{v},L)$.
\end{Prop}
\mathbf{b}egin{proof}
For $X_1:=\mathbf{w}idetilde{X}$ and $X_2:=M_{\sigma''}(\mathbf{u}')$, we shall apply Proposition \ref{prop:uhl} to get an
(anti-) autoequivalence $\ensuremath{\mathbf{w}idetilde{X}}i$ of $\ensuremath{\mathbb{D}}b(X_1)$. Recall from the construction of $\ensuremath{\mathbf{w}idetilde{X}}i$ that there is an equivalence $\ensuremath{\mathbb{P}}hi:\ensuremath{\mathbb{D}}b(X_1)\to\ensuremath{\mathbb{D}}b(X_2,\mathbf{a}lpha)$ inducing an isomorphism between $M_{\sigma_+''}(\mathbf{v}')$ and the moduli space $M_\mathop{\ord(\omega_S)}\nolimitsmega(2,D,a)$ of $\mathop{\ord(\omega_S)}\nolimitsmega$-Gieseker semistable $\mathbf{a}lpha$-twisted sheaves on $X_2$. Moreover, $\ensuremath{\mathbf{w}idetilde{X}}i$ is the pull-back to $X_1$ of the autoequivalence $E\mapsto E^\mathbf{v}ee\mathop{\ord(\omega_S)}\nolimitstimes\mathbf{d}et(E)$, which induces an isomorphism $M_{\sigma_+''}(\mathbf{v}')\to M_{\sigma_-''}(\mathbf{v}')$.
We claim that $M_\mathop{\ord(\omega_S)}\nolimitsmega(2,D,a)$ consists only of $\mu$-stable sheaves. Granting this for the moment, we see that for locally free $E\in M_\mathop{\ord(\omega_S)}\nolimitsmega(2,D,a)$, we have $E^\mathbf{v}ee\mathop{\ord(\omega_S)}\nolimitstimes\mathbf{d}et(E)\cong E$, so the isomorphism
$M_{\sigma_+''}(\mathbf{v}') \to M_{\sigma_-''}(\mathbf{v}')$ induced by $\ensuremath{\mathbf{w}idetilde{X}}i$ is the identity
on $M_{\sigma''}^s(\mathbf{v}')$, which corresponds to the locus $M_\mathop{\ord(\omega_S)}\nolimitsmega^{lf}(2,D,a)$ on $X_2$.
Now let us prove our claim that $M_\mathop{\ord(\omega_S)}\nolimitsmega(2,D,a)=M^{\mu s}_\mathop{\ord(\omega_S)}\nolimitsmega(2,D,a)$. We note first that the claim is obvious if $\mathbf{a}lpha$ is non-trivial, as we saw in the proof of \cref{prop:LGUK3}. So suppose that the Brauer class $\mathbf{a}lpha$ is trivial. If there exists a strictly $\mu$-semistable sheaf $E\in M_\mathop{\ord(\omega_S)}\nolimitsmega(2,D,a)$ with subsheaf $F\subset E$ such that $\mu(F)=\mu(E)$, then from the genericity of $\mathop{\ord(\omega_S)}\nolimitsmega$, it would follow that $2\mid D$ and $\mathbf{v}(F)=(1,\tfrac{D}{2},b)$ so that $\langle-\mathbf{v}(F),(0,0,1)\rangle=1$. But $2\mathbf{v}(F)=(2,D,a)+(2b-a)(0,0,1)$, so writing $-\mathbf{v}(F)=\ensuremath{\mathbb{P}}hi(\mathbf{w})$, we get $\mathbf{w}$ such that $2\mathbf{w}\in\ensuremath{\mathbb{Z}}\mathbf{v}'+\ensuremath{\mathbb{Z}}\mathbf{u}'$ and $\langle\mathbf{w},\mathbf{u}'\rangle=1$. As $\ensuremath{\mathbb{H}}H_{\ensuremath{\mathcal W}'}=\ensuremath{\mathbb{Z}}\mathbf{v}'+\ensuremath{\mathbb{Z}}\mathbf{u}'$ is saturated by Lemma \ref{Lem:u and v}, this is impossible, proving the claim in this case as well.
By \eqref{eq:psi}, we get an isomorphism $\mathbf{v}arphi$ with
the commutative diagram \eqref{eq:varphi=Xi}.
\end{proof}
We close this subsection by describing the relationship between the Picard groups of $M_{\sigma_\pm}(\mathbf{v},L)$.
\mathbf{b}egin{Prop}\label{Prop:R relates thetas}
Let $R$ be the cohomological action on $K(X)$ given by
$$
R(E)=E+2\mathbf{v}^2 \langle \mathbf{v}(E),\mathbf{u} \rangle U-2\langle \mathbf{v}(E),\mathbf{v} \rangle U-2\langle \mathbf{v}(E),\mathbf{u} \rangle V,\;
E \in K(X),
$$
where $U, V \in K(X)$ satisfy $\mathbf{v}(U)=\mathbf{u},\mathbf{v}(V)=\mathbf{v}$.
Then we have a commutative diagram modulo torsion.
\mathbf{b}egin{equation}\label{eqn:ReflectionRelatesThetas}
\mathbf{b}egin{CD}
K(X)_\mathbf{v} @>{R}>> K(X)_\mathbf{v} \\
@V{\theta_{\mathbf{v},\sigma_+}}VV @VV{\theta_{\mathbf{v},\sigma_-}}V\\
\ensuremath{\mathbb{P}}ic(M_{\sigma_+}(\mathbf{v},L)) @<<{\mathbf{v}arphi^*}< \ensuremath{\mathbb{P}}ic(M_{\sigma_-}(\mathbf{v},L))\\
\end{CD}
\end{equation}
\end{Prop}
\mathbf{b}egin{proof}
Consider a quasi-universal family $\ensuremath{\mathcal E}\in\ensuremath{\mathbb{D}}b(X\times M_{\sigma_\pm}(\mathbf{v},L))$ for the moduli space $M_{\sigma_\pm}(\mathbf{v},L)$ and the map $\mathbf{v}arpi\times 1$ fitting into the commutative diagram:
\[\xymatrix{
\mathbf{w}idetilde{X}\times M_{\sigma_\pm}(\mathbf{v},L) \mathbf{a}r[r]^-{\mathbf{v}arpi\times 1} \mathbf{a}r[rd]^{}_{p_{M_{\sigma_\pm}(\mathbf{v},L)}} &
X\times M_{\sigma_\pm}(\mathbf{v},L) \mathbf{a}r[d]^{p_{M_{\sigma_\pm}(\mathbf{v},L)}}\\
&M_{\sigma_\pm}(\mathbf{v},L)}\]
Then by \eqref{eq:psi}, we may consider $(\mathbf{v}arpi\times 1)^*\ensuremath{\mathcal E}$ to be a family of objects in $M_{\sigma_\pm''}(\mathbf{v}')$ parameterized by $M_{\sigma_\pm}(\mathbf{v},L)$. The morphism $\psi_\pm$ is the classifying map associated to the family $(\mathbf{v}arpi\times 1)^*\ensuremath{\mathcal E}$, so by \cite[Theorem 8.1.5]{HL10} we get $$\theta_{(\mathbf{v}arpi\times 1)^*\ensuremath{\mathcal E}}=\psi_\pm^*\circ\theta_{\mathbf{v}',\sigma_\pm''},$$ where $\theta_{(\mathbf{v}arpi\times 1)^*\ensuremath{\mathcal E}}:K(\mathbf{w}idetilde{X})_{\mathbf{v}'}\to\ensuremath{\mathbb{P}}ic(M_{\sigma_\pm}(\mathbf{v},L))$ is the Donaldson-Mukai homomorphism associated to the family $(\mathbf{v}arpi\times 1)^*\ensuremath{\mathcal E}$. Precomposing with the pull-back from $K(X)_\mathbf{v}$ gives the following commutative diagram:
\mathbf{b}egin{equation}\label{eqn:PullBackDonaldsonMukai}
\xymatrix{
K(X)_\mathbf{v} \mathbf{a}r[r]^{\mathbf{v}arpi^*} \mathbf{a}r[d]^{}_{2\theta_{\mathbf{v},\sigma_{\pm}}}& K(\mathbf{w}idetilde{X})_{\mathbf{v}'}\mathbf{a}r[ld]_{\theta_{(\mathbf{v}arpi\times 1)^*\ensuremath{\mathcal E}}}\mathbf{a}r[d]^{\theta_{\mathbf{v}',\sigma_{\pm}''}}\\
\ensuremath{\mathbb{P}}ic(M_{\sigma_\pm}(\mathbf{v},L))& \mathbf{a}r[l]^{\psi_\pm^*} \ensuremath{\mathbb{P}}ic(M_{\sigma_{\pm}''}(\mathbf{v}'))
}
\end{equation}
Indeed, for $E\in K(X)_\mathbf{v}$, we have
\mathbf{b}egin{align*}
\theta_{(\mathbf{v}arpi\times 1)^*\ensuremath{\mathcal E}}(\mathbf{v}arpi^*E)&=\mathbf{d}et(p_{M_{\sigma_\pm}(\mathbf{v},L)!}((\mathbf{v}arpi\times 1)^*\ensuremath{\mathcal E}\mathop{\ord(\omega_S)}\nolimitstimes p_{\mathbf{w}idetilde{X}}^*(\mathbf{v}arpi^*E)))\\
&=\mathbf{d}et(p_{M_{\sigma_\pm}(\mathbf{v},L)!}((\mathbf{v}arpi\times 1)^*\ensuremath{\mathcal E}\mathop{\ord(\omega_S)}\nolimitstimes (\mathbf{v}arpi\times 1)^*(p_X^*E)))\\
&=\mathbf{d}et(p_{M_{\sigma_\pm}(\mathbf{v},L)!}((\mathbf{v}arpi\times 1)^*(\ensuremath{\mathcal E}\mathop{\ord(\omega_S)}\nolimitstimes p_X^*E)))\\
&=\mathbf{d}et(p_{M_{\sigma_\pm}(\mathbf{v},L)!}((\mathbf{v}arpi\times 1)_*(\mathbf{v}arpi\times 1)^*(\ensuremath{\mathcal E}\mathop{\ord(\omega_S)}\nolimitstimes p_X^*E)))\\
&=\mathbf{d}et(p_{M_{\sigma_\pm}(\mathbf{v},L)!}((\ensuremath{\mathcal E}\mathop{\ord(\omega_S)}\nolimitstimes p_X^*(E\mathop{\ord(\omega_S)}\nolimitstimes(\ensuremath{\mathcal O}_X\mathop{\ord(\omega_S)}\nolimitsplus\ensuremath{\mathcal O}_X(K_X))))))=\theta_{\mathbf{v},\sigma_\pm}(E)+\theta_{\mathbf{v},\sigma_\pm}(E(K_X)),
\end{align*}
so modulo torsion, $\theta_{(\mathbf{v}arpi\times 1)^*\ensuremath{\mathcal E}}(\mathbf{v}arpi^*E)=2\theta_{\mathbf{v},\sigma_\pm}(E)$, as claimed.
Using $\mathbf{v}arpi^* \circ R=-\ensuremath{\mathbf{w}idetilde{X}}i \circ \mathbf{v}arpi^*$ and $\theta_{\mathbf{v}',\sigma''_+}=
-\ensuremath{\mathbf{w}idetilde{X}}i^* \circ \theta_{\mathbf{v}',\sigma''_-} \circ \ensuremath{\mathbf{w}idetilde{X}}i$, where we have abused notation by using $\ensuremath{\mathbf{w}idetilde{X}}i$ to denote both the autoequivalence and the induced isomorphism $\ensuremath{\mathbf{w}idetilde{X}}i:M_{\sigma_+''}(\mathbf{v}')\to M_{\sigma_-''}(\mathbf{v}')$, the proposition then follows from \eqref{eq:varphi=Xi}.
\end{proof}
\mathbf{b}egin{Rem}\label{Rem:ReflectionBMDivisors}
\mathbf{b}egin{enumerate}
\item
Since the claim is independent of the choice of complex structure,
we can reduce to the case where $\ensuremath{\mathbb{P}}ic(\mathbf{w}idetilde{X})=\mathbf{v}arpi^*\ensuremath{\mathbb{P}}ic(X)$.
Then the claim is a consequence of Proposition \ref{prop:Enriques-refl}.
\item
Since $\mathbf{v}arpi^*(\xi_\tau)=2\xi_{\mathbf{v}arpi^*(\tau)}$, it follows that
$$\ell_{\ensuremath{\mathbb{C}}C_\pm}(\tau)=\theta_{\mathbf{v},\sigma_\pm}(\xi_\tau)=
\psi_\pm^*(\theta_{\mathbf{v}',\sigma_\pm''}(\xi_{\mathbf{v}arpi^*(\tau)}))=\psi_\pm^*\ell_{\ensuremath{\mathbb{C}}C_\pm''}(\tau).$$ This gives another proof of \cite[Proposition 10.2]{Nue14b}.
\end{enumerate}
\end{Rem}
\subsection{The case of even rank}\label{subsec:iLGU even rank}We show in this subsection that the technique of \cref{subsec:iLGU odd rank} also generalizes to Mukai vectors $\mathbf{v}$ of even rank.
For the rest of the section, we assume that $\mathop{\mathrm{rk}} \mathbf{v}$ is even. Let $\ensuremath{\mathcal W}\subset\mathop{\mathrm{Stab}}\nolimitsd(X)$ be a wall defined by a primitive isotropic $\mathbf{u}$ with $\langle\mathbf{v},\mathbf{u}\rangle=1=\ell(\mathbf{u})$ and $\sigma_0$ a generic stability condition in $\ensuremath{\mathcal W}$. We denote by $\sigma_0'=\mathbf{v}arpi^*(\sigma_0)$, and we again denote by $\mathbf{v}'$ and $\mathbf{u}'$ the pull-backs $\mathbf{v}arpi^*(\mathbf{v})$ and $\mathbf{v}arpi^*(\mathbf{u})$, respectively. Similarly, let $\ensuremath{\mathcal W}'\subset \mathop{\mathrm{Stab}}\nolimitsd(\mathbf{w}idetilde{X})$ be the wall for $\mathbf{v}'$ defined by $\mathbf{u}'$. Take $\sigma'' \in \ensuremath{\mathcal W}'$ in a neighborhood of $\sigma_0'$ such that $\ensuremath{\mathcal W}'$ is the unique wall for $\mathbf{v}'$ in a small neighborhood $U_{\sigma''}$ of $\sigma''$ and such that $\sigma''$ is generic with respect to $\mathbf{u}'$.
We also take $\sigma_\pm'' \in U_{\sigma''}$ in opposite and adjacent chambers for $\mathbf{v}'$, separated by the single wall $\ensuremath{\mathcal W}'$.
We may assume that $\mathbf{v}arpi^*(\sigma_+)$ and $\mathbf{v}arpi^*(\sigma_-)$ belong to the same chambers as $\sigma_+''$ and $\sigma_-''$, respectively.
We observe first that we still have $\ell(\mathbf{v})=1$ so that $\mathbf{v}arpi^*\mathbf{v}$ is primitive. Indeed, as in \cref{Rem:Even and Odd pairings}, if $\ell(\mathbf{v})$ were $2$, then $2\mid\mathop{\mathrm{rk}}(\mathbf{u})$ would imply that $\langle\mathbf{v},\mathbf{u}\rangle$ were even, which is absurd. It therefore follows from \cref{Lem:pullback remains stable} that we have morphisms $\psi_\pm:M_{\sigma_\pm}(\mathbf{v},L)_{sm} \to M_{\mathbf{v}arpi^*(\sigma_\pm)}^s(\mathbf{v}') \ensuremath{\hookrightarrow} M_{\sigma_\pm''}^s(\mathbf{v}')$ which are \'{e}tale double covers onto their images. Moreover, by \cref{Lem:isotropic divisorial l=1 1} we have a birational map $\mathbf{v}arphi:M_{\sigma_+}(\mathbf{v},L)\mathbf{d}ashrightarrow M_{\sigma_-}(\mathbf{v},L)$ which is the identity on $M_{\sigma_0}^s(\mathbf{v},L)$. As $M_{\sigma_\pm}(\mathbf{v},L)$ has only terminal l.c.i. singularities and numerically trivial canonical divisor by \cite[Theorem 8.2]{Nue14b}, it follows that $\mathbf{v}arphi$ is an isomorphism in codimension one by \cite[Corollary 3.54]{KM98}. This defines an isomorphism $\mathbf{v}arphi^*:\ensuremath{\mathbb{C}}l(M_{\sigma_-}(\mathbf{v},L))\mor[\sim]\ensuremath{\mathbb{C}}l(M_{\sigma_+}(\mathbf{v},L))$, where $\ensuremath{\mathbb{C}}l(-)$ denotes the group of Weil divisors modulo linear equivalence. If $\mathbf{v}^2\geq 8$, then $\mathop{\mathrm{codim}}\nolimits(\mathop{\mathrm{Sing}}(M_{\sigma_\pm}(\mathbf{v},L)))\geq 4$ so that $M_{\sigma_\pm}(\mathbf{v},L)$ is locally factorial by \cite[Exp. XI, Cor. 3.14]{SGA2} and thus $\ensuremath{\mathbb{C}}l(M_{\sigma_\pm}(\mathbf{v},L))=\ensuremath{\mathbb{P}}ic(M_{\sigma_\pm}(\mathbf{v},L))$. We will show in the next result that, even for $2<\mathbf{v}^2<8$, $\mathbf{v}arphi$ extends to an isomorphism so that we may nevertheless identify $\ensuremath{\mathbb{P}}ic(M_{\sigma_+}(\mathbf{v},L))$ with $\ensuremath{\mathbb{P}}ic(M_{\sigma_-}(\mathbf{v},L))$.
\begin{Prop}\label{Prop:phi isomorphism even rank}
If $\mathbf{v}^2>2$, then the birational map $\varphi:M_{\sigma_+}(\mathbf{v},L)\dashrightarrow M_{\sigma_-}(\mathbf{v},L)$ extends to an isomorphism fitting into a commutative diagram modulo torsion:
\begin{equation}\label{eqn:ReflectionRelatesThetasEven}
\begin{CD}
K(X)_\mathbf{v} @>{R}>> K(X)_\mathbf{v} \\
@V{\theta_{\mathbf{v},\sigma_+}}VV @VV{\theta_{\mathbf{v},\sigma_-}}V\\
\operatorname{Pic}(M_{\sigma_+}(\mathbf{v},L)) @<<{\varphi^*}< \operatorname{Pic}(M_{\sigma_-}(\mathbf{v},L))\\
\end{CD}
\end{equation}
\end{Prop}
\begin{proof}
As in the proof of \cref{Prop:phi isomorphism odd rank}, we have an autoequivalence $\Xi$ that induces an isomorphism $M_{\sigma_+''}(\mathbf{v}')\to M_{\sigma_-''}(\mathbf{v}')$ which is the identity on $M_{\sigma''}^s(\mathbf{v}')$. Indeed, the same proof goes through since we still have $\mathcal{H}_{\mathcal{W}'}=\mathbb{Z}\mathbf{v}'+\mathbb{Z}\mathbf{u}'$ by \cref{Lem:u and v}.
Restricting the birational map $\varphi$ to the smooth locus $M_{\sigma_+}(\mathbf{v},L)_{sm}$, we get a commutative diagram
\[
\xymatrix{M_{\sigma_+}(\mathbf{v},L)_{sm}\ar@{-->}[r]^{\varphi}\ar[d]_{\psi_+}&M_{\sigma_-}(\mathbf{v},L)_{sm}\ar[d]^{\psi_-}\\
M_{\sigma_+''}(\mathbf{v}')\ar[r]^\Xi & M_{\sigma_-''}(\mathbf{v}')}
\] with $\varphi$ the identity on $M_{\sigma_0}^s(\mathbf{v},L)_{sm}$. Moreover, we have two homomorphisms
\begin{equation*}
\begin{split}
\varphi^*\circ\theta_{\mathbf{v},\sigma_-}\circ R:K(X)_\mathbf{v}\to \operatorname{Cl}(M_{\sigma_+}(\mathbf{v},L)),\\
\theta_{\mathbf{v},\sigma_+}:K(X)_\mathbf{v}\to\operatorname{Pic}(M_{\sigma_+}(\mathbf{v},L)),
\end{split}
\end{equation*}
where $R$ is defined as in \cref{Prop:R relates thetas} and we have abused notation by using $\varphi^*$ for the restriction of \[\varphi^*:\operatorname{Cl}(M_{\sigma_-}(\mathbf{v},L))\to\operatorname{Cl}(M_{\sigma_+}(\mathbf{v},L))\] to the subgroup $\operatorname{Pic}(M_{\sigma_-}(\mathbf{v},L))\subset\operatorname{Cl}(M_{\sigma_-}(\mathbf{v},L))$. For any $x\in K(X)_\mathbf{v}$, \cref{Prop:R relates thetas}, which had nothing to do with $\operatorname{rk}\mathbf{v}$ being odd, implies that $\theta_{\mathbf{v},\sigma_+}(x)$ and $\varphi^*\circ\theta_{\mathbf{v},\sigma_-}\circ R(x)$ agree away from $\operatorname{Sing}(M_{\sigma_+}(\mathbf{v},L))$, which has codimension at least two since $\mathbf{v}^2>2$. It follows that $\varphi^*\circ\theta_{\mathbf{v},\sigma_-}\circ R=\theta_{\mathbf{v},\sigma_+}$, which gives the commutative diagram \eqref{eqn:ReflectionRelatesThetasEven}.
Finally, we prove that $\varphi$ extends to an isomorphism. As $\xi_{\sigma_0}\in(\mathbf{v}^\perp\cap\mathbf{u}^\perp)_\mathbb{R}$, we have $R(\xi_{\sigma_0})=\xi_{\sigma_0}$ and thus $\varphi^*\ell_{\mathcal{C}^+}(\sigma_0)=\ell_{\mathcal{C}^-}(\sigma_0)$. Moreover, letting $\mathbf{d}=\mathbf{v}-\mathbf{v}^2\mathbf{u}\in\mathcal{H}_{\mathcal{W}}\cap\mathbf{v}^\perp$, it is easy to see that $R=R_\mathbf{d}$ is just the reflection of $\mathbf{v}^\perp$ in the hyperplane $\mathbf{d}^\perp$, defined by $R_\mathbf{d}(\mathbf{z})=\mathbf{z}-2\frac{\langle\mathbf{z},\mathbf{d}\rangle}{\mathbf{d}^2}\mathbf{d}$ (see \cref{Sec:Picard groups} for more on this). Perturbing $\sigma_\pm$ and $\sigma_0$ slightly, we may assume that we can write $\xi_{\sigma_\pm}=\pm x\mathbf{d}+\xi_{\sigma_0}$ for some $x\in\mathbb{R}$.
In particular, $R(\xi_{\sigma_+})=\xi_{\sigma_-}$. But then \[\ell_{\mathcal{C}^+}(\sigma_+)=\varphi^*(\theta_{\mathbf{v},\sigma_-}(R(\xi_{\sigma_+})))=\varphi^*(\theta_{\mathbf{v},\sigma_-}(\xi_{\sigma_-}))=\varphi^*\ell_{\mathcal{C}^-}(\sigma_-).\] As both $\ell_{\mathcal{C}^\pm}(\sigma_\pm)$ are ample, $\varphi$ extends to an isomorphism by \cite[Exercise 5.6]{KSC}, as required.
\end{proof}
\section{Main theorems}\label{sec:Main Theorems}
\begin{proof}[Proof of Theorem \ref{Thm:MainTheorem1}]
We proved part \ref{enum:MT1-two moduli are birational} at the end of \cref{Sec:FloppingWalls}, so we focus on part \ref{enum:MT1-birational map given by FM transform}. Nevertheless, we recall part of the setup from the proof of part \ref{enum:MT1-two moduli are birational}. We may connect $\sigma$ and $\tau$ by a path which intersects walls in points that lie on no other walls. As the set of walls is locally finite, the path will intersect only finitely many walls, and thus for the purpose of proving the theorem, it suffices to consider one wall $\mathcal{W}$, a generic stability condition $\sigma_0\in\mathcal{W}$, and nearby stability conditions $\sigma_\pm$.
Suppose that $\langle \mathbf{v},\mathbf{w}\rangle<0$ for an effective spherical or exceptional class $\mathbf{w}\in\mathcal{H}_\mathcal{W}$. Then suppose that $\mathbf{v}=\mathbf{v}_n\in\mathcal{C}_n$ with $\mathbf{v}_0$ the minimal Mukai vector in the $G_\mathcal{H}$-orbit of $\mathbf{v}$. Letting $\Phi^\pm$ be the sequence of spherical or exceptional twists giving the isomorphism $M_{\sigma_\pm}(\mathbf{v})\to M_{\sigma_\pm}(\mathbf{v}_0)$ if $n$ is even (resp., $M_{\sigma_\pm}(\mathbf{v})\to M_{\sigma_\mp}(\mathbf{v}_0)$ if $n$ is odd), we see that it suffices to prove the theorem in the case that $\mathbf{v}$ is minimal. Indeed, if $\Phi$ is an autoequivalence inducing a birational map $M_{\sigma_+}(\mathbf{v}_0)\dashrightarrow M_{\sigma_-}(\mathbf{v}_0)$ as in the theorem, then $(\Phi^-)^{-1}\circ\Phi\circ\Phi^+$ (resp., $(\Phi^-)^{-1}\circ\Phi^{-1}\circ\Phi^+$) proves the theorem for $\mathbf{v}$ if $n$ is even (resp., if $n$ is odd).
Thus we may assume that $\langle \mathbf{v},\mathbf{w}\rangle\geq 0$ for all effective spherical and exceptional classes $\mathbf{w}\in\mathcal{H}_\mathcal{W}$, and we break up the proof into cases.
Suppose first that $\langle\mathbf{v},\mathbf{w}\rangle=0$ for some effective spherical or exceptional class $\mathbf{w}\in\mathcal{H}_\mathcal{W}$. But then the minimality of $\mathbf{v}$ forces $\mathbf{w}=\mathbf{w}_0=\mathbf{v}(T_0^+)$ or $\mathbf{w}=\mathbf{w}_1=\mathbf{v}(T_1^+)$ as in \cref{Prop:NonMinimalIsomorphism,Prop:CompositionSphericalExceptional} and the discussions following them. But we showed there that the spherical/exceptional twists $R_{T_0^+}$ or $R_{T_1^+}$ induce an isomorphism $M_{\sigma_+}(\mathbf{v})\to M_{\sigma_-}(\mathbf{v})$. So we may take $U$ to be the entire moduli space $M_{\sigma_+}(\mathbf{v})$. Notice that this discussion covers walls inducing $\mathbb{P}^1$-fibrations, walls of Brill-Noether type, as well as flopping walls induced by an exceptional class $\mathbf{w}\in\mathcal{H}_\mathcal{W}$ such that $\langle\mathbf{v},\mathbf{w}\rangle=0$ when $\mathbf{v}^2\geq 3$.
Otherwise, $\langle\mathbf{v},\mathbf{w}\rangle>0$ for all effective spherical or exceptional classes $\mathbf{w}\in\mathcal{H}_\mathcal{W}$. We first consider the case that $\mathcal{W}$ is a flopping wall or a fake wall. Then $\mathcal{W}$ being a flopping or fake wall implies that $\operatorname{codim}(M_{\sigma_+}(\mathbf{v})\setminus M_{\sigma_0}^s(\mathbf{v}))\geq 2$ by \cref{Lem:non-isotropic no totally semistable wall,Prop:LGU walls of low codimension,Prop: 1-1 case totally semistable and codim 1}. In this case we may take $U$ to be the open subset of $\sigma_0$-stable objects, so there is nothing to prove (i.e. we just take $\Phi=\operatorname{Id}$).
Now we consider the case that $\mathcal{W}$ induces a divisorial contraction and $\langle \mathbf{v},\mathbf{w}\rangle>0$ for all effective spherical/exceptional classes $\mathbf{w}\in\mathcal{H}_\mathcal{W}$, i.e. divisorial contractions coming from Hilbert-Chow, LGU, or iLGU walls. We will show that we may take $U$ to be the entire moduli space $M_{\sigma_+}(\mathbf{v})$ in the case of Hilbert-Chow and LGU walls. That is, we show that there is an autoequivalence that induces an isomorphism $M_{\sigma_+}(\mathbf{v})\xrightarrow{\sim} M_{\sigma_-}(\mathbf{v})$. We show that the same is true for an iLGU wall if $\operatorname{Pic}(\widetilde{X})=\varpi^*\operatorname{Pic}(X)$, while in general we can still take $U$ to be an open subset whose complement has codimension one, proving the theorem.
\textbf{Hilbert-Chow:} Here we assume that $\mathcal{H}_\mathcal{W}$ contains an isotropic vector $\mathbf{u}$ such that $\langle \mathbf{v},\mathbf{u}\rangle=1$ and $\ell(\mathbf{u})=2$. Then by Proposition \ref{Prop:Uhlenbeck morphism} and Lemma \ref{Lem: Hilbert-Chow} we may, up to the shift by 1, identify $M_{\sigma_+}(\mathbf{v})$ with the $(\beta,\omega)$-Gieseker moduli space $M^\beta_{\omega}(-\mathbf{v})$ of stable sheaves of rank one. Up to tensoring with a line bundle, we may assume that $M_{\sigma_+}(\mathbf{v})$ parametrizes the shifts $I_Z[1]$ of ideal sheaves of 0-dimensional subschemes $Z\in\operatorname{Hilb}^n(X)$, where $n=\frac{\mathbf{v}^2+1}{2}$. Moreover, the $\sigma_-$-stable objects are precisely the $I_Z^{\vee}[1]$. But then $\Phi(\underline{\hphantom{A}}):=(\underline{\hphantom{A}})^{\vee}[2]$ provides the required autoequivalence.
\textbf{LGU:} As in \cref{Prop:Uhlenbeck morphism}, shifting by one identifies $M_{\sigma_+}(\mathbf{v})$ with the moduli space $M_{\omega}(2,c,s)$ of $\omega$-Gieseker stable sheaves $F$ with $\mathbf{v}(F)=(2,c,s)=-\mathbf{v}$. Choosing $L\in\operatorname{Pic}(X)$ with $[L\bmod K_X]=c$, we get that $\Phi(\underline{\hphantom{A}}):=(\underline{\hphantom{A}})^{\vee}\otimes \mathcal{O}(L)[2]$ is the required autoequivalence, as $\Phi(F[1])=F^{\vee}\otimes \mathcal{O}(L)[1]$ is an object of $M_{\sigma_-}(\mathbf{v})$, for any $F\in M_{\omega}(2,c,s)$.
\textbf{iLGU:} If we take $U$ to be the open subset $M_{\sigma_0}^s(\mathbf{v})\subset M_{\sigma_+}(\mathbf{v})$, then $M_{\sigma_+}(\mathbf{v})\setminus U$ has codimension one, and we may take $\Phi=\operatorname{Id}$. We can say more if $\operatorname{Pic}(\widetilde{X})=\varpi^*\operatorname{Pic}(X)$. Indeed, then we may take $U=M_{\sigma_+}(\mathbf{v})$ and $\Phi=D_X\circ\Phi_{X\to X}^{\mathcal{E}^\vee}$ as in \cref{prop:LGUK3}.
\end{proof}
Now let us prove \cref{Thm:application1} as an application of \cref{Thm:MainTheorem1}.
\begin{proof}[Proof of \cref{Thm:application1}]
By the proof of \cite{Yos03}, there is an (anti-)autoequivalence
$\Phi$ such that $\operatorname{rk} \Phi(\mathbf{v}) =1$.
Applying Theorem \ref{Thm:MainTheorem1}, we get the first claim.
For the second claim, we use \cite[sect. 1]{OS11}.
\end{proof}
Similarly, \cref{Thm:application2} follows from \cref{Thm:MainTheorem1}.
\begin{proof}[Proof of \cref{Thm:application2}]
As in \cite[Section 4]{Yos16a} (for generic $X$) or the arguments in \cref{Lem:e-poly,Lem:rank2}, there is an autoequivalence $\Phi$ such that $\Phi(\mathbf{v})=\mathbf{w}=(0,C,\chi)$, where $C\to\mathbb{P}^1$ is a double cover of $\mathbb{P}^1$ and $\chi\neq 0$. Then by Theorem \ref{Thm:MainTheorem1},
we get the first claim.
If $\operatorname{Pic}(\widetilde{X})=\varpi^*\operatorname{Pic}(X)$, then by \cite{Yos16a}, \cite[Assumption 2.16]{Sac12} holds
for $M_H(\mathbf{w},L')$. Hence by
\cite[Thm. 3.1, Thm. 4.4]{Sac12}, we get the second claim.
\end{proof}
\section{Picard groups of moduli spaces}\label{Sec:Picard groups}
In this section we prove \cref{Cor:Picard}. Before doing so, however, we prove that the maps $\ell_\mathcal{C}:\mathcal{C}\to\operatorname{Num}(M_{\mathcal{C}}(\mathbf{v},L))$ fit together to form one coherent map \[\operatorname{Stab}^{\dagger}(X)\to\operatorname{Num}(M_{\sigma_+}(\mathbf{v},L))\] whose image is contained in $\operatorname{Mov}(M_{\sigma_+}(\mathbf{v},L))$.
\subsection{How the Bayer-Macr\`{\i} maps fit together}Recall from \cref{eqn:def of xi,eqn:def of ell(sigma)} that $\ell_\mathcal{C}$ can be written as the composition
\begin{equation}\label{eqn:ell as composition}
\mathcal{C}\hookrightarrow\operatorname{Stab}^{\dagger}(X)\xrightarrow{\mathcal{Z}}H^*_{\operatorname{alg}}(X,\mathbb{C})\xrightarrow{I}\mathbf{v}^\perp\xrightarrow{\theta_{\mathbf{v},\sigma_\pm}}\operatorname{Num}(M_{\sigma_+}(\mathbf{v},L)),
\end{equation}
where $\mathcal{Z}(\sigma)=\mho_\sigma$, $I(\mho_\sigma)=\xi_\sigma$, and $\theta_{\mathbf{v},\sigma_\pm}$ are the cohomological Donaldson-Mukai maps associated to the universal families for $M_{\sigma_\pm}(\mathbf{v},L)$ as in \cref{eqn:DonaldsonMukai}. To show that these fit together it suffices to study how the $\ell_\mathcal{C}$ are related across a single wall crossing, and from \eqref{eqn:ell as composition}, it therefore suffices to study the relationship between $\theta_{\mathbf{v},\sigma_+}$ and $\theta_{\mathbf{v},\sigma_-}$.
So, for a given Mukai vector $\mathbf{v}\in H^*_{\operatorname{alg}}(X,\mathbb{Z})$ with $\mathbf{v}^2>0$, consider two adjacent chambers $\mathcal{C}^+$ and $\mathcal{C}^-$ separated by a wall $\mathcal{W}$, and, as always, choose a generic $\sigma_0\in\mathcal{W}$ and nearby stability conditions $\sigma_\pm\in\mathcal{C}^\pm$. The signature of $\mathcal{H}_\mathcal{W}$ and its saturatedness force $\mathbf{v}^{\perp} \cap \mathcal{H}_{\mathcal{W}}=\mathbb{Z}\mathbf{d}$ for some integral class $\mathbf{d}$. We have already seen in \cref{Prop:OrthgonalIsomorphism 2,Prop:CompositionSphericalExceptional} that in some cases $\mathbf{d}=\mathbf{w}_n$ is a spherical or exceptional class, in which case there is an isomorphism $M_{\sigma_-}(\mathbf{v},L)\xrightarrow{\sim} M_{\sigma_+}(\mathbf{v},L)$ induced by $R_{T_n^+}$, where $T_n^+$ is the $\sigma_+$-stable object of class $\mathbf{w}_n$. When $\mathbf{v}$ is primitive and $\mathcal{W}$ is defined by an isotropic vector $\mathbf{u}$ with
\begin{enumerate}
\item $\langle \mathbf{v},\mathbf{u} \rangle=1,2$ and $\ell(\mathbf{u})=2$, or
\item $\langle \mathbf{v},\mathbf{u} \rangle=\ell(\mathbf{u})=1$,
\end{enumerate}
then we claim that $\mathbf{d}=\mathbf{v}-\frac{\mathbf{v}^2}{\langle \mathbf{v},\mathbf{u} \rangle}\mathbf{u}$. We certainly have $\mathbf{d}':=\mathbf{v}-\frac{\mathbf{v}^2}{\langle\mathbf{v},\mathbf{u}\rangle}\mathbf{u}\in\left(\mathbf{v}^\perp\cap\mathcal{H}_\mathcal{W}\right)_\mathbb{Q}$, and when $\langle\mathbf{v},\mathbf{u}\rangle=1$, it is clear that $\mathbf{d}'\in\mathbf{v}^\perp\cap\mathcal{H}_\mathcal{W}$ so that $\mathbf{d}'=m\mathbf{d}$ for some $m\in\mathbb{Z}$. But then $1=\langle\mathbf{d}',\mathbf{u}\rangle$ forces $m=1$. Otherwise, $\langle\mathbf{v},\mathbf{u}\rangle=2$ and $\ell(\mathbf{u})=2$, forcing $\operatorname{rk}(\mathbf{v})$, and thus $\mathbf{v}^2$, to be even, so $\mathbf{d}'\in\mathbf{v}^\perp\cap\mathcal{H}_\mathcal{W}$ with $\mathbf{d}'=m\mathbf{d}$. If $m\neq 1$, then $\langle\mathbf{d}',\mathbf{u}\rangle=2$ forces $m=2$, and, as $\mathbf{d}'^2=-\mathbf{v}^2$, we see that $4\mid\mathbf{v}^2$, so $2 \mid \mathbf{v}$, contradicting the assumption that $\mathbf{v}$ was primitive. Therefore $m=1$, and $\mathbf{d}=\mathbf{v}-\frac{\mathbf{v}^2}{\langle\mathbf{v},\mathbf{u}\rangle}\mathbf{u}$, as claimed. In any case, we define $R_{\mathbf{d}}$ to be the reflection of $\mathbf{v}^\perp$ in the hyperplane $\mathbf{d}^\perp$.
That is, $R_\mathbf{d}$ is defined by the formula \[R_{\mathbf{d}}(\mathbf{z})=\mathbf{z}-2 \frac{\langle \mathbf{z},\mathbf{d} \rangle}{\mathbf{d}^2}\mathbf{d}=\mathbf{z}-2\frac{\langle \mathbf{z},\mathbf{u} \rangle}{\langle \mathbf{v},\mathbf{u} \rangle}\mathbf{d}=\mathbf{z}-2\frac{\langle\mathbf{z},\mathbf{u}\rangle}{\langle\mathbf{v},\mathbf{u}\rangle}\mathbf{v}+2\mathbf{v}^2\frac{\langle\mathbf{z},\mathbf{u}\rangle}{\langle\mathbf{v},\mathbf{u}\rangle^2}\mathbf{u},\;\;\mathbf{z}\in\mathbf{v}^\perp.\] Observing that $R_{m\mathbf{d}}(\mathbf{z})=R_\mathbf{d}(\mathbf{z})$ for any nonzero $m\in\mathbb{Z}$, we see that even if $\mathbf{d}=\frac{1}{2}(\mathbf{v}-\frac{\mathbf{v}^2}{\langle\mathbf{v},\mathbf{u}\rangle}\mathbf{u})$, when $\langle\mathbf{v},\mathbf{u}\rangle=2=\ell(\mathbf{u})$ and $\mathbf{v}$ is not primitive, the reflection remains the same.
We will prove in the next result that in all of the cases mentioned above $\theta_{\mathbf{v},\sigma_\pm}$ differ by precomposing with the reflection $R_\mathbf{d}$.
\begin{Prop}\label{Prop:Pic-relation}
Let $\mathcal{W}$ be a wall for $\mathbf{v}\in H^*_{\operatorname{alg}}(X,\mathbb{Z})$ with $\mathbf{v}^2>0$, and for generic $\sigma_0\in\mathcal{W}$ let $\sigma_\pm$ be sufficiently close stability conditions in the two adjacent chambers separated by $\mathcal{W}$. Then we may identify $\operatorname{Pic}(M_{\sigma_+}(\mathbf{v},L))$ with
$\operatorname{Pic}(M_{\sigma_-}(\mathbf{v},L))$. Moreover, if
\begin{enumerate}
\item $\langle \mathbf{v},\mathbf{w} \rangle=0$ for a spherical or exceptional class $\mathbf{w}\in C_\mathcal{W}\cap\mathcal{H}_\mathcal{W}$, or
\item $\langle \mathbf{v},\mathbf{u} \rangle=\ell(\mathbf{u})$ for an isotropic vector $\mathbf{u}\in\mathcal{H}_\mathcal{W}$, or
\item $\langle \mathbf{v},\mathbf{u} \rangle=1$ for an isotropic vector $\mathbf{u}\in\mathcal{H}_\mathcal{W}$ with $\ell(\mathbf{u})=2$,
\end{enumerate}
then $\theta_{\mathbf{v},\sigma_+}=\theta_{\mathbf{v},\sigma_-} \circ R_\mathbf{d}$,
where $\mathbf{v}^\perp\cap\mathcal{H}_\mathcal{W}=\mathbb{Z}\mathbf{d}$ and $R_\mathbf{d}$ is the reflection of $\mathbf{v}^\perp$ in the hyperplane $\mathbf{d}^\perp$. Otherwise, we have
$\theta_{\mathbf{v},\sigma_+}=\theta_{\mathbf{v},\sigma_-}$.
\end{Prop}
\begin{proof}
We claim first that we may identify $\operatorname{Pic}(M_{\sigma_+}(\mathbf{v},L))$ with $\operatorname{Pic}(M_{\sigma_-}(\mathbf{v},L))$. Indeed, by \cref{classification of walls,Prop:NonMinimalIsomorphism,Prop:OrthgonalIsomorphism 2,Prop:CompositionSphericalExceptional}, $M_{\sigma_+}(\mathbf{v},L)\cong M_{\sigma_-}(\mathbf{v},L)$ if $\langle\mathbf{v},\mathbf{w}\rangle=0$ for a spherical or exceptional $\mathbf{w}\in C_\mathcal{W}\cap\mathcal{H}_\mathcal{W}$; in particular, the two moduli spaces are isomorphic when $\mathcal{W}$ is a totally semistable wall of types (TSS3) and (TSS4) and when $\mathcal{W}$ is a Brill-Noether wall. The two moduli spaces are also isomorphic when $\mathcal{W}$ is a wall of Hilbert-Chow or Li-Gieseker-Uhlenbeck type, as we showed in the proof of part \ref{enum:MT1-birational map given by FM transform} of \cref{Thm:MainTheorem1}. For walls of iLGU type, we proved that $M_{\sigma_+}(\mathbf{v},L)\cong M_{\sigma_-}(\mathbf{v},L)$ in \cref{Prop:phi isomorphism odd rank,Prop:phi isomorphism even rank}. According to \cref{classification of walls}, $M_{\sigma_+}(\mathbf{v},L)$ and $M_{\sigma_-}(\mathbf{v},L)$ are isomorphic away from a locus of codimension at least two in all other cases.
Now let us move on to proving the claims about the compatibility of the $\theta$-maps. As usual, let $\mathbf{v}_0$ be the minimal Mukai vector in the $G_\mathcal{H}$-orbit of $\mathbf{v}$, and if $\mathbf{v}\in\mathcal{C}_n$, then we consider the composition of spherical or exceptional twists $\Phi^{\pm}$ giving the isomorphism $M_{\sigma_\pm}(\mathbf{v})\to M_{\sigma_\pm}(\mathbf{v}_0)$ (resp., $M_{\sigma_\pm}(\mathbf{v})\to M_{\sigma_\mp}(\mathbf{v}_0)$) if $n$ is even (resp., if $n$ is odd).
Suppose first that $\operatorname{codim}(M_{\sigma_+}(\mathbf{v}_0,L_0)\setminus M_{\sigma_0}^s(\mathbf{v}_0,L_0))\geq 2$. Then the two moduli spaces $M_{\sigma_\pm}(\mathbf{v}_0,L_0)$ share a common open subset on which the universal families agree. As the complement of $M_{\sigma_0}^s(\mathbf{v}_0,L_0)$ has codimension at least two and the maps $\theta_{\mathbf{v}_0,\sigma_\pm}$ are determined by their restriction to curves in this open subset, we get $\theta_{\mathbf{v}_0,\sigma_+}=\theta_{\mathbf{v}_0,\sigma_-}$. For $\mathbf{v}$, the autoequivalence giving the birational map $M_{\sigma_+}(\mathbf{v},L)\dashrightarrow M_{\sigma_-}(\mathbf{v},L)$ is induced by $(\Phi^-)^{-1}\circ\Phi^+$. As the classes of the spherical/exceptional objects occurring in $\Phi^+$ and $\Phi^-$ are identical, this autoequivalence does not change the class of the universal family in the K-group. Therefore, we again have $\theta_{\mathbf{v},\sigma_+}=\theta_{\mathbf{v},\sigma_-}$.
Now suppose that $\operatorname{codim}(M_{\sigma_+}(\mathbf{v}_0,L_0)\setminus M_{\sigma_0}^s(\mathbf{v}_0,L_0))=1$. Then by \cref{Prop:LGU walls of low codimension,Prop: 1-1 case totally semistable and codim 1} and \cref{Lem:non-isotropic no totally semistable wall}, we must have either $\langle\mathbf{v}_0,\mathbf{w}\rangle=0$ for a spherical or exceptional class $\mathbf{w}\in C_\mathcal{W}\cap\mathcal{H}_\mathcal{W}$ or $\langle\mathbf{v}_0,\mathbf{u}\rangle=\ell(\mathbf{u})$ for a primitive isotropic $\mathbf{u}\in\mathcal{H}_\mathcal{W}$.
In the first case, we have that $\mathbf{w}=\mathbf{w}_0$ or $\mathbf{w}_1$ and $R_{T_0^+}$ or $R_{T_1^+}$ induces an isomorphism $M_{\sigma_+}(\mathbf{v}_0,L_0)\to M_{\sigma_-}(\mathbf{v}_0,L_0)$ by \cref{Prop:OrthgonalIsomorphism 2,Prop:CompositionSphericalExceptional}. Considering the classes of the universal families in the K-group, we see that $\theta_{\mathbf{v}_0,\sigma_+}=\theta_{\mathbf{v}_0,\sigma_-}\circ R_\mathbf{w}$. Similarly, we have seen that $(\Phi^{-})^{-1}\circ R_{T_i^+}\circ\Phi^+$ induces an isomorphism $M_{\sigma_+}(\mathbf{v},L)\to M_{\sigma_-}(\mathbf{v},L)$ for $i=0$ or 1, so taking the classes of the universal families in the K-group we see that \[\theta_{\mathbf{v},\sigma_+}=\theta_{\mathbf{v},\sigma_-}\circ ((\Phi^{-})_*^{-1}\circ R_{\mathbf{w}}\circ(\Phi^+)_*)=\theta_{\mathbf{v},\sigma_-}\circ R_{(\Phi^+)_*^{-1}(\mathbf{w})},\] because $(\Phi^+)_*=(\Phi^-)_*$, as we noted above, and $\Phi\circ R_T\circ\Phi^{-1}=R_{\Phi(T)}$ for any autoequivalence $\Phi$ and spherical/exceptional object $T$ \cite[Lemma 8.21]{Hu1}. Setting $\mathbf{w}':=(\Phi^+)_*^{-1}(\mathbf{w})$, we get $\theta_{\mathbf{v},\sigma_+}=\theta_{\mathbf{v},\sigma_-}\circ R_{\mathbf{w}'}$ for the spherical/exceptional class $\mathbf{w}'\in C_\mathcal{W}\cap\mathcal{H}_\mathcal{W}$ such that $\langle\mathbf{v},\mathbf{w}'\rangle=0$, as claimed.
In the second case, \cite{BM14b} or the arguments in Section 10
showed that $\theta_{\mathbf{v}_0,\sigma_+}=\theta_{\mathbf{v}_0,\sigma_-} \circ R_\mathbf{d}$.
As in the previous cases, we see that \[\theta_{\mathbf{v},\sigma_+}=\theta_{\mathbf{v},\sigma_-}\circ R_{(\Phi^+)_*^{-1}(\mathbf{d})},\] giving the claim for $\mathbf{v}$.
Finally, we must consider the case when $M_{\sigma_0}^s(\mathbf{v}_0,L)=\varnothing$. By \cref{classification of walls}, we then have $\mathbf{v}=\mathbf{v}_0$, and either $\mathcal{W}$ is a Hilbert-Chow wall, or $\langle\mathbf{v},\mathbf{w}\rangle=0$ for a spherical or exceptional $\mathbf{w}\in C_\mathcal{W}\cap\mathcal{H}_\mathcal{W}$ and $\langle\mathbf{v},\mathbf{u}\rangle=\ell(\mathbf{u})$ for primitive isotropic $\mathbf{u}\in\mathcal{H}_\mathcal{W}$. These latter cases have already been covered above, so it only remains to consider the case of a Hilbert-Chow wall. We saw in the proof of part \ref{enum:MT1-birational map given by FM transform} of \cref{Thm:MainTheorem1} that $\Phi(\underline{\hphantom{A}})=(\underline{\hphantom{A}})^\vee[2]$ induces an isomorphism between $M_{\sigma_+}(\mathbf{v},L)$ and $M_{\sigma_-}(\mathbf{v},L)$, where we may assume that $\mathbf{v}=-(1,0,\tfrac{1}{2}-n)$, $\mathbf{u}=(0,0,1)$, and $L=\mathcal{O}_X$. Then $\mathbf{v}^\perp$ is spanned by $-\mathbf{d}=(1,0,n-\frac{1}{2})$ and classes of the form $(0,c,0)$ for $c\in\operatorname{Num}(X)$.
As the universal family $\mathcal{E}_-$ of $M_{\sigma_-}(\mathbf{v},L)$ is given by $\mathcal{E}_+^\vee[2]$, where $\mathcal{E}_+$ is the universal family of $M_{\sigma_+}(\mathbf{v},L)$, we have by Grothendieck duality \[\theta_{\mathbf{v},\sigma_+}(\mathbf{z})=-\theta_{\mathbf{v},\sigma_-}(\mathbf{z}^\vee).\] In particular, $\theta_{\mathbf{v},\sigma_+}((1,0,n-\frac{1}{2}))=-\theta_{\mathbf{v},\sigma_-}((1,0,n-\frac{1}{2}))$ and \[\theta_{\mathbf{v},\sigma_+}((0,c,0))=-\theta_{\mathbf{v},\sigma_-}((0,-c,0))=\theta_{\mathbf{v},\sigma_-}((0,c,0)).\] On the other hand, $R_\mathbf{d}((1,0,n-\frac{1}{2}))=-(1,0,n-\frac{1}{2})$ and $R_\mathbf{d}((0,c,0))=(0,c,0)$, so we see that indeed \[\theta_{\mathbf{v},\sigma_+}=\theta_{\mathbf{v},\sigma_-}\circ R_\mathbf{d},\] as required.
\end{proof}
Since $\xi_{\sigma_0} \in (\mathcal{H}_\mathcal{W}^\perp)_\mathbb{R}$, so in particular $\xi_{\sigma_0}\in(\mathbf{d}^\perp\cap\mathbf{v}^\perp)_\mathbb{R}$, we have
\begin{equation}\label{eqn:nef divisors agree at wall}
\theta_{\mathbf{v},\sigma_+}(\xi_{\sigma_0})=
\theta_{\mathbf{v},\sigma_-}(R_\mathbf{d}(\xi_{\sigma_0}))=\theta_{\mathbf{v},\sigma_-}(\xi_{\sigma_0})
\end{equation}
by Proposition \ref{Prop:Pic-relation}. It follows that the maps $\ell_{\mathcal{C}^\pm}$ agree along $\mathcal{W}$ and thus fit together to give a piecewise analytic continuous map \[\ell:\operatorname{Stab}^{\dagger}(X)\to\operatorname{Num}(M_{\sigma_+}(\mathbf{v},L)),\] as claimed. It is important to note that in any of the three cases enumerated in \cref{Prop:Pic-relation}, the image under $\ell$ of a path $\sigma:[-\tfrac{1}{2},\tfrac{1}{2}]\to\operatorname{Stab}^{\dagger}(X)$ with $\sigma(-\tfrac{1}{2})=\sigma_+$, $\sigma(0)=\sigma_0$, and $\sigma(\tfrac{1}{2})=\sigma_-$ is a path contained entirely in the nef cone of $M_{\sigma_+}(\mathbf{v},L)$ because of the action of the reflection $R_\mathbf{d}$. Indeed, as $\sigma(t)$ approaches $\sigma(0)=\sigma_0$, $\ell(\sigma(t))$ approaches $\ell(\sigma_0)$, but then $R_\mathbf{d}$ causes $\ell(\sigma(t))$, $t>0$, to bounce off the wall of $\operatorname{Nef}(M_{\sigma_+}(\mathbf{v},L))$ containing $\ell(\sigma_0)$ and continue back into the interior.
Note that while most of these bouncing walls correspond either to divisorial contractions or $\mathbb{P}^1$-fibrations, and thus to extremal walls of $\operatorname{Mov}(M_{\sigma}(\mathbf{v},L))$, the Enriques case differs from the K3 case in that there are bouncing walls that induce small contractions. Indeed, in the presence of exceptional classes $\mathbf{w}\in C_\mathcal{W}\cap\mathcal{H}_\mathcal{W}\cap\mathbf{v}^\perp$, $\mathcal{W}$ is a bouncing wall inducing a small contraction if $\mathbf{v}^2\geq 3$, as in \cref{Ex:ConfusingSmallContraction}.
\begin{Rem}
Recall that the contraction morphisms $\pi^\pm:M_{\sigma_\pm}(\mathbf{v},L)\to \overline{M}_\pm$ are defined by the linear systems
$|n \theta_{\mathbf{v},\sigma_\pm}(\xi_{\sigma_0})|$ $(n \gg 0)$. From \eqref{eqn:nef divisors agree at wall}, these
define morphisms $\pi^\pm:M_{\sigma_{\pm}}(\mathbf{v},L) \to \mathbb{P}$
such that $\operatorname{im} \pi^+=\operatorname{im} \pi^-$, and thus $\overline{M}_+=\overline{M}_-$, as the normalization of $\operatorname{im}\pi^+=\operatorname{im}\pi^-$.
Let $M'_{\sigma_0}(\mathbf{v})$ be the set of
S-equivalence classes
of $\sigma_0$-semistable objects $E$ with $\mathbf{v}(E)=\mathbf{v}$.
Then $\operatorname{im} \pi^\pm$ is a subset of $M'_{\sigma_0}(\mathbf{v})$, which is a proper subset in general.
\end{Rem}
\subsection{Proof of \cref{Cor:Picard}}
Now we shall prove Corollary \ref{Cor:Picard}.
For the computation of the Picard groups in odd rank cases,
we can use deformation of Enriques surfaces, since
$\theta_{\mathbf{v},\sigma}$ is well-defined for a relative moduli space over
a family of Enriques surfaces.
Thus in either case of \cref{Cor:Picard}, we may assume that $\varpi^*\operatorname{Pic}(X)=\operatorname{Pic}(\widetilde{X})$, where $\widetilde{X}$
is the covering K3 surface.
By \cref{Thm:application1,Thm:application2,Prop:Pic-relation},
it is sufficient to prove that $\theta_{\mathbf{v},\sigma}$ is an isomorphism
for a special pair of $\mathbf{v}$ and $\sigma$. Specifically, we may assume that $\mathbf{v}=(1,0,\tfrac{1}{2}-n)$ or
$\mathbf{v}=(2,D,a)$ with
$(D,\eta)=1$ for a divisor $\eta$ with $(\eta^2)=0$.
In the first case,
$\theta_{\mathbf{v},\sigma}$ is an isomorphism if
$M_\sigma(\mathbf{v},L)=\operatorname{Hilb}^n(X)$ $(n \geq 2)$ (see the proof of \cite[Corollary A.4]{Yos16a}).
So we shall treat the second case.
Replacing $\mathbf{v}$ by $\mathbf{v} \exp(k \eta)$, we may assume that
$D^2=-2,-4$.
Since $X$ is unnodal,
we can take an ample divisor $H$ with $(D,H)=0$.
We set $n:=\frac{D^2}{2}+1-a$.
Then
\[
\mathbf{v}=(2,D,\tfrac{D^2}{2}+1-n)=\mathbf{v}(\mathcal{O}_X(D))+\mathbf{v}(I_Z),\;
I_Z \in \operatorname{Hilb}^n(X).
\]
We take $\beta_0 \in \operatorname{Pic}(X)_{\mathbb{Q}}$
with $(\beta_0,D)=\frac{D^2}{2}+n$.
Then $\chi(\mathcal{O}_X(D-\beta_0))=\chi(I_{Z}(-\beta_0))$.
We take $\beta \in \operatorname{Pic}(X)_{\mathbb{Q}}$ in a neighborhood of $\beta_0$
such that $\chi(\mathcal{O}_X(D-\beta))<\chi(I_{Z}(-\beta))$.
We take a stability condition $\sigma$ such that $M_\sigma(\mathbf{v},L)$ is the
moduli space of $\beta$-twisted stable sheaves $M_H^\beta(\mathbf{v},L)$.
For a non-trivial extension
$$
0 \to \ensuremath{\mathcal O}_X(D) \to E \to I_{Z} \to 0,\; Z \in \ensuremath{\mathbb{H}}ilb^n(X),
$$
$E$ is a $\mathbf{b}eta$-twisted stable sheaf. Let us denote by $\ensuremath{\mathbb{Z}}Z$ the universal subscheme $\ensuremath{\mathbb{Z}}Z\subset X\times\ensuremath{\mathbb{H}}ilb^n(X)$ and by $p_X$, $p_{\ensuremath{\mathbb{H}}ilb^n(X)}$ the projections of $X\times\ensuremath{\mathbb{H}}ilb^n(X)$ onto its first and second factors, respectively.
Since $H^0(X,\ensuremath{\mathcal O}_X(\pm D))=H^0(X,\ensuremath{\mathcal O}_X(\pm D+K_X))=0$, we see that
$\ensuremath{\mathcal V}:=\mathop{\mathcal Ext}\nolimits^1_{p_{\ensuremath{\mathbb{H}}ilb^n(X)}}(I_{\ensuremath{\mathbb{Z}}Z},p_X^*\ensuremath{\mathcal O}_X(D))$
is a locally free sheaf
on $\ensuremath{\mathbb{H}}ilb^n(X)$ of rank $n-1-\tfrac{D^2}{2}$.
For $n>0$, let $P=\ensuremath{\mathbb{P}}(\ensuremath{\mathcal V}^\mathbf{v}ee)$ and let $\pi:P \to \ensuremath{\mathbb{H}}ilb^n(X)$ be the projective bundle
parameterizing non-trivial extensions
of $I_{Z}$ by $\ensuremath{\mathcal O}_X(D)$ with universal family of extensions
\mathbf{b}egin{equation}\label{eq:beta}
0 \to \ensuremath{\mathcal O}_X(D) \mathbf{b}oxtimes \ensuremath{\mathcal O}_P(\lambda) \to \ensuremath{\mathcal E} \to
\pi^*I_{\ensuremath{\mathbb{Z}}Z} \to 0 ,
\end{equation}
where
$\ensuremath{\mathcal O}_P(\lambda)$ is the tautological line bundle on $P$.
Hence we have a morphism
$\psi:P \to M_\sigma(\mathbf{v},L)$.
\mathbf{b}egin{Lem}
$\theta_{\mathbf{v},\sigma}$ is an isomorphism if $\mathbf{v}^2 \geq 4$.
\end{Lem}
\mathbf{b}egin{proof}
We note that $\mathbf{v}^2=-D^2+4(n-1)$.
Hence $\mathbf{v}^2 \geq 4$ if and only if
$n \geq 2$, or $n=1$ and $D^2=-4$.
We fix $\eta \in \ensuremath{\mathbb{N}}S(X)$ with $(\eta,D)=1$.
Then we have a decomposition $\ensuremath{\mathbb{N}}S(X)=D^\perp \mathop{\ord(\omega_S)}\nolimitsplus \ensuremath{\mathbb{Z}} \eta$.
For $\xi \in \ensuremath{\mathbb{N}}S(X)$, let $u_\xi \in K(X)$ be an element of $K(X)$ such that
$\mathbf{v}(u_\xi)=(0,\xi,0)$ and $c_1(u_\xi)=\xi$.
Any $u\in K(X)$ can be written as $u=r \ensuremath{\mathcal O}_X+s \ensuremath{\mathbb{C}}_x+u_\xi$, so from $\langle\mathbf{v},\mathbf{v}(u_\eta)\rangle=1$ it follows that
\mathbf{b}egin{equation}
u-\langle \mathbf{v},\mathbf{v}(u) \rangle u_\eta=
r \ensuremath{\mathcal O}_X+s \ensuremath{\mathbb{C}}_x+u_{\xi-(\xi,D)\eta}+
(r(2+\tfrac{D^2}{2}-n)+2s)u_\eta \in K(X)_\mathbf{v}.
\end{equation}
Hence any $u \in K(X)_\mathbf{v}$ is written as
\mathbf{b}egin{equation}\label{eq:u}
u=r \ensuremath{\mathcal O}_X+s \ensuremath{\mathbb{C}}_x+u_{\xi}+
(r(2+\tfrac{D^2}{2}-n)+2s)u_\eta,\; r,s \in\ensuremath{\mathbb{Z}}, \xi \in D^\perp.
\end{equation}
For any $\mathbf{a}lpha\in K(X)$, we set
$\theta_0(\mathbf{a}lpha):=\mathbf{d}et p_{\ensuremath{\mathbb{H}}ilb^n(X) !}(I_{\ensuremath{\mathbb{Z}}Z} \mathop{\ord(\omega_S)}\nolimitstimes p_X^*(\mathbf{a}lpha^{\mathbf{v}ee}))$.
Then $\theta_0(\ensuremath{\mathbb{C}}_p)=\ensuremath{\mathcal O}_{\ensuremath{\mathbb{H}}ilb^n(X)}$.
We have an injective homomorphism
$\ensuremath{\mathbb{N}}S(X) \to \ensuremath{\mathbb{N}}S(\ensuremath{\mathbb{H}}ilb^n(X)) (\cong \ensuremath{\mathbb{P}}ic(\ensuremath{\mathbb{H}}ilb^n(X)))$
by sending $\xi \in \ensuremath{\mathbb{N}}S(X)$ to $c_1(\theta_0(u_\xi))$.
We regard $\ensuremath{\mathbb{N}}S(X)$ as a subgroup of $\ensuremath{\mathbb{N}}S(\ensuremath{\mathbb{H}}ilb^n(X))$
by this homomorphism.
If $n \geq 2$, then
$\ensuremath{\mathbb{N}}S(\ensuremath{\mathbb{H}}ilb^n(X))=\ensuremath{\mathbb{Z}} \mathbf{d}elta \mathop{\ord(\omega_S)}\nolimitsplus \ensuremath{\mathbb{N}}S(X)$ with
$\mathbf{d}elta:=c_1(\theta_0(\ensuremath{\mathcal O}_X))$.
From the exact sequence \eqref{eq:beta} and \cite[Lemma 8.1.2]{HL10}, we see that writing any $u\in K(X)_\mathbf{v}$ as in \eqref{eq:u}, we have
\mathbf{b}egin{equation}\label{eqn:DonaldsonMukai for P}
\mathbf{b}egin{split}
c_1 (p_{P!}(\ensuremath{\mathcal E} \mathop{\ord(\omega_S)}\nolimitstimes p_X^*(u^{\mathbf{v}ee})))=&
-\langle \mathbf{v}(\ensuremath{\mathcal O}_X(D)),\mathbf{v}(u) \rangle\lambda
+\pi^*c_1(\theta_0(u))\\
=&
r(\mathbf{d}elta+(n-1)\lambda+(2+\tfrac{D^2}{2}-n)\eta)
-s(\lambda-2\eta)+\xi,
\end{split}
\end{equation}
where we have suppressed the $\pi^*$ in the second equality since $\pi^*:\ensuremath{\mathbb{N}}S(\ensuremath{\mathbb{H}}ilb^n(X))\to\ensuremath{\mathbb{N}}S(P)$ is injective and
$$
\ensuremath{\mathbb{N}}S(P)=\ensuremath{\mathbb{N}}S(X) \mathop{\ord(\omega_S)}\nolimitsplus \ensuremath{\mathbb{Z}} \mathbf{d}elta \mathop{\ord(\omega_S)}\nolimitsplus \ensuremath{\mathbb{Z}} \lambda=
D^\perp \mathop{\ord(\omega_S)}\nolimitsplus \ensuremath{\mathbb{Z}} \eta \mathop{\ord(\omega_S)}\nolimitsplus \ensuremath{\mathbb{Z}} \mathbf{d}elta \mathop{\ord(\omega_S)}\nolimitsplus \ensuremath{\mathbb{Z}} \lambda.
$$
By \cite[Theorem 8.1.5]{HL10}, the homomorphism $K(X)_\mathbf{v}\to\ensuremath{\mathbb{N}}S(P)$ in \eqref{eqn:DonaldsonMukai for P} is precisely $c_1$ composed with the pull-back along the classifying morphism $\psi$ of the universal Donaldson-Mukai map $\theta_{\mathbf{v},\sigma}$. It therefore follows from the formula in \eqref{eqn:DonaldsonMukai for P} that
$$
c_1 \circ \psi^* \circ \theta_{\mathbf{v},\sigma}:K(X)_\mathbf{v} \to \ensuremath{\mathbb{P}}ic(M_H^\mathbf{b}eta(\mathbf{v},L)) \to \ensuremath{\mathbb{P}}ic(P) \to \ensuremath{\mathbb{N}}S(P)
$$
is injective (up to torsion) and its image is a direct summand of $\ensuremath{\mathbb{N}}S(P)$.
If $n=1$ and $D^2=-4$, then $\mathop{\mathrm{rk}}\ensuremath{\mathcal V}=n+1=2$ and
$\ensuremath{\mathbb{N}}S(P)=\ensuremath{\mathbb{N}}S(X) \mathop{\ord(\omega_S)}\nolimitsplus \ensuremath{\mathbb{Z}} \lambda$.
Since
$2+\tfrac{D^2}{2}-n=-1$,
\mathbf{b}egin{equation}
c_1(p_{P!}(\ensuremath{\mathcal E} \mathop{\ord(\omega_S)}\nolimitstimes p_X^*(u^{\mathbf{v}ee})))=
-r \eta-s(\lambda-2\eta)+\xi.
\end{equation}
Hence $c_1 \circ \psi^* \circ \theta_{\mathbf{v},\sigma}$
is an isomorphism.
In either case, it follows that $\theta_{\mathbf{v},\sigma} (K(X)_\mathbf{v})$ is a direct summand of $\ensuremath{\mathbb{P}}ic(M_\sigma(\mathbf{v},L))$.
Since the torsion submodule of $\ensuremath{\mathbb{P}}ic(M_\sigma(\mathbf{v},L))$ is
isomorphic to
$\ensuremath{\mathbb{Z}} /2 \ensuremath{\mathbb{Z}}$ and $\mathop{\mathrm{rk}} \ensuremath{\mathbb{P}}ic(M_\sigma(\mathbf{v},L))=\mathop{\mathrm{rk}} K(X)_\mathbf{v}$
(\cite[Theorem 5.1]{Sac12}),
$\theta_{\mathbf{v},\sigma}$ is an isomorphism.
\end{proof}
\section{Appendix: Some non-normal moduli spaces of sheaves}\label{App: exceptional case}
In this final section, we shall describe the two fundamental outlying examples of moduli spaces of sheaves (Bridgeland semistable objects) on Enriques surfaces. Indeed, by \cref{prop:pss} for $\sigma\in\mathop{\mathrm{Stab}}\nolimitsd(X)$ generic with respect to $\mathbf{v}$ satisfying $\mathbf{v}^2>0$, $M_{\sigma}(\mathbf{v})$ is normal unless
\mathbf{b}egin{enumerate}
\item $\mathbf{v}=2\mathbf{v}_0$ with $\mathbf{v}_0^2=1$, or
\item $\mathbf{v}^2=2$ and $X$ is nodal.
\end{enumerate}
Moreover, these are precisely the exceptions to $M_\sigma^s(\mathbf{v})$ having torsion canonical divisor and Gorenstein, terminal, l.c.i. singularities \cite[Theorem 8.2]{Nue14b}. Similarly, we have seen in \cref{Thm:exist:nodal} when $\mathbf{v}$ is primitive $M_\sigma(\mathbf{v},L)$ is irreducible if $X$ is unnodal and can only be reducible for nodal $X$ if $\mathbf{v}^2=2$.
We will prove here that the failure of normality in these two cases is due to the presence of multiple irreducible components. In the first case, when $\mathbf{v}=2\mathbf{v}_0$ with $\mathbf{v}_0^2=1$, we describe a connected component of $M_\sigma(\mathbf{v},L)$ for $L\equiv K_X\pmod 2$ consisting of two irreducible components. In the second case, when $\mathbf{v}^2=2$, we describe all of $M_\sigma(\mathbf{v},L)$ for $L\equiv D+\frac{r}{2}K_X\pmod 2$, which consists of precisely two irreducible components. In both cases, we also describe the other component parametrizing objects with the other determinant.
\subsection{Some components of $M_\sigma(\mathbf{v},L)$ when $\mathbf{v}=2\mathbf{v}_0$ with $\mathbf{v}_0^2=1$}
By \cite[Theorem 4.6]{Yos03}, there exists a Fourier-Mukai transform $\ensuremath{\mathbb{P}}hi:\ensuremath{\mathbb{D}}b(X)\to\ensuremath{\mathbb{D}}b(X)$ such that $\ensuremath{\mathbb{P}}hi_*(\mathbf{v}_0)=(1,0,-\frac{1}{2})$. By \cref{Thm:MainTheorem1}, it follows that $M_\sigma(\mathbf{v})$ is birational to the moduli space $M_\mathop{\ord(\omega_S)}\nolimitsmega(2,0,-1)$ of $\mathop{\ord(\omega_S)}\nolimitsmega$-Gieseker semistable sheaves of Mukai vector $2(1,0,-\frac{1}{2})=(2,0,-1)$ for a generic ample divisor $\mathop{\ord(\omega_S)}\nolimitsmega$. In particular, to understand the number of irreducible components, we are reduced to analyzing the moduli space $M_\mathop{\ord(\omega_S)}\nolimitsmega(2,0,-1)$.
\subsubsection{A connected component of $M_\mathop{\ord(\omega_S)}\nolimitsmega((2,0,-1),K_X)$}
For the Mukai vector $\mathbf{v}=(2,0,-1)$, we shall describe a connected component
of $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$, which is a refinement of \cite[Remark 2.3]{Yos16a}.
Recall that $\mathbf{v}arpi:\mathbf{w}idetilde{X} \to X$ is the quotient map from the covering K3 surface $\mathbf{w}idetilde{X}$ with covering involution $\iota$.
We set $E_0:=\ensuremath{\mathcal O}_X \mathop{\ord(\omega_S)}\nolimitsplus \ensuremath{\mathcal O}_X(K_X)$ and begin by concretely studying the singular locus of the stable locus of $M_\mathop{\ord(\omega_S)}\nolimitsmega^s(\mathbf{v},K_X)$.
\mathbf{b}egin{Lem}\label{Lem:ext^2}
In the open subscheme $M_\mathop{\ord(\omega_S)}\nolimitsmega^s(\mathbf{v},K_X)$ of stable sheaves,
the singular locus is
\mathbf{b}egin{equation}\label{eq:sing}
\Set{ \mathbf{v}arpi_*(I_W(D)) \ | \ I_W \in \ensuremath{\mathbb{H}}ilb^n(\mathbf{w}idetilde{X}),\;\iota^*(D)=-D, n=(D^2)/2+2, I_W(D) \not \cong \iota^*(I_W(D)) },
\end{equation}
where $D=0$ or $(D^2)=-4$.
In particular, the singular locus consists of isolated singular points in
the open subscheme of locally free sheaves, and
an irreducible 4-dimensional subscheme.
\end{Lem}
\mathbf{b}egin{proof}
If $M_\mathop{\ord(\omega_S)}\nolimitsmega^s(\mathbf{v},K_X)$ is singular at $E$, then
$E(K_X) \cong E$, which implies $E= \mathbf{v}arpi_*(I_W(D))$, where
$I_W$ is the ideal sheaf of a 0-dimensional subscheme $W$ of $\mathbf{w}idetilde{X}$
and $D$ is a divisor on $\mathbf{w}idetilde{X}$ (cf. \cite[Lemma 2.13]{Yamada}).
By the stability of $E$, $\iota^*(I_W(D)) \not \cong I_W(D)$.
Since $\mathbf{v}arpi^*(\mathbf{v}arpi_*(I_W(D)))=I_W(D) \mathop{\ord(\omega_S)}\nolimitsplus I_{\iota(W)}(\iota^*(D))$, it follows that
$D+\iota^*(D)=0$ and $\mathop{\mathrm{deg}} W=(D^2)/2+2$.
In particular, as $$(D,\mathbf{v}arpi^*(\mathop{\ord(\omega_S)}\nolimitsmega))=(\iota^*D,\iota^*\mathbf{v}arpi^*(\mathop{\ord(\omega_S)}\nolimitsmega))=(\iota^*D,\mathbf{v}arpi^*(\mathop{\ord(\omega_S)}\nolimitsmega)),$$ we get $(D,\mathbf{v}arpi^*(\mathop{\ord(\omega_S)}\nolimitsmega))=0$, so either $D=0$ or neither $D$ nor $-D$ is effective. As $\mathbf{w}idetilde{X}$ is a K3 surface, we cannot have $(D^2)\geq-2$, since then $D$ or $-D$ would be effective, so we must in fact have $(D^2)\leq -4$. From $\mathop{\mathrm{deg}} W\geq 0$, however, we get $(D^2) \geq -4$. We conclude therefore that either $D=0$ or $(D^2)=-4$, giving the description in \eqref{eq:sing}.
In the open subscheme of locally free sheaves,
$W=\emptyset$, so the singular locus there is isolated, consisting of $\mathbf{v}arpi^*(\ensuremath{\mathcal O}_{\mathbf{w}idetilde{X}}(D))$ for the finitely many divisors $D$ such that $\iota^*D=-D$ and $(D^2)=-4$.
If $D=0$ and $\mathop{\mathrm{deg}} W=2$, then
$\mathbf{v}arpi_*(I_W)$ is a non-locally free sheaf
with $\mathbf{v}arpi_*(I_W)^{\mathbf{v}ee \mathbf{v}ee} \cong E_0$,
which gives an irreducible component
$$
\Set{ \mathbf{v}arpi_*(I_W) \in M_\mathop{\ord(\omega_S)}\nolimitsmega^s(\mathbf{v},K_X) \ | \ I_W \in \ensuremath{\mathbb{H}}ilb^2(\mathbf{w}idetilde{X}) }
$$
of the singular locus of dimension 4.
\end{proof}
From the lemma it follows that the singularities of the open subscheme $M_\mathop{\ord(\omega_S)}\nolimitsmega^{s,lf}(\mathbf{v},K_X)$ parametrizing locally free sheaves in $M_\mathop{\ord(\omega_S)}\nolimitsmega^s(\mathbf{v},K_X)$ are isolated 5-dimensional hypersurface singularities. Therefore, we get the following corollary:
\mathbf{b}egin{Cor}
The open subscheme $M_\mathop{\ord(\omega_S)}\nolimitsmega^{s,lf}(\mathbf{v},K_X)$ parametrizing $\mathop{\ord(\omega_S)}\nolimitsmega$-stable locally free sheaves of Mukai vector $\mathbf{v}$ and determinant $K_X$ is normal.
\end{Cor}
Now we turn to the 4-dimensional component of the singular locus.
We have an injective morphism $X \to \ensuremath{\mathbb{H}}ilb^2(\mathbf{w}idetilde{X})$ by sending
$I_z$ $(z \in X)$ to $\mathbf{v}arpi^*(I_z)=I_{\mathbf{v}arpi^{-1}(z)}$.
The image is the $\iota$-invariant Hilbert scheme $\ensuremath{\mathbb{H}}ilb^2(\mathbf{w}idetilde{X})^{\iota}$.
We shall identify $X$ with $\ensuremath{\mathbb{H}}ilb^2(\mathbf{w}idetilde{X})^{\iota}$.
We also have a morphism
$$
\mathbf{b}egin{matrix}
\ensuremath{\mathbb{H}}ilb^2(\mathbf{w}idetilde{X}) & \to & M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)\\
I_W & \mapsto & \mathbf{v}arpi_*(I_W),
\end{matrix}
$$
which is a double covering onto
its image.
If $I_W \in \ensuremath{\mathbb{H}}ilb^2(\mathbf{w}idetilde{X}) \setminus X$, then
$\mathbf{v}arpi_*(I_W)$ is a stable non-locally free sheaf and
the fiber over $\mathbf{v}arpi_*(I_W)$ is $\{ I_W, I_{\iota(W)} \}$.
If $I_W=I_{\mathbf{v}arpi^{-1}(z)}$, then
$$\mathbf{v}arpi_*(I_W)=\mathbf{v}arpi_*(\mathbf{v}arpi^*(I_z))=I_z\mathop{\ord(\omega_S)}\nolimitstimes(\ensuremath{\mathcal O}_X\mathop{\ord(\omega_S)}\nolimitsplus\ensuremath{\mathcal O}_X(K_X))=I_z \mathop{\ord(\omega_S)}\nolimitstimes E_0,$$ which is a properly semi-stable
sheaf.
For $E=\mathbf{v}arpi_*(I_W)$, we have $-\mathop{\mathrm{ch}}\nolimitsi(E,E)/2=2$, and hence
by using
\cite[Fact 2.4]{Yamada} and the paragraph preceding \cite[Lemma 2.13]{Yamada},
we see that around $E$,
$M_\mathop{\ord(\omega_S)}\nolimitsmega^s(\mathbf{v},K_X)$ is analytic locally defined by
a hypersurface $F(t_1,t_2,\dots,t_6)=0$ in $(\ensuremath{\mathbb{C}}^6,0)$
such that
$$
F(t_1,t_2,\dots,t_6)=\sum_{i=1}^n t_i^2+G(t_1,t_2,\dots,t_6),\;\;
n \geq 2,\; G(t_1,t_2,\dots,t_6) \in (t_1,t_2,\dots,t_6)^3.
$$
Since the singular locus is 4-dimensional at $E=\mathbf{v}arpi_*(I_W)$, we must have $n=2$.
Therefore there are at most two irreducible components intersecting
along the 4-dimensional singular locus. We shall prove that there are exactly two such irreducible components $M_0$ and $M_1$ and that their union is connected.
Let $\psi:M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X) \to N$ be the contraction map to the
Uhlenbeck compactification.
\mathbf{b}egin{Lem}\label{Lem:M_0}
There is an irreducible component
$M_0$ of $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$
such that $M_0$ contains a $\mu$-stable locally free sheaf and
$\psi(M_0)$ contains $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v}',K_X) \times S^2 X$,
where $\mathbf{v}'=\mathbf{v}(E_0)$.
\end{Lem}
\mathbf{b}egin{proof}
Consider the stack $\ensuremath{\mathcal M}_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)^{\mu ss}$
of $\mu$-semistable sheaves $E$ with $\mathbf{v}(E)=\mathbf{v}$ and $\mathbf{d}et(E)=K_X$, and let $\ensuremath{\mathcal M}_0$ be the irreducible component containing all locally free sheaves fitting in the short exact sequence
\mathbf{b}egin{equation}
0 \to \ensuremath{\mathcal O}_X \to E \to I_Z (K_X) \to 0,
\end{equation}
where $I_Z \in \ensuremath{\mathbb{H}}ilb^2(X)$. Then by the proof of \cite[Lemma 2.8]{Yos16a},
$$
\ensuremath{\mathcal M}_0':=\Set{ E \in \ensuremath{\mathcal M}_0 \ | \ \text{$E$ is a $\mu$-stable locally free sheaf}}
$$
is a nonempty open and dense substack of $\ensuremath{\mathcal M}_0$.
Let $M_0$ be the irreducible component of $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$
containing the associated coarse moduli scheme of $\ensuremath{\mathcal M}_0'$.
We shall prove that $\psi(M_0)$ contains $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v}',K_X) \times S^2 X$,
where $\mathbf{v}'=\mathbf{v}(E_0)$.
Let $E\in\ensuremath{\mathcal M}_0$ be a locally free sheaf fitting in
\mathbf{b}egin{equation}
0 \to \ensuremath{\mathcal O}_X \to E \to I_Z (K_X) \to 0,
\end{equation}
where $I_Z \in \ensuremath{\mathbb{H}}ilb^2(X)$. Consider a point $F\in\ensuremath{\mathcal M}_0'$ and a generic curve $T\subset\ensuremath{\mathcal M}_0$ connecting $E$ and $F$. This corresponds to a deformation $\ensuremath{\mathcal E}$ over the curve $T$ such that $\ensuremath{\mathcal E}_{t_0}=E$ and $\ensuremath{\mathcal E}_{t_1}=F$ for points $t_0,t_1\in T$. In particular, as $T$ is taken generic, we have $\ensuremath{\mathcal E}_t$ is a $\mu$-stable locally free sheaf for $t\neq t_0$. Note that $E$ is $\mu$-semistable but not Gieseker semistable, as $$\mu(\ensuremath{\mathcal O}_X)=0=\mu(I_Z(K_X))\text{ and } \mathop{\mathrm{ch}}\nolimitsi(\ensuremath{\mathcal O}_X)=1>-1=\mathop{\mathrm{ch}}\nolimitsi(I_Z(K_X)),$$ so $\ensuremath{\mathcal E}$ does not induce a morphism $T\to M_0$. By Langton's theorem \cite[Theorem 2.B.1]{HL10}, however, we obtain another family $\ensuremath{\mathcal F}$ over $T$ of semistable sheaves $\ensuremath{\mathcal F}_t$ such that $\ensuremath{\mathcal F}_t=\ensuremath{\mathcal E}_t$ for $t \ne t_0$. This new family does induce a morphism $T \to M_0$.
Since $\psi$ extends to the family of $\mu$-semistable sheaves
$\ensuremath{\mathcal E}_t$,
\mathbf{b}egin{equation}
\psi(\ensuremath{\mathcal F}_{t_0})=\lim_{t \to t_0} \psi(\ensuremath{\mathcal F}_t)=\psi(\ensuremath{\mathcal E}_{t_0})=
(E_0, [Z]) \in M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v}',K_X) \times S^2 X,
\end{equation}
where
$[Z]$ is the 0-cycle defined by $Z$.
Hence our claim holds.
\end{proof}
For the construction of the other irreducible component $M_1$,
we prepare the following lemma.
\mathbf{b}egin{Lem}\label{Lem:nonlocally free extension is semistable}
For $Z \in \ensuremath{\mathbb{H}}ilb^2(X)$ and a non-trivial extension
\mathbf{b}egin{equation}\label{eqn:nonlocally free extension}
0 \to I_Z \to E \to \ensuremath{\mathcal O}_X(K_X) \to 0,
\end{equation}
$E\in M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$.
\end{Lem}
\mathbf{b}egin{proof}
Suppose not, and let $F$ be a saturated, destabilizing, stable subsheaf of $E$. In particular, $\mathop{\mathrm{rk}}(F)=1$. As $E$ is $\mu$-semistable, $\mu_\mathop{\ord(\omega_S)}\nolimitsmega(F)\leq\mu_\mathop{\ord(\omega_S)}\nolimitsmega(E)$, so we must have $\mu_\mathop{\ord(\omega_S)}\nolimitsmega(F)=\mu_\mathop{\ord(\omega_S)}\nolimitsmega(E)$ and $\mathop{\mathrm{ch}}\nolimitsi(F)>\frac{\mathop{\mathrm{ch}}\nolimitsi(E)}{2}=0$. It follows from $\mathop{\mathrm{ch}}\nolimitsi(I_Z)=-1$ that the composition $\phi:F\ensuremath{\hookrightarrow} E\mathop{\ord(\omega_S)}\nolimitsnto \ensuremath{\mathcal O}_X(K_X)$ must be nonzero so that from stability $\mathop{\mathrm{ch}}\nolimitsi(F)\leq\mathop{\mathrm{ch}}\nolimitsi(\ensuremath{\mathcal O}_X(K_X))=1$, and thus $\mathop{\mathrm{ch}}\nolimitsi(F)=\mathop{\mathrm{ch}}\nolimitsi(\ensuremath{\mathcal O}_X(K_X))$. Then $\phi$ is an isomorphism, contradicting the non-triviality of the extension \eqref{eqn:nonlocally free extension}. Hence $E$ is semistable, as claimed.
\end{proof}
Observe that $E$ as in \eqref{eqn:nonlocally free extension} is not locally free and dualizing \eqref{eqn:nonlocally free extension} twice we get another short exact sequence
\mathbf{b}egin{equation}\label{eqn:s.e.s. on double duals}
0\to \ensuremath{\mathcal O}_X\to E^{\mathbf{v}ee\mathbf{v}ee}\to\ensuremath{\mathcal O}_X(K_X)\to 0.
\end{equation}
Moreover, we may put \cref{eqn:nonlocally free extension,eqn:s.e.s. on double duals} together into the following short exact sequence of complexes
$$\mathbf{b}egin{CD}
0 @>>> I_Z @>>> E @>>> \ensuremath{\mathcal O}_X(K_X) @>>> 0\\
& & @VVV @VVV @V\mathop{\mathrm{Id}}\nolimits VV \\
0 @>>> \ensuremath{\mathcal O}_X @>>> E^{\mathbf{v}ee\mathbf{v}ee} @>>> \ensuremath{\mathcal O}_X(K_X)@>>> 0
\end{CD}$$
and apply the Snake lemma to see that $E$ fits into another short exact sequence \mathbf{b}egin{equation}\label{eqn:alternative defining short exact sequence}0\to E\to E^{\mathbf{v}ee\mathbf{v}ee}\to \ensuremath{\mathcal O}_Z\to 0.\end{equation} As $\mathop{\mathrm{Ext}}\nolimits^1(\ensuremath{\mathcal O}_X(K_X),\ensuremath{\mathcal O}_X)=0$, we see that $E^{\mathbf{v}ee\mathbf{v}ee}=\ensuremath{\mathcal O}_X\mathop{\ord(\omega_S)}\nolimitsplus\ensuremath{\mathcal O}_X(K_X)$, so we may compose the natural embedding $\ensuremath{\mathcal O}_X(K_X)\ensuremath{\hookrightarrow}\ensuremath{\mathcal O}_X\mathop{\ord(\omega_S)}\nolimitsplus\ensuremath{\mathcal O}_X(K_X)$ with the surjection $E^{\mathbf{v}ee\mathbf{v}ee}\mathop{\ord(\omega_S)}\nolimitsnto \ensuremath{\mathcal O}_Z$ to obtain a morphism $\ensuremath{\mathcal O}_X(K_X)\to\ensuremath{\mathcal O}_Z$. We will show in the next lemma that we may determine when $E$ as in \eqref{eqn:nonlocally free extension} is in $M_\mathop{\ord(\omega_S)}\nolimitsmega^s(\mathbf{v},K_X)$ based on the surjectivity of the associated morphism $\ensuremath{\mathcal O}_X(K_X)\to\ensuremath{\mathcal O}_Z$.
\mathbf{b}egin{Lem}\label{Lem:criterion for extension to be stable}
A sheaf $E$ fitting into the short exact sequence \eqref{eqn:nonlocally free extension} is stable if and only if the associated morphism $\ensuremath{\mathcal O}_X(K_X)\to\ensuremath{\mathcal O}_Z$ is surjective.
\end{Lem}
\mathbf{b}egin{proof}
The sheaf $E$ is properly semistable if and only if it contains a torsion free subsheaf of Mukai vector $(1,0,-\frac{1}{2})$, that is, either the ideal sheaf of a point $I_z$ or its twist $I_z(K_X)$. As $\ensuremath{\mathbb{H}}om(I_z,I_Z)=0=\ensuremath{\mathbb{H}}om(I_z,\ensuremath{\mathcal O}_X(K_X))$, we see that $E$ is properly semistable if and only if $E$ contains $I_{z_1}(K_X)$ for some $z_1\in X$. But this is equivalent to $E$ sitting in a short exact sequence \mathbf{b}egin{equation}\label{eqn:strictly semistable s.e.s}0\to I_{z_1}(K_X)\to E\to I_{z_2}\to 0\end{equation} for some other point $z_2\in X$.
Taking double duals of \eqref{eqn:strictly semistable s.e.s}, we again get a short exact sequence of complexes
$$
\mathbf{b}egin{CD}
0@>>> I_{z_1}(K_X)@>>> E@>>>I_{z_2}@>>>0\\
&&@VVV @VVV @VVV\\
0@>>>\ensuremath{\mathcal O}_X(K_X)@>>>E^{\mathbf{v}ee\mathbf{v}ee}@>>>\ensuremath{\mathcal O}_X@>>>0,
\end{CD}
$$
so using the Snake lemma we see that, as claimed, the composition $\ensuremath{\mathcal O}_X(K_X)\ensuremath{\hookrightarrow} E^{\mathbf{v}ee\mathbf{v}ee}\mathop{\ord(\omega_S)}\nolimitsnto\ensuremath{\mathcal O}_Z$ is not surjective, factoring instead through the proper subsheaf $\ensuremath{\mathcal O}_{z_1}\subsetneq\ensuremath{\mathcal O}_Z$. Note that we also see that $Z=\{z_1,z_2\}$.
Conversely, if the composition $\ensuremath{\mathcal O}_X(K_X)\ensuremath{\hookrightarrow} E^{\mathbf{v}ee\mathbf{v}ee}\mathop{\ord(\omega_S)}\nolimitsnto\ensuremath{\mathcal O}_Z$ is not surjective, then it factors through $\ensuremath{\mathcal O}_{z_1}$ for some $z_1\in Z$, and we get a short exact sequence of complexes
$$\mathbf{b}egin{CD}
0@>>>\ensuremath{\mathcal O}_X(K_X)@>>>E^{\mathbf{v}ee\mathbf{v}ee}@>>>\ensuremath{\mathcal O}_X@>>>0\\
&&@VVV@VVV@VVV\\
0@>>>\ensuremath{\mathcal O}_{z_1}@>>>\ensuremath{\mathcal O}_Z@>>>\ensuremath{\mathcal O}_{z_2}@>>>0
\end{CD}
$$ for $z_2\in Z$. It follows that $E$ sits in a short exact sequence as in \eqref{eqn:strictly semistable s.e.s}.
\end{proof}
We have seen that any $E$ fitting into \eqref{eqn:nonlocally free extension} fits into \eqref{eqn:alternative defining short exact sequence}. We show that the converse holds as well if $E$ is stable.
\mathbf{b}egin{Lem}\label{Lem:alternative sequence is equivalent for stable E}
Let $E$ be a stable sheaf fitting into a short exact sequence as in \eqref{eqn:alternative defining short exact sequence}, $$0\to E\to \ensuremath{\mathcal O}_X\mathop{\ord(\omega_S)}\nolimitsplus \ensuremath{\mathcal O}_X(K_X)\to \ensuremath{\mathcal O}_Z\to 0$$ for $Z\in\ensuremath{\mathbb{H}}ilb^2(X)$. Then $E$ fits into a short exact sequence as in \eqref{eqn:nonlocally free extension}.
\end{Lem}
\mathbf{b}egin{proof}
Indeed, if $E$ is a stable sheaf fitting into \eqref{eqn:alternative defining short exact sequence}, then the stability of $E$ and $\ensuremath{\mathcal O}_X(K_X)$ forces the composition $E\ensuremath{\hookrightarrow} \ensuremath{\mathcal O}_X\mathop{\ord(\omega_S)}\nolimitsplus\ensuremath{\mathcal O}_X(K_X)\mor[p_2]\ensuremath{\mathcal O}_X(K_X)$ is surjective, where $p_2$ is the projection onto the second factor. Then similar arguments as above show that the kernel of this composition is $I_Z$, giving the lemma.
\end{proof}
Now we are able to define the second irreducible component $M_1$. Since $\hom(\ensuremath{\mathcal O}_X(K_X),I_Z)=0$ and $\mathop{\mathrm{ext}}\nolimits^2(\ensuremath{\mathcal O}_X(K_X),I_Z)=\hom(I_Z,\ensuremath{\mathcal O}_X)=1$, we see that $$\mathop{\mathrm{ext}}\nolimits^1(\ensuremath{\mathcal O}_X(K_X),I_Z)=\langle(1,0,\tfrac{1}{2}),(1,0,-\tfrac{3}{2})\rangle+1=2,$$ so we have a family
of semistable non-locally free sheaves
\mathbf{b}egin{equation}
0 \to I_{\ensuremath{\mathbb{Z}}Z} \mathop{\ord(\omega_S)}\nolimitstimes \ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}}(1) \to \ensuremath{\mathcal E} \to \ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}} \times X}(K_X) \to 0
\end{equation}
on the projective bundle
$$
q:\ensuremath{\mathbb{P}}:=\ensuremath{\mathbb{P}}(\mathop{\mathrm{Ext}}\nolimits^1_p(\ensuremath{\mathcal O}_{\ensuremath{\mathbb{H}}ilb^2(X) \times X}(K_X),I_{\ensuremath{\mathbb{Z}}Z})) \to \ensuremath{\mathbb{H}}ilb^2(X),
$$
where $\ensuremath{\mathbb{Z}}Z$ is the universal family on $\ensuremath{\mathbb{H}}ilb^2(X) \times X$
and $p:\ensuremath{\mathbb{H}}ilb^2(X) \times X \to \ensuremath{\mathbb{H}}ilb^2(X)$ is the projection.
By \cref{Lem:nonlocally free extension is semistable}, $\ensuremath{\mathcal E}$ gives rise to a morphism
\mathbf{b}egin{equation}
\mathbf{b}egin{matrix}
g:& \ensuremath{\mathbb{P}} & \to & M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)\\
& z & \mapsto & \ensuremath{\mathcal E}_z. \\
\end{matrix}
\end{equation}
We set $M_1:=g(\ensuremath{\mathbb{P}})$, and prove in the next result that $M_1$ is an irreducible component of $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$.
\mathbf{b}egin{Lem}\label{Lem:M_1}
$M_1$ is an irreducible component of $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$ and satisfies $M_1=\psi^{-1}(M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v}',K_X) \times S^2 X)$.
\end{Lem}
\mathbf{b}egin{proof}
We begin by showing that $g$ is injective
on the stable locus.
Indeed, for a stable sheaf $E$ in the image of $g$, $Z$ is determined uniquely by $E^{\mathbf{v}ee\mathbf{v}ee}/E$ so that the exact sequence \eqref{eqn:nonlocally free extension}
is uniquely determined by $E$.
Now we prove that the set
$$
M_\mathop{\ord(\omega_S)}\nolimitsmega^{pss}(\mathbf{v},K_X)=\Set{I_{z_1} \mathop{\ord(\omega_S)}\nolimitsplus I_{z_2}(K_X) \ |\ z_1,z_2 \in X}\subset M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)
$$
of properly semistable polystable
sheaves is contained in $M_1$. First assume that $Z=\{z_1,z_2 \}$ $(z_1 \ne z_2)$.
If $E$ is properly semistable, then by \cref{Lem:criterion for extension to be stable}, the associated morphism $\ensuremath{\mathcal O}_X(K_X) \to \ensuremath{\mathcal O}_Z$ is not surjective, say at $z_1$, so $E$ fits into an exact sequence
$$
0 \to I_{z_2}(K_X) \to E \to I_{z_1} \to 0,
$$
giving the point $I_{z_2}(K_X) \mathop{\ord(\omega_S)}\nolimitsplus I_{z_1}$ of $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$.
Note, moreover, that it follows from $\mathop{\mathrm{ext}}\nolimits^1(I_{z_1},I_{z_2}(K_X))=1$ that $g$ is injective if $z_1 \ne z_2$.
If $Z=\{z \}$ with a non-reduced structure
and $E$ is properly semistable, then again by \cref{Lem:criterion for extension to be stable}, the induced morphism $\ensuremath{\mathcal O}_X(K_X) \to \ensuremath{\mathcal O}_Z$ is not surjective and $E$ fits into an exact sequence $$0\to I_z(K_X)\to E\to I_z\to 0,$$ giving the point $I_z \mathop{\ord(\omega_S)}\nolimitsplus I_z(K_X)$. Thus $M_\mathop{\ord(\omega_S)}\nolimitsmega^{pss}(\mathbf{v},K_X)\subset M_1$ as claimed. Note further that the point $I_z(K_X)\mathop{\ord(\omega_S)}\nolimitsplus I_z$ of $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$ is independent of the choice of a scheme structure on $Z$, so we also see that $g^{-1}(I_z\mathop{\ord(\omega_S)}\nolimitsplus I_z(K_X))$ has positive dimension.
As $g$ is injective away from $q^{-1}(\ensuremath{\mathbb{D}}elta_X)$, where $\ensuremath{\mathbb{D}}elta_X\subset\ensuremath{\mathbb{H}}ilb^2(X)$ is the locus of non-reduced subschemes, it follows that $\mathop{\mathrm{dim}}\nolimits(M_1)=5$. As $M_1$ is proper, irreducible, and of maximal dimension, it is an irreducible component of $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$.
Since $M_\mathop{\ord(\omega_S)}\nolimitsmega^{pss}(\mathbf{v},K_X)\subset M_1\subset\psi^{-1}(M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v}',K_X)\times S^2 X)$, to prove the second claim, we must show that $E \in M_\mathop{\ord(\omega_S)}\nolimitsmega^s(\mathbf{v},K_X)$ satisfying
$\psi(E) \in M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v}',K_X) \times S^2 X$ is in $M_1$. But such an $E$ is a stable sheaf fitting into an exact sequence as in \eqref{eqn:alternative defining short exact sequence}. It follows from \cref{Lem:alternative sequence is equivalent for stable E} that $E$ fits into the exact sequence
\eqref{eqn:nonlocally free extension}.
Therefore $E\in M_1$, as claimed.
\end{proof}
We will show that $M_0 \cup M_1$ is a connected component of
$M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$, but the first step is to show that $M_1$ does not meet any other components. In the next lemma, we show that away from the locus of properly semistable sheaves S-equivalent to $I_z\mathop{\ord(\omega_S)}\nolimitsplus I_z(K_X)$, $M_1$ is normal and thus meets no other components.
\mathbf{b}egin{Lem}\label{Lem:normal}
The local deformation space is smooth at $E$ if
$E$ is S-equivalent to $I_{z_1} \mathop{\ord(\omega_S)}\nolimitsplus I_{z_2} (K_X)$ ($z_1 \ne z_2$).
Hence $I_{z_1} \mathop{\ord(\omega_S)}\nolimitsplus I_{z_2} (K_X)$ ($z_1 \ne z_2$)
is contained in the normal open subscheme
$$
M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)^*:=\Set{ E \in M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X) \ |\ \mathop{\mathrm{Ext}}\nolimits^2(E,E)=0}
$$
of $M_\mathop{\ord(\omega_S)}\nolimitsmega(\mathbf{v},K_X)$.
\end{Lem}
\begin{proof}
We begin by showing that $M_\omega(\mathbf{v},K_X)^*$ is normal. Let $Q$ be an open subscheme of a suitable quot-scheme
$\mathrm{Quot}_{\mathcal{O}_X(-mH)^{\oplus N}/X}$, which parameterizes quotients
$\mathcal{O}_X(-mH)^{\oplus N} \to E$,
such that $M_\omega(\mathbf{v},K_X)$ is a GIT-quotient of
$Q$ by $PGL(N)$.
Let $Q^*$ be the open subscheme of $Q$
such that $\mathop{\mathrm{Ext}}\nolimits^2(E,E)=0$. Then $Q^*$ is smooth.
Hence the open subscheme $M_\omega(\mathbf{v},K_X)^*=Q^*/PGL(N)$ of
$M_\omega(\mathbf{v},K_X)$ is a normal scheme.
Now suppose that $E$ is S-equivalent to $I_{z_1}\oplus I_{z_2}(K_X)$ with $z_1 \ne z_2$. Then
$$
\mathop{\mathrm{Ext}}\nolimits^2(I_{z_1},I_{z_1})=\mathop{\mathrm{Ext}}\nolimits^2(I_{z_1}, I_{z_2} (K_X))=
\mathop{\mathrm{Ext}}\nolimits^2(I_{z_2}(K_X),I_{z_1})=\mathop{\mathrm{Ext}}\nolimits^2(I_{z_2}(K_X),I_{z_2}(K_X))=0.
$$
Hence $\mathop{\mathrm{Ext}}\nolimits^2(E,E)=0$ so that $E\in Q^*$. It follows that the polystable representative, $I_{z_1}\oplus I_{z_2}(K_X)$, is in $M_\omega(\mathbf{v},K_X)^*$, as claimed.
\end{proof}
We can actually prove that $M_\omega(\mathbf{v},K_X)^*$ is smooth:
\begin{Lem}\label{Lem:M^*}
$\mathbb{P} \times_{M_\omega(\mathbf{v},K_X)} M_\omega(\mathbf{v},K_X)^*
\to M_\omega(\mathbf{v},K_X)^*$ is a closed immersion whose
image is a connected component of $M_\omega(\mathbf{v},K_X)^*$.
In particular $M_\omega(\mathbf{v},K_X)^*$ is smooth.
\end{Lem}
\begin{proof}
We first show that the image of $g':\mathbb{P} \times_{M_\omega(\mathbf{v},K_X)} M_\omega(\mathbf{v},K_X)^* \to M_\omega(\mathbf{v},K_X)^*$ is a connected component of $M_\omega(\mathbf{v},K_X)^*$. First observe that $g'$ is proper since $g$ is a proper morphism. We further note that the image of $g'$ is $M_1^*:=M_1 \cap M_\omega(\mathbf{v},K_X)^*$,
which is therefore a connected component of $M_\omega(\mathbf{v},K_X)^*$ by Lemma \ref{Lem:normal}.
Now let us show that $g'$ is a closed immersion. If $E\in M^{pss}_\omega(\mathbf{v},K_X)$ is $S$-equivalent to $I_{z} \oplus I_z(K_X)$, then
we see that $\mathop{\mathrm{Ext}}\nolimits^2(E,E) \ne 0$. Hence $E$ is not contained in
$M_\omega(\mathbf{v},K_X)^*$.
Therefore $g'$ is injective, which implies that
$g'$ is a finite birational map between normal schemes.
Therefore, $g'$ is an isomorphism onto its image, proving the claim.
As $M_\omega^{pss}(\mathbf{v},K_X)\subset M_1$, the open subscheme $M_\omega(\mathbf{v},K_X)^*\backslash M_1^*$ is smooth by \cite{Kim98} and the definition of $M_\omega(\mathbf{v},K_X)^*$. Since $M_1^*$ is isomorphic to an open subscheme of a $\mathbb{P}^1$-bundle over $\mathrm{Hilb}^2(X)$, it is smooth, which gives the final statement of the lemma.
\end{proof}
Finally, we put everything together to show that $M_0\cup M_1$ is a connected component of $M_\omega(\mathbf{v},K_X)$.
\begin{Prop}\label{prop:connected}
\begin{enumerate}
\item
The singular locus of $M_\omega(\mathbf{v},K_X)$ consists of a 4-dimensional subscheme
$$
S_1:=\{ \varpi_*(I_W) \mid I_W \in \mathrm{Hilb}^2(\widetilde{X}) \}
$$
and a finite set of points
$$
S_2:=\{\varpi_*({\mathcal O}_{\widetilde{X}}(D)) \mid \iota(D)=-D, (D^2)=-4\}.
$$
\item
$M_0$ and $M_1$
intersect along $S_1$.
\item
$M_0 \cup M_1$ is a connected component of $M_\omega(\mathbf{v},K_X)$.
\end{enumerate}
\end{Prop}
\begin{proof}
(1) follows from Lemma \ref{Lem:ext^2} and Lemma \ref{Lem:M^*}.
By Lemma \ref{Lem:M_0} and Lemma \ref{Lem:M_1},
$M_0$ intersects $M_1$ along the 4-dimensional singular locus.
By the description of the singular locus,
$M_0 \cup M_1$ is a connected component of
$M_\omega(\mathbf{v},K_X)$.
\end{proof}
\begin{Rem}
The restriction of $\psi$ to $S_1=M_0 \cap M_1$ is a double cover onto $M_\omega(\mathbf{v}',K_X) \times S^2 X$. Indeed, for $\varpi_*(I_{w_1,w_2})$ $(w_1 \ne w_2,\iota(w_2))$, we push forward the short exact sequence on $\widetilde{X}$
$$0\to I_{w_1,w_2}\to\mathcal{O}_{\widetilde{X}}\to\mathcal{O}_{w_1,w_2}\to 0$$ to obtain the short exact sequence
$$0\to\varpi_*(I_{w_1,w_2})\to\mathcal{O}_X\oplus\mathcal{O}_X(K_X)\to\mathcal{O}_{\varpi(w_1),\varpi(w_2)}\to 0.$$
From this it is clear that $\psi^{-1}(\psi(\varpi_*(I_{w_1,w_2})))=\{\varpi_*(I_{w_1,w_2}),\varpi_*(I_{w_1,\iota(w_2)})\}$.
\end{Rem}
\subsubsection{A remark on $M_\omega(\mathbf{v},0)$ $(\mathbf{v}=(2,0,-1))$}
We discuss here the structure of the other component of $M_\omega(\mathbf{v})$ parametrizing sheaves with determinant $\mathcal{O}_X$ vis-\`a-vis the Uhlenbeck contraction.
\begin{Lem}
If $E \in M_\omega(\mathbf{v},0)$ is not a $\mu$-stable locally free sheaf, then
$E$ is S-equivalent to $I_{z_1} \oplus I_{z_2}$ or
$I_{z_1}(K_X) \oplus I_{z_2}(K_X)$.
\end{Lem}
\begin{proof}
If $E$ is not locally free, then we see that
$\chi(E^{\vee \vee})>0$.
Hence $H^0(E^{\vee \vee}) \ne 0$ or
$H^0(E^{\vee}(K_X)) \ne 0$. It follows from considerations of $\mu$-stability that $E^{\vee\vee}$ contains $\mathcal{O}_X(D)$, where $D=0$ or $K_X$, respectively, as a saturated subsheaf. Intersecting with $E$ gives an exact sequence
$$
0 \to I_Z(D) \to E \to I_W(D) \to 0
$$
with $D=0,K_X$.
By the semi-stability of $E$, $\mathop{\mathrm{deg}}(Z)\geq 1$, $\mathop{\mathrm{deg}}(W)\leq 1$, and $\mathop{\mathrm{deg}}(Z)+\mathop{\mathrm{deg}}(W)=2$. If $\mathop{\mathrm{deg}}(Z)=\mathop{\mathrm{deg}}(W)=1$, then $E$ is S-equivalent to $I_Z(D)\oplus I_W(D)$, as claimed. If $\mathop{\mathrm{deg}} W=0$, then
$E^{\vee \vee}=\mathcal{O}_X(D)^{\oplus 2}$ with $E^{\vee\vee}/E\cong\mathcal{O}_Z$ where now $Z\in\mathrm{Hilb}^2(X)$. Choosing a torsion free sheaf $F$ with
$E \subset F \subset E^{\vee \vee}$ and $F/E=\mathcal{O}_{z_1}$, $F$ is $\mu$-semistable and $\chi(F)=1$, so the same argument as for $E^{\vee\vee}$ shows that
$\mathrm{Hom}(\mathcal{O}_X(D),F) \ne 0$. Slope-stability considerations force $\mathcal{O}_X(D)$ to be a saturated subsheaf, and intersecting with $E$ gives another short exact sequence $$0\to I_{Z'}(D)\to E\to I_{W'}(D)\to 0$$ with $\mathop{\mathrm{deg}}(Z')\geq 1$, $\mathop{\mathrm{deg}}(W')\leq 1$, and $\mathop{\mathrm{deg}}(Z')+\mathop{\mathrm{deg}}(W')=2$. This time, however, we see that $$0\neq\mathcal{O}_X(D)/I_{Z'}(D)=(\mathcal{O}_X(D)+E)/E\subset F/E=\mathcal{O}_{z_1},$$ so $Z'=z_1$ and $W'=z_2$ for points $z_1,z_2\in X$.
Therefore $E$ is S-equivalent to $I_{z_1}(D)\oplus I_{z_2}(D)$ in this case as well.
\end{proof}
Thus the complement of the open subset of $\mu$-stable locally free sheaves
in $M_\omega(\mathbf{v},0)$ is parameterized by
two copies of $S^2 X$, and this locus is not contracted by the
morphism to the Uhlenbeck compactification.
\subsection{Irreducible components of $M_\sigma(\mathbf{v})$
with $\mathbf{v}^2=2$}
In this subsection, we shall prove the following claim.
\begin{Prop}\label{prop:irred-comp:v^2=2}
Let $\mathbf{v}:=(r,\xi,a)$ be a Mukai vector such that $\mathbf{v}^2=2$.
Assume that $L \equiv D+\frac{r}{2}K_X \pmod 2$, where $D$ is a nodal cycle.
Then $M_\omega(\mathbf{v},L)$ has two irreducible components.
\end{Prop}
As in the previous subsection, we begin by reducing to studying a certain moduli space of Gieseker semistable sheaves of rank two. To do so, we make use of a motivic invariant called the virtual Hodge polynomial. Recall that for a variety $Y$, the virtual Hodge polynomial $e(Y)$ is defined by $e(Y)=\sum_{p,q}e^{p,q}(Y)x^py^q$, where $e^{p,q}(Y):=\sum_k (-1)^kh^{p,q}(H^k_c(Y))$ are the virtual Hodge numbers of the natural mixed Hodge structure on the cohomology of $Y$ with compact support (see \cite{Yos14} and the references therein for a more complete explanation).
\begin{Lem}\label{Lem:e-poly}
Let $\mathbf{v}:=(r,\xi,a)$ be a Mukai vector such that $\mathbf{v}^2$ is even.
\begin{enumerate}
\item
There is $\mathbf{v}'=(2,L',a')$ such that
$e(M_\omega(\mathbf{v},L))=e(M_\omega(\mathbf{v}',L'))$,
where $L'+K_X \equiv L+\frac{r}{2}K_X \pmod 2$.
\item
For a smooth rational curve $C$,
we have an equality
\begin{equation}\label{eq:(-2)}
e(M_\omega(\mathbf{v},L))= e(M_{\omega'}(\mathbf{v}',L')),
\end{equation}
where $\sigma'=\Phi(\sigma)$ for the spherical twist $\Phi=R_{\mathcal{O}_C(-1)}$,
$\mathbf{v}'=\Phi(\mathbf{v})=(r,\xi+(C,\xi)C,a)$ and $L'=L+(L,C)C$.
\end{enumerate}
\end{Lem}
\begin{proof}
(1)
Since the $(-1)$-reflection $R_{\mathcal{O}_X}$ associated to
$\mathcal{O}_X$ preserves $\mathop{\mathrm{Stab}}\nolimits^\dagger(X)$,
we have an isomorphism
\begin{equation}
M_\sigma(\mathbf{v},L)\cong M_{\sigma'}(\mathbf{v}',L'),
\end{equation}
where $\sigma'=R_{\mathcal{O}_X}(\sigma)$,
$\mathbf{v}'=R_{\mathcal{O}_X}(\mathbf{v})=(-2a,\xi,-r/2)$
and $L'=L+\tfrac{r+2a}{2}K_X$.
By \cite{Nue14a},
we have
\begin{equation}
e(M_\omega(\mathbf{v},L))= e(M_{\omega'}(\mathbf{v}',L')).
\end{equation}
By similar arguments as in \cite{Nue14a} or \cite{Yos16a},
we get (1).
(2)
Let $\Phi:\mathrm{D}^{b}(X) \to \mathrm{D}^{b}(X)$ be the twist functor
associated to the spherical object $\mathcal{O}_C(-1)$.
The action of $\Phi$ preserves
$\mathop{\mathrm{Stab}}\nolimits^\dagger(X)$, so
we have an isomorphism
\begin{equation}
M_\sigma(\mathbf{v},L) \cong M_{\sigma'}(\mathbf{v}',L')
\end{equation}
where $\sigma'=\Phi(\sigma)$,
$\mathbf{v}'=(r,\xi+(C,\xi)C,a)$ and $L'=L+(L,C)C$.
By using \cite{Nue14a} again,
we get \eqref{eq:(-2)}.
\end{proof}
In the specific case that we are interested in, we can in fact get more explicit:
\begin{Lem}\label{Lem:rank2}
Let $\mathbf{v}=(r,\xi,a)$ be a Mukai vector with
$\mathbf{v}^2=2$ and $L$ a divisor such that $L \equiv D+\frac{r}{2}K_X \pmod 2$,
where $D$
is a nodal cycle.
Then there is a Mukai vector $\mathbf{v}'=(2,\xi',0)$
and an elliptic fibration $\pi:X \to \mathbb{P}^1$
such that
\begin{enumerate}
\item
$e(M_\omega(\mathbf{v},L))=e(M_{\omega'}(\mathbf{v}',L'))$,
\item
$(\xi',C)=2$ for a general fiber $C$ of $\pi$
and $L' \equiv D' +K_X \pmod 2$,
where $D'$ is a nodal cycle.
\end{enumerate}
\end{Lem}
\begin{proof}
For a primitive Mukai vector $\mathbf{v}$ such that $\mathbf{v}^2=2$,
we take $\mathbf{v}'=(2,\xi',a')$ and $L'$ satisfying
Lemma \ref{Lem:e-poly}.
As $\ell(\mathbf{v})=1$, we can find an isotropic divisor
$\eta$ such that
$(\xi'+2\lambda,\eta)=1$, where $\lambda \in \mathrm{NS}(X)$.
Replacing $\mathbf{v}'$ by $\mathbf{v}' e^{k \eta+\lambda}$, for an appropriate choice of $k$, we may assume that
$({\xi'}^2)=2$ and $a'=0$.
Then $\xi'$ is effective or $-\xi'$ is effective.
Since $\mathbf{v}' e^{-\xi'}=(2,-\xi',a')$, we may assume that $\xi'$ is effective.
Since $L \equiv D+\frac{r}{2}K_X \pmod 2$ for a nodal cycle $D$,
by using Lemma \ref{Lem:nodal} and \eqref{eq:(-2)},
we may assume that $\xi'$ is nef and
$L' \equiv D'+K_X \pmod 2$ for a nodal cycle $D'$.
As $(\xi'^2)=2$, $\xi'$ is primitive, so we may find an isotropic divisor $\eta$ with $(\xi',\eta)=1$. The Riemann-Roch theorem then implies that
$\eta$ is effective.
Let $\eta=f+\sum_i C_i$ be a decomposition of $\eta$ such that
$f$ is a nef and isotropic divisor, and $C_i$ are smooth rational curves.
Then $(\xi',f), (\xi',C_i) \geq 0$ imply that
$(f,\xi')=1$ by using the Hodge index theorem.
The linear system
$|2f|$ induces an elliptic fibration $\pi:X \to \mathbb{P}^1$
satisfying our requirements.
\end{proof}
\begin{Lem}\label{Lem:nodal}
For nodal cycles $D$ and $D'$,
$D+(D,D')D' \equiv D'' \pmod 2$, where $D''$ is another nodal cycle.
\end{Lem}
\begin{proof}
For nodal cycles $D$ and $D'$ and an ample divisor $H$,
let $E$ be an $H$-stable vector bundle with $\mathbf{v}=\mathbf{v}(E)=(2,D+K_X,0)$
and
$E'$ an $H$-stable vector bundle with $\mathbf{v}'=\mathbf{v}(E')=(2,D'+K_X,0)$.
Then for $E(nH)$ $(n \gg 0)$,
$$
F:=\ker (\mathrm{Hom}(E',E(nH)) \otimes E' \to E(nH))
$$
is a stable vector bundle
with $\mathbf{v}(F)=-e^{nH}(2,D+K_X,0)-\langle e^{nH}\mathbf{v},\mathbf{v}' \rangle \mathbf{v}'$ (\cite[Thm. 1.7]{Yos09}). Thus $\mathbf{v}(F)^2=-2$. Since
\begin{equation}
\langle e^{nH}\mathbf{v},\mathbf{v}' \rangle=(D,D')-2n^2(H^2)+2n(H,D'-D),
\end{equation}
we get
\begin{equation}
\begin{split}
\frac{\mathop{\mathrm{rk}} F}{2} \equiv & 1+(D,D') \pmod 2,\\
c_1(F) \equiv & D+K_X+(D,D')(D'+K_X) \pmod 2\\
\equiv &
D+(D,D')D'+\frac{\mathop{\mathrm{rk}} F}{2}K_X \pmod 2.
\end{split}
\end{equation}
As $F$ is thus a stable spherical bundle, it follows that there is a nodal cycle $D''$ such that $$D''+\frac{\mathop{\mathrm{rk}} F}{2}K_X\equiv c_1(F)\equiv D+(D,D')D'+\frac{\mathop{\mathrm{rk}} F}{2}K_X \pmod 2,$$ and the result follows.
\end{proof}
By Lemma \ref{Lem:rank2}, it is sufficient to
describe the irreducible components of $M_\omega(\mathbf{v},L)$
for $\mathbf{v}=(2,\xi,0)$ with $(\xi^2)=2$ and $L$ such that
$L \equiv D +K_X \pmod 2$ and $(L,C)=2$
for a general fiber $C$ of an elliptic fibration
$\pi:X \to \mathbb{P}^1$.
For this purpose, we first describe a 2-dimensional component
of the singular locus of $M_\omega(\mathbf{v},L)$.
\begin{Lem}\label{ext^2-2}
For a Mukai vector $\mathbf{v}=(2,\xi,0)$ with $(\xi^2)=2$ and a divisor $L$
with $L \equiv D+K_X \pmod 2$,
the singular locus of $M_\omega(\mathbf{v},L)$ is
\begin{equation}
\Set{ \varpi_*(I_W(\widetilde{L}))
\ |\ I_W \in \mathrm{Hilb}^n(\widetilde{X}),\;
\iota^*(\widetilde{L})=\varpi^*(L)-\widetilde{L},\;
n=(\widetilde{L}^2)/2+1},
\end{equation}
where $(\widetilde{L}^2)=0,-2$.
In particular, the 2-dimensional component of the singular locus is
irreducible.
\end{Lem}
\begin{proof}
Since $\omega$ is generic, $M_\omega(\mathbf{v},L)=M_\omega^s(\mathbf{v},L)$ is of expected dimension, so the singular locus is
$$
\Set{ E \in M_\omega(\mathbf{v},L) \ |\ \mathop{\mathrm{Ext}}\nolimits^2(E,E) \ne 0}=\Set{E\in M_\omega(\mathbf{v},L)\ | \ E\cong E(K_X)}.
$$
If $E(K_X) \cong E$, then
we see that $E \cong \varpi_*(I_W(\widetilde{L}))$,
where $\iota^*(I_W(\widetilde{L}))\ncong I_W(\widetilde{L})$ and $W=\varnothing$ or $W=\{w \}$.
If $W=\{w \}$, then
$E^{\vee \vee}=\varpi_*(\mathcal{O}_{\widetilde{X}}(\widetilde{L}))$
is a spherical vector bundle with $\mathbf{v}(E^{\vee \vee})=(2,\xi,1)$.
Therefore the claim holds.
\end{proof}
\begin{Rem}
For $x \in X$, we set $\varpi^{-1}(x)=\{z,\iota(z)\}$.
Then $\varpi_*(I_z(\widetilde{L}))$ and
$\varpi_*(I_{\iota(z)}(\widetilde{L}))$
are not locally free at $x$.
Hence the 2-dimensional component of the singular locus
is a double covering of $X$.
\end{Rem}
The existence of a two dimensional component of the singular locus has the following important consequence for the reducibility of $M_\omega(\mathbf{v},L)$.
\begin{Lem}\label{Lem:irred-comp-2}
There are at most two irreducible components of
$M_\omega(\mathbf{v},L)$.
\end{Lem}
\begin{proof}
If there are two irreducible components of $M_\omega(\mathbf{v},L)$, then
the connectedness of $M_\omega(\mathbf{v},L)$ implies they intersect along
the 2-dimensional component of the singular locus.
By \cite{Yamada},
the analytic germ of $M_\omega(\mathbf{v},L)$ at a singular point $E$ is described as a hypersurface
$F(x_1,x_2,x_3)=0$ in $(\mathbb{C}^3,0)$ with a non-trivial quadratic term.
If $M_\omega(\mathbf{v},L)$ is reducible, then each irreducible component is defined
by a factor of $F(x_1,x_2,x_3)$.
Therefore $M_\omega(\mathbf{v},L)$ has at most two irreducible components.
\end{proof}
Let $f$ be the reduced part of a multiple fiber of $\pi$ and
$E_0 \in M_\omega(\mathbf{v}',L-2f)$ be a spherical vector bundle,
where $\mathbf{v}'=(2,\xi-2f,0)$.
We shall prove Proposition \ref{prop:irred-comp:v^2=2}
by constructing two irreducible components $M_0$ and $M_1$
of $M_\omega(\mathbf{v},L)$. We begin by constructing a component $M_0$ containing locally free sheaves using elementary transformations as in \cite[Section 5.2]{HL10}. To this end we must study how $E_0$ restricts to a general fiber of $\pi$.
\begin{Lem}\label{Lem:splitting type}
Let $C$ be a general fiber of the elliptic fibration.
Then ${E_0}|_C \cong \mathcal{O}_C(p) \oplus \mathcal{O}_C(q)$ $(p \ne q)$.
\end{Lem}
\begin{proof}
For the primitive and isotropic Mukai vector $\mathbf{u}:=(0,C,1)$, $\ell(\mathbf{u})=2$ so that $M_\omega(\mathbf{u},C)$ is a fine moduli space
which is isomorphic to $X$, where $\omega$ is general.
As in \cite{Bri98}, there is an elliptic fibration $M_\omega(\mathbf{u},C) \to \mathbb{P}^1$ that comes from regarding $M_\omega(\mathbf{u},C)$ as a smooth compactification of the relative Picard scheme $\mathrm{Pic}^1(X/\mathbb{P}^1) \to \mathbb{P}^1$ of degree 1.
We shall identify $M_\omega(\mathbf{u},C) \to \mathbb{P}^1$ with
the elliptic fibration $X \to \mathbb{P}^1$.
For a universal family $\mathcal{E}$ of $M_\omega(\mathbf{u},C)$, we have a relative Fourier-Mukai transform
$\Phi_{X \to X}^{\mathcal{E}^{\vee}}: \mathrm{D}^{b}(X) \to \mathrm{D}^{b}(X)$.
Then $F:=\Phi_{X \to X}^{\mathcal{E}^{\vee}}(E_0)[1]$ is a purely 1-dimensional sheaf
whose support is a double cover of $\mathbb{P}^1$.
Thus for a general fiber $C$ with $F|_C=\mathbb{C}_p \oplus \mathbb{C}_q$, one can check that ${E_0}|_C \cong \mathcal{O}_C(p) \oplus \mathcal{O}_C(q)$.
\end{proof}
Let $L$ be a line bundle on a smooth fiber $C \in |2f|$ with $\mathbf{v}(L)=(0,2f,0)$.
We set $L^*:=\mathcal{E}xt^1_{\mathcal{O}_X}(L,\mathcal{O}_X)$.
Then $L^*$ is a line bundle of degree 0 on $C$ so that $\mathbf{v}(L^*)=(0,2f,0)$. Observe that $$\langle\mathbf{v}(E_0^\vee),\mathbf{v}(L^*)\rangle=\langle(2,2f-\xi,0),(0,2f,0)\rangle=-2<0,$$ so there is necessarily a non-zero homomorphism $\psi:E_0^\vee\to L^*$.
Moreover we may assume that
$\psi$ is surjective by Lemma \ref{Lem:splitting type}.
We will see in the next two results that these give rise to a component of $M_\omega(\mathbf{v},L)$ containing $\mu$-stable locally free sheaves. As $E_0$ is $\mu$-stable with respect to any polarization \cite{Kim94,Qin91}, for any given starting polarization $H_0$, $\{E_0\}= M_{H_0}(\mathbf{v}',L-2f)$. We will have cause to vary the polarization in the next lemma, but it is important to state that the stability of $E_0$ remains unchanged.
\begin{Lem}\label{Lem:M_1 v^2=2}
For a non-zero homomorphism
$\psi:E_0^{\vee} \to L^*$,
we set $E:=\mathbf{R}\mathcal{H}om_{\mathcal{O}_X}(\mathrm{Cone}(\psi),\mathcal{O}_X)[1]$.
Then $E$ is a $\mu$-stable torsion free sheaf with respect to
$H_0+nf$, $n \gg 0$.
If $\psi$ is surjective, then $E$ is a locally free sheaf.
\end{Lem}
\begin{proof}
We first show that $E$ is torsion free, and locally free if $\psi$ is surjective. Dualizing the exact triangle $$E_0^\vee\xrightarrow{\psi}L^*\to\mathrm{Cone}(\psi)\to E_0^\vee[1],$$ shifting by 1, and taking cohomology sheaves, we immediately see that $E$ is a coherent sheaf fitting into an exact sequence
\begin{equation}
0 \to E_0 \to E \to L \to 0.
\end{equation}
Moreover, as $\mathbf{R}\mathcal{H}om(E,\mathcal{O}_X)=\mathrm{Cone}(\psi)[-1]$, we get that $\mathop{\mathcal Ext}\nolimits^1(E,\mathcal{O}_X)=\mathcal{H}^{0}(\mathrm{Cone}(\psi))=\mathrm{coker}(\psi)$, which is supported in codimension two, and $\mathop{\mathcal Ext}\nolimits^2(E,\mathcal{O}_X)=0$, so
$E$ is torsion free by \cite[Proposition 1.1.10]{HL10}. Moreover, if $\psi$ is surjective, so that $\mathrm{coker}(\psi)=0$, then $E$ is reflexive (by \cite[\textit{ibid.}]{HL10}) and thus locally free.
Now we show that $E$ is $\mu$-stable. Let $F$ be a subsheaf of $E$ with $\mathop{\mathrm{rk}} F=1$.
Then $E_0 \cap F$ is a rank 1 subsheaf of $E_0$.
Since $E_0$ is $\mu$-stable for any ample divisor,
$$
2(c_1(E_0 \cap F),H_0+nf)<(c_1(E_0),H_0+nf)\;\;(n \geq 0).
$$
Hence $2(c_1(E_0 \cap F),f) \leq (c_1(E_0),f)=1$ and
$2(c_1(E_0 \cap F),H_0)<(c_1(E_0),H_0)$.
In particular
$(c_1(E_0 \cap F),f) \leq 0$.
As $c_1(F)=c_1(E_0\cap F)+2f$, $(c_1(F),f) \leq 0$.
If $n>(4f,H_0)$, then $2(c_1(F),H_0+nf)<(c_1(E),H_0+nf)$.
Therefore $E$ is $\mu$-stable with respect to $H_0+nf$ for $n\gg 0$, as claimed.
\end{proof}
Considering the irreducible component containing the sheaves $E$ constructed in \cref{Lem:M_1 v^2=2}, we get the following result.
\begin{Cor}
For $\omega=H_0+n f$ ($n \gg 0$),
there is an irreducible component $M_0$ of $M_\omega(\mathbf{v},L)$
which contains a $\mu$-stable locally free sheaf.
\end{Cor}
The second irreducible component, $M_1$, parametrizes $E$ fitting into an exact sequence
\begin{equation}
0 \to E \to E_0(f) \to \mathbb{C}_x \to 0, \; x \in X.
\end{equation}
In particular, such $E$ are not locally free, so $M_1$ is indeed distinct from $M_0$. Moreover, as in the previous subsection, $M_1$ has the structure of a $\mathbb{P}^1$-bundle, this time over $X$ (instead of $\mathrm{Hilb}^2(X)$). Moreover, the fibers of this $\mathbb{P}^1$-bundle are contracted by the Uhlenbeck morphism.
By Lemma \ref{Lem:irred-comp-2}, $M_0$ and $M_1$ are the irreducible components
of $M_\omega(\mathbf{v},L)$, which shows
Proposition \ref{prop:irred-comp:v^2=2} by Lemma \ref{Lem:rank2}.
\bibliographystyle{plain}
\bibliography{NSF_Research_Proposal}
\end{document}
\begin{document}
\title{Godement-Jacquet L-functions and full theta lifts}
\author[Y. Fang]{Yingjue Fang}
\address{College of Mathematics and Statistics, Shenzhen University, Shenzhen, 518060, China}
\email{joyfang@szu.edu.cn}
\author[B. Sun]{Binyong Sun}
\address{Academy of Mathematics and Systems Science, Chinese Academy of
Sciences \& University of Chinese Academy of Sciences, Beijing, 100190, China} \email{sun@math.ac.cn}
\author[H. Xue]{Huajian Xue}
\address{Beijing International Center for Mathematical Research\\
Peking University\\
Beijing, 100871, China} \email{xuehuajian@126.com}
\subjclass[2000]{22E50} \keywords{Godement-Jacquet L-function, theta lift}
\begin{abstract}
We relate poles of local Godement-Jacquet L-functions to distributions on matrix spaces with singular supports. As an application, we show the irreducibility of the full theta lifts to $\mathrm{GL}_n(\mathrm{F})$ of generic irreducible representations of $\mathrm{GL}_n(\mathrm{F})$, where $\mathrm{F}$ is an arbitrary local field.
\end{abstract}
\maketitle
\section{Introduction}
Let $\mathrm{F}$ be a local field and let $\mathrm{D}$ be a central division algebra over $\mathrm{F}$ of finite dimension $d^2$ ($d\geq 1$). Fix an integer $n\geq 1$. As usual, let $\operatorname{M}_{n}(\mathrm{D})$ denote the space of $n\times n$ matrices with coefficients in $\mathrm{D}$. Put
\[
G:=\mathrm{GL}_n(\mathrm{D})\subset \operatorname{M}_{n}(\mathrm{D}).
\]
Write $\mathcal{S}$ for the space of Schwartz or Bruhat-Schwartz functions on $\operatorname{M}_{n}(\mathrm{D})$, when $\mathrm{F}$ is respectively archimedean or non-archimedean. View it
as a representation of $G\times G$ by the action
\begin{equation}\label{actgg}
((g,h).\phi)(x):=\lvert\det(g^{-1}h)\rvert_{\mathrm{F}}^{\frac{dn}{2}}\phi(g^{-1} xh),\quad g,h\in G,\, \phi\in \mathcal{S},\, x\in \operatorname{M}_{n}(\mathrm{D}).
\end{equation}
Here ``$\det$" stands for the reduced norm on $\operatorname{M}_{n}(\mathrm{D})$, and ``$\lvert\,\cdot\,\rvert_{\mathrm{F}}$" stands for the normalized absolute value on $\mathrm{F}$. Write $G_1$ for the subgroup $G\times\{1\}$ of $G\times G$, and likewise
write $G_2$ for the subgroup $\{1\}\times G$ of $G\times G$. When no confusion is possible, we will identify these two groups with $G$.
Let $\sigma$ be an irreducible admissible smooth representation of $G$. By an ``admissible smooth representation", we mean a Casselman-Wallach representation when $\mathrm{F}$ is archimedean, and
a smooth representation of finite length when $\mathrm{F}$ is non-archimedean. The reader may consult \cite{Ca}, \cite[Chapter 11]{Wa2} or \cite{BK} for details about Casselman-Wallach representations.
Define the full theta lift of $\sigma$ by
\begin{equation}\label{theta1}
\Theta_1(\sigma):= (\mathcal{S}\widehat\otimes \sigma^{\vee})_{G_1},
\end{equation}
which is a representation of $G_2$ and is also viewed as a representation of $G$ via the identification $G\cong G_2$.
Here ``$\widehat\otimes$" denotes the completed projective tensor product in the archimedean case, and the algebraic tensor product in the non-archimedean case;
a superscript ``$\,^{\vee}$" indicates the contragredient representation; $\sigma^{\vee}$ is viewed as a representation of $G_1$ via the identification $G_1\cong G$;
and a
subscript group indicates the maximal (Hausdorff in the archimedean case) quotient on which the group acts trivially.
Similar to \eqref{theta1}, view $\sigma$ as a representation of $G_2$ and define
\begin{equation}\label{theta2}
\Theta_2(\sigma):= (\mathcal{S}\widehat\otimes \sigma^{\vee})_{G_2},
\end{equation}
which is a representation of $G$. The following proposition is well known. See \cite{howe}, \cite{Ku} and \cite{MVW}, for examples.
\begin{prpt}\label{ftfl}
Both $\Theta_1(\sigma)$ and $\Theta_2(\sigma)$ are admissible smooth representations of $G$.
\end{prpt}
It is also well known that $\Theta_1(\sigma)$ has a unique irreducible quotient, which is isomorphic to $\sigma^{\vee}$, and likewise $\Theta_2(\sigma)$ has a unique irreducible quotient, which is also isomorphic to $\sigma^{\vee}$ (\emph{cf.}~\cite[Th\'eor\`eme 1]{mi}). This assertion is equivalently formulated as in the following theorem.
\begin{thm}\label{howe}
Let $\sigma, \sigma'$ be irreducible admissible smooth representations of $G$. Then
\[
\dim \mathrm{Hom}_{G\times G}(\mathcal{S}, \sigma\widehat\otimes \sigma')=\left\{
\begin{array}{ll}
1,\quad &\textrm{if $\sigma'\cong \sigma^{\vee}$;}\\
0,\quad &\textrm{otherwise.}
\end{array}
\right.
\]
\end{thm}
For applications to representation theory and automorphic forms, it is desirable to know whether or not the full theta lift itself is irreducible. This is known affirmatively for supercuspidal representations in the non-archimedean case, in a general setting of dual pair correspondences (see \cite{Ku}). However, not much is known beyond the supercuspidal case.
Write $\mathcal{S}^\circ$ for the space of Schwartz or Bruhat-Schwartz functions on $G$ when $\mathrm{F}$ is respectively archimedean or non-archimedean. By extension by zero, we view it
as a subrepresentation of $\mathcal{S}$.
The following is the key result of this note.
\begin{thm}\label{pole1}
The following assertions are equivalent.
\begin{itemize}
\item[(a).] The Godement-Jacquet L-function $\operatorname{L}(s,\sigma)$ has no pole at $s=1/2$.
\item[(b).] $\mathrm{Hom}_{G_1}(\mathcal{S}/\mathcal{S}^\circ, \sigma)= 0$.
\item[(c).] $\mathrm{Hom}_{G_2}(\mathcal{S}/\mathcal{S}^\circ, \sigma^{\vee})= 0$.
\item[(d).] $\mathrm{Hom}_{G\times G}(\mathcal{S}/\mathcal{S}^\circ, \sigma\widehat\otimes \sigma^{\vee})= 0$.
\end{itemize}
If one of the above conditions is satisfied, then both $\Theta_1(\sigma)$ and $\Theta_2(\sigma^{\vee})$ are irreducible.
\end{thm}
The following result will be proved in Section \ref{secf} by using the Fourier transform.
\begin{prpt}\label{theta12}
As representations of $G$, $\Theta_1(\sigma)$ and $\Theta_2(\sigma)$ are isomorphic to each other.
\end{prpt}
Theorem \ref{pole1} and Proposition \ref{theta12} have the following obvious consequence.
\begin{cort}\label{nopole}
Assume that $\operatorname{L}(s,\sigma)$ has no pole at $s=1/2$, or $\operatorname{L}(s,\sigma^{\vee})$ has no pole at $s=1/2$. Then as representations of $G$, $\Theta_1(\sigma)\cong \sigma^{\vee} \cong \Theta_2(\sigma)$.
\end{cort}
\begin{example}
Assume that ${\mathrm {F}}$ is non-archimedean and $G={\mathrm{GL}}_2({\mathrm {F}})$. If $\sigma$ is not the trivial representation, then $\operatorname{L}(s,\sigma)$ has no pole at $s=1/2$, or $\operatorname{L}(s,\sigma^{\vee})$ has no pole at $s=1/2$. Thus by Corollary \ref{nopole}, $\Theta_1(\sigma)$ and $\Theta_2(\sigma)$ are irreducible. On the other hand, it is shown in \cite{Xue} that $\Theta_1(\sigma)$ and $\Theta_2(\sigma)$ are reducible when $\sigma$ is the trivial representation of ${\mathrm{GL}}_2({\mathrm {F}})$.
\end{example}
We are particularly interested in generic representations of ${\mathrm{GL}}_n({\mathrm {F}})$ since they appear as local components of cuspidal automorphic representations. The following proposition asserts that the assumption in Corollary \ref{nopole} does hold for generic representations of ${\mathrm{GL}}_n({\mathrm {F}})$.
\begin{prpt}\label{generic}
Assume that ${\mathrm {D}}={\mathrm {F}}$ and $\sigma$ is generic. Then $\operatorname{L}(s,\sigma)$ has no pole at $s=1/2$, or $\operatorname{L}(s,\sigma^{\vee})$ has no pole at $s=1/2$.
\end{prpt}
By Corollary \ref{nopole} and Proposition \ref{generic}, we get the following result.
\begin{thm}
Assume that ${\mathrm {D}}={\mathrm {F}}$ and $\sigma$ is generic. Then as representations of $G$, $\Theta_1(\sigma)\cong \sigma^{\vee} \cong \Theta_2(\sigma)$.
\end{thm}
As one step towards the proof of Proposition \ref{generic}, in Section \ref{secl} we will prove the following result which is interesting in itself.
\begin{prpt}\label{pat1}
Let $\sigma_1, \sigma_2$ be irreducible admissible smooth representations of ${\mathrm{GL}}_{n_1}({\mathrm {F}})$ and ${\mathrm{GL}}_{n_2}({\mathrm {F}})$ ($n_1,n_2\geq 1$), respectively. Assume that both $\operatorname{L}(s,\sigma_1)$ and $\operatorname{L}(s,\sigma_2)$ have a pole at $s=1/2$. Then the Rankin-Selberg L-function $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$.
\end{prpt}
\begin{rremark}
By using the local Langlands correspondence for both ${\mathrm{GL}}_n({\mathrm {F}})$ and ${\mathrm{GL}}_n({\mathrm {D}})$, Proposition \ref{pat1} implies the similar result with ${\mathrm {F}}$ replaced by ${\mathrm {D}}$ (the Rankin-Selberg L-function for ${\mathrm{GL}}_{n_1}({\mathrm {D}})\times {\mathrm{GL}}_{n_2}({\mathrm {D}})$ is defined via the Jacquet--Langlands correspondence).
\end{rremark}
\section{A proof of Theorem \ref{pole1}}
We continue with the notation of the Introduction. The local Godement-Jacquet zeta integral attached to $\sigma$ is defined by
\[
\operatorname{Z}(\phi, \lambda, v;s):=\int_G \phi(g) \langle g.v, \lambda\rangle |\det(g)|_{\mathrm{F}}^{s+\frac{dn-1}{2}}\,\operatorname{d}\! g, \quad \phi\in {\mathcal {S}},\, \lambda\in \sigma^{\vee},\, v\in \sigma,\, s\in \mathbb{C},
\]
where $\operatorname{d}\!g$ is a fixed Haar measure on $G$.
It is clear that if $\phi\in {\mathcal {S}}^\circ$, then the integral is absolutely convergent and is holomorphic in the variable $s\in \mathbb{C}$.
We summarize the basic results of local Godement-Jacquet zeta integrals as in the following theorem (\emph{cf.}~ \cite[Theorems 3.3 and 8.7]{GJ}).
\begin{thm}\label{gjzeta}
When the real part of $s$ is sufficiently large, the integral $\operatorname{Z}(\phi, \lambda,v;s)$ is absolutely convergent for all $\phi$, $\lambda$ and $v$. Moreover,
there exists a (continuous in the archimedean case) map
\[
\operatorname{Z}^\circ: {\mathcal {S}}\times \sigma^{\vee}\times \sigma\times \mathbb{C}\rightarrow \mathbb{C}
\]
which is linear on the first three variables and holomorphic on the last variable such that
\begin{itemize}
\item
when the real part of $s$ is sufficiently large,
\[
\operatorname{Z}^\circ(\phi, \lambda,v ;s)=\frac{\operatorname{Z}(\phi, \lambda,v;s)}{\operatorname{L}(s,\sigma)},\quad \textrm{for all }\phi, v,\lambda; \quad\textrm{and}
\]
\item
for each $s\in \mathbb{C}$, the trilinear form $\operatorname{Z}^\circ(\cdot, \cdot, \cdot; s)$ yields a generator of the one dimensional vector space
\[
{\mathrm{Hom}}_{G\times G}({\mathcal {S}}\widehat \otimes \sigma^{\vee}\widehat \otimes \sigma, |\det|^{s-\frac{1}{2}}_{\mathrm{F}}\otimes |\det|^{\frac{1}{2}-s}_{\mathrm{F}} ).
\]
\end{itemize}
\end{thm}
Let $\operatorname{Z}^\circ$ be as in Theorem \ref{gjzeta}. Write $\operatorname{Z}^{\frac{1}{2}}$ for the generator of the one dimensional space
\[
{\mathrm{Hom}}_{G\times G}({\mathcal {S}}, \sigma \widehat \otimes \sigma^{\vee})
\]
produced by the trilinear form $\operatorname{Z}^\circ(\cdot, \cdot, \cdot; \frac{1}{2})$.
\begin{lemt}\label{gjzetap}
The Godement-Jacquet L-function $\operatorname{L}(s,\sigma)$ has a pole at $s=\frac{1}{2}$ if and only if
\[
\operatorname{Z}^\circ|_{{\mathcal {S}}^\circ \times \sigma^{\vee}\times \sigma\times \{\frac{1}{2}\}}=0,\quad \textrm{or equivalently,}\quad \operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}=0.
\]
\end{lemt}
\begin{proof}
Denote by $c_r(s-\frac{1}{2})^{-r}$ the leading term of the Laurent expansion of $\operatorname{L}(s,\sigma)$ around $s=\frac{1}{2}$. Then $r\geq 0$ as all local L-functions have no zero.
Now we have that
\[
\operatorname{Z}^\circ(\phi, v, \lambda;\frac{1}{2})=\lim_{s\rightarrow \frac{1}{2}} \left(s-\frac{1}{2}\right)^{r} \, \cdot \, c_r^{-1}\cdot \int_G \phi(g) \langle g.v, \lambda\rangle |\det(g)|_{\mathrm{F}}^{\frac{dn}{2}}\,\operatorname{d}\! g
\]
for all $\phi\in {\mathcal {S}}^\circ,\, \lambda\in \sigma^{\vee},\, v\in \sigma$. This is identically zero if and only if $r>0$. Thus the lemma follows.
\end{proof}
\begin{lemt}\label{l1}
If
\begin{equation}\label{vant}
{\mathrm{Hom}}_{G_1}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma)\neq 0 \quad\textrm{or }\quad
{\mathrm{Hom}}_{G_2}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma^{\vee})\neq 0,
\end{equation}
then
$\operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}= 0$.
\end{lemt}
\begin{proof}
First assume that ${\mathrm{Hom}}_{G_1}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma)\neq 0$. By Proposition \ref{ftfl}, we know that there is an irreducible admissible smooth representation $\sigma'$ of $G$ such that
\[
{\mathrm{Hom}}_{G\times G}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma\widehat \otimes \sigma')\neq 0.
\]
Then Theorem \ref{howe} implies that $\sigma'\cong \sigma^{\vee}$. Therefore, there is a nonzero element of ${\mathrm{Hom}}_{G\times G}({\mathcal {S}}, \sigma\widehat \otimes \sigma^{\vee})$ which vanishes on ${\mathcal {S}}^\circ$. Since
$\dim {\mathrm{Hom}}_{G\times G}({\mathcal {S}}, \sigma\widehat \otimes \sigma^{\vee})=1$, this implies that $\operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}=0$. If ${\mathrm{Hom}}_{G_2}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma^{\vee})\neq 0$, a similar proof
shows that $\operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}=0$.
\end{proof}
\begin{lemt}\label{l2}
Parts (a), (b), (c) and (d) of Theorem \ref{pole1} are equivalent to each other.
\end{lemt}
\begin{proof}
If $\operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}= 0$, then $\operatorname{Z}^{\frac{1}{2}}$ descends to a nonzero element of ${\mathrm{Hom}}_{G\times G}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma\widehat \otimes \sigma^{\vee})$. Therefore
\[
\operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}= 0\quad \Longrightarrow\quad {\mathrm{Hom}}_{G\times G}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma\widehat \otimes \sigma^{\vee})\neq 0.
\]
It is obvious that
\[
{\mathrm{Hom}}_{G\times G}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma\widehat \otimes \sigma^{\vee})\neq 0 \quad \Longrightarrow\quad
\left\{ \begin{array}{l}
{\mathrm{Hom}}_{G_1}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma)\neq 0, \ \textrm{ and }\\
{\mathrm{Hom}}_{G_2}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma^{\vee})\neq 0.
\end{array}
\right.
\]
Together with Lemma \ref{gjzetap} and Lemma \ref{l1}, this proves the lemma.
\end{proof}
\begin{lemt}\label{frob}
Let $\sigma_0$ be a smooth representation of $G$ when ${\mathrm {F}}$ is non-archimedean, and a smooth Fr\'echet representation of $G$ of moderate growth when ${\mathrm {F}}$ is archimedean. Then
\begin{equation}\label{isof1}
({\mathcal {S}}^\circ\widehat \otimes \sigma_0)_{G_1} \cong \sigma_0
\end{equation}
as representations of $G$.
\end{lemt}
\begin{proof}
We prove the lemma in the archimedean case by assuming that ${\mathrm {F}}$ is archimedean. The non-archimedean case is similar but less involved, and we omit its proof.
Write $\mathcal D^\circ:={\mathcal {S}}^\circ \operatorname{d}\! g$, which is a topological vector space of measures on $G$. It is a representation of $G\times G$ such that $(g,h)\in G\times G$ acts on it
by the push-forward of measures through the translation map
\[
G\rightarrow G,\quad x\mapsto gxh^{-1}.
\]
Using the topological linear isomorphism
\[
{\mathcal {S}}^\circ\rightarrow \mathcal D^\circ, \quad \phi\mapsto \check \phi \cdot |\det|_{\mathrm{F}}^{-\frac{dn}{2}}\cdot \operatorname{d}\! g, \qquad (\check \phi(g):=\phi(g^{-1}))
\]
we know that \eqref{isof1} is equivalent to
\begin{equation}\label{isof2}
(\mathcal D^\circ\widehat \otimes \sigma_0)_{G_2} \cong \sigma_0.
\end{equation}
The bilinear map
\begin{equation}\label{actsigma0}
\mathcal D^\circ\times \sigma_0\rightarrow \sigma_0, \quad (\phi \operatorname{d}\! g, v)\mapsto (\phi \operatorname{d}\! g).v:= \int_G \phi(g) g.v \operatorname{d} \! g
\end{equation}
is continuous and yields a $G$-homomorphism
\begin{equation}\label{isof3}
(\mathcal D^\circ\widehat \otimes \sigma_0)_{G_2} \rightarrow \sigma_0.
\end{equation}
The theorem of Dixmier-Malliavin \cite[Theorem 3.3]{DM} implies that the map \eqref{isof3} is surjective. It is thus an open map by the Open Mapping Theorem. In order to show that the map \eqref{isof3} is an isomorphism, it suffices to show that its transpose is a linear isomorphism. This transpose map is the composition of
\begin{eqnarray*}
\sigma_0^*&\rightarrow &{\mathrm{Hom}}_{G_2}(\sigma_0, (\mathcal D^\circ)^*)\\
&\cong& {\mathrm{Hom}}_{G_2}(\mathcal D^\circ\widehat \otimes \sigma_0, \mathbb{C}) \qquad \quad \textrm{\cite[Formula (50.16)]{Tr}}\\
&\cong & ((\mathcal D^\circ\widehat \otimes \sigma_0)_{G_2})^*,
\end{eqnarray*}
where the first homomorphism is given by
\begin{equation}\label{firstmap}
\lambda\mapsto (v\mapsto(\eta\mapsto \lambda(\eta. v))).
\end{equation}
By definition, $(\mathcal D^\circ)^*$ is the space of tempered generalized functions on $G$. Let $\nu\in {\mathrm{Hom}}_{G_2}(\sigma_0, (\mathcal D^\circ)^*)$. Since the convolution of a tempered generalized function on $G$ with an element of $\mathcal D^\circ$ is a smooth function, using the theorem of Dixmier-Malliavin, we know that $\nu(v)$ is a smooth function on $G$ for each $v\in \sigma_0$. Let $\lambda_\nu(v)\in \mathbb{C}$ be its evaluation at $1\in G$. Then $\lambda_\nu$ is a linear functional on $\sigma_0$. It is easy to check that the diagram
\begin{equation}\label{cd1}
\begin{CD}
\mathcal D^\circ\times \sigma_0@>\textrm{the map \eqref{actsigma0}}>> \sigma_0\\
@V\textrm{(identity map)}\times \nu VV @VV\lambda_\nu V\\
\mathcal D^\circ\times (\mathcal D^\circ)^* @>\textrm{the natural pairing}>> \mathbb{C}\\
\end{CD}
\end{equation}
commutes. Note that the bottom horizontal arrow is separately continuous. Thus the composition of
\[
\mathcal D^\circ\times \sigma_0\xrightarrow{\textrm{the map \eqref{actsigma0}}} \sigma_0\xrightarrow{\lambda_\nu} \mathbb{C}
\]
is separately continuous, which is automatically continuous by \cite[Corollary of Theorem 34.1]{Tr}. This implies that $\lambda_\nu$ is continuous. Using the commutative diagram \eqref{cd1}, it is routine to check that the map
\[
{\mathrm{Hom}}_{G_2}(\sigma_0, (\mathcal D^\circ)^*)\rightarrow \sigma_0^*,\qquad \nu\mapsto \lambda_\nu
\]
is inverse to the map \eqref{firstmap}. Therefore the map \eqref{firstmap} is bijective. This finishes the proof of the lemma.
\end{proof}
\begin{rremark}
The proof of the above lemma shows that the isomorphism \eqref{isof2} holds when $G$ is replaced by an arbitrary totally disconnected locally compact Hausdorff topological group, or an arbitrary almost linear Nash group. See \cite{Sun} for the notion of almost linear Nash groups, and \cite[Sections 2.2, 2.3]{Sun2} for the notion of smooth representations of moderate growth for almost linear Nash groups.
\end{rremark}
\begin{lemt}\label{irrf}
If ${\mathrm{Hom}}_{G_1}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma)= 0$, then the representation $\Theta_1(\sigma)$ of $G$ is irreducible.
\end{lemt}
\begin{proof}
The exact sequence
\[
0\rightarrow {\mathcal {S}}^\circ \rightarrow {\mathcal {S}} \rightarrow {\mathcal {S}}/{\mathcal {S}}^\circ\rightarrow 0
\]
yields an exact sequence
\[
({\mathcal {S}}^\circ\widehat \otimes \sigma^{\vee})_{G_1} \rightarrow ({\mathcal {S}}\widehat \otimes \sigma^{\vee})_{G_1} \rightarrow (({\mathcal {S}}/{\mathcal {S}}^\circ)\widehat \otimes \sigma^{\vee})_{G_1} \rightarrow 0.
\]
The assumption of the lemma implies that $ (({\mathcal {S}}/{\mathcal {S}}^\circ)\widehat \otimes \sigma^{\vee})_{G_1}=0$. Thus we have a surjective homomorphism
\[
({\mathcal {S}}^\circ\widehat \otimes \sigma^{\vee})_{G_1} \rightarrow \Theta_1(\sigma)
\]
of representations of $G$. By Lemma \ref{frob},
\[
({\mathcal {S}}^\circ\widehat \otimes \sigma^{\vee})_{G_1} \cong \sigma^{\vee}.
\]
Since $\Theta_1(\sigma)$ is nonzero, we conclude that $\Theta_1(\sigma)\cong \sigma^{\vee}$ is irreducible.
\end{proof}
A similar argument as in the proof of Lemma \ref{irrf} shows the following lemma.
\begin{lemt}\label{irrf2}
If ${\mathrm{Hom}}_{G_2}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma^{\vee})= 0$, then the representation $\Theta_2(\sigma^{\vee})$ of $G$ is irreducible.
\end{lemt}
Combining Lemmas \ref{l2}, \ref{irrf} and \ref{irrf2}, we finish the proof of Theorem \ref{pole1}.
\section{A proof of Proposition \ref{theta12}}\label{secf}
\begin{lemt}\label{fourier}
There is a (topological in the archimedean case) linear automorphism ${\mathcal {F}}: {\mathcal {S}}\rightarrow {\mathcal {S}}$ such that
\[
{\mathcal {F}}((g,h).\phi)=(h,g).({\mathcal {F}}(\phi)) \quad \textrm{for all } g,h\in G, \phi\in {\mathcal {S}}.
\]
\end{lemt}
\begin{proof}
Define a symmetric bilinear form
\[
\langle\,,\,\rangle: \operatorname{M}_{n}({\mathrm {D}})\times \operatorname{M}_{n}({\mathrm {D}})\rightarrow {\mathrm {F}}, \quad (x,y)\mapsto \textrm{the reduced trace of $xy$}.
\]
Fix a non-trivial unitary character $\psi$ on ${\mathrm {F}}$. Define the Fourier transform ${\mathcal {F}}: {\mathcal {S}}\rightarrow {\mathcal {S}}$ by
\[
({\mathcal {F}}(\phi))(x):=\int_{ \operatorname{M}_{n}({\mathrm {D}})} \phi(y)\psi(\langle x,y\rangle) \operatorname{d}\! y,\quad \phi\in {\mathcal {S}}, x\in \operatorname{M}_{n}({\mathrm {D}}),
\]
where $\operatorname{d}\! y$ is a Haar measure on $\operatorname{M}_{n}({\mathrm {D}})$. It is routine to check that ${\mathcal {F}}$ fulfills the requirement of the lemma.
\end{proof}
Lemma \ref{fourier} clearly implies Proposition \ref{theta12}, namely
\[
\Theta_1(\sigma)\cong\Theta_2(\sigma).
\]
\section{A proof of Proposition \ref{generic}}\label{sec18}
We first treat the case of essentially square integrable representations. Recall that an irreducible admissible smooth representation of ${\mathrm{GL}}_n({\mathrm {F}})$ is said to be essentially square integrable if all its matrix coefficients are square integrable when restricted to ${\mathrm{SL}}_n({\mathrm {F}})$. Note that essentially square integrable representations of ${\mathrm{GL}}_n(\mathbb{C})$ exist only when $n=1$, and essentially square integrable representations of ${\mathrm{GL}}_n(\mathbb R)$ exist only when $n\leq 2$.
\begin{lemt}\label{gl1r}
Proposition \ref{generic} holds when
$G={\mathrm{GL}}_1(\mathbb R)$.
\end{lemt}
\begin{proof}
The representation $\sigma$ corresponds to a character of $\mathbb R^\times$ of the form
\begin{equation}\label{cgl1r}
x\mapsto \chi_{m, r}(x):=\left(\frac{x}{|x|}\right)^m |x|^{r} ,
\end{equation}
where $m\in\{0,1\}$ and $r\in \mathbb{C}$. Then (\emph{cf.}~ \cite[Section 16]{Ja})
\[
\operatorname{L}(s,\sigma)=\pi^{\frac{-(s+m+r)}{2}}\Gamma(\frac{s+m+r}{2}),
\]
and
\[
\operatorname{L}(s,\sigma^{\vee})=\pi^{\frac{-(s+m-r)}{2}}\Gamma(\frac{s+m-r}{2}).
\]
Recall that the poles of the gamma function are $0,-1,-2, -3, \cdots$. Thus, if both $\operatorname{L}(s,\sigma)$ and $\operatorname{L}(s,\sigma^{\vee})$ have a pole at $\frac{1}{2}$, then
\[
\frac{1}{2}+m+r, \frac{1}{2}+m-r\in\{0,-2,-4,-6,\cdots\}.
\]
This implies that $m<0$, which contradicts the fact that $m\in \{0,1\}$.
\end{proof}
\begin{lemt}\label{comlex}
Proposition \ref{generic} holds when
$G={\mathrm{GL}}_1(\mathbb{C})$.
\end{lemt}
\begin{proof}
The representation $\sigma$ corresponds to a character of $\mathbb{C}^\times$ of the form
\begin{equation}\label{cmr}
z\mapsto \chi_{m, r}(z):=z^m(z\bar z)^{r-\frac{m}{2}} ,
\end{equation}
where $m\in \mathbb{Z}$ and $r\in \mathbb{C}$. Then (\emph{cf.}~ \cite[Section 16]{Ja})
\[
\operatorname{L}(s,\sigma)=2(2\pi)^{-s-r-\frac{|m|}{2}}\Gamma(s+r+\frac{|m|}{2}),
\]
and
\[
\operatorname{L}(s,\sigma^{\vee})=2(2\pi)^{-s+r-\frac{|m|}{2}}\Gamma(s-r+\frac{|m|}{2}).
\]
The lemma then follows as in the proof of Lemma \ref{gl1r}.
\end{proof}
\begin{lemt}\label{gl2}
Proposition \ref{generic} holds when $G={\mathrm{GL}}_2(\mathbb R)$ and
$\sigma$ is essentially square integrable.
\end{lemt}
\begin{proof}
Under the local Langlands correspondence, the representation $\sigma$ corresponds to a representation of the Weil group $W_\mathbb R$ of $\mathbb R$ of the form ${\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \chi_{m,r}$, where $\chi_{m,r}$ is as in \eqref{cmr} with $m\neq 0$. Then
(\emph{cf.}~ \cite[Section 16]{Ja})
\[
\operatorname{L}(s, \sigma)=\operatorname{L}(s, \chi_{m,r})\quad \textrm{and}\quad \operatorname{L}(s, \sigma^{\vee})=\operatorname{L}(s,\chi_{m,r}^{-1}),
\]
and the lemma follows by Lemma \ref{comlex}.
\end{proof}
Given an admissible smooth representation $\sigma_i$ of ${\mathrm{GL}}_{n_i}({\mathrm{F}})$ for each $i=1,2,\cdots, \ell$ ($\ell\geq 1$, $n_i\geq 1$), let $\sigma_1\dot\times \sigma_2\dot\times \cdots \dot\times \sigma_\ell$ denote the normalized smooth induction
\[
{\mathrm{Ind}}_{{\mathrm{P}}_{n_1, n_2, \cdots, n_\ell}({\mathrm{F}})}^{{\mathrm{GL}}_{n_1+n_2+\cdots+n_\ell}({\mathrm{F}})} (\sigma_1\widehat\otimes \sigma_2\widehat\otimes \cdots \widehat\otimes \sigma_\ell),
\]
where ${\mathrm{P}}_{n_1, n_2, \cdots, n_\ell}({\mathrm{F}})$ denotes the block-wise upper triangular parabolic subgroup of ${\mathrm{GL}}_{n_1+n_2+\cdots+n_\ell}({\mathrm{F}})$ which has ${\mathrm{GL}}_{n_1}({\mathrm{F}})\times {\mathrm{GL}}_{n_2}({\mathrm{F}})\times\cdots \times {\mathrm{GL}}_{n_\ell}({\mathrm{F}})$ as a Levi factor, and $\sigma_1\widehat\otimes \sigma_2\widehat\otimes \cdots \widehat\otimes \sigma_\ell$ is viewed as a representation of ${\mathrm{P}}_{n_1, n_2, \cdots, n_\ell}({\mathrm{F}})$ as usual.
Assume that ${\mathrm{F}}$ is non-archimedean for the moment. Let $\tau$ be a supercuspidal irreducible admissible smooth representation of ${\mathrm{GL}}_m({\mathrm{F}})$, where $m$ is a positive divisor of $n$. Suppose $n=\ell m$. Then the representation
\[
(\tau\cdot \abs{\det}_{\mathrm{F}}^{1-\ell})\dot\times (\tau\cdot \abs{\det}_{\mathrm{F}}^{2-\ell})\dot\times\cdots \dot\times \tau
\]
has a unique irreducible quotient representation, which we denote by $\sigma_{n,\tau}$. It is an essentially square integrable irreducible admissible smooth representation of ${\mathrm{GL}}_n({\mathrm{F}})$. Conversely, every such representation is uniquely of the form $\sigma_{n,\tau}$. See \cite{BZ, Ze} for more details.
\begin{lemt}\label{sqip}
Assume that ${\mathrm{F}}$ is non-archimedean, and $\sigma=\sigma_{n,\tau}$ is essentially square integrable as above. If $\operatorname{L}(s, \sigma)$ has a pole at $\frac{1}{2}$, then $m=1$ and $\tau$ is the character $\abs{\,\cdot\, }^{-\frac{1}{2}}$ of ${\mathrm{GL}}_1({\mathrm{F}})$.
\end{lemt}
\begin{proof}
The lemma follows by noting that (\emph{cf.}~\cite[Theorem 8.2]{JPSS})
\[
\operatorname{L}(s, \sigma_{n,\tau})=\operatorname{L}(s, \tau).
\]
\end{proof}
By Lemma \ref{sqip}, $\sigma_{n, \abs{\,\cdot\, }^{-\frac{1}{2}}}$ is the only essentially square integrable irreducible admissible smooth representation of ${\mathrm{GL}}_n({\mathrm{F}})$ whose L-function has a pole at $\frac{1}{2}$. Since the representation $\sigma_{n, \abs{\,\cdot\, }^{-\frac{1}{2}}}$ is not self-dual, we conclude that Proposition \ref{generic} holds when
${\mathrm{F}}$ is non-archimedean and $\sigma$ is essentially square integrable. Together with Lemmas \ref{gl1r}, \ref{comlex} and \ref{gl2}, this implies the following lemma.
\begin{lemt}\label{nal2}
Proposition \ref{generic} holds when
$\sigma$ is essentially square integrable.
\end{lemt}
Now ${\mathrm{F}}$ is archimedean or non-archimedean, as in Lemma \ref{nal2}. Recall that a unitary representation of ${\mathrm{GL}}_n({\mathrm{F}})$ is said to be tempered if it is weakly contained in the regular representation (see \cite{CHH}), and an irreducible admissible smooth representation $\rho$ of ${\mathrm{GL}}_n({\mathrm{F}})$ is said to be essentially tempered if there is a real number $e(\rho)$ such that $\rho\cdot \abs{\det}_{\mathrm{F}}^{-e(\rho)}$ is unitarizable and tempered. Note that the real number $e(\rho)$ is uniquely determined by $\rho$. It is evident that all essentially square integrable irreducible admissible smooth representations of ${\mathrm{GL}}_n({\mathrm{F}})$ are essentially tempered.
The following lemma is well-known and easy to check. See \cite[Theorem 1.1]{HO} for a more general statement.
\begin{lemt}\label{rs}
Let $\sigma_i$ be an irreducible admissible smooth representation of ${\mathrm{GL}}_{n_i}({\mathrm{F}})$ which is unitarizable and tempered ($i=1,2$, $n_i\geq 1$).
Then the Rankin-Selberg L-function $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has no pole in the domain where the real part of $s$ is positive.
\end{lemt}
To prove Proposition \ref{generic} in the general case, we need the following result.
\begin{prpt}\label{shahidi}
Let $\sigma_1, \sigma_2$ be essentially tempered irreducible admissible smooth representations of ${\mathrm{GL}}_{n_1}({\mathrm{F}})$ and ${\mathrm{GL}}_{n_2}({\mathrm{F}})$ ($n_1,n_2\geq 1$), respectively. Then the Rankin-Selberg L-function $\operatorname{L}(s,\sigma_1^{\vee}\times \sigma_2)$ has a pole at $s=1$ if and only if $e(\sigma_1)\geq e(\sigma_2)$ and $\sigma_1\dot\times \sigma_2$ is reducible.
\end{prpt}
\begin{proof}
Lemma \ref{rs} implies that if $e(\sigma_1)<e(\sigma_2)$ then $\operatorname{L}(s,\sigma_1^{\vee}\times \sigma_2)$ has no pole at $s=1$.
Thus we may assume that $e(\sigma_1)\geq e(\sigma_2)$, and then the proposition is an instance of \cite[Proposition 5.3]{CS}.
\end{proof}
Now we come to the proof of Proposition \ref{generic}. As in Proposition \ref{generic}, let $\sigma$ be a generic irreducible admissible smooth representation of ${\mathrm{GL}}_n({\mathrm{F}})$.
Write
\[
\sigma\cong \sigma_1\dot\times \sigma_2\dot\times \cdots \dot\times\sigma_\ell\quad(\ell\geq 1),
\]
where $\sigma_i$ ($i=1,2, \cdots, \ell$) is an essentially square integrable irreducible admissible smooth representation of ${\mathrm{GL}}_{n_i}({\mathrm{F}})$ ($n_i\geq 1$), with $n_1+n_2+\cdots+n_\ell=n$.
Then
\[
\operatorname{L}(s, \sigma)=\prod_{j=1}^\ell \operatorname{L}(s, \sigma_j)\quad\textrm{and}\quad \operatorname{L}(s, \sigma^{\vee})=\prod_{j=1}^\ell \operatorname{L}(s, \sigma_j^{\vee}).
\]
Assume by contradiction that both $\operatorname{L}(s, \sigma)$ and $\operatorname{L}(s, \sigma^{\vee})$ have a pole at $s=\frac{1}{2}$. Using Lemma \ref{nal2}, we know that both $\operatorname{L}(s, \sigma_i)$ and $\operatorname{L}(s, \sigma_j^{\vee})$ have a pole at $s=\frac{1}{2}$, for some $i\neq j$.
Proposition \ref{pat1} (which will be proved in Section \ref{secl}) then implies that $\operatorname{L}(s, \sigma_j^{\vee}\times \sigma_i)$ has a pole at $s=1$. Hence by Proposition \ref{shahidi}, $\sigma_j\dot\times \sigma_i$ is reducible, which contradicts the fact that $\sigma$ is irreducible. This proves Proposition \ref{generic}.
\section{A proof of Proposition \ref{pat1}}\label{secl}
Let $\sigma_1, \sigma_2$ be as in Proposition \ref{pat1} so that both $\operatorname{L}(s,\sigma_1)$ and $\operatorname{L}(s,\sigma_2)$ have a pole at $s=\frac{1}{2}$. We aim to show that $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$. Using the Langlands classification for general linear groups, we assume without loss of generality that both $\sigma_1$ and $\sigma_2$ are essentially square integrable.
We further assume without loss of generality that $n_1\geq n_2$.
\begin{lemt}\label{l51}
Assume that ${\mathrm{F}}$ is non-archimedean. Then $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$.
\end{lemt}
\begin{proof}
By Lemma \ref{sqip},
\[
\sigma_1\cong \sigma_{n_1, \abs{\,\cdot\, }^{-\frac{1}{2}}} \quad \textrm{and}\quad \sigma_2\cong\sigma_{n_2, \abs{\,\cdot\, }^{-\frac{1}{2}}}.
\]
Thus by \cite[Theorem 8.2]{JPSS} (see also \cite[Theorem 2.3]{CPS}),
\[
\operatorname{L}(s,\sigma_1\times \sigma_2)=\prod_{j=0}^{n_2-1} \operatorname{L}(s+j, \abs{\,\cdot\,}^{-n_2}).
\]
Hence $s=1$ is a pole of $\operatorname{L}(s,\sigma_1\times \sigma_2)$.
\end{proof}
\begin{lemt}\label{l52}
Assume that ${\mathrm{F}}$ is archimedean and $n_1=n_2=1$. Then $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$.
\end{lemt}
\begin{proof}
First assume that ${\mathrm{F}}=\mathbb{C}$. Write $\sigma_1\cong\chi_{m_1,r_1}$ and $\sigma_2\cong\chi_{m_2,r_2}$ as in \eqref{cmr}. Then
\[
\frac{\abs{m_1}+1}{2}+r_1,\, \frac{\abs{m_2}+1}{2}+r_2\in \{0,-1,-2,\cdots\}.
\]
This implies that
\[
\frac{\abs{m_1+m_2}}{2}+1+r_1+r_2\in \{0,-1,-2,\cdots\}.
\]
Thus
\[
\operatorname{L}(s,\sigma_1\times \sigma_2)=2(2\pi)^{-s-(r_1+r_2)-\frac{\abs{m_1+m_2}}{2}}\Gamma(s+r_1+r_2+\frac{\abs{m_1+m_2}}{2})
\]
has a pole at $s=1$.
When ${\mathrm{F}}=\mathbb{R}$, the same proof shows that $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$.
\end{proof}
\begin{lemt}\label{l53}
Assume that ${\mathrm{F}}=\mathbb{R}$ and $(n_1,n_2)=(2,1)$. Then $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$.
\end{lemt}
\begin{proof}
Under the local Langlands correspondence, the representation $\sigma_1$ corresponds to a representation of the Weil group $W_\mathbb{R}$ of $\mathbb{R}$ of the form ${\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb{R}} \chi_{m_1,r_1}$, where $\chi_{m_1,r_1}$ is as in \eqref{cmr}, with $m_1\neq 0$. Write $\sigma_2\cong\chi_{m_2,r_2}$ as in \eqref{cgl1r}.
Then
\[
\frac{\abs{m_1}+1}{2}+r_1\in \{0,-1,-2,\cdots\}
\]
and
\[
\frac{1}{2}+m_2+r_2\in \{0,-2,-4,\cdots\}.
\]
This implies that
\[
\frac{\abs{m_1}}{2}+1+r_1+r_2\in \{0,-1,-2,\cdots\}.
\]
Thus
\[
\operatorname{L}(s,\sigma_1\times \sigma_2)=2(2\pi)^{-s-(r_1+r_2)-\frac{\abs{m_1}}{2}}\Gamma(s+r_1+r_2+\frac{\abs{m_1}}{2})
\]
has a pole at $s=1$.
\end{proof}
\begin{lemt}\label{l54}
Assume that ${\mathrm{F}}=\mathbb{R}$ and $(n_1,n_2)=(2,2)$. Then $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$.
\end{lemt}
\begin{proof}
Under the local Langlands correspondence, the representation $\sigma_i$ ($i=1,2$) corresponds to a representation of the Weil group $W_\mathbb{R}$ of $\mathbb{R}$ of the form ${\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb{R}} \chi_{m_i,r_i}$, where $\chi_{m_i,r_i}$ is as in \eqref{cmr}, with $m_i\neq 0$.
Then
\[
\frac{\abs{m_i}+1}{2}+r_i\in \{0,-1,-2,\cdots\}\quad (i=1,2),
\]
which implies that
\begin{equation}\label{m1m2}
\frac{\abs{m_1+m_2}}{2}+1+r_1+r_2\in \{0,-1,-2,\cdots\}.
\end{equation}
We have that
\begin{eqnarray*}
&&{\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb{R}} \chi_{m_1,r_1}\otimes {\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb{R}} \chi_{m_2,r_2}\\
&\cong & {\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb{R}} \left(\chi_{m_1,r_1}\otimes ({\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb{R}} \chi_{m_2,r_2})|_{\mathbb{C}^\times} \right)\\
&\cong & {\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb{R}} \left(\chi_{m_1,r_1}\otimes (\chi_{m_2,r_2}\oplus \chi_{-m_2,r_2})\right)\\
&\cong& {\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb{R}} \chi_{m_1+m_2,r_1+r_2}\oplus {\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb{R}} \chi_{m_1-m_2,r_1+r_2}.
\end{eqnarray*}
Thus
\[
\operatorname{L}(s,\sigma_1\times \sigma_2)=\operatorname{L}(s, \chi_{m_1+m_2,r_1+r_2})\cdot \operatorname{L}(s, \chi_{m_1-m_2,r_1+r_2}).
\]
It has a pole at $s=1$ by \eqref{m1m2}.
\end{proof}
Proposition \ref{pat1} is now proved by summarizing Lemmas \ref{l51}, \ref{l52}, \ref{l53} and \ref{l54}.
\section*{Acknowledgements}
B. Sun would like to thank Jiajun Ma and Wee Teck Gan for helpful discussions.
Y. Fang was supported in part by the National Natural Science Foundation of China (No. 11601341), and the National Key Research and Development Program of China (No. 2016QY04W0805).
B. Sun was supported in part by the National Natural Science Foundation of China (No. 11525105, 11688101, 11621061 and 11531008).
\begin{thebibliography}{99}
\bibitem[BK]{BK}
J. N. Bernstein and B. Kr\"{o}tz, \textit{Smooth Frechet globalizations of
Harish-Chandra modules}, Isr. J. Math. 199 (2014), 45-111.
\bibitem[BZ]{BZ}
J.N. Bernstein and A.V. Zelevinsky, \textit{Induced representations of reductive p-adic groups, I, } Ann.
scient. \'Ec. Norm. Sup., $4^e$ s\'erie 10 (1977), 441-472.
\bibitem[Ca]{Ca}
W. Casselman, \textit{Canonical extensions of Harish-Chandra modules
to representations of $G$}, Can. Jour. Math. 41, (1989), 385-438.
\bibitem[CS]{CS}
W. Casselman and F. Shahidi, \textit{On irreducibility of standard modules for generic
representations}, Ann. Sci. \'Ecole Norm. Sup. 31 (1998), 561-589.
\bibitem[CPS]{CPS}
J. Cogdell and I.I. Piatetski-Shapiro, \textit{Derivatives and L-functions for GL(n)}, The Heritage of B. Moishezon, IMCP, 2010.
\bibitem[CHH]{CHH}
M. Cowling, U. Haagerup and R. Howe, \textit{Almost $L^2$ matrix coefficients}, J. Reine Angew. Math. 387 (1988), 97-110.
\bibitem[DM]{DM}
J. Dixmier and P. Malliavin, \textit{Factorisations de fonctions et de vecteurs ind\'efiniment diff\'erentiables}, Bull. Sci. Math. (2), 102 (4), 307-330 (1978).
\bibitem[GJ]{GJ}
R. Godement and H. Jacquet, \textit{Zeta functions of simple algebras}, Lecture Notes in Mathematics, Vol. 260. Springer-Verlag, Berlin-New York, 1972.
\bibitem[HO]{HO}
V. Heiermann and E. Opdam, \textit{On the tempered L-function conjecture}, Am. J. Math. 135, 777-800 (2013).
\bibitem[Ho]{howe}
R. Howe,
\textit{Transcending classical invariant theory},
J. Amer. Math. Soc., 2 (1989), 535-552.
\bibitem[Ja]{Ja}
H. Jacquet, \textit{Archimedean Rankin-Selberg integrals, in Automorphic Forms and L-functions II: Local Aspects}, Proceedings of a workshop in honor of Steve Gelbart on the occasion of his 60th birthday, Contemporary Mathematics, volumes 489, 57-172, AMS and BIU 2009.
\bibitem[JPSS]{JPSS}
H. Jacquet, I.I. Piatetski-Shapiro, and J. Shalika, \textit{Rankin-Selberg Convolutions}. Am. J. Math.
105 (1983), 367-464.
\bibitem[Ku]{Ku}
S. S. Kudla, \textit{On the local theta correspondence}, Invent. Math. 83(2), 229-255 (1986).
\bibitem[Mi]{mi} A. Minguez, \textit{Correspondance de Howe explicite: paires duales de type II}, Ann. Sci. \'Ec. Norm. Sup\'er. 41 (2008), 717-741.
\bibitem[MVW]{MVW} C. Moeglin, M.F. Vign\'eras and J.L. Waldspurger, \textit{Correspondance de Howe sur un corps p-adique}, LNM 1291, Springer-Verlag, 1987.
\bibitem[Sun1]{Sun} B. Sun, \textit{Almost linear Nash groups}, Chin. Ann. Math. 36B(3), 2015, 355-400.
\bibitem[Sun2]{Sun2} B. Sun, \textit{On representations of real Jacobi groups}, Sci. China Math. 55, (2012), 541-555.
\bibitem[Tr]{Tr}
F. Treves, \textit{Topological vector spaces, distributions and
kernels}, Academic Press, New York, 1967.
\bibitem[Wal]{Wa2}
N. Wallach, \textit{Real Reductive Groups II}, Academic Press, San
Diego, 1992.
\bibitem[Xue]{Xue}
H. Xue, \textit{Homogeneous distributions and full theta lifts}, Ph.D thesis, University of Chinese Academy of Sciences, Beijing, PRC, 2017.
\bibitem[Ze]{Ze} A.V. Zelevinsky, \textit{Induced representations of reductive p-adic groups, II. On irreducible representations
of GL(n)}, Ann. scient. \'Ec. Norm. Sup., $4^e$ s\'erie 13 (1980), 165-210.
\end{thebibliography}
\end{document}
\begin{document}
\title{Linear Model Predictive Control under Continuous Path Constraints via Parallelized Primal-Dual Hybrid Gradient Algorithm\\
\thanks{$^{1}$Zishuo Li, Bo Yang, Jiayun Li, and Yilin Mo are with the Department of Automation, Tsinghua University, Beijing, 100084, China.
{\small \{lizs19,yang-b21,lijiayun22\}@mails.tsinghua.edu.cn,
ylmo@tsinghua.edu.cn}}
\thanks{$^{2}$Jiaqi Yan is with Department of Computer Science, Tokyo Institute of Technology, Tokyo, Japan.
{\small jyan@sc.dis.titech.ac.jp}
}
}
\author{Zishuo Li$^{1}$, Bo Yang$^{1}$, Jiayun Li$^{1}$, Jiaqi Yan$^{2}$, Yilin Mo$^{1}$}
\maketitle
\begin{abstract}
In this paper, we consider a Model Predictive Control~(MPC) problem of a continuous-time linear time-invariant system subject to continuous-time path constraints on the states and the inputs. By leveraging the concept of differential flatness, we can replace the differential equations governing the system with linear mapping between the states, inputs, and flat outputs~(including their derivatives). The flat outputs are then parameterized by piecewise polynomials, and the model predictive control problem can be equivalently transformed into a Semi-Definite Programming~(SDP) problem via Sum-of-Squares (SOS), ensuring constraint satisfaction at every continuous-time interval. We further note that the SDP problem contains a large number of small-size semi-definite matrices as optimization variables. To address this, we develop a Primal-Dual Hybrid Gradient~(PDHG) algorithm that can be efficiently parallelized to speed up the optimization procedure. Simulation results on a quadruple-tank process demonstrate that our formulation can guarantee strict constraint satisfaction, while the standard MPC controller based on the discretized system may violate the constraint inside a sampling period. Moreover, the computational speed superiority of our proposed algorithm is collaborated by numerical simulation.
\end{abstract}
\title{Linear Model Predictive Control under Continuous Path Constraints via Parallelized Primal-Dual Hybrid Gradient Algorithm\\
\thanks{Identify applicable funding agency here. If none, delete this.}
}
\author{Zishuo Li, Bo Yang, Jiayun Li, Jiaqi Yan, Yilin Mo}
\section{Introduction}
The optimal control theory aims to find control laws for a dynamical system in order to optimize a given objective function, which finds numerous applications in fields of engineering~\cite{ben2010optimal,sharp2011vehicle,satici2013robust} and economics~\cite{weber2011optimal,aseev2012infinite,kellett2019feedback} etc. Closed-form optimal control law can be found for certain unconstrained problems, such as linear-quadratic control problem~\cite{bertsekas2012dynamic}, or brachistochrone problem~\cite{clarke2013functional}. However, analytically solving the optimal control problem of continuous-time systems remains a challenging task. Furthermore, a vast majority of real-world dynamical systems operate under various constraints, such as input saturation or safety constraint on the state. For constrained optimal control problem, Pontryagin's maximum principle~\cite{hartl1995survey} can be used to derive necessary condition for optimality. However, in practice, only a small number of problems can be solved analytically. Therefore, algorithms, such as model predictive control, discretize the system and thus reducing the search space of the control input from the infinite dimensional function space into a finite dimensional space, where numerical optimization can be used.
Dynamic Matrix Control~(DMC)~\cite{lundstrom1995limitations} and Model Algorithmic Control~(MAC)~\cite{rouhani1982model} are two formulations of MPC algorithm for discretized optimal control problems with constraints~\cite{garcia1989model}. Both formulations employ a zero-order hold for the control inputs, which implies that the control inputs are step functions and hence reside in a finite dimensional space.
However, the discretization of a continuous time system means that one can only guarantee constraint satisfaction at all discrete-time instant, where constraint violation can occur in between.
For control applications with high safety requirements, constraint violations can be intolerable. In order to meet the constraints at all times, Semi-Infinite Programming~(SIP)~\cite{djelassi2021recent} has been used to deal with an infinite number of constraints. Several approaches for solving SIP have been proposed, and a common framework is to check constraint violations in intervals, and adaptively add additional discrete-time points until the tolerance level is guaranteed or no constraint violations occur. Chen et al.~\cite{chen2005inequality} introduce an $\epsilon$-tolerance on inequality constraints, which means that the constraints may still be violated up to $\epsilon$. Fu et al.~\cite{fu2015local} tighten the inequality constraints at discrete-time instants, and hence guarantee the satisfaction of the constraints over the whole interval. However, tighter constraints may lead to a relatively conservative solution.
To address these issues, we parameterize the flat output of the continuous-time linear system by piecewise polynomials. The differential equation of the dynamic system is eliminated and replaced by flatness map between flat output $y$ and system state $x$, input $u$~\cite{763209}. In this way, the decision variables become finite-dimensional polynomial coefficients. On the other hand, the inequality constraints become non-negative polynomials over intervals, which are still infinite-dimensional. Fortunately, we can leverage Markov-Luk\'{a}cs theorem~\cite{Roh_SOS,szego_orth_poly} to transcribe a polynomial inequality constraint on an interval into an equivalent matrix Positive Semi-Definite (PSD) constraint, thus ensures the path constraints hold at every time interval. With this procedure, the continuous-time MPC problem can be transcribed into a Semi-Definite Programming (SDP) problem.
It is worth noticing that the SDP problem we formulate contains a large amount of small symmetric matrices. As a result, we propose to use parallel computing to speed up the calculation. To this end, we use a customized Primal-Dual Hybrid Gradient (PDHG) algorithm to solve the SDP problem. PDHG, also known as Chambolle-Pock~\cite{Chambolle2011}, is a well-known first-order algorithm dealing with convex optimization problems with equality constraints.
For large scale problems, it has been one of the preferred first-order algorithm~\cite{NEURIPS2021_a8fbbd3b} due to the fact that it can be easily parallelized.
The main contributions of this article are as follows.
\begin{itemize}
\item An equivalent formulation of continuous inequality constrained linear MPC problem is derived, in which the system dynamics are eliminated by using differential flatness. The problem is then converted into a polynomial optimization problem by parameterizing the flat output with piece-wise polynomials.
\item Path constraints are rigorously guaranteed by using sum of squares theory to transcribe the non-negative constraints of polynomials into the equivalent positive semi-definite constraints of matrices, and an equivalent SDP programming problem is formulated.
\item The SDP problem is solved by using the customized primal-dual splitting-based iterations and accelerated by parallel computing.
\end{itemize}
It is worth noting that, although the derivations in this paper are carried out for linear MPC problems, it can also be extended to nonlinear MPC problems if the constraints remain linear and the objective remains quadratic after the differentially flat transformation.
The paper is organized as follows. Differential flatness theory is stated and the form of the flatness map for linear systems is described in Section \uppercase\expandafter{\romannumeral2}. The transformation of the linear MPC problem with continuous-time path constraints into an SDP problem is discussed in Section \uppercase\expandafter{\romannumeral3}. In Section \uppercase\expandafter{\romannumeral4}, we present the PDHG algorithm for SDP solving and explain that it can be accelerated by parallel computing. The simulation validation of our proposed MPC solver on the quadruple-tank process is provided in Section \uppercase\expandafter{\romannumeral5}. Finally, concluding remarks are made in Section \uppercase\expandafter{\romannumeral6}.
\section{Preliminary: Differential Flatness of Linear System}\label{sec:flatness}
Differential flatness is an important concept for a class of linear and nonlinear systems~\cite{763209}. A system is differentially flat if and only if there exists a flat output, such that all states and inputs subject to system dynamical constraints can be explicitly expressed as functions of the flat output (which is free of dynamical constraints) and a finite number of its derivatives.
In this paper, we restrict our discussion to linear system. Consider an LTI system governed by the following ordinary differential equation:
\begin{align}
\dot{x}(t) = A x(t) + B u(t), \label{eq:sys}
\end{align}
where $x \in \mathbb{R}^n, u \in \mathbb{R}^m$. Without loss of generality, we assume that $(A,B)$ is controllable. Otherwise, we can always perform a Kalman decomposition and only consider the controllable part of the system.
For such a system, Fliess et al.~\cite{763209} proved the following theorem:
\begin{theorem}[Linear flatness~\cite{763209}]\label{th:linear_flat}
A linear system is differentially flat, if and only if, it is controllable.
\end{theorem}
In general, the choice of the flat output may not be unique. In this paper, we adopt the procedure proposed by Yong et al.~\cite{7171938} to derive the flat output as well as the flatness map:
\begin{theorem}
If $(A,B)$ is controllable, then there exists a matrix $\mathcal{T}\in{\mathbb{R}}^{m\times n}$, such that the following $y$ is the flat output of the system:
\begin{equation}
y =\begin{bmatrix}
y_1 & \cdots & y_m
\end{bmatrix}^\top\triangleq \mathcal{T}x \in{\mathbb{R}}^m .
\end{equation}
Moreover, there exists matrices $S\in{\mathbb{R}}^{n\times(n+m)}$, and $H\in{\mathbb{R}}^{m\times(n+m)}$, such that the state and the input of the system can be represented by the following flatness map:
\begin{align*}
x = S\bm{y}, u = H\bm{y},
\end{align*}
where $\bm{y}$ is the extended flat output vector consisting of $y_i$s and their derivatives, i.e.,\footnote{$\kappa_i,i\in\{1,\cdots,m\}$ is determined by $A,B$ and satisfies $\sum_{i=1}^{m} \kappa_i =n$.}
{\small
\begin{align}\label{eq:by}
\bm{y}&\triangleq \begin{bmatrix} y_1 & \cdots & y_1^{(\kappa_1+1)} & \cdots & y_m & \cdots & y_m^{(\kappa_m+1)} \end{bmatrix}^\top \in{\mathbb{R}}^{n+m}.
\end{align}
}
\end{theorem}
\noindent
The procedure to construct the $\mathcal{T},\,S,\,H$ matrices and extended flat output $\bm{y}$ (including the calculation of $\kappa_i$) is omitted due to space limit and the readers can refer to \cite{7171938} for more details.
\section{SDP Formulation of MPC}
This section is devoted to transcribing the MPC problem of a continuous-time path constrained linear system \eqref{eq:sys} to an SDP problem, the procedure of which is depicted in Fig~\ref{fig:procedure}. In the next subsection, we first remove the differential equality constraints in the MPC problem by differential flatness and then convert the problem into a polynomial optimization problem by parameterizing the flat output with piecewise polynomials. The polynomial optimization problem is then transformed into an equivalent SDP problem via Markov-Luk\'{a}cs Theorem~\cite{Roh_SOS,szego_orth_poly} and Sum-of-Squares (SOS) in Section~\ref{sec:sdpformulation}.
\subsection{Polynomial Optimization Formulation of MPC}
We consider an optimal control problem of the continuous-time linear system \eqref{eq:sys} under state and input constraints, which can be formulated in a receding horizon fashion as follows:
\begin{problem}[Continuous-time Linear MPC Problem]\label{pb:continuous}
\begin{align*}
\min_{x(t), u(t)} \quad & \int_0^T x(t)^\top Q x(t) + u(t)^\top R u(t)dt \\
\text{s.t.} \quad & \dot{x}(t) = A x(t) + B u(t), \quad \forall t \in [0,T]\\
& \Xi x(t) + \Upsilon u(t) \leq b, \quad \forall t \in [0,T]\\
& x(0)=x_0,
\end{align*}
where $T$ is the horizon length and $\Xi\in \mathbb R^{p\times n},\, \Upsilon\in \mathbb R^{p\times m}$ are matrices and $b\in \mathbb R^p$ is a vector of proper dimensions.
\end{problem}
Adopting the flatness map in Section~\ref{sec:flatness}, we can express the state $x(t)$ and control input $u(t)$ using the extended flat output $\bm{y}(t)$ and hence removing the differential equation constraint in Problem~\ref{pb:continuous}, which results in the following problem:
\begin{problem}[MPC using Flat Output]\label{pb:MPC_flat}
\begin{align*}
\min_{y(t)} \quad & \int_{0}^{T} \bm{y}(t)^\top (S^\top Q S+H^\top R H) \bm{y}(t) \ {\rm d}t \\
\text{s.t.} \quad & (\Xi S + \Upsilon H)\bm{y}(t) \leq b, \quad \forall t \in [0,T]\\
&S\bm{y}(0)=x_0.
\end{align*}
\end{problem}
Notice that Problem~\ref{pb:continuous} and Problem~\ref{pb:MPC_flat} are equivalent, in the sense that we can use the definition of the flat output $y=\mathcal{T}x$ and the flatness map $x = S\bm{y}$, $u=H\bm{y}$ to map the solution of one problem to the other.
Further notice that the path constraint $\Xi x(t) + \Upsilon u(t) \leq b$ (or $(\Xi S + \Upsilon H)\bm{y}(t) \leq b$), which consists of $p$ linear inequalities, requires the state and the control input (or the flat output) to remain inside a polytope over the whole time interval $[0,T]$. Aside from very special cases, Problem~\ref{pb:continuous} (or Problem~\ref{pb:MPC_flat}) cannot be solved in the infinite-dimensional function space, due to the difficulty of determining when the path constraints are active~\cite{garcia1989model}.
To facilitate optimization-based methods to solve Problem~\ref{pb:MPC_flat}, we propose to parameterize the flat output $y(t)$ by piecewise polynomials, which effectively reduces the domain of the optimization problem from an infinite-dimensional function space to a finite-dimensional space. To this end, first define the polynomial basis of degree $d$ as
\begin{align}\label{eq:defbeta}
\gamma(t) = \begin{bmatrix}
t^{d}& \cdots& t &1
\end{bmatrix}^\top.
\end{align}
Suppose each entry of flat output $y$ is represented by $N$ segments of polynomials in the horizon $[0,T]$.
Denote the column vector $c_{l,i}\in{\mathbb{R}}^{(d+1)\times 1}$ as the coefficient vector of segment $l$ of flat output $y_i$, i.e.,\footnote{Since we need smoothness constraints at the juncture points of segments, $c_{l,i}$ and $c_{l+1,i}$ are not fully free and are coupled by equality constraints.}
\begin{align}\label{eq:by=cbeta}
y_i(t)=\begin{cases}
c_{1,i}^\top \gamma\left(\frac{tN}{T}\right), 0\leq t< \frac{T}{N}\\
c_{2,i}^\top \gamma\left(\frac{tN}{T}-1\right), \frac{T}{N}\leq t< \frac{2T}{N} \\
\hspace{60pt} \vdots \\
c_{N,i}^\top \gamma\left(\frac{tN}{T}-(N-1)\right), \frac{(N-1)T}{N}\leq t< T
\end{cases}.
\end{align}
Each segment of polynomial $c_{l,i}^\top \gamma\left(\cdot\right),l\in\{1,\cdots,N\}$ has been normalized such that the time variable is on interval $[0,1]$.
By stacking the coefficients of the $l$-th segment $c_{l,i}$ vertically, we have the overall coefficient vector
\begin{align}
c_{l}\triangleq\begin{bmatrix}
c_{l,1}\\ \vdots\\ c_{l,m}
\end{bmatrix}\in{\mathbb{R}}^{m(d+1)\times 1}, \,
{\bm{c}}\triangleq\begin{bmatrix}
c_1\\ \vdots\\ c_{N}
\end{bmatrix}\in{\mathbb{R}}^{m(d+1)N\times 1}.
\end{align}
As a result, instead of optimizing $y$ in the infinite-dimensional function space, we can restrict ourselves to the following polynomial optimization problem:
\begin{problem}[Polynomial Optimization]\label{pb:poly_opt}
\begin{align*}
\min_{\bm{c}}\ & J(\bm{c})= \bm{c}^\top P \bm{c}\\
\text{s.t. }& (L_{j} c_{l}-g_{j})^\top \gamma(t) \geq 0, \forall t \in [0,1], \\
&\qquad \qquad j\in\{1,\cdots,p\}, l\in\{1,\cdots,N\}\\
&h_j^\top {\bm{c}}=r_j\ ,j\in\{1,\cdots,2mN \}
\end{align*}
\end{problem}
The calculation of parameters in Problem \ref{pb:poly_opt} is as follows.
Define $\mathbf{e}_{j}$ as the canonical basis vector of length $d+1$, where the 1 is on the $j$-th entry and $0$ on the other entries.
Define a matrix to represent the derivative of $d$ degree polynomial:
\begin{align}
D\triangleq \begin{bmatrix}
0 & & & &\\
d & 0& & & \\
& d-1 & 0 & &\\
&&\ddots&\ddots&\\
& & & 1 & 0
\end{bmatrix}\in{\mathbb{R}}^{(d+1)\times (d+1)}.
\end{align}
Thus, for any coefficient $c\in{\mathbb{R}}^{d+1}$, we have polynomial derivative equation
$\frac{{\rm d} \left(c^\top \gamma (t) \right)}{{\rm d} t}=(Dc)^\top \gamma(t).$
Define $
\bm{D}_{k}=\begin{bmatrix}
I & D^\top & (D^2)^\top & \cdots & (D^k)^{\top}
\end{bmatrix}^\top.
$
Then based on \eqref{eq:by}, one can verify that after the polynomial parameterization, the relationship between the flat output $y$ and the extended flat output $\bm{y}$ is $\bm{y}=\bm{\Pi} \cdot y$ with $\bm{\Pi}$ defined as
\begin{align*}
\bm{\Pi}\triangleq \begin{bmatrix}
\bm{D}_{\kappa_1+1} &&&\\
& \bm{D}_{\kappa_2+1} &&&\\
& &\ddots &\\
& && \bm{D}_{\kappa_m+1}
\end{bmatrix}.
\end{align*}
Denote the $j$-th row of $\Xi,\Upsilon$ as $[\Xi]_j,[\Upsilon]_j$ respectively. Denote the $j$-th entry of vector $b$ as $b_j$.
Then the parameters in Problem \ref{pb:poly_opt} are defined as:
\begin{align*}
L_{j}\triangleq &\left(([\Xi]_j S+ [\Upsilon]_j H)\otimes I_{d+1} \right) \times \bm{\Pi},\\
g_j\triangleq & b_j \ \mathbf{e}_{d+1},
\end{align*}
where $\otimes$ is the Kronecker product and $\bm{0}_d$ is the all-zero vector of length $d$. $I_{d+1}$ is the identity matrix of size $(d+1)\times (d+1)$.
The equality constraints are composed of segment smoothness conditions and initial conditions; that is, for neighboring polynomial segments, the values of the polynomial and of its first-order derivative agree at the juncture points, and the prescribed values are enforced at the initial time.
The equality constraint parameters are defined by
{\small
\begin{align*}
h_j=\begin{cases}
\mathbf{e}^{N}_1\otimes \mathbf{e}_j \otimes \mathbf{e}_{d+1} , \text{ if } 1\leq j\leq m. \\
\mathbf{e}^{N}_{l}\otimes \mathbf{e}_{j-m} \otimes \mathbf{1}_{d+1}-\mathbf{e}^{N}_{l+1}\otimes \mathbf{e}_{j-m} \otimes \mathbf{e}_{d+1} ,\\ \hspace{50pt}\text{ if } m+1\leq j\leq mN .\\
\mathbf{e}^{N}_1\otimes \mathbf{e}_{j-mN} \otimes \mathbf{e}_{d} , \text{ if } mN+1\leq j\leq mN+m. \\
\mathbf{e}^{N}_{l}\otimes \mathbf{e}_{j-mN-m} \otimes D^\top\mathbf{1}_{d+1}-\mathbf{e}^{N}_{l+1}\otimes \mathbf{e}_{j-mN-m} \otimes \mathbf{e}_{d}, \\ \hspace{50pt}\text{ if } mN+m+1\leq j\leq 2mN .
\end{cases}
\end{align*}
}\noindent
where $\mathbf{e}^{N}_j$ is the canonical basis vector of size $N$, with 1 on the $j$-th entry and 0 on the other entries.
\begin{align*}
r_j=\begin{cases}
[{\mathcal{T}} x_0]_j , \text{ if } 1\leq j\leq m .\\
0, \text{ if } m+1\leq j\leq mN .\\
[{\mathcal{T}} Ax_0]_{j-mN} , \text{ if } mN+1\leq j\leq mN+m .\\
0 , \text{ if } mN+m+1\leq j\leq 2mN .
\end{cases}
\end{align*}
Define matrix $P_{\rm int}\in {\mathbb{R}}^{(d+1)\times (d+1)}$ associating objective function integration on $[0,1]$ as
\begin{align}
[P_{\rm int}]_{u,v}=\frac{1}{2d+3-u-v}.
\end{align}
The parameter $P\in{\mathbb{R}}^{m{N}(d+1)\times mN(d+1)}$ in the objective of Problem \ref{pb:poly_opt} is calculated by
\begin{align}
P=\left((S\bm{\Pi})^\top Q S\bm{\Pi}+(H\bm{\Pi})^\top R H\bm{\Pi} \right)\otimes I_N \otimes P_{\rm int}.
\end{align}
\begin{remark}
Piecewise polynomials are chosen to represent the flat output for the following reasons:
\begin{itemize}
\item The set of polynomials is closed under the derivative operation and is dense in the function space, as shown by the Stone-Weierstrass theorem. Hence, we can approximate any continuous function to arbitrary precision. In fact, one can also use polynomials to approximate the derivatives and higher-order derivatives of a sufficiently smooth function~\cite{4908942}.
\item The continuous-time path constraints are transformed into non-negativity of a univariate polynomial inside an interval, which can be transformed {\bf exactly} into Positive Semi-Definite (PSD) cone constraint using Markov-Luk\'{a}cs theorem and SOS \cite{Roh_SOS}. The detailed discussion is reported in the subsequent subsection.
\end{itemize}
\end{remark}
\subsection{SDP Formulation via SOS}
\label{sec:sdpformulation}
This subsection is devoted to the \emph{exact} SDP formulation of the polynomial optimization Problem~\ref{pb:poly_opt}. To this end, the following theorem is needed:
\begin{theorem}[Markov-Luk\'{a}cs theorem \cite{Roh_SOS}]
Let $a<b$. Then, a polynomial $p(t)$ is non-negative for $t\in [a, b]$, if and only if it can be written as
\begin{align*}
p(t)=\begin{cases}
f(t)+(t-a)(b-t) g(t), & \text { if } \deg(p) \text { is even } \\
(t-a) f(t)+(b-t) g(t), & \text { if } \deg(p) \text { is odd }
\end{cases},
\end{align*}
where $f(t), g(t)$ are SOS polynomials, with degree $\deg(f) \leq \deg(p)$, $\deg(g) \leq \deg(p)-2$ when $\deg(p)$ is even, or $\deg(f) \leq \deg(p)-1$, $\deg(g) \leq \deg(p)-1$ when $\deg(p)$ is odd.
\end{theorem}
For simplicity, we shall only consider the case where the flat output $y$ is an odd degree polynomial, i.e., $d$ is an odd number. The case where $d$ is even can be treated similarly. Let us denote $\delta \triangleq \frac{d-1}{2}$. Notice that a degree $d-1$ SOS polynomial $f$ can be represented as
\[
f(t)= \tilde \gamma(t)^\top X\tilde \gamma(t),
\]
with $\tilde \gamma(t)\triangleq\begin{bmatrix}
t^{\delta}& \cdots& t &1
\end{bmatrix}^\top$ and positive semi-definite matrix $X\in{\mathbb{R}}^{(\delta+1)\times (\delta+1)}$.
As a result, each inequality constraint $(L_{j}c_l-g_{j})^\top \gamma(t)\geq 0 $ in Problem \ref{pb:poly_opt} can be equivalently represented as
\begin{align}
& (L_{j}c_l-g_{j})^\top\gamma(t) =\notag \\
&\qquad t \tilde \gamma(t)^\top X^f_{j,l}\tilde \gamma(t)+\left(1-t\right)\tilde \gamma(t)^\top X^g_{j,l}\tilde \gamma(t),\label{eq:odd_eq}
\end{align}
with positive semi-definite matrices $X^f_{j,l}, X^g_{j,l}\in{\mathbb{R}}^{(\delta+1)\times (\delta+1)}$. By comparing the coefficients of the polynomials on the LHS and RHS of \eqref{eq:odd_eq}, we know that \eqref{eq:odd_eq} is equivalent to:
\begin{align}
L_{j}c_l-g_{j}={\mathcal{M}}( X^f_{j,l}, X^g_{j,l})
\end{align}
where
\begin{align}\label{eq:defMc}
{\mathcal{M}}(X^f_{j,l}, X^g_{j,l})=\begin{bmatrix}
\tr(F_0 X^f_{j,l})+\tr(G_0 X^g_{j,l})\\
\tr(F_1 X^f_{j,l})+\tr(G_1 X^g_{j,l})\\
\vdots\\
\tr(F_{d} X^f_{j,l})+\tr(G_{d} X^g_{j,l})
\end{bmatrix},
\end{align}
and $\{F_i,G_i|i=0,1,\cdots,d\}$ is a sequence of constant matrices defined as
\begin{align*}
[F_i]_{u,v}=&\begin{cases}
1, &\text{ if } u+v=i+2 \\
0, &\text{ otherwise }
\end{cases},\\
[G_i]_{u,v}=&\begin{cases}
-1, &\text{ if } u+v=i+2 \\
1, &\text{ if } u+v=i+1 \\
0, &\text{ otherwise }
\end{cases},
\end{align*}
where $[\cdot]_{u,v}$ represents the entry at row $u$, column $v$ in a matrix.
Notice that linear function ${\mathcal{M}}(\cdot,\cdot)$ is independent of index $j,l$.
Now we handle the quadratic objective function $J(\bm{c})=\bm{c}^\top P \bm{c}$ by linear matrix inequality techniques. Since $P$ is a positive semi-definite matrix, we can define $ \tilde{P} \in{\mathbb{R}}^{\rank(P)\times \size(P)}$ such that $ \tilde{P} ^\top \tilde{P} =P$.
Notice that the following three optimization problems are equivalent where $s$ is a scalar:
\begin{align*}
\min_{{\bm{c}}\in\mathcal{C}}&\quad {\bm{c}}^\top P {\bm{c}} \Leftrightarrow
\min_{{\bm{c}}\in\mathcal{C},s}\ s, \text{ s.t. } \| \tilde{P} {\bm{c}} \|_2\leq s\\
\Leftrightarrow&\min_{{\bm{c}}\in\mathcal{C},s\geq 0}s ,\text{ s.t. } \begin{bmatrix} \tilde{P} {\bm{c}} \\ s \end{bmatrix}\in\text{second order cone}.
\end{align*}
We arrive at the following SDP problem which is equivalent to Problem \ref{pb:poly_opt} and computationally tractable.
\begin{problem}[SDP Problem]\label{pb:conic}\hspace{40pt}
\noindent
\textbf{Original form}
\begin{align}
&\min_{\bm{c},s,\{X^f_{j,l},X^g_{j,l}\} } \quad s \notag \\
\text{s.t. }& L_{j} c_l-g_j={\mathcal{M}}(X^f_{j,l},X^g_{j,l}), X^f_{j,l},\,X^g_{j,l}\in \mathbb S_+,\notag \\
& \ \qquad \qquad j\in\{1,\cdots,p \}, l\in\{1,\cdots,N \}\label{eq:ineq_eq_cons}\\
&h_j^\top {\bm{c}}=r_j ,j\in\{1,\cdots,2mN \}\label{eq:eq_eq_cons}\\
&\begin{bmatrix} \tilde{P} {\bm{c}}\\ s \end{bmatrix} \in\SOC \notag
\end{align}
where $\SOC$ denotes the {second order cone} and ${\mathbb{S}}_{+}$ is the positive semi-definite cone.
\end{problem}
For notation conciseness, we define:
\begin{align}
{\bm{X}}\triangleq \begin{bmatrix}
X^f_{1,1}&&&&\\
&X^g_{1,1}&&&\\
&&\ddots&&\\
&&&X^f_{p,N}&\\
&&&&X^g_{p,N}\\
\end{bmatrix}.
\end{align}
Moreover, define function
\begin{align}\label{eq:defbM}
{\bm{M}}({\bm{X}})=\begin{bmatrix}
{\mathcal{M}}(X_{1,1}^f,X_{1,1}^g)\\
\vdots \\
{\mathcal{M}}(X_{p,N}^f,X_{p,N}^g)
\end{bmatrix}
=\begin{bmatrix}
\begin{bmatrix}
\tr(M_{1,1}^0{\bm{X}})\\
\vdots\\
\tr(M_{1,1}^{d}{\bm{X}})
\end{bmatrix}\\
\vdots \\
\begin{bmatrix}
\tr(M_{p,N}^0{\bm{X}})\\
\vdots\\
\tr(M_{p,N}^{d}{\bm{X}})
\end{bmatrix}
\end{bmatrix},
\end{align}
where $M_{j,l}^i$ is the corresponding matrix composed of $F_{i},G_i$ at compatible position that generates ${\mathcal{M}}(X_{j,l}^f,X_{j,l}^g)$ in \eqref{eq:defMc}.
Define
\begin{align}
{\bm{L}}=& I_{N} \otimes \begin{bmatrix}
L_1^\top & L_2^\top & \cdots & L_p^\top
\end{bmatrix}^\top ,\\
{\bm{g}}=&I_{N} \otimes \begin{bmatrix}
g_1^\top & g_2^\top & \cdots & g_p^\top
\end{bmatrix}^\top ,\\
{\bm{h}}=& \begin{bmatrix}
h_1^\top & h_2^\top & \cdots & h_{2mN}^\top
\end{bmatrix}^\top, \\
{\bm{r}}=& \begin{bmatrix}
r_1^\top & r_2^\top & \cdots & r_{2mN}^\top
\end{bmatrix}^\top.
\end{align}
We can rewrite Problem~\ref{pb:conic} as
\textbf{\textit{Compact form}}
\begin{align}
&\min_{s,{\bm{c}},{\bm{X}}}\quad s\notag \\
\text{s.t. }
&{\bm{L}} {\bm{c}}-{\bm{M}}({\bm{X}})={\bm{g}} \label{eq:ineq_comp}\\
&{\bm{h}} {\bm{c}}={\bm{r}} \label{eq:eq_comp} \\
&{\bm{X}}\in{\mathbb{S}}_+,s\geq 0 \notag \\
&\begin{bmatrix} \tilde{P} {\bm{c}} \\ s \end{bmatrix} \in\SOC \notag
\end{align}
\begin{figure}
\caption{The relationships between the optimization problems in this paper. A double-tail arrow indicates that the two problems are equivalent. A single-tail arrow indicates a one-way transformation of Problem \ref{pb:poly_opt}.}
\label{fig:procedure}
\end{figure}
Since there are $p$ inequality constraints in the original Problem~\ref{pb:continuous}, $\bm{X}$ is a block diagonal matrix with $2pN$ positive semi-definite matrices of size $\delta+1$. As a result, in the following section, we introduce a customized algorithm that solves Problem \ref{pb:conic} by primal-dual hybrid gradient methods which can handle ${\bm{X}}$ in a parallel fashion.
However, before continuing on, we would like to give a comparison between the conventional quadratic programming-based linear MPC and our approach.
\subsection{Discussions}
A conventional way to solve the continuous-time optimal control problem is to discretize it into the following discrete-time linear MPC problem\cite{garcia1989model}:
\begin{problem}[Discrete-time Linear MPC Problem]\label{pb:discrete}
\begin{align*}
\min_{ \{x[k],u[k]\}_{k=1}^{T_d} }&\quad \sum_{k=1}^{T_d} x[k]^\top Q x[k] + u[k]^\top R u[k] \\
\text{s.t.} \quad & x[k+1] = A_d x[k] + B_d u[k], \quad k = 1, \cdots, {T_d}-1 \\
& \Xi x[k] + \Upsilon u[k] \leq b, \quad k = 1, \cdots, {T_d}
\end{align*}
\end{problem}
\noindent where $A_d,B_d$ are the discretized system matrices, assuming zero-order hold is used for the control input, and $T_d\in{\mathbb{Z}}_{+}$ is the discrete horizon length.
One of the main differences between Problem~\ref{pb:discrete} and Problem~\ref{pb:poly_opt} is that the control input is parameterized as step functions in Problem~\ref{pb:discrete} (assuming zero-order hold is used), while for our case, the flat output (and hence the control input, as it is a linear function of the flat output and its derivatives) is parameterized as polynomials.
Another difference is that Problem~\ref{pb:discrete} is a Quadratic Programming (QP) problem and hence can be solved more efficiently than SDP. However, this is due to the fact that in Problem~\ref{pb:discrete}, constraints are only required to hold at discrete sampling time instants and therefore they may be violated in between sampling times.
On the other hand, the reason for our SDP formulation is that we want to have an exact representation of the continuous time path constraints. If we only require the constraint to hold at discrete time instant, since the value of a polynomial at a time instant is a linear function of its coefficients, we can express such constraints as linear inequalities on the coefficients of the polynomial, which effectively relaxed the polynomial optimization Problem~\ref{pb:poly_opt} into a QP problem that only guarantees constraint satisfaction at a discrete time instant. As an alternative, one could also leverage the following theorem to generate a QP problem, which has a smaller feasible set than that of Problem~\ref{pb:poly_opt}, but is guaranteed to satisfy the path constraints at every time instant.
\begin{theorem}[\cite{powers2000polynomials}]
Let ${\rm Pd}([a,b])$ denote the set of polynomials $p(t) > 0, \forall t \in [a,b]$. Define
\begin{align*}
\mathcal{P}_q:=\left\{\left.\sum_{i+j \leq q} c_{ij}(b-t)^i(t-a)^j \right| c_{ij} \geq 0\right\}.
\end{align*}
If polynomial $p \in {\rm Pd}([a,b])$, then $p \in \mathcal{P}_q$ for sufficiently large integer $q$.
\end{theorem}
\section{Accelerated SDP Solving with Parallel Computing}
\subsection{Primal dual hybrid gradient for SDP solving}
In this subsection, we present the primal-dual hybrid gradient algorithm that solves Problem \ref{pb:conic}.
Encode the constraints into the objective function as
\begin{align}
\min_{s,{\bm{c}},{\bm{X}}} s+
{\mathbb{I}}_{{\mathbb{S}}_{+}, \SOC}({\bm{X}},\tilde{\bm{c}},s)+{\mathbb{I}}_{=}({\bm{X}},{\bm{c}},\tilde{\bm{c}}),
\end{align}
where $\tilde{\bm{c}}= \tilde{P} {\bm{c}}$ is the slack variable, and the indicator functions are defined as
\begin{align}
& {\mathbb{I}}_{{\mathbb{S}}_{+}, \SOC}({\bm{X}},\tilde{\bm{c}},s)=\begin{cases}
0,\text{ if }{\bm{X}}\in{\mathbb{S}}_+ \text{ and}\begin{bmatrix}\tilde{\bm{c}} \\ s \end{bmatrix} \in\SOC\\
+\infty, \text{otherwise}
\end{cases}.\\
&{\mathbb{I}}_{=}({\bm{X}},{\bm{c}},\tilde{\bm{c}})=\begin{cases}
0,\text{ if }\left\{
\makecell{
{\bm{L}} {\bm{c}}-{\bm{M}}({\bm{X}})={\bm{g}}\\
{\bm{h}} {\bm{c}}={\bm{r}}\\
\tilde{P} {\bm{c}}-\tilde{\bm{c}}=0
} \right. \\
+\infty, \text{otherwise}
\end{cases}.
\end{align}
Using primal-dual operator splitting \cite{exp_lowrank}, the iterations can be derived as follows, where $\alpha$ is the primal step-size and $\beta$ is the dual step-size. ${\mathcal{D}}^*_{{\bm{X}}}(\cdot)$ is the conjugate operator of the linear mapping ${\bm{M}}({\bm{X}})$, and ${\mathcal{D}}^*_{{\bm{c}}}(\cdot)$ is the conjugate operator of the linear mapping $\begin{bmatrix}
{\bm{L}}^\top& {\bm{h}}^\top & \tilde{P}^\top \end{bmatrix}^\top{\bm{c}}$, the definition of which are given in \eqref{eq:D*X} and \eqref{eq:D*c} respectively.
\textbf{1. Primal step:}
\begin{align}
{\bm{X}}^{k+1} &\leftarrow \proj_{{\mathbb{S}}_+}({\bm{X}}^{k}-\alpha {\mathcal{D}}_{{\bm{X}}}^*(\lambda_1^k)) \label{eq:primal_X}\\
{\bm{c}}^{k+1}&\leftarrow {\bm{c}}^{k}-\alpha {\mathcal{D}}_{{\bm{c}}}^*(\lambda_1^k,\lambda_2^k,\lambda_3^k)\label{eq:primal_c}\\
\begin{bmatrix}
\tilde{\bm{c}}^{k+1}\\
s^{k+1}
\end{bmatrix}&\leftarrow \proj_{\SOC}\begin{bmatrix}
\tilde{\bm{c}}^{k}-\alpha (-\lambda_3^k)\\
(s^{k}-\alpha)^{+} \label{eq:primal_soc}
\end{bmatrix}
\end{align}
where $(s^{k}-\alpha)^+=\max(s^{k}-\alpha,0)$.
\textbf{2. Calculating difference:}
\begin{align}
\Delta {\bm{X}}^{k+1}&\leftarrow 2{\bm{X}}^{k+1}-{\bm{X}}^{k}\label{eq:diff_X}\\
\Delta {\bm{c}}^{k+1}&\leftarrow 2{\bm{c}}^{k+1}-{\bm{c}}^{k}\\
\Delta \tilde{\bm{c}}^{k+1}&\leftarrow 2\tilde{\bm{c}}^{k+1}-\tilde{\bm{c}}^{k}
\end{align}
\textbf{3. Dual step:}
\begin{align}
\lambda_1^{k+1}&\leftarrow \lambda_1^k+\beta {\bm{L}} \Delta{\bm{c}}^{k+1} -\beta{\bm{M}}(\Delta{\bm{X}}^{k+1})-\beta{\bm{g}} \label{eq:dualy1}\\
\lambda_2^{k+1}&\leftarrow \lambda_2^k+\beta {\bm{h}} \Delta{\bm{c}}^{k+1}-\beta{\bm{r}}\\
\lambda_3^{k+1}&\leftarrow \lambda_3^k+\beta \tilde{P} \Delta{\bm{c}}^{k+1}-\beta\Delta \tilde{\bm{c}}^{k+1}\label{eq:dualy3}
\end{align}
Here $\lambda_1$ is the dual variable corresponding to the equality constraint \eqref{eq:ineq_comp} and equivalently \eqref{eq:ineq_eq_cons}. $\lambda_2$ is the dual variable corresponding to the equality constraint \eqref{eq:eq_comp} and equivalently \eqref{eq:eq_eq_cons}. $\lambda_3$ is the dual variable corresponding to the equality constraint $ \tilde{P} {\bm{c}}-\tilde{\bm{c}}=0$.
Define $\lambda=\begin{bmatrix}
\lambda_1^\top & \lambda_2^\top & \lambda_3^\top
\end{bmatrix}^\top$.
We denote the entry of $\lambda_1$ corresponding to segment $l$, inequality index $j$, order $i$ as $\lambda_1[i,j,l],i\in\{0,\cdots,d\}, j\in\{1,\cdots,p\},l\in\{1,2,\cdots,N\}$. Recall the definition of $M_{j,l}^i$ in \eqref{eq:defbM}. The conjugate operators ${\mathcal{D}}^*$ are defined as\footnote{ We use $[A]_j$ to denote the $j$-th row of matrix $A$, or in case of $A$ is column vector, $j$-th entry of the vector $A$.}
\begin{align}
{\mathcal{D}}^*_{\bm{X}}(\lambda_1)\triangleq&-\sum_{l=1}^{N}\sum_{j=1}^{p} \sum_{i=0}^{d} \lambda_1[i,j,l] M^i_{j,l} \label{eq:D*X}\\
{\mathcal{D}}^*_{\bm{c}}(\lambda_1,\lambda_2,\lambda_3)\triangleq& \sum_{l=1}^{N}\sum_{j=1}^{p} \sum_{i=0}^{d} \lambda_1[i,j,l]\left(\mathbf{e}^N_l\otimes [L_{j}]_{i+1}^\top \right)\notag\\
&+\sum_{j=1}^{2mN} [\lambda_2]_j h_j+ \tilde{P} ^\top \lambda_3 \label{eq:D*c}
\end{align}
where $\mathbf{e}^N_l$ is the canonical basis vector of size $N$, with 1 on $l$-th entry and $0$ on other entries.
The projection to the semi-definite cone is
\begin{align}
\proj_{{\mathbb{S}}_+}(X)=\sum_{i=1}^{\size(X)} \max \left\{0, \nu_i\right\} \mu_i \mu_i^\top
\end{align}
where $\nu_i,\mu_i$ are the eigenvalue and the corresponding eigenvector of $X$. The projection to the second order cone is
\begin{align}
\proj_{\SOC}\begin{bmatrix}
{\bm{c}}\\
s
\end{bmatrix}=
\begin{cases}
\frac{s+\|{\bm{c}}\|_2}{2\|{\bm{c}}\|_2}\begin{bmatrix}
{\bm{c}}\\
\|{\bm{c}}\|_2
\end{bmatrix}& \text{if } \|{\bm{c}}\|_2>s.\\
\begin{bmatrix}
{\bm{c}}\\
s
\end{bmatrix}& \text{if } \|{\bm{c}}\|_2\leq s.
\end{cases}.
\end{align}
The convergence of algorithm \eqref{eq:primal_X}-\eqref{eq:dualy3} is provided in the following.
\begin{theorem}[\cite{ryu_yin_2022}]
Assume that a solution to the KKT conditions of Problem \ref{pb:conic} exists (denoted by ${\bm{c}}^\star,{\bm{X}}^\star,s^\star,\lambda^\star$) and that strong duality holds. If the linear mapping defined by
\begin{align*}
{\mathcal{L}}({\bm{c}},{\bm{X}})=\begin{bmatrix}
{\bm{L}}{\bm{c}}-{\bm{M}}({\bm{X}}) \\ {\bm{h}}{\bm{c}}
\end{bmatrix}
\end{align*}
and the step sizes $\alpha,\beta$ satisfy $0<\alpha\beta < 1/\left\| {\mathcal{L}} \right\|_2^2$, where $\left\|{\mathcal{L}}\right\|_2$ denotes the operator norm of ${\mathcal{L}}$, then the primal-dual hybrid gradient algorithm \eqref{eq:primal_X}-\eqref{eq:dualy3} converges to the solution of the KKT conditions, i.e., ${\bm{c}}^k\rightarrow {\bm{c}}^\star,{\bm{X}}^k\rightarrow {\bm{X}}^\star,s^k\rightarrow s^\star,\lambda^k\rightarrow \lambda^\star$.
\end{theorem}
\subsection{GPU parallel computing}\label{subsec:GPU}
It is worth noticing that for our proposed iterations, a significant proportion of the time will be spent on the projection $\proj_{{\mathbb{S}}_+}(\cdot)$. However, since ${\bm{X}}$ is a block diagonal matrix with $2pN$ matrices of size $\delta +1$ on its diagonal, the projection of ${\bm{X}}$ can be parallelized by projecting each small matrix onto the PSD cone. Furthermore, the calculation of ${\mathcal{D}}^*_{{\bm{X}}},{\mathcal{D}}^*_{{\bm{c}}}$ in \eqref{eq:D*X} and \eqref{eq:D*c}, and the difference calculation in \eqref{eq:diff_X} are essentially tensor operations and hence can be accelerated by parallel computation.
To speed up the computation of the proposed PDHG solver, we implement it in a parallelized manner on the GPU. Specifically, the projection step~\eqref{eq:primal_X} is wrapped as a kernel to be computed in parallel on the GPU. Additionally, we implement the calculation of $\mathcal{D}^*_{\boldsymbol{X}}$ and $\mathcal{D}^*_{\boldsymbol{c}}$ in~\eqref{eq:D*X} and~\eqref{eq:D*c}, as well as the update steps from~\eqref{eq:primal_c} to~\eqref{eq:dualy3} as tensor operations, which can also be accelerated by GPU parallelization.
We test our proposed solver on a desktop computer equipped with an AMD Ryzen Threadripper 3970X 32-Core Processor and an NVIDIA GeForce RTX 3080 GPU. We report the computation time for a single iteration of the parallelized solver running on the GPU in Table~\ref{tab:pdhg_time}, and compare it to that of a serialized version running on the CPU. Our results show that the iteration time of the accelerated solver is significantly shorter than that of the CPU version. Moreover, the computation time of the accelerated solver increases slowly as the problem size (the values $d$ and $N$) grows. Even for the largest problem instances considered, the iteration time remains within a few milliseconds, demonstrating the effectiveness of the GPU acceleration and the efficiency of the implementation.
\begin{table}[!htbp]
\centering
\begin{tabular}{lllll}
\toprule
\multirow{2}{*}{$N$} & \multicolumn{3}{c}{$d$} \\
\cmidrule(lr){2-4}
& \multicolumn{1}{c}{3} & \multicolumn{1}{c}{5} & \multicolumn{1}{c}{7} \\
\midrule
2 & 0.318 (1.874) & 0.345 (2.773) & 0.390 (3.778) \\
6 & 0.408 (5.159) & 0.443 (8.012) & 0.435 (11.240) \\
10 & 0.431 (8.811) & 0.506 (13.758) & 0.547 (19.254) \\
20 & 0.484 (19.449) & 0.649 (30.721) & 0.821 (42.886) \\
30 & 0.561 (32.330) & 0.767 (50.046) & 1.187 (63.170) \\
40 & 0.776 (46.167) & 1.394 (65.989) & 2.593 (93.495) \\
\bottomrule
\end{tabular}
\caption{Time consumption (in milliseconds) of a single iteration of the PDHG solver (calculating updates \eqref{eq:primal_X}-\eqref{eq:dualy3}) for different problem sizes. The runtime on CPU is in parentheses.
}
\label{tab:pdhg_time}
\end{table}
\begin{tikzpicture}
\begin{semilogyaxis}
[width=2.5in,
height=1.3in,
at={(0in,0in)},
scale only axis,
title={computation time of each iteration},
xlabel={\small polynomial segment number $N$},
ylabel={\small computation time (milliseconds)},
y label style={at={(axis description cs:0.06,.5)},anchor=south},
xticklabel style = {font=\footnotesize},
yticklabel style = {font=\footnotesize},
axis background/.style={fill=white},
xmajorgrids,
ymajorgrids,
legend style={at={(1.0,-0.38)}, anchor=north east, legend columns=4, legend cell align=left, align=left, draw=white!15!black, font=\scriptsize}]
\addplot [color={blue}, line width=1pt, mark=*, mark size=1pt]
table[row sep={\\}]
{ 2 0.06166896933333332 \\
4 0.062722244044444443 \\
6 0.06178892611111111\\
8 0.065368641566666666 \\
10 0.06688440444444445 \\
15 0.06658882044444444 \\
20 0.07110262266666666 \\
25 0.06837505161111111 \\
30 0.06595894044444443 \\
35 0.07261382344444445 \\
40 0.07032468044444445 \\
}
;\addlegendentry{$d=3$}
\addplot [color={black}, line width=1pt, mark=*, mark size=1pt]
table[row sep={\\}]
{
2 0.07141276644444443 \\
4 0.06979288822222221 \\
6 0.06871374655555554 \\
8 0.06930141611111111 \\
10 0.07747854833333331 \\
15 0.07623266822222222 \\
20 0.0842319561111111 \\
25 0.0837051\\
30 0.09019142177777777 \\
35 0.091056077866666666 \\
40 0.11322089433333334 \\
}
;\addlegendentry{$d=5$}
\addplot [color={blue}, dashed, line width=1pt, mark=*, mark size=1pt]
table[row sep={\\}]
{
2 0.21636989799999998 \\
4 0.23359112766666668 \\
6 0.233701074 \\
8 0.23418399999996 \\
10 0.23572537466666665 \\
15 0.23695022855555553 \\
20 0.2384660635555555 \\
25 0.2626788785555555 \\
30 0.3170705874444444 \\
35 0.3329102924444444 \\
40 0.30039304677777773 \\
}
;\addlegendentry{$d=7$}
\addplot [color={black}, dashed, line width=1pt, mark=*, mark size=1pt]
table[row sep={\\}]
{
2 0.24817848711111107\\
4 0.261123071\\
6 0.2603819921111111\\
8 0.2465184132222222\\
10 0.2576784097777778\\
15 0.26052244199999997\\
20 0.2764174364444444\\
25 0.34160906988888884\\
30 0.3360188203333333\\
35 0.369049681\\
40 0.355626913111111\\
}
;\addlegendentry{$d=9$}
\end{semilogyaxis}
\end{tikzpicture}
\subsection{Warm Start and Termination Rule}
Denote $\Delta t$ as the time length for which the control input of each solution is applied. To facilitate the simple warm start strategy, the horizon $T$ satisfies $T=N\cdot \Delta t$, i.e., the first segment of the control input $u(t)$ is applied before receding to a new horizon. We evaluate our algorithm's performance with two different strategies: cold start and warm start.
The cold start strategy initializes the optimization variable ${\bm{c}}(t+\Delta t),{\bm{X}}(t+\Delta t),\lambda(t+\Delta t)$ as random vectors/matrices with each entry uniformly distributed on $[-0.5,0.5]$. The warm start strategy initializes the optimization variable in a shifting manner, i.e., $\forall \ 2\leq l\leq N,1\leq j\leq p$:
\begin{align*}
\left(X^f_{j,l}(t+\Delta t)\right)^{0}&:=\left(X^f_{j,l-1}(t)\right)^{\tau(t)}\\
\left(X^g_{j,l}(t+\Delta t)\right)^{0}&:=\left(X^g_{j,l-1}(t)\right)^{\tau(t)}\\
\left(c_l(t+\Delta t)\right)^{0}&:=\left( c_{l-1}(t) \right)^{\tau(t)}
\end{align*}
where the superscript $\tau(t)$ is the number of iterations applied to solve the SDP problem at time $t$.
For the first segment, $\left(X^f_{j,1}(t+\Delta t)\right)^{0},\left(X^g_{j,1}(t+\Delta t)\right)^{0},\left(c_1(t+\Delta t)\right)^{0}$ are initialized randomly.
The dual variable $\lambda$ is also initialized in a shifting manner according to its correspondence with ${\bm{X}},{\bm{c}}$ in \eqref{eq:dualy1}-\eqref{eq:dualy3}.
The algorithm termination is determined by the residual $\epsilon^k=\epsilon_{\text {primal }}^k+\epsilon_{\mathrm{dual}}^k $, where the primal and dual residuals are defined as:
\begin{align*}
\epsilon_{\mathrm{primal}}^k=&\left\|\frac{1}{\alpha}\left({\bm{X}}^k-{\bm{X}}^{k-1}\right)-\mathcal{D}_{{\bm{X}}}^*\left(\lambda^k-\lambda^{k-1}\right)\right\|_F \\
& +\left\|\frac{1}{\alpha}\left({\bm{c}}^k-{\bm{c}}^{k-1}\right)-\mathcal{D}_{{\bm{c}}}^*\left(\lambda^k-\lambda^{k-1}\right)\right\|_2
\end{align*}
\begin{align*}
\epsilon_{\mathrm{dual}}^k=&\left\|\frac{1}{\beta}\left(\lambda^k-\lambda^{k-1}\right)\right.\\
& \left.-\begin{bmatrix}
{\bm{L}}({\bm{c}}^k-{\bm{c}}^{k-1})-{\bm{M}}({\bm{X}}^k-{\bm{X}}^{k-1})-{\bm{g}}\\
{\bm{h}}({\bm{c}}^k-{\bm{c}}^{k-1})-{\bm{r}}\\
\tilde{P}({\bm{c}}^k-{\bm{c}}^{k-1}) - (\tilde{\bm{c}}^k-\tilde{\bm{c}}^{k-1})
\end{bmatrix}\right\|_2.
\end{align*}
$\|\cdot\|_F$ is the Frobenius norm.
Our proposed algorithm terminates when the residual $\epsilon^k$ corresponding to segment $1$ is below $2\times 10^{-2}$, which is accurate enough for control performance.
The control and computational speed performance is demonstrated in the next section.
\section{Simulation}
The performance of the proposed MPC solver is validated on the quadruple-tank process~\cite{johansson2000quadruple}, whose schematic diagram is visualized in Fig.~\ref{fig:quaduple-tank}. The system has $4$ states, which represent the liquid levels (in centimeter) of each tank. There are two control inputs in the system, namely the voltage (in volt) of Pump 1 and Pump 2 in Fig.~\ref{fig:quaduple-tank}. The simulation employs the same linearized system equations and system parameters as~\cite{johansson2000quadruple}, which are omitted due to space limitations.
\begin{figure}
\caption{The schematic diagram of the quadruple-tank process.}
\label{fig:quaduple-tank}
\end{figure}
The initial conditions of the tanks are
\[x_0=\begin{bmatrix}
10 & 19 & 19 & 1
\end{bmatrix}^\top,\]
and the control objective of the MPC is to track a reference trajectory $r(t)$ of the liquid levels. For simplicity, we set the constant reference trajectory as
\[r(t)=\begin{bmatrix}
19.9 & 19.9 & 2.4 & 2.4
\end{bmatrix}^\top.\]
In addition to tracking the reference signal, the MPC must ensure that the liquid levels in all tanks remain between 0 and 20 cm and that the control inputs stay within the voltage limits of 0 to 8 V during the control process. The objective weighting matrices in MPC Problem \ref{pb:continuous} are defined as $Q=I,R=0.1I$ with appropriate dimensions. Our code is available on \href{https://github.com/zs-li/MPC_PDHG}{https://github.com/zs-li/MPC\_PDHG}.
\subsection{Control Performance}
For comparison, we employ the Quadratic Programming (QP) formulation (Problem~\ref{pb:discrete}), where the MPC problem is discretized with a sampling interval of $T_s=1{\rm s}$ and horizon length $T_d=20$. On the other hand, for the proposed method, we set the degree of polynomial $d=3$ and the segments of polynomials $N=20$, horizon length $T=20$. For each iteration, the resulting control input applies to the system for $\Delta t=1$ second.
Thus, the two methods are comparable in terms of horizon length and update frequency. We simulate the control process for $120$ seconds and visualize the resulting system states and control inputs in Fig.~\ref{fig:QP}-Fig.~\ref{fig:state_input}.
\begin{figure}
\caption{The states of discrete time linear MPC using the QP solver. The gray area in the figures denotes the feasible region of states. The states between sampling times violate the constraints.}
\label{fig:QP}
\end{figure}
\begin{figure}
\caption{The states of continuous time linear MPC using our proposed solver. The gray area in the figures denotes the feasible region of states. The states under our proposed MPC input stay in the feasible region for the whole time interval.}
\end{figure}
\begin{figure}
\caption{Control input using the QP solver.}
\caption{Control input using the proposed solver.}
\caption{Comparison on the input trajectory using the QP and the proposed SDP strategy respectively. The gray area in the figures denotes the feasible regions of control input.}
\label{fig:state_input}
\end{figure}
As shown in Fig.~\ref{fig:state_input}, at first glance, the state trajectories obtained from both solvers are nearly identical. However, upon close inspection, it can be seen that even though the QP-based controller satisfies the constraints at discrete-time instants, the constraints are violated in between sampling instants. In contrast, the proposed algorithm ensures constraint satisfaction on the whole time interval.
\subsection{Computational Speed Performance}
In the following, we compare the computational speed performance of our proposed algorithm and several off-the-shelf solvers (on Problem \ref{pb:conic}) under different numbers of polynomial degrees $d$ and polynomial segments $N$.
The block number for GPU acceleration is set as 128. The number of threads on every block is $\lceil \frac{pN}{128}\rceil$. The computational time in Figure \ref{fig:time} is the average solving time of the first 100 apply steps. The step sizes are $\alpha=0.2,\beta=0.4$. The computation platform is the same as in Subsection \ref{subsec:GPU}, i.e., a desktop computer equipped with an AMD Ryzen Threadripper 3970X 32-Core Processor and an NVIDIA GeForce RTX 3080 GPU. The real number calculations on GPU are floating point number with hybrid precision 32-bit and 16-bit, which is computationally efficient and accurate enough for control applications. As for comparison, the other solvers are of default precision 64-bit. Thus, the time comparison may not be equal but represents our computation speed superiority to some extent.
\begin{figure}
\caption{The average computation time of our proposed algorithm and off-the-shelf solvers for different problem sizes.}
\label{fig:time}
\end{figure}
As shown in Fig.~\ref{fig:time}, our proposed algorithm has better scalability for large problems (especially large $N$), and has a low computational time, promising for real-time control applications. The warm-start technique introduced in Subsection \ref{subsec:GPU} can effectively reduce the computation time by reducing iterations. For off-the-shelf solvers, COSMO and COPT perform well on large-scale problems compared to other solvers. However, their computation is still slow and incompatible with real-time control scenarios.
\begin{figure}
\caption{The number of iterations required to reach residue $(\epsilon^k)_1<1\times 10^{-2}$.}
\label{fig:iters}
\end{figure}
We demonstrate the number of iterations required to reach $(\epsilon^k)_1<10^{-2}$ for different problem sizes in Fig.~\ref{fig:iters}. The required iteration number grows gently as the problem size grows, which also corroborates the scalability of our proposed solver. The warm-start technique introduced in Subsection \ref{subsec:GPU} can effectively reduce the iteration number.
\begin{figure}
\caption{The relative duality gap of our proposed algorithm (cold start). Problem sizes scarcely influence the convergence speed of the relative duality gap. }
\label{fig:converge}
\end{figure}
Define the Lagrange function of Problem \ref{pb:conic} as $\mathscr{L}({\bm{c}},s,{\bm{X}};\lambda)$, then the relative duality gap is defined as
$$\frac{1}{J({\bm{c}})}\left[\inf_{{\bm{c}},s,{\bm{X}}}\mathscr{L}({\bm{c}},s,{\bm{X}};\lambda)-\sup_{\lambda}\mathscr{L}({\bm{c}},s,{\bm{X}};\lambda)\right],
$$
where $J({\bm{c}})$ is the objective value of Problem \ref{pb:poly_opt} and equivalently Problem \ref{pb:conic}.
We demonstrate the convergence of the relative duality gap with respect to the iteration number in Fig.~\ref{fig:converge}. The relative duality gap converges below $10^{-5}$ within approximately 500 iterations. The problem sizes scarcely influence the convergence speed of the relative duality gap, which also indicates good scalability of our proposed algorithm.
\section{Conclusion}
In this paper, we aim to address continuous-time path-constrained linear MPC problems while ensuring that path constraints are satisfied at every time interval. To achieve this, we propose an algorithm that utilizes differential flatness to eliminate dynamic constraints. Furthermore, by parameterizing the flat output with piecewise polynomials, we formulate a polynomial optimization problem where the decision variables are finite-dimensional polynomial coefficients, and the inequality path constraints are polynomial non-negativity constraints on intervals, which remain infinite-dimensional.
Taking advantage of the Markov-Luk\'{a}cs theorem from SOS theory, we transform the polynomial optimization problem into an equivalent SDP problem that is computationally tractable.
To accelerate the solving process of the SDP problem, we use a customized PDHG algorithm, which exploits the block-diagonal structure of the PSD matrix to perform paralleled computation. The numerical simulation of a quadruple-tank process validates that our proposed algorithm can ensure that the path constraints are satisfied at every time interval. Moreover, the parallel accelerated design of our algorithm results in superior computational speed performance.
\end{document} |
\begin{document}
\title{What can we learn from the dynamics of entanglement and quantum discord in the Tavis--Cummings model?}
\author{Juliana Restrepo}
\affiliation{Sistemas Complejos, Universidad Antonio Nari\~{n}o, Medell\'{i}n, Colombia}
\author{Boris A. Rodr\'{i}guez}
\affiliation{Instituto de F\'{i}sica, Universidad de Antioquia UdeA, Calle 70 No. 52-21, Medell\'{i}n, Colombia}
\begin{abstract}
We revisit the problem of the dynamics of quantum correlations in the exact Tavis--Cummings model. We show that many of the dynamical features of quantum discord attributed to dissipation are already present in the exact framework and are due to the well known non-linearities in the model and to the choice of initial conditions. Through a comprehensive analysis, supported by explicit analytical calculations, we find that the dynamics of entanglement and quantum discord are far from being trivial or intuitive. In this context, we find states that are indistinguishable from the point of view of entanglement and distinguishable from the point of view of quantum discord, states where the two quantifiers give opposite information and states where they give roughly the same information about correlations at a certain time. Depending on the initial conditions, this model exhibits a fascinating range of phenomena that can be used for experimental purposes such as: Robust states against change of manifold or dissipation, tunable entanglement states and states with a counterintuitive sudden birth as the number of photons increase. We furthermore propose an experiment called quantum discord gates where discord is zero or non-zero depending on the number of photons.
\end{abstract}
\pacs{03.65.Ta, 03.67.Mn, 42.50.-p}
\date{\today}
\maketitle
\section{Introduction}
The quantum correlations arising from the superposition principle have been the source of a long-standing and heated debate since the birth of quantum mechanics \cite{EPR, Bohr, Olival}. Indeed, when Schr\"odinger declared in 1935 that \textquotedblleft entanglement is the characteristic trait of quantum mechanics\textquotedblright \cite{Schrodinger} it was believed that the key quantum correlation was entanglement \cite{HorodeckiRev,EntanglementRev} and many efforts were devoted to quantify entangled vs separable states \cite{Werner}. This main role of entanglement was encouraged by the Bell theorem \cite{Bell}, its experimental verifications \cite{Aspect, Zeilinger, Gisin} and its use as a main resource in quantum information \cite{HorodeckiRev} and quantum computation \cite{Nielsen} tasks. However, entanglement is not the only quantum correlation encoded in a quantum state and it was found recently, that certain tasks, e.g. \textit{nonlocality without entanglement} \cite{Bennet, Bartlett} and \textit{quantum speedup with separable states} \cite{Braunstein, Lanyon} can be done without using entanglement as a resource.
From this point of view, it is desirable to investigate other quantifiers of quantum correlations. Recently, Olivier and Zurek introduced the quantum discord \cite{Zurek, Vedral} as the difference between two possible quantum extensions of the classical mutual information. This new quantity has been broadly studied in the context of quantum information \cite{Winter, Datta, Dakic, Piani}, quantum cryptography \cite{Pirandola} and quantum metrology \cite{Adesso2014}. Even though the quantum and classical information encoded in quantum discord cannot be directly compared \cite{Modi}, several theoretical efforts have been made in order to understand how they are contained in a quantum state \cite{Acin, Modi, Modi2012}, and it is presently understood that the quantum discord quantifies in a more subtle way the quantum correlations in mixed states through the state disturbance induced by local measurements \cite{Streltsov, Piani2012}.
The dynamics of quantum entanglement have been exhaustively studied \cite{Buchleitner}. One of the important features of entanglement is the ``entanglement sudden death" (ESD) phenomenon \cite{YuEberly, Davidovich, Kimble}. This process describes the disentanglement of a pair of qubits exposed to an environment in a finite time, and it depends strongly on the initial state of the system and its interaction with the environment. On the other hand, the dynamics of quantum discord have been mainly studied under decoherence and dissipation scenarios. For a pair of qubits coupled to a Markovian environment \cite{Fanchini2009, Celeri, Serra, Mazzola}, the quantum discord was sometimes seen to have an exponential decay and an asymptotic vanishing \cite{Fanchini2009, Acin, Mazzola}. The effect of non-Markovian environment has also been studied in \cite{Fanchini2010}. The ensuing quantum discord was found to vanish only at discrete instants of time. In this context it is relevant to ask to what extent these dynamical phenomena are due to the dissipative dynamics or to the non-linearity contained in the matter--field dipole--type interaction in the models considered.
In this paper, we address the dynamics of quantum discord and entanglement in the Tavis--Cummings exact Hamiltonian. We show that some of the dynamical features observed for quantum discord considering dissipation are already present in the exact model and do not need assumptions on the coupling or the nature of the environment. Though a direct comparison of the amounts of discord and entanglement is meaningless, because the measures of discord and entanglement do not coincide, we make some general remarks on the different correlations embedded in a quantum state and the implications on the geometry of Hilbert space following the interpretation of K. Modi et al., \cite{Modi}. Furthermore we find surprising effects for certain initial conditions such as the possibility of building quantum discord gates (zero or non zero discord depending on the number of photons), robust states against dissipation and states whose entanglement counterintuitively augments with the number of photons, among others.
The paper is organized as follows: we present the exact model in Sec II followed by the definition and a discussion of the entanglement and quantum discord in Sec. III. We then present our results for the dynamics of correlations of different initial conditions in Section IV. Finally, Section V summarizes the results and draws conclusions.
\section{Tavis--Cummings model}
We present the model Hamiltonian used to study the dynamics of entanglement and quantum discord. The Hamiltonian that describes the interaction between two non-interacting two--level systems (2-TLS) and a single--mode cavity field, is the so-called Tavis--Cummings Hamiltonian (TC) \cite{TC}, a generalization of the Jaynes--Cummings Hamiltonian (JC) \cite{JC, ReviewJC}. The JC and TC models of matter-light interaction are the theoretical cornerstone in a variety of quantum related areas such as quantum optics \cite{Walls-Milburn, Special}, cavity QED (CQED) \cite{Haroche, Walther}, trapped ions \cite{Wineland1996, WinelandRMP}, quantum information and quantum computation \cite{Nico, Azuma, Molmer}, circuit QED \cite{Schoelkopf, Schoelkopf2004, Mooij, Takayanagi}, semiconductor quantum optics \cite{KiraKoch, Forchel, Deppe, Elena, Elena2012, us} and cavity opto-mechanics \cite{Marquardt, Milburn-Woolley}. In the dipole and rotating wave approximation, the TC Hamiltonian can be written as:
\begin{equation}
H = \frac{\omega}{2}(\sigma^{z}_1+\sigma^{z}_2)+ \omega_{0} a^\dagger a +g \sum_{i=1}^2 \left( a^\dagger \sigma_{i}^{-}+ a \sigma_{i}^{\dagger}\right),
\label{TC-h}
\end{equation}
where the first two terms are the 2-TLS and photon energies respectively, $a$ ($a^\dagger$) are the creation and annihilation operators of the single-mode cavity field, and $\sigma_i^\dagger=\ket{+}_i\prescript{}{i}{\bra{-}}$ and $\sigma_i^-=\ket{-}_i\prescript{}{i}{\bra{+}}$ are the TLS pseudo--spin flip operators that connect the ground $\ket{-}_i$ and excited $\ket{+}_i$ states of the $i$-th TLS with energies $- \omega/2$ and $\omega/2$ respectively. The interaction Hamiltonian describes the dipole interaction between the 2-TLS and the field, $g$ is the light-matter coupling constant. We take $\hbar=1$ and the resonance condition $\omega-\omega_0=0$.
The Tavis--Cummings Hamiltonian has a conserved quantum number, the so-called excitation manifold number, given by $\Lambda = N + N_e$, where $N=a^\dagger a$ is the number of photons and $N_e=\sum_{i=1}^2 \sigma_i^{\dagger} \sigma_i^{-}$ is the number of TLS in the excited state. Using this symmetry, we can separate the evolution of the total density operator $\rho^{T}(t)$ in disjoint excitation manifolds $\Lambda_{n}\doteq \{ \ket{++}_{n-1}, \ket{+-}_{n}, \ket{-+}_{n}, \ket{--}_{n+1}\}$ and calculate the exact solution for the total density operator of the system:
\begin{equation}
\rho^{T}(t) =\hat{U}(t) \rho^{T}(0) \hat{U}^\dagger(t),
\end{equation}
where $\rho^{T}(t)=\oplus_{\substack{n}} \rho^T(t)|_{\Lambda_{n}}$ and $\hat{U}(t)=\oplus_{\substack{n}} \hat{U}(t)|_{\Lambda_{n}} $. The operators $\rho^T(t)|_{\Lambda_{n}}$ and $ \hat{U}(t)|_{\Lambda_{n}}$ are the density and the time evolution operators in manifold $\Lambda_n$ respectively. The latter can be written as \cite{Kim2002,Puri}:
\begin{widetext}
\begin{equation}
\hat{U}(t)|_{\Lambda_{n}}= e^{-i \omega n t} \times
\begin{pmatrix}
1+\frac{n C_1(t)}{2n+1} & - \frac{ i \sqrt{n} C_2(t)} {\sqrt{2(2n+1)}} & - \frac{ i \sqrt{n} C_2(t)} {\sqrt{2(2n+1)}} & \frac{\sqrt{n(n+1)} C_1(t)}{2n+1} \\
- \frac{ i \sqrt{n} C_2(t)} {\sqrt{2(2n+1)}} & 1 + \frac{C_1(t)}{2} & \frac{C_1(t)}{2} & - \frac{ i \sqrt{n+1} C_2(t)} {\sqrt{2(2n+1)}} \\
- \frac{ i \sqrt{n} C_2(t)} {\sqrt{2(2n+1)}} & \frac{C_1(t)}{2} & 1 + \frac{C_1(t)}{2} & - \frac{ i \sqrt{n+1} C_2(t)} {\sqrt{2(2n+1)}} \\
\frac{\sqrt{n(n+1)} C_1(t)}{2n+1} & - \frac{ i \sqrt{n+1} C_2(t)} {\sqrt{2(2n+1)}} & - \frac{ i \sqrt{n+1} C_2(t)} {\sqrt{2(2n+1)}} & 1+\frac{(n+1) C_1(t)}{2n+1}
\end{pmatrix}.
\label{exact}
\end{equation}
\end{widetext}
\noindent
where the time dependent functions $C_1(t)=\cos(2\pi\Omega_{R} t)-1$ and $C_2(t)=\sin(2\pi\Omega_R t)$ oscillate with an effective Rabi frequency $\Omega_{R}=g\sqrt{4n+2}$. The above expression enables us to calculate the dynamics for any initial state of the 2-TLS + light system, pure or mixed. Restricting ourselves to initial conditions $\rho^{T}(0)$ in the excitation manifold $\Lambda_n$, we can write the total 2-TLS + light density operator in the $\Lambda_n$ manifold. This restriction results in a global phase in Eq. (\ref{exact}) and the subsequent evolution will not depend on $\omega$. In this paper we focus on the 2-TLS which are formally a two-qubit system. Taking the partial trace on the field states $\rho(t)= \tr_{\mathrm{Field}}{\rho^T(t)}$ and using the usual basis for the 2-TLS Hilbert space $\mathcal{H} \doteq \{ \ket{1} = \ket{++}, \ket{2} = \ket{+-}, \ket{3} = \ket{-+}, \ket{4} = \ket{--}\}$, we obtain the reduced two-qubit density matrix:
\begin{equation}
\rho(t)=
\begin{pmatrix}
\rho_{11}(t) & 0 & 0 & 0 \\
0 & \rho_{22}(t)& \rho_{23}(t) & 0 \\
0 & \rho_{32}(t) & \rho_{33}(t) & 0 \\
0 & 0 & 0 & \rho_{44}(t)
\end{pmatrix}.
\label{pmatrixx}
\end{equation}
The above matrix has an X structure \cite{Eberly2007} which will turn out to be very useful to compute the dynamics of quantum discord.
\section{Classical and quantum correlations}
\subsection{Entanglement}
Entanglement is a measure of the non-separability of the quantum state of a composite system and it is, in general, a difficult quantity to compute \cite{HorodeckiRev, EntanglementRev}. As a resource in quantum information and computation tasks, the entanglement expresses the maximum number of Bell pairs that it is possible to obtain from the quantum state to be used for quantum tasks. From the work of Wootters \cite{Wootters} in the nineties, it is well known that the entanglement for a pair of qubits can be quantified in the concurrence, a function which has a closed simple form for any state of the TLS given the density matrix $\rho$ that describes them.
For a two-qubit system the concurrence is given by \cite{Wootters} $\max\,\{0,\Lambda(t)\}$, where $\Lambda(t)=\lambda_1(t)-\lambda_2(t)- \lambda_3(t)-\lambda_4(t)$ and $\lambda_i(t)$ are the square roots, ordered in decreasing value, of the eigenvalues of the matrix $\rho(t)(\sigma_2 \otimes \sigma_2) \rho^{\ast}(t) (\sigma_2 \otimes \sigma_2)$. $\rho^{\ast}(t)$ is the complex conjugate of the two-qubit density matrix $\rho(t)$ and $\sigma_2$ the second Pauli matrix. As the reduced two-qubit density matrix used to evaluate the concurrence has an X structure with $\rho_{14}(t)=0$, cf. Eq.~(\ref{pmatrixx}), the concurrence has a simple analytic expression $C(t)= 2 \max\,\{0, |\rho_{23}(t)|-\sqrt{\rho_{11}(t)\rho_{44}(t)}\}$.
\subsection{Quantum discord}
Following Olivier and Zurek \textquotedblleftte{Zurek}, we can quantify the total amount of classical and quantum correlations present in a bipartite quantum system $\rho^{AB}$ by means of the quantum mutual information, an information-theoretic measure of the total correlation in a bipartite quantum state:
\begin{equation}
\mathcal{I}(\rho^{AB}) = S(\rho^{A}) + S(\rho^{B}) - S(\rho^{AB}),
\end{equation}
where $S(\rho) = - \tr(\rho \log \rho)$ is the von-Neumann entropy. The quantum mutual information may be written as a sum of classical correlations $\mathcal{C}(\rho^{AB})$ and the quantum discord $D(\rho^{AB})$:
\begin{equation}
D(\rho^{AB})=\mathcal{I}(\rho^{AB}) - \mathcal{C}(\rho^{AB}),
\label{resta}
\end{equation}
with the former given by \cite{Zurek, Vedral}:
\begin{eqnarray}
\mathcal{C}(\rho^{AB}) &=& \underset{\{ \Pi_k^B\}}{\max} \left( \mathcal{I}(\rho^{AB}|\Pi_k^B) \right); \nonumber \\
&=& S(\rho^A)-\underset{\{ \Pi_k^B\}}{\min} \left( S(\rho^{AB}| \{\Pi_k^B\}) \right),
\label{clasiquito}
\end{eqnarray}
where $ \mathcal{I}(\rho^{AB}|\Pi_k^B)$ is the quantum conditional mutual information of a measurement, with $\{ \Pi_k^B\}$ a complete set of projection operators, in the subsystem $B$, and $\rho^{AB}| \{\Pi_k^B\} = \tr_B(\Pi_k^B \rho^{AB} \Pi_k^B)/p_k$ is the residual state of the subsystem $A$ after the measurement with result $k$ and probability $p_k = \tr_{AB}(\Pi_k^B \rho^{AB} \Pi_k^B)$. Due to the complex optimization procedure involved in the definition of classical correlations, the quantum discord is usually intractable to compute for a general state. However, recent efforts in the two-qubit case have shown that it is possible to obtain a closed expression for the quantum discord of a general two-qubit state \cite{Adesso, Zhou}.
For the class of two-qubit X-states it is possible to obtain an analytical expression \cite{Li, Ali, Luo} for the quantum discord:
\begin{equation}
D(\rho^{AB})= \min \left( D_{\sigma_x^B}(\rho^{AB}), D_{\sigma_z^B}(\rho^{AB}) \right),
\label{discord}
\end{equation}
where $D_{\Pi_k^B}(\rho^{AB}) = \mathcal{I}(\rho^{AB}) - \mathcal{I}(\rho^{AB}|\Pi_k^B)$. Recently, Chen and Huang \cite{Chen, Huang} introduced a theorem that states that expression (\ref{discord}) is exact if:
\begin{equation}
(|\rho_{23}|+|\rho_{14}|)^2\leq (\rho_{11}-\rho_{22})(\rho_{44}-\rho_{33});
\label{uno}
\end{equation}
or
\begin{equation}
|\sqrt{\rho_{11}\rho_{44}}-\sqrt{\rho_{22}\rho_{33}}|\leq |\rho_{23}|+|\rho_{14}|,
\label{dos}
\end{equation}
with $\rho_{ij}=\rho_{ij}(t)$, $i,j=1,\ldots, 4$ defined in Eq. (\ref{pmatrixx}). In the first case the minimum is $D_{\sigma_z^B}(\rho^{AB})$ and in the latter case the minimum is $D_{\sigma_x^B}(\rho^{AB})$. There exists the possibility of not satisfying Eqs.~(\ref{uno}) and (\ref{dos}); in this case Huang \cite{Huang} bounded the error for the quantum discord to $0.0021$. For most of the initial conditions and range of parameters considered in this paper there are small regions in time, of order $0.02t_R$, where the quantum discord is neither $D_{\sigma_x^B}(\rho^{AB})$ nor $D_{\sigma_z^B}(\rho^{AB})$ but the effect of considering the other minimum is negligible given the order of the correlations measured by discord.
There are two important results in our paper that are of the same order of magnitude of the error, in both cases we specifically comment on the validity of expression (\ref{discord}) for quantum discord.
\section{Dynamics of quantum correlations}
We now use the definitions presented in the previous section to calculate the concurrence $C(t)$ and the quantum discord $D(t)$ in the Tavis--Cummings model cf. Eq. (\ref{TC-h}). In order to comprehend the dynamics of quantum correlations we consider various families of pure and mixed initial conditions. For each of them, we compare the two quantities and discuss how the results might be used for quantum computation purposes.
\subsection{Family of Bell states}
\begin{figure}
\includegraphics[width=0.45\textwidth]{0713fig23+_v2.eps}
\caption{\label{0313fig23+}(color online) Concurrence (bold lines) and quantum discord (dashed lines) as a function of time for initial condition $\ket{\psi^+_\alpha}_1$, with $\alpha=0$ (black), $\alpha=0.2$ (blue) and $\alpha=1/\sqrt{2}$ (red). The inset shows the dependence of collapse and revival times $t_i/t_R$, $i=1,2,3,4$ as a function of $\alpha$ for manifold $\Lambda_1$. The shaded areas in the inset correspond to the ESD and the vertical lines correspond to the values of $\alpha$ considered in the large figure.}
\end{figure}
As a first example we take the partially entangled pure states:
\begin{equation}
\ket{\psi^\pm_\alpha}_n=\alpha \ket{+-}_n\pm\sqrt{1-\alpha^2}\ket{-+}_n,
\end{equation}
with $0\leq\alpha\leq1$. For $\alpha=1/\sqrt{2}$ these correspond to the usual Bell States which are maximally entangled. For such states quantum discord and any measure of entanglement coincide \cite{Ali}.
We first focus on the results for the family of initial conditions $\ket{\psi^+_\alpha}_n$. For $0<\alpha<1$ (cf Fig. \ref{0313fig23+}) the concurrence $C(t)$ starts at a maximum $C(0)$ given by:
\begin{equation}
C(0)\equiv g(\alpha)=2\alpha\sqrt{1-\alpha^2}.
\label{galpha}
\end{equation}
An entanglement sudden death (ESD) appears at $0<t_1<t_R/4$ and a sudden revival at $t_R/4<t_2<t_R/2$ augmenting to the same value $g(\alpha)$ at $t=t_R/2$. The collapse ($t_1$ and $t_3$) and revival ($t_2$ and $t_4$) times as a function of $\alpha$ in the interval $\left[0,t_R\right]$ are plotted in the inset of Figure \ref{0313fig23+}. The discord, on the other hand, only vanishes at discrete times. For $0<\alpha<1$, it starts at a maximum:
\begin{equation}
D(0)=\left(\alpha ^2-1\right) \log_2 \left(1-\alpha ^2\right)-\alpha ^2 \log_2 \left(\alpha ^2\right).
\label{D0}
\end{equation}
As time increases, it vanishes and oscillates to a second maximum at $t=t_R/4$ symmetrically vanishing again and oscillating to the maximum at $t_R/2$. Depending on the number of initial photons and the initial condition the quantum discord at $t_R/4$ can be larger, equal or smaller than the quantum discord at $t_R/2$. For example, for the manifold $\Lambda_1$, for $\alpha\simeq0.22$ and $\alpha\simeq0.97$ the discord at $t_R/4$ is equal to the discord at $t_R/2$ and for $\alpha\simeq0.18$ one obtains the maximum quantum discord at $t_R/4$. As can be seen in Figure \ref{0313fig23+} the discord has discontinuities in the derivative before vanishing at $t<t_R/4$ and after vanishing at $t>t_R/4$. These discontinuities were already reported by Maziero et al., when they considered an open system obeying the Tavis--Cummings Hamiltonian coupled to non-Markovian baths \cite{Serra}. In general, one can say that they are a common feature in the dynamics of quantum discord and are due to the minimization of entropies in expression (\ref{discord}) (see left panel in Fig. \ref{0415disc}). Furthermore, in the Tavis--Cummings model they are already present in the exact dynamics.
\begin{figure}
\includegraphics[width=0.45\textwidth]{0415disc_v2.eps}
\caption{\label{0415disc}(color online) Left panel: $S(\rho_{AB}|\sigma_z^B)$ (black-dashed), $S(\rho_{AB}|\sigma_x^B)$ (black-point dashed), the minimum ${\min} \left( S(\rho^{AB}| \{\sigma_z^B,\sigma_x^B\}) \right)$ (black) and quantum discord (red) as a function of time for initial condition $\ket{\psi^+_{1/\sqrt{2}}}_1$. Right panel: ${\min} \left( S(\rho^{AB}| \{\sigma_z^B,\sigma_x^B\}) \right)=S(\rho_{AB}|\sigma_x^B)$ (brown) and quantum discord $D(t)$ (black) as a function of time for initial conditions $\ket{\psi^+_0}_1$ (bold) and $\ket{\psi^+_1}_1$ (dashed). The vertical scale on both panels is the same.}
\end{figure}
The dependence of dynamics on the manifold $\Lambda_n$, apart from the trivial dependency of the Rabi frequency, is only in the elements $\rho_{11}(t)$ and $\rho_{44}(t)$ of the reduced density matrix and is of order $\mathcal{O}(1/n)$. Both for the concurrence and the quantum discord this leads to corrections of order $\mathcal{O}(1/n^2)$ or $\mathcal{O}(1/n^4)$. In this way, we see that for the family under consideration
the correlations measured by discord and concurrence are robust when changing the manifold and survive as $n\to\infty$. It is possible to look at these results from an interesting and different perspective. If we consider that the 2-TLS are the central system and the photons are the environment then, by solving the exact Hamiltonian, we are solving the exact open dynamics of 2-TLS coupled to a bosonic bath. The fact that the correlations are robust with a change in the manifold implies that this family of Bell states has a particular behavior in the presence of dissipation. For the concurrence the robustness can be seen explicitly because the dependence on $n$ is given by:
\begin{equation}
C(t)=\max(0,\Xi(\alpha,t)-f(n)\Omega(\alpha,t)),
\label{concu23+}
\end{equation} where $\Xi(\alpha,t)$ and $\Omega(\alpha,t)$ are oscillating functions of $\alpha$ and $t$ and $f(n)$ is:
\begin{equation}
f(n)=\frac{\sqrt{n(n+1)}}{1+2n},
\label{fn}
\end{equation}
which is a fast growing function that saturates rapidly to $1/2$ ($f'(0)=\infty$ and $f'(\infty)=0$).
For $\alpha=0,1$ ($\ket{\psi^+_\alpha}_n=\ket{+-}_n$ or $\ket{-+}_n$), $\Xi(\alpha,t)=\sin^2{2\pi t}=\Omega(\alpha,t)/2$. Therefore the concurrence (Eq. (\ref{concu23+})) is a simple periodic function with oscillations of order $\mathcal{O}(1/n^2)$. It is important to note that the quantum discord for the states $\alpha=0,1$ is different. The difference stems from the classical correlations (cf Eq. (\ref{clasiquito})), specifically from the entropies. One can already see it in the elements of the reduced density matrix because $\rho_{22}(t)$ and $\rho_{33}(t)$ are dephased (cf right panel in Fig \ref{0415disc}). Evidently, from the quantum point of view the entanglement is the same because the Hamiltonian and the corresponding evolution operator are left unchanged when one permutes the state $\ket{+-}_n$ with $\ket{-+}_n$. On the other side, classical correlations suppose a measurement basis and this election and subsequent minimization induces a difference between the evolution of the two states. The effect is small but important because it is showing that the correlations are not entirely measured by concurrence. As $n$ increases this effect becomes smaller. Please note that here the expression for discord (\ref{discord}) is exact because inequality (\ref{dos}) is satisfied at all times.
For the Bell State of this family $\ket{\psi^+_{1/\sqrt{2}}}_n=\frac{1}{\sqrt{2}}(\ket{+-}_n+\ket{-+}_n)$ $\Xi(\alpha,t)=\cos^2{2\pi t}/4$ and $\Omega(\alpha,t)=\sin^2{2\pi t}/2$ so Eq. (\ref{concu23+}) becomes
\begin{equation}
C(t)=\max(0,\cos 4 \pi t) + \mathcal{O}\left({1}/{n^2}\right).
\end{equation}
The collapse and revival times correspond to the solutions of equation $\tan{2\pi t}=\pm 1/\sqrt{2f(n)}$ with $f(n)$ given in Eq. (\ref{fn}). As $n\to \infty$, $f(n)\to 1/2$ so the collapse and revival times are $t_i/t_R=(2i-1)/8$, $i=1,2,3,4$ and the ESD lasts $\Delta t=t_R/4$ which is the largest ESD for this initial condition. The quantum discord for the Bell state starts at a maximum $D(0)=1$, vanishes at $t_R/4$ and returns to a maximum at $t_R/2$ presenting two slope discontinuities (cf. Fig. \ref{0313fig23+}).
We can make some general remarks on the correlations measured by both discord and entanglement in Fig.~\ref{0313fig23+}. Note that the entanglement sudden death (ESD) is accompanied by a discrete vanishing of the quantum discord
but every time the quantum discord vanishes the concurrence vanishes.
Furthermore, the discord has oscillations with harmonics of the fundamental Rabi frequency. For the Bell state (see red curve in Fig. \ref{0313fig23+}) the entanglement and discord are in phase and give roughly the same information at least for where the maximum of quantum correlations occurs but for the other initial states they give information about correlations that is completely opposite at a given time (see blue curve in Fig.~\ref{0313fig23+}). The fact that they give opposite information will be seen for other initial conditions. Consequently, if one asks if the quantum state is correlated or not at $t_R/4$ there is no absolute answer because quantum discord tells us that this is the maximally correlated state while concurrence says that this is a state with no correlations. K. Modi et al., \cite{Modi} proposed a geometrical interpretation of correlations that considers quantum discord as the distance in Hilbert space between the quantum state of the system and the nearest classical pure state and concurrence as the distance to the nearest separable state. From that perspective one can say that in most of the states in the family the separable states are close, in fact, the state is a separable state and at the same time the classical pure states are far in Hilbert space. Clearly, the number of states with zero concurrence will give us an idea of the size of the set of separable states in the Hilbert space.
The family $\ket{\psi_\alpha^-}_n$ has very different dynamics because in this case the related Bell State $\ket{\psi_{{1}/{\sqrt{2}}}^-}_n=\frac{1}{\sqrt{2}}(\ket{+-}_n-\ket{-+}_n)$ is an exact eigenstate of the Tavis--Cummings Hamiltonian. The concurrence is:
\begin{equation}
C(t)=g(\alpha)+(\tfrac{1}{2}-f(n))(1-g(\alpha)){\sin^2{2\pi t }},
\end{equation}
with $g(\alpha)$ and $f(n)$ defined in Eqs. (\ref{galpha}) and (\ref{fn}). Therefore, the correlations measured by concurrence present oscillations with amplitude $\tfrac{(1-g(\alpha))}{16n^2}+\mathcal{O}(1/n^3)$ around $g(\alpha)$. The prefactor of the oscillating term, of order $\mathcal{O}(1/n^2)$, is smaller than the constant term $g(\alpha)$, so the entanglement dynamics will be quasi-stationary with a time averaged concurrence that can be tuned by the initial condition (cf. Fig. \ref{0417fig23-_v3}). As $n\to\infty$ the amplitude of oscillations goes to zero. From the point of view of considering the photons as a bath for the 2-TLS, the family $\ket{\psi_\alpha^-}_n$ is also robust against dissipation in the limit $n\to\infty $. For a given manifold $\Lambda_n$ as $\alpha$ goes from $\alpha=0$ to $\alpha=1/\sqrt{2}$ the amplitude of oscillations decreases and the entanglement increases reaching a maximum of $1$. For parameters between $\alpha=1/\sqrt{2}$ and $\alpha=1$ the amplitude of oscillations increases until the entanglement is zero and the oscillations have the largest amplitude.
\begin{figure}
\includegraphics[width=0.45\textwidth]{0706fig23.eps}
\caption{\label{0417fig23-_v3}(color online) Left panel: Concurrence (bold lines) and Discord (dashed lines) as a function of time for initial condition $\ket{\psi^-_\alpha}_1$ with $\alpha=0$ (black), $\alpha=0.1$ (turquoise), $\alpha=0.3$ (blue), $\alpha=0.6$ (orange), $\alpha=1/\sqrt{2}$ (red) and $\alpha=0.9$ (green). Right panel: Zoom of oscillations of concurrence and quantum discord for $\alpha=0.1$ (top panels) and $\alpha=0.6$ (bottom panels).}
\end{figure}
The dynamics of discord are always given by expression (\ref{discord}) and are also quasi-stationary oscillations (cf Fig. \ref{0417fig23-_v3}). The time averaged discord for a given value of $\alpha$ has the same overall behavior of the quantum correlations measured by concurrence and, as expected for Bell States, both discord and concurrence are equal to one for $\alpha=1/\sqrt{2}$.
However, the underlying oscillations of both measures have different behaviors. In general, the amplitude of oscillations in discord is larger than the amplitude of oscillations in concurrence and, as for the previous family, the discord often has oscillations with harmonics of the fundamental Rabi frequency. Furthermore, for some initial conditions ($\alpha=0.1$, top insets in Fig. \ref{0417fig23-_v3}) both oscillations are in phase but for other ($\alpha=0.6$, bottom insets in Fig. \ref{0417fig23-_v3}) they are in counter phase giving opposite information about the quantum correlations of the state at a given time. We also observe that for some times as concurrence augments discord decreases. The geometric interpretation of this behavior is that for those states $\rho(t)$ is no longer a separable state and the distance to the closest classical state decreases implying that the classical states are close to the frontier of separable states in the Hilbert space.
The Bell state associated with this initial condition $\ket{\psi_{1/\sqrt{2}}^-}_n$ belongs to the decoherence free subspace~\cite{Eberly2007}. This means that even when the system is open and dissipation is present the entanglement of this state will be one. Qi-Liang He et al.~\cite{He2011} have observed that the discord and entanglement in a similar arrangement but under dissipation have a quasi-stationary oscillatory behavior for some initial conditions related to the family $\ket{\psi_\alpha^-}_n$, we suspect this is due to the quasi-stationary oscillatory exact dynamics reported here. The fact that the concurrence can be tuned by changing the initial condition combined with the possible robustness against dissipation makes these states good candidates for quantum computation purposes.
An initial condition related to the Bell eigenstate $\ket{\psi_{1/\sqrt{2}}^-}_n$ that is often studied is the Werner state
\begin{equation}
\rho_W=\alpha\ket{\psi_{1/\sqrt{2}}^-}_n\prescript{}{n}{\bra{\psi_{1/\sqrt{2}}^-}}+\frac{1-\alpha}{4}\mathcal{I}_n,
\end{equation}
with $\mathcal{I}_n$ the identity in the manifold $\Lambda_n$.
This state, proposed by Werner in a historically important article~\cite{Werner}, is interesting because for $\alpha=1$ it corresponds to the maximally entangled state, for $\alpha=0$ to the maximally mixed state and for intermediate $\alpha$ to a state that is both entangled and mixed to some degree. The entanglement and quantum discord for this state are initially $C(0)=\max(0,\frac{3\alpha-1}{2})$ and $D(0)=\tfrac{1}{4}((1-\alpha)\log_2(1-\alpha)+(1+3\alpha)\log_2(1+3\alpha)-2(1+\alpha)\log_2(1+\alpha))$. Interestingly, the Werner state does not evolve under the Tavis--Cummings Hamiltonian so we have an example of initial state with exact stationary dynamics where the value of quantum correlations can be tuned monotonically from $0$ to $1$ by changing $\alpha$ from $0$ to $1$ in the initial condition.
\subsection{Family of Bell type States}
The second type of initial conditions we consider are a linear combination of the two other states in the manifold:
\begin{equation}
\ket{\phi^\pm_\alpha}_n=\alpha \ket{++}_{n-1}\pm\sqrt{1-\alpha^2}\ket{--}_{n+1}.
\label{belltype}
\end{equation}
For $\alpha=0,1$ these correspond respectively to $\ket{--}_{n+1}$ and $\ket{++}_{n-1}$ and counterintuitively, do not have the same evolution of entanglement. Mathematically this arises from the
well known non-linear character of the TC model. On one hand, the evolution of entanglement for the state $\ket{++}_{n-1}$ is always zero. On the other hand, the entanglement of the state $\ket{--}_{n+1}$ starts at a value $C(0)=0$ then augments to a maximum that corresponds to a slope discontinuity at $t_R/4<\bar t_2\leq t_R/2$ and collapses at $t_R/4<t_2<t_R/2$. It then revives at $t_3=t_R-t_2$ to the same value it had at $\bar t_2$ decreasing to $C(t_R)=0$ where it completes a period of evolution. Both the discontinuity times $\bar t_i$, $i=1,2$ ($\bar t_1=0$ for $\ket{--}_{n+1}$) and the collapse and revival times $t_i$, $i=1,2,3,4$ ($t_1=t_4=0$ for $\ket{--}_{n+1}$) are plotted in the right panels of Figure \ref{0318fig14+_v3}. It is straightforward to see that for the initial state $\ket{--}_{n+1}$, $\bar t_2$ is a solution of:
\begin{equation}
\cos(2\pi t)=-\frac{n}{1+n},
\end{equation}
and $t_2$ is a solution of:
\begin{equation}
\cos(\pi t)=\sqrt{\Pi(n)},
\end{equation}
with $\Pi(n)=\frac{\sqrt{n (n+1) (2 n+1)^2}} {n+1}-2 n$ . For the manifold $\Lambda_1$ which is the one plotted in Figure \ref{0318fig14+_v3}, $\bar t_2=1/3$ and $t_2\simeq0.39$.
The discord for the two basis states $\ket{--}_{n+1}$ and $\ket{++}_{n-1}$ is also different. In contrast to the previous section, the difference does not stem from the classical part but from the classical and quantum contributions taken together in expression (\ref{resta}). As can be seen in Figure \ref{0318fig14+_v3}, the discord for the initial state $\ket{++}_{n-1}$ is non-zero. It oscillates to a maximum near $t_R/4$ and then to zero at $t_R/2$. This is yet another initial condition where there are no correlations measured by concurrence and there are correlations measured by discord. For $\ket{--}_{n+1}$ the discord starts at $D(0)=0$, then augments to a maximum and eventually decreases to zero at $t_R/2$ presenting a slope discontinuity before $t_R/2$. The evolution of quantum discord for $t_R/2<t<t_R$ is symmetric with respect to $t_R/2$. The fact that there is a slope discontinuity in the discord just after the entanglement death and right before the entanglement revival was already encountered for the initial condition $\ket{\psi^+_\alpha}_n$ in the previous section. We conjecture this might be an universal feature.
\begin{figure}
\includegraphics[width=0.45\textwidth]{0730fig14+_v2.eps}
\caption{\label{0318fig14+_v3}(color online) Left panel: Concurrence (bold lines) and Discord (dashed lines) as a function of time for initial condition $\ket{\phi^+_\alpha}_1$ with $\alpha=0$ (black), $\alpha=0.3$ (blue), $\alpha=\alpha_{B}\simeq0.58$ (red), $\alpha=0.9$ (green) and $\alpha=1$ (brown). Right top panel: Collapse and revival times $t_i/t_R$, $i=1,2,3,4$ as a function of $\alpha$ for $\Lambda_1$ (bold) and $\Lambda_{10}$ (point dashed) Right bottom panel: Discontinuity times $\bar t_i/t_R$, $i=1,2$ as a function of $\alpha$ for $\Lambda_1$ (bold) and $\Lambda_{10}$ (point dashed). The shaded areas in the inset correspond to the ESD and the vertical lines in the right panels correspond to $\alpha_B(1)$ (bold) and $\alpha_B(10)$ (point dashed). }
\end{figure}
We first consider the family $\ket{\phi^+_\alpha}_n$. For $0<\alpha<1$ there is an initial collapse of the entanglement $\left[0,t_1\right]$ with $0\leq t_1<t_R/4$, followed by a sudden birth, then followed by an intermediate collapse $\left[t_2,t_3\right]$ centered at $t_R/2$. The evolution for times $t$ between $t_R/2$ and $t_R$ is symmetric with respect to $t_R/2$. To our understanding this is the first time an initial collapse, with the possibility of returning to a full entangled state, is predicted. While the initial and final collapses, $\left[0,t_1\right]$ and $\left[t_4,t_R\right]$, depend little on the excitation manifold, the intermediate $\left[t_2,t_3\right]$ collapse does depend as can be seen in the top right panel in Figure \ref{0318fig14+_v3} where we plot the collapse ($t_2$ and $t_4$) and revival ($t_1$ and $t_3$) times for manifolds $\Lambda_1$ and $\Lambda_{10}$. Note that, as $\alpha$ approaches one, the intermediate collapse vanishes, i.e. $t_2=t_3$. This happens at $\alpha_1=2f(n)$ and then, for $\alpha>\alpha_1$, the collapse time $t_2\to 0$ and the birth time $t_3\to t_R$ until we recover the entanglement dynamics of initial condition$\ket{++}_{n-1}$. Nothing particular happens to discord when $\alpha=\alpha_1$. In general, for the family considered, the discord for $0<\alpha<1$ starts at $D(0)=0$, then increases presenting a slope discontinuity just before the sudden birth of entanglement. In the interval $\left[t_1,t_2\right]$ it has three behaviours as a function of $\alpha$ that can be seen in Figure \ref{0318fig14+_v3} and then decreases presenting a slope discontinuity right before the concurrence collapses. For times $t_R/2<t<t_R$ the evolution is symmetric with respect to $t_R/2$.
The first interval where the concurrence is different from zero is $\left[t_1,t_2\right]$. For times near $t_R/4$, intermediate $\alpha$ have two slope discontinuities in the concurrence at $\bar t_1$ and $\bar t_2$. The time for which the non analyticities occur for a given $\alpha$ and a given manifold $\Lambda_n$ can be determined analytically by setting $\rho_{11}(t)\rho_{44}(t)=0$ in Eq. (\ref{pmatrixx}) and solving the subsequent equation. It is straightforward to see that it is the solution of an equation of the form $\cos(2\pi \bar t_i)=P_i(n,\alpha)$ with $P_i(n,\alpha)$ a polynomial function and $i=1,2$. These times are plotted in the right bottom panel in Figure \ref{0318fig14+_v3}. If $P_1(n,\alpha)=P_2(n,\alpha)$ the two slope discontinuities coincide and the entanglement is a smooth function of time, we note $\alpha_B$ the value of $\alpha$ when this happens:
\begin{equation}
\alpha_B(n)=\sqrt{\frac{n}{1+2n}}.
\end{equation}
The corresponding initial state, the Bell state $\ket{\phi^+_{\alpha_B}}_n$, has maximum quantum discord and maximum entanglement at $t_R/4$. This is an example of a maximally correlated state that depends on the number of initial photons. The result is interesting because starting with an initial uncorrelated state one is able to reach a maximally correlated state.
For $\bar t_1 < t < \bar t_2$, there is a positive slope of concurrence and discord if $\alpha<\alpha_B$ and a negative slope if $\alpha>\alpha_B$ (cf Fig. \ref{0318fig14+_v3}). The similarity between the evolution of concurrence and discord comes from the whole expression (\ref{resta}) rather than from the classical or quantum parts. The height $C(t_R/4)$ and the slope $C'(t_R/4)$ of the concurrence can be obtained analytically as a function of $n$ and $\alpha$. As $n\to\infty$, $C(t_R/4)\to g(\alpha)$ and $C'(t_R/4)\to0$. This result, combined with the control of the initial ESD by selecting the initial condition $\alpha$ and the control of the intermediate ESD by adjusting $\alpha$ and the number of initial photons in the cavity can be used to control quantum gates where the concurrence is zero or non-zero in certain intervals of time that can controlled and the value of concurrence $C(t_R/4+j t_R/2)$, $j$ an integer, depends on the initial condition.
\begin{figure}
\includegraphics[width=0.45\textwidth]{0814fig14-_v2.eps}
\caption{\label{0319fig14-andWerner}(color online) Left panel: Collapse and revival times $t_i/t_R$, $i=1,2,3,4$ (black) and discontinuity times $\bar t_i/t_R$, $i=1,2$ (red) a function of $\alpha$ for $\Lambda_1$ (bold) and $\Lambda_{10}$ (point dashed) for initial condition $\ket{\phi^-_\alpha}_n$. The shaded areas correspond to ESD. Right panel: Concurrence as a function of time for $\alpha=0.02$ for $\Lambda_1$ (bold) and $\Lambda_{10}$ (point dashed)}.
\end{figure}
We now discuss the dynamics of entanglement and quantum discord for initial condition $\ket{\phi^-_\alpha}_n$. There are two distinct evolutions for this initial condition depending on the value of $\alpha$. For $0<\alpha<\alpha_c(n)$, with
\begin{equation}
\alpha_c(n)=\frac{1}{1+2n},
\end{equation}
the concurrence has an initial collapse $\left[0,t_1\right]$ followed by a sudden birth at $t_1$. In the interval $\left[t_1,t_2\right]$ it reaches a maximum that corresponds to a slope discontinuity similar to the situation encountered previously for initial condition $\ket{--}_{n+1}$, collapsing again at $t_2$. Also, like in the previous cases, the evolution is symmetrical with respect to $t_R/2$ for times $t_R/2<t<t_R$.
\begin{figure}
\includegraphics[width=0.45\textwidth]{0814gate.eps}
\caption{\label{gates}Example of a quantum discord gate. Quantum discord as a function of time in units $t_R(n)$ for initial condition $\ket{\phi^-_{\alpha_0}}_1$ with $\alpha_0=\alpha_0(1)\simeq0.82$. The arrows indicate the times at which photons are injected or removed from the cavity.}
\end{figure}
As $\alpha$ increases the birth intervals $\left[t_1,t_2\right]$ and $\left[t_3,t_4\right]$ become smaller until reaching $\alpha_c$ where the concurrence is zero at all times as for the initial state $\ket{++}_{n-1}$. See left panel in Figure \ref{0319fig14-andWerner} for the times at which the revival/collapse and discontinuities occur in the interval $\left[0,t_R\right]$ and right panel in Figure \ref{0319fig14-andWerner} for a plot of the concurrence as a function of time in manifolds $\Lambda_1$ and $\Lambda_{10}$. From the above description it is straightforward to see that, from the point of view of concurrence, for $\ket{\phi^-_\alpha}_n$ there is no related Bell State because there is no state that is completely entangled. Interestingly, from the point of view of quantum discord there is no related Bell State either.
As in previous initial conditions both quantifiers seem to be uncorrelated because the quantum discord does not change dramatically at $\alpha_c$ and concurrence does not change at all at $\alpha_0$ (see next paragraph). For small values of $\alpha$ in the initial condition the quantum discord starts at $D(0)$ then augments to a maximum decreasing to zero at $t_R/2$ presenting a slope discontinuity just before the ESD similar to initial condition $\ket{\phi^+_\alpha}_n$ and for larger values of $\alpha$ it starts at zero, augments to a maximum near $t_R/4$ and decreases to zero without the slope discontinuity. As $\alpha$ increases the quantum discord is smoother function in time and the maximums start occurring at exactly $t_R/4$.
A very surprising feature in this family of initial conditions is that, for each manifold $\Lambda_n$, there is a value of $\alpha$, $\alpha_0(n)$, for which the quantum correlations measured by quantum discord are zero for all times. The value of $\alpha$ for which this happens can be determined by setting the classical correlations equal to the quantum correlations in expression (\ref{resta}) and numerically solving the subsequent transcendental equation. For $\Lambda_1$, $\alpha_0\sim 0.82$ and for $n=\infty$, $\alpha_0=1/\sqrt{2}$. We propose to use this result, combined with the fact that the state after one complete period of evolution is the same, for quantum computation purposes in an experiment that we call ``Quantum discord gates'', schematized in Figure \ref{gates}. Note that, for this experiment, of the same order of magnitude of the bounded error for discord~\cite{Huang}, expression (\ref{discord}) is exact because inequality (\ref{uno}) holds at all times for $1/\sqrt{2}<\alpha<0.82$. Imagine one starts an experiment with $n$ photons in the cavity and at exactly $\alpha_0(n)$. Then, for the first complete period of evolution $\left[0,t_R\right]$ one has zero quantum discord. Eventually, at $t=t_R$, one injects $m$ photons. Clearly, since $\alpha_0(n+m)\neq\alpha_0(n)$ the discord will be non-zero in the interval $\left[t_R,2t_R\right]$. Evidently, this process can be repeated at any multiple of $t_R$ and, this way, by injecting or extracting photons at certain times one has zero or non-zero discord. From what was just explained, this experiment uses discord to count the photons in the cavity and
this is an important result of our work. Returning to the interpretations of the photons as the environment for the 2-TLS and comparing initial conditions $\ket{\phi^-_{\alpha_0}}_n$ and $\ket{\phi^-_{\alpha_0}}_{m}$,
then another conclusion from our work is that initial correlations with the environment have great impact on dynamics of quantum discord. In fact, the reduced 2-TLS density matrices of such initial conditions are indistinguishable
but they evolve to completely different discord because one evolves to finite discord while the other initially equivalent condition evolves to an uncorrelated state.
\subsection{Ali states}
\begin{figure}
\includegraphics[width=0.45\textwidth]{0821figali2a_v2.eps}
\caption{\label{0318figali2a}(color online) Concurrence (bold lines) and Discord (dashed lines) as a function of time for initial condition $\rho_A$ with $\alpha=0.3$ (blue), $\alpha=0.6$ (orange) and $\alpha=1$ (red) for manifolds $\Lambda_1$ and $\Lambda_{10}$. The thicker lines for correspond to $\Lambda_{10}$. The inset shows the dependence of collapse and revival times $t_i/t_R$ $i=1,2,3,4$ as a function of $\alpha$ for $\Lambda_1$ (bold) and $\Lambda_{10}$ (point dashed). Here $\alpha_A(1)\simeq0.38$ and $\alpha_A(10)\simeq0.09$. The shaded area corresponds to the ESD and the vertical lines correspond to the values of $\alpha$ considered in the big figure.}
\end{figure}
A mixed initial condition related to $\ket{\psi^\pm_\alpha}_n$ and $\ket{\phi^\pm_\alpha}_n$ that is similar to one studied by Ali et al.~\cite{Ali} is:
\begin{equation}
\rho_{A}=\alpha\ket{\psi^+_{1/\sqrt{2}}}_n\prescript{}{n}{\bra{\psi^+_{1/\sqrt{2}}}}+(1-\alpha)\ket{++}_{n-1}\prescript{}{n-1}{\bra{++}}.
\end{equation}
For this family of initial conditions there are two distinct behaviors of entanglement determined by:
\begin{equation}
\alpha_A(n)=\left[1+\frac{1+2n}{4f(n)}\right]^{-1}.
\end{equation}
If $\alpha_A<\alpha\leq 1$ the entanglement dynamics resemble the evolution of the maximally entangled state $\ket{\psi^+_{1/\sqrt{2}}}_n$ where concurrence starts at a maximum $g(\alpha)$ (cf Eq. (\ref{galpha})), then an ESD occurs at $0<t_1<t_R/4$ and a sudden revival at $t_R/4<t_2<t_R/2$ augmenting to the same value $g(\alpha)$ at $t_R/2$. In general, the collapse and revival times satisfy $t_1(n)\lesssim t_1(n+1)$, $t_2(n)>t_2(n+1)$, $t_3(n)<t_3(n+1)$ and $t_4(n)\gtrsim t_4(n+1)$ (cf inset of Fig. \ref{0318figali2a}), and as $\alpha\to1$ they are not sensitive to the change of manifold. On the other hand, if $0\leq\alpha<\alpha_A$ the evolution is different from all the previous evolutions because for a given $\alpha$, the dependence of entanglement on the number of photons in the cavity is counterintuitive. To illustrate this point, let us examine the concurrence at $t_R/2$. It is straightforward to see that:
\begin{equation}
C(t_R/2)=\max\left[0,\alpha-4(\alpha-1)\frac{f(n)}{1+2n}\right].
\label{ctr2}
\end{equation}
The dependence on $n$ in the above expression implies that as one augments the number of photons in the cavity there is a sudden birth of concurrence at $t_R/2$.
From the point of view of considering the photons as the environment for the 2-TLS this result is interesting because increasing the number of photons, i.e. attaining the photon bath limit, results in a more entangled state at certain times. In Figure \ref{0318figali2a} we plot the concurrence for $\alpha=0.3$ for $\Lambda_1$ and $\Lambda_{10}$ (blue curves) to observe the sudden birth at $t_R/2$ when there are more photons and the concurrence for $\alpha=0.6$ for $\Lambda_1$ and $\Lambda_{10}$ (orange curves) to observe how the entanglement at $t_R/2$ increases as the excitation manifold increases. The counterintuitive dependence on the number of photons can be used to count photons. To clarify this point lets compare the entanglement evolution for different initial conditions (different $\alpha$) for $n$ and $n+1$ photons in the cavity. If $0\leq\alpha<\alpha_A(n+1)$ then there is no birth at $t_R/2$ regardless of the number of photons. If $\alpha_A(n+1)<\alpha<\alpha_A(n)$ there is birth if there are $n+1$ photons but there is no birth if there are $n$ photons (this is shown in Figure \ref{0318figali2a} but with a difference of 9 photons). Finally, if $\alpha>\alpha_A(n)$ then the entanglement at $t_R/2$ for $n+1$ photons is larger than for $n$ photons.
For a given number of photons the quantum discord for this initial condition has different evolutions. For $\alpha=0$ it was already described (cf black curve in Fig. \ref{0318fig14+_v3}). For small $\alpha$ it has the shape of a garland with discontinuities at the maximums and smooth minimums as can be seen in the blue curve in Figure \ref{0318figali2a}. As $\alpha$ increases there is a maximum that appears at $t_R/2$ and at the same time the minimums near $t_R/4$ and $3t_R/4$ go to zero (see orange curve in Fig. \ref{0318figali2a}). Finally as $\alpha\to 1$ the maximum at $t_R/2$ is the same as the initial discord and we obtain the known result for Bell where the discontinuities in the discord occur just after the entanglement sudden death and right before the entanglement sudden birth. A new, unreported result for the quantum discord is that there is a value of $\alpha$ for each value of $n$ for which there is a plateau near $t_R/4$ and near $3t_R/4$ (cf Fig. \ref{0525plateau}). For one photon in the cavity, $\alpha_{plateau}(1)=\frac{1}{4}$, and for infinite photons $\alpha_{plateau} (\infty)=\frac{1}{3}$. The discord $D(\alpha_{plateau})$ is $\alpha_{plateau}$ and the width of the plateau decreases as $n$ increases. In Fig. \ref{0525plateau} we plot $\alpha_{plateau}$ as a function of $n$ and the discord for the excitation manifolds $\Lambda_1$, $\Lambda_2$ and $\Lambda_{20}$.
\begin{figure}
\includegraphics[width=0.45\textwidth]{0821plateau.eps}
\caption{\label{0525plateau}Top panels: Discord as a function of time for $\alpha_{plateau}(n)$ for $\Lambda_1$ (left), $\Lambda_2$ (middle) and $\Lambda_{20}$ (right). Bottom panel: $\alpha_{plateau}(n)$ as a function of $n$.}
\end{figure}
As a final remark for this initial condition, we note that for some initial conditions $\alpha$ and number of photons $n$ the information of the correlations given by concurrence and quantum discord is completely opposite. We have both the situation where entanglement is zero and discord is maximum and also the situation where one augments as the other decreases.
\section{Discussion}
In this paper we have studied the dynamics of entanglement and quantum discord in the exact Tavis--Cummings Hamiltonian. We concluded that some of the dynamical features of discord attributed to the Markovian or non-Markovian nature of the environment in the open system scheme are already contained in the non-linear exact dynamics of the TC model and in the choice of initial conditions. In particular, we showed that the discontinuities Maziero et al.~\cite{Serra} observed in discord were a direct consequence of the minimization of the entropies. We furthermore demonstrated that the stationary asymptotic dynamics reported by Qi-Liang et al.~\cite{He2011} are related to the choice of initial conditions.
This simple model reveals that the assumption of an environment is not essential.
Regarding the entanglement, measured by the concurrence, we exhaustively studied its dynamics, in particular the well known ESD phenomena. In this context, we described the death and revival times as a function of all the meaningful parameters in the model. These predictions on discord and entanglement will give new insights on the current debate on quantum correlations and the true influence of dissipation and non-Markovianity on their dynamics. One question still unanswered is what is the real role of dissipation and Markovian character of the bath.
The model reveals that the dynamics of correlations depend crucially on the 2-TLS + field initial conditions and are, by no means, trivial. We show that there exist initial conditions where both measures give opposite information: on one hand quantum discord suggests that it is a maximally correlated state and on the other hand concurrence suggests that it is an uncorrelated state. There are also states where both measures are in phase and give roughly the same information for correlations. Interestingly, we find that there are states that are indistinguishable from the point of view of entanglement but distinguishable from the point of view of quantum discord because classical correlations suppose a measurement basis and this election and subsequent minimization induces a difference between the evolution of the two states. Finally, critical behaviours for both quantifiers usually occur in disjoint regions of the parameters space allowing the possibility of building devices where one can control one or the other independently. According to the Modi et al.~\cite{Modi} interpretation of quantum correlations, the results just mentioned have implications for the geometry of Hilbert space, for example for the size of the separable states. We leave these as directions for future work.
The theoretical findings are of direct practical relevance. Our results predict i) Robust maximally entangled states with entanglement that is almost independent on the manifold ii) States whose entanglement and quantum discord can be tuned from zero to one by varying a parameter in the initial condition iii) States that have the possibility of returning to a fully entangled state after an initial ESD where we can control the entanglement by setting the number of initial photons and the parameter in the initial condition iv) States that counterintuitively have an entanglement birth as one augments the excitation number. We have furthermore proposed an experiment called Quantum discord gates where by injecting or extracting photons at certain times the quantum state has zero or non-zero discord. This makes use of a surprising result where we find that for some initial conditions the quantum discord is exactly zero. Given the marginal character of states with zero discord this result is not only completely counterintuitive but is also useful as a way to count photons present in a cavity.
\begin{acknowledgments}
This work was supported by the Vicerrector\'\i a de Investigaci\'on of the Universidad Antonio Nari\~no, Colombia under project number 2011282, Comit\'e para el Desarrollo de la Investigaci\'on(CODI) of the Universidad de Antioquia, Colombia under contract number E01620, Estrategia de Sostenibilidad del Grupo de F\'\i sica At\'omica y Molecular and by the Departamento Administrativo de Ciencia, Tecnolog\'\i a e Innovaci\'on (COLCIENCIAS) of Colombia under grant number 111556934912.
\end{acknowledgments}
\begin{thebibliography}{99}
\bibitem{EPR} A. Einstein, B. Podolsky, and N. Rosen, Phys. Rev. {\bf 47}, 777 (1935).
\bibitem{Bohr} N. Bohr, Phys. Rev. {\bf 48}, 696 (1935).
\bibitem{Olival} O. Freire, {\it The Quantum Dissidents: Rebuilding the Foundations of Quantum Mechanics (1950-1990)} (Springer-Verlag, Berlin, 2015).
\bibitem{Schrodinger} E. Schr\"odinger, Math. Proc. Cambridge {\bf 31}, 555 (1935).
\bibitem{HorodeckiRev} R. Horodecki, P. Horodecki, M. Horodecki, and K. Horodecki, Rev. Mod. Phys. {\bf 81}, 865 (2009).
\bibitem{EntanglementRev} O. G\"uhne and G. T\'oth, Phys. Rep. {\bf 474}, 1 (2009).
\bibitem{Werner} R. F. Werner, Phys. Rev. A {\bf 40}, 4277 (1989).
\bibitem{Bell} J.S. Bell, Physics {\bf 1}, 195 (1964). Reprinted in J. S. Bell, {\it Speakable and Unspeakable in Quantum Mechanics} (Cambridge University Press, Cambridge, 2004).
\bibitem{Aspect} A. Aspect, P. Grangier, and G. Roger, Phys. Rev. Lett. {\bf 49}, 91 (1982); A. Aspect, J. Dalibard, and G. Roger, Phys. Rev. Lett. {\bf 49}, 1804 (1982).
\bibitem{Zeilinger} G. Weihs, T. Jennewein, C. Simon, H. Weinfurter, and A. Zeilinger, Phys. Rev. Lett. {\bf 81}, 5039 (1998); M. Giustina, A. Mech, S. Ramelow, B. Wittmann, J. Kofler, J. Beyer, A. Lita, B. Calkins, T. Gerrits, S. W. Nam, R. Ursin and A. Zeilinger, Nature {\bf 497}, 227 (2013).
\bibitem{Gisin} W. Tittel, J. Brendel, H. Zbinden, and N. Gisin, Phys. Rev. Lett. {\bf 81}, 3563 (1998); D. Salart, A. Baas, J. A. W. van Houwelingen, N. Gisin, and H. Zbinden, Phys. Rev. Lett. {\bf 100}, 220404 (2008).
\bibitem{Nielsen} M. A. Nielsen, and I. L. Chuang, {\it Quantum Computation and Quantum Information: 10th Anniversary Edition} (Cambridge University Press, Cambridge, 2010).
\bibitem{Bennet} C. H. Bennett, D. P. DiVincenzo, C. A. Fuchs, T. Mor, E. Rains, P. W. Shor, J. A. Smolin, and W. K. Wootters, Phys. Rev. A {\bf 59}, 1070 (1999).
\bibitem{Bartlett} G. J. Pryde, J. L. O'Brien, A. G. White, and S. D. Bartlett, Phys. Rev. Lett. {\bf 94}, 220406 (2005).
\bibitem{Braunstein} S. L. Braunstein, C. M. Caves, R. Jozsa, N. Linden, S. Popescu, and R. Schack, Phys. Rev. Lett. {\bf 83}, 1054 (1999).
\bibitem{Lanyon} B. P. Lanyon, M. Barbieri, M. P. Almeida, and A. G. White, Phys. Rev. Lett. {\bf 101}, 200501 (2008).
\bibitem{Zurek} H. Ollivier and W. H. Zurek, Phys. Rev. Lett. {\bf 88}, 017901 (2001).
\bibitem{Vedral} L. Henderson, and V. Vedral, J. Phys. A: Math. Gen. {\bf 34}, 6899 (2001).
\bibitem{Winter} D. Cavalcanti, L. Aolita, S. Boixo, K. Modi, M. Piani, and A. Winter, Phys. Rev. A {\bf 83}, 032324 (2011).
\bibitem{Datta} V. Madhok and A. Datta, Phys. Rev. A {\bf 83}, 032323 (2011).
\bibitem{Dakic} B. Daki\'c, Y. O. Lipp, X. Ma, M. Ringbauer, S. Kropatschek, S. Barz, T. Paterek, V. Vedral, A. Zeilinger, C. Brukner and P. Walther, Nat. Phys. {\bf 8}, 666 (2012).
\bibitem{Piani} T. K. Chuan, J. Maillard, K. Modi, T. Paterek, M. Paternostro, and M. Piani, Phys. Rev. Lett. {\bf 109}, 070501 (2012).
\bibitem{Pirandola} S. Pirandola, Sci. Rep. {\bf 4}, 6956 (2014).
\bibitem{Adesso2014} D. Girolami, A. M. Souza, V. Giovannetti, T. Tufarelli, J. G. Filgueiras, R. S. Sarthour, D. O. Soares-Pinto, I. S. Oliveira, and Gerardo Adesso, Phys. Rev. Lett. {\bf 112}, 210401 (2014).
\bibitem{Modi} K. Modi, T. Paterek, W. Son, V. Vedral, and M. Williamson, Phys. Rev. Lett. {\bf 104}, 080501 (2010).
\bibitem{Acin} A. Ferraro, L. Aolita, D. Cavalcanti, F. M. Cucchietti, and A. Ac\'in, Phys. Rev. A {\bf 81}, 052318 (2010).
\bibitem{Modi2012} K. Modi, A. Brodutch, H. Cable, T. Paterek, and V. Vedral, Rev. Mod. Phys. {\bf 84}, 1655 (2012).
\bibitem{Streltsov} A. Streltsov, H. Kampermann, and D. Bru{\ss}, Phys. Rev. Lett. {\bf 106}, 160401 (2011).
\bibitem{Piani2012} M. Piani, S. Gharibian, G. Adesso, J. Calsamiglia, P. Horodecki, and A. Winter, Phys. Rev. Lett. {\bf 106}, 220403 (2011).
\bibitem{Buchleitner} F. Mintert, A. R.R. Carvalho, M. Ku\'{s}, and A. Buchleitner, Phys. Rep. {\bf 415}, 207 (2005).
\bibitem{YuEberly} T. Yu, and J. H. Eberly, Phys. Rev. Lett. {\bf 93}, 140404 (2004); T. Yu, and J. H. Eberly, Science {\bf 323}, 598 (2009).
\bibitem{Davidovich} M. P. Almeida, F. de Melo, M. Hor-Meyll, A. Salles, S. P. Walborn, P. H. Souto Ribeiro, and L. Davidovich, Science {\bf 316}, 579 (2007).
\bibitem{Kimble} J. Laurat, K. S. Choi, H. Deng, C. W. Chou, and H. J. Kimble, Phys. Rev. Lett. {\bf 99}, 180504 (2007).
\bibitem{Fanchini2009} T. Werlang, S. Souza, F. F. Fanchini, and C. J. Villas Boas, Phys. Rev. A {\bf 80}, 024103 (2009).
\bibitem{Celeri} J. Maziero, L. C. C\'eleri, R. M. Serra, and V. Vedral, Phys. Rev. A {\bf 80}, 044102 (2009).
\bibitem{Serra} J. Maziero, T. Werlang, F. F. Fanchini, L. C. C\'eleri, and R. M. Serra, Phys. Rev. A {\bf 81}, 022116 (2010).
\bibitem{Mazzola} L. Mazzola, J. Piilo, and S. Maniscalco, Phys. Rev. Lett. {\bf 104}, 200401 (2010).
\bibitem{Fanchini2010} F. F. Fanchini, T. Werlang, C. A. Brasil, L. G. E. Arruda, and A. O. Caldeira, Phys. Rev. A {\bf 81}, 052107 (2010).
\bibitem{TC} M. Tavis, and F. W. Cummings, Phys. Rev. {\bf 170}, 379 (1968).
\bibitem{JC} E. T. Jaynes, and F. W. Cummings, Proc. IEEE {\bf 51}, 89 (1963).
\bibitem{ReviewJC} B. W. Shore and P. L. Knight, J. Mod. Opt. {\bf 40}, 1195 (1993).
\bibitem{Walls-Milburn} D. F. Walls, and G. J. Milburn, {\it Quantum Optics, Second Ed.} (Springer-Verlag, Berlin, 2010).
\bibitem{Special} See the papers in the special issue J. Phys. B: At. Mol. Opt. Phys. {\bf 46}, 1 (2013), commemorating fifty years in the JC physics.
\bibitem{Haroche} J. M. Raimond, M. Brune, and S. Haroche, Rev. Mod. Phys. {\bf 73}, 565 (2001).
\bibitem{Walther} H. Walther, B. T. H. Varcoe, B.-G. Englert, and T. Becker, Rep. Prog. Phys. {\bf 69}, 1325 (2006).
\bibitem{Wineland1996} D. M. Meekhof, C. Monroe, B. E. King, W. M. Itano, and D. J. Wineland, Phys. Rev. Lett. {\bf 76}, 1796 (1996).
\bibitem{WinelandRMP} D. Leibfried, R. Blatt, C. Monroe, and D. Wineland, Rev. Mod. Phys. {\bf 75}, 281 (2003).
\bibitem{Nico} N. Quesada and A. Sanpera, J. Phys. B: At. Mol. Opt. Phys. {\bf 46}, 224002 (2013).
\bibitem{Azuma} H. Azuma, Prog. Theor. Phys. {\bf 126}, 369 (2011).
\bibitem{Molmer} B. Mischuck and K. Molmer, Phys. Rev. A {\bf 87}, 022341 (2013).
\bibitem{Schoelkopf} A. Blais, R.-S. Huang, A. Wallraff, S. M. Girvin, and R. J. Schoelkopf, Phys. Rev. A {\bf 69}, 062320 (2004).
\bibitem{Schoelkopf2004} A. Wallraff, D. I. Schuster, A. Blais, L. Frunzio, R.-S. Huang, J. Majer, S. Kumar, S. M. Girvin, and R. J. Schoelkopf, Nature {\bf 431}, 162 (2004).
\bibitem{Mooij} I. Chiorescu, P. Bertet, K. Semba, Y. Nakamura, C. J. P. M. Harmans, and J. E. Mooij, Nature {\bf 431}, 159 (2004).
\bibitem{Takayanagi} J. Johansson, S. Saito, T. Meno, H. Nakano, M. Ueda, K. Semba, and H. Takayanagi, Phys. Rev. Lett. {\bf 96}, 127006 (2006).
\bibitem{KiraKoch} M. Kira, and S. W. Koch, {\it Semiconductor Quantum Optics} (Cambridge University Press, Cambridge 2011).
\bibitem{Forchel} J. P. Reithmaier, G. Sek, A. L\"offler, C. Hofmann, S. Kuhn, S. Reitzenstein, L. V. Keldysh, V. D. Kulakovskii, T. L. Reinecke, and A. Forchel, Nature {\bf 432}, 197 (2004).
\bibitem{Deppe} T. Yoshie, A. Scherer, J. Hendrickson, G. Khitrova, H. M. Gibbs, G. Rupper, C. Ell, O. B. Shchekin, and D. G. Deppe, Nature {\bf 432}, 200 (2004).
\bibitem{Elena} E. del Valle, {\it Microcavity Quantum Electrodynamics} (VDM Verlag, 2010).
\bibitem{Elena2012} F. P. Laussy, E. del Valle, M. Schrapp, A. Laucht, and J. J. Finley, J. Nanophoton. {\bf 6}, 061803 (2012).
\bibitem{us} C. A. Vera, N. Quesada M., H. Vinck-Posada, B. A. Rodriguez, J. Phys.: Condens. Matt. {\bf 21}, 395603 (2009).
\bibitem{Marquardt} M. Aspelmeyer, T. J. Kippenberg, and F. Marquardt, Rev. Mod. Phys. {\bf 86}, 1391 (2014).
\bibitem{Milburn-Woolley} G. J. Milburn and M. J. Woolley, Acta Phys. Slovaca {\bf 61}, 483 (2011).
\bibitem{Kim2002} M. S. Kim, Jinhyoung Lee, D. Ahn, and P. L. Knight, Phys. Rev. A {\bf 65}, 040101 (2002).
\bibitem{Puri} R. R. Puri, {\it Mathematical Methods of Quantum Optics} (Springer-Verlag, Berlin, 2001).
\bibitem{Eberly2007} T. Yu and J. H. Eberly, Quantum Inf. Comput. {\bf 7}, 459 (2007).
\bibitem{Wootters} W. K. Wootters, Phys. Rev. Lett. {\bf 80}, 2245 (1998).
\bibitem{Adesso} D. Girolami and G. Adesso, Phys. Rev. A {\bf 83}, 052108 (2011).
\bibitem{Zhou} X. Wu and T. Zhou, e-print arXiv: 1504.00129.
\bibitem{Chen} Qing Chen, Chengje Zhang, Sixia Yu, X.X. Yi and C.H. Oh, Phys. Rev. A {\bf 84}, 042313 (2011).
\bibitem{Huang} Y. Huang, Phys. Rev. A {\bf 88}, 014302 (2013).
\bibitem{Namkug} M. Namkung, J. Chang, J. Shin, Y. Kwon, e-print arXiv: 1404.6329. To appear in Int. J. Theor. Phys.
\bibitem{Ali} M. Ali, A. R. P. Rau, and G. Alber, Phys. Rev. A {\bf 81}, 042105 (2010).
\bibitem{Luo} S. Luo, Phys. Rev. A {\bf 77}, 042303 (2008).
\bibitem{Li} B. Li, Z.-X. Wang, and S.-M. Fei, Phys. Rev. A {\bf 83}, 022321 (2011).
\bibitem{He2011} Q.-L. He, J.-B. Xu, D.-X. Yao, and Y.-Q. Zhang, Phys. Rev. A {\bf 84}, 022312 (2011).
\end{thebibliography}
\end{document}
\begin{document}
\title[Solvable maximal subgroups]{A note on solvable maximal subgroups\\ in subnormal subgroups of ${\rm GL}_n(D)$}
\author[Hu\`{y}nh Vi\d{\^{e}}t Kh\'{a}nh]{Hu\`{y}nh Vi\d{\^{e}}t Kh\'{a}nh}\thanks{The second author was funded by Vietnam National Foundation for Science and Technology Development (NAFOSTED) under Grant No. 101.04-2016.18}
\address{Faculty of Mathematics and Computer Science, VNUHCM - University of Science, 227 Nguyen Van Cu Str., Dist. 5, Ho Chi Minh City, Vietnam.}
\email{huynhvietkhanh@gmail.com; bxhai@hcmus.edu.vn}
\author[B\`{u}i Xu\^{a}n H\h{a}i]{B\`{u}i Xu\^{a}n H\h{a}i}
\keywords{division ring; maximal subgroup; solvable group; polycyclic-by-finite group.\\
\protect \indent 2010 {\it Mathematics Subject Classification.} 12E15, 16K20, 16K40, 20E25.}
\maketitle
\selectlanguage{english}
\begin{abstract} Let $D$ be a non-commutative division ring, and $G$ be a subnormal subgroup of ${\rm GL}_n(D)$. Assume additionally that the center of $D$ contains at least five elements if $n>1$. In this note, we show that if $G$ contains a non-abelian solvable maximal subgroup, then $n=1$ and $D$ is a cyclic algebra of prime degree over the center.
\end{abstract}
\section{Introduction}
In the theory of skew linear groups, one of the difficult unsolved problems is whether the general skew linear group over a division ring contains maximal subgroups. In \cite{akbri}, the authors conjectured that for $n\geq 2$ and a division ring $D$, the group ${\rm GL}_n(D)$ contains no solvable maximal subgroups. In \cite{dorbidi2011}, this conjecture was shown to be true for non-abelian solvable maximal subgroups. In this paper, we consider the following more general conjecture.
\begin{conjecture}\label{conj:1} Let $D$ be a division ring, and $G$ be a non-central subnormal subgroup of ${\rm GL}_n(D)$. If $n\geq 2$, then $G$ contains no solvable maximal subgroups.
\end{conjecture}
We note that this conjecture is not true if $n=1$. Indeed, it was proved in \cite{akbri} that the subgroup $\mathbb{C}^*\cup \mathbb{C}^* j$ is a solvable maximal subgroup of the multiplicative group $\mathbb{H}^*$ of the division ring of real quaternions $\mathbb{H}$. In this note, we show that Conjecture \ref{conj:1} is true for non-abelian solvable maximal subgroups of $G$, that is, we prove that $G$ contains no non-abelian solvable maximal subgroups. This fact generalizes the main result in \cite{dorbidi2011} and it is a consequence of Theorem \ref{theorem_3.7} in the text.
Throughout this note, we denote by $D$ a division ring with center $F$ and by $D^*$ the multiplicative group of $D$. For a positive integer $n$, the symbol ${\rm M}_n(D)$ stands for the matrix ring of degree $n$ over $D$. We identify $F$ with $F{\rm I}_n$ via the ring isomorphism $a\mapsto a{\rm I}_n$, where ${\rm I}_n$ is the identity matrix of degree $n$. If $S$ is a subset of ${\rm M}_n(D)$, then $F[S]$ denotes the subring of ${\rm M}_n(D)$ generated by the set $S\cup F$. Also, if $n=1$, i.e., if $S\subseteq D$, then $F(S)$ is the division subring of $D$ generated by $S\cup F$. Recall that a division ring $D$ is \textit{locally finite} if for every finite subset $S$ of $D$, the division subring $F(S)$ is a finite dimensional vector space over $F$. If $H$ and $K$ are two subgroups in a group $G$, then $N_K(H)$ denotes the set of all elements $k\in K$ such that $k^{-1}Hk\leq H$, i.e., $N_K(H)=K\cap N_G(H)$. If $A$ is a ring or a group, then $Z(A)$ denotes the center of $A$.
Let $V =D^n= \left\{ {\left( {{d_1},{d_2}, \ldots ,{d_n}} \right)\left| {{d_i} \in D} \right.} \right\}$. If $G$ is a subgroup of ${\rm GL}_n(D)$, then $V$ may be viewed as $D$-$G$ bimodule. Recall that a subgroup $G$ of ${\rm GL}_n(D)$ is \textit{irreducible} (resp. \textit{reducible, completely reducible}) if $V$ is irreducible (resp. reducible, completely reducible) as $D$-$G$ bimodule. If $F[G]={\rm M}_n(D)$, then $G$ is \textit{absolutely irreducible} over $D$. An irreducible subgroup $G$ is \textit{imprimitive} if there exists an integer $ m \geq 2$ such that $V = \oplus _{i = 1}^m{V_i}$ as left $D$-modules and for any $g \in G$ the mapping $V_i \to V_ig$ is a permutation of the set $\{V_1, \cdots, V_m\}$. If $G$ is irreducible and not imprimitive, then $G$ is \textit{primitive}.
\section{Auxiliary lemmas}
\begin{lemma}\label{lemm_2.1}
Let $D$ be a division ring with center $F$, and $M$ be a subgroup of ${\rm GL}_n(D)$. If $M/M\cap F^*$ is a locally finite group, then $F[M]$ is a locally finite dimensional vector space over $F$.
\end{lemma}
\begin{Proof} Take any finite subset $\{x_1,x_2,\dots,x_k\}\subseteq F[M]$ and write
$$x_i=f_{i_1}m_{i_1}+f_{i_2}m_{i_2}+\cdots+f_{i_s}m_{i_s}.$$
Let $G=\left\langle m_{i_j}:1\leq i\leq k,1\leq j\leq s\right\rangle$ be the subgroup of $M$ generated by all $m_{i_j}$.
Since $M/M\cap F^*\cong MF^*/F^*$ is locally finite, the group $GF^*/F^*$ is finite. Let $\{y_1,y_2,\dots,y_t\}$ be a transversal of $F^*$ in $GF^*$ and set
$$R=Fy_1+Fy_2+\cdots+Fy_t.$$
Then, $R$ is a finite dimensional vector space over $F$ containing $\{x_1,x_2,\dots,x_k\}$.
\end{Proof}
\begin{lemma}\label{lemma_2.2}
Every locally solvable periodic group is locally finite.
\end{lemma}
\begin{Proof}
Let $G$ be a locally solvable periodic group, and $H$ be a finitely generated subgroup of $G$. Then, $H$ is solvable with derived series of length $n\geq1$, say,
$$1=H^{(n)}\unlhd H^{(n-1)}\unlhd\cdots\unlhd H'\unlhd H.$$
We shall prove that $H$ is finite by induction on $n$. For if $n=1$, then $H$ is a finitely generated periodic abelian group, so it is finite. Suppose $n>1$. It is clear that $H/H'$ is a finitely generated periodic abelian group, so it is finite. Hence, $H'$ is finitely generated. By induction hypothesis, $H'$ is finite, and as a consequence, $H$ is finite.
\end{Proof}
\begin{lemma}\label{lemma_2.3}
Let $D$ be a division ring with center $F$, and $G$ be a subnormal subgroup of $D^*$. If $G$ is solvable-by-finite, then $G\subseteq F$.
\end{lemma}
\begin{Proof}
Let $A$ be a solvable normal subgroup of finite index in $G$. Since $G$ is subnormal in $D^*$, so is $A$. By \cite[14.4.4]{scott}, we have $A\subseteq F$. This implies that $G/Z(G)$ is finite, so $G'$ is finite too \cite[Lemma 1.4, p. 115]{passman_77}. Therefore, $G'$ is a finite subnormal subgroup of $D^*$. In view of \cite[Theorem 8]{her}, it follows that $G'\subseteq F$, hence $G$ is solvable. Again by \cite[14.4.4]{scott}, we conclude that $G\subseteq F$.
\end{Proof}
For our further use, we also need one result of Wehrfritz which will be restated in the following lemma for readers' convenience.
\begin{lemma}\cite[Proposition 4.1]{wehrfritz_07}\label{lemma_2.4}
Let $D=E(A)$ be a division ring generated as such by its metabelian subgroup $A$ and its division subring $E$ such that $E\leq C_D(A)$. Set $H=N_{D^*}(A)$, $B=C_A(A')$, $K=E(Z(B))$, $H_1=N_{K^*}(A)=H\cap K^*$, and let $T$ be the maximal periodic normal subgroup of $B$.
\begin{enumerate}[(1)]
\item If $T$ has a quaternion subgroup $Q=\left\langle i,j\right\rangle $ of order $8$ with $A=QC_A(Q)$, then $H=Q^+AH_1$, where $Q^+=\left\langle Q,1+j,-(1+i+j+ij)/2\right\rangle$. Also, $Q$ is normal in $Q^+$ and $Q^+/{\left\langle -1,2\right\rangle}\cong\mbox{\rm Aut} Q\cong Sym(4)$.
\item If $T$ is abelian and contains an element $x$ of order $4$ not in the center of $B$, then $H=\left\langle x+1\right\rangle AH_1$.
\item In all other cases, $H=AH_1$.
\end{enumerate}
\end{lemma}
\section{Maximal subgroups in subnormal subgroups of ${\rm GL}_n(D)$}
\begin{proposition}\label{proposition_3.1}
Let $D$ be a division ring with center $F$, and $G$ be a subnormal subgroup of $D^*$. If $M$ is a non-abelian solvable-by-finite maximal subgroup of $G$, then $M$ is abelian-by-finite and $[D:F]<\infty$.
\end{proposition}
\begin{Proof}
Since $M$ is maximal in $G$ and $M\subseteq F(M)^*\cap G\subseteq G$, either $M = F(M)^*\cap G$ or $G \subseteq F(M)^*$. The first case implies that $M$ is a solvable-by-finite subnormal subgroup of $F(M)^*$, which yields $M$ is abelian by Lemma \ref{lemma_2.3}, a contradiction. Therefore, the second case must occur, i.e., $G \subseteq F(M)^*$. By Stuth's theorem (see e.g. \cite[14.3.8]{scott}), we conclude that $F(M)=D$. Let $N$ be a solvable normal subgroup of finite index in $M$. First, we assume that $N$ is abelian, so $M$ is abelian-by-finite. In view of \cite[Corollary 24]{wehrfritz_89}, the ring $F[N]$ is a Goldie ring, and hence it is an Ore domain whose skew field of fractions coincides with $F(N)$. Consequently, any $\alpha\in F(N)$ may be written in the form $\alpha=pq^{-1},$ where $q, p\in F[N]$ and $q\ne0$. The normality of $N$ in $M$ implies that $F[N]$ is normalized by $M$. Thus, for any $m\in M$, we have
$$m\alpha m^{-1}=mpq^{-1}m^{-1}=(mpm^{-1})(m^{-1}qm)^{-1}\in F(N).$$
In other words, $L:=F(N)$ is a subfield of $D$ normalized by $M$. Let $\{x_1,x_2,\ldots,x_k\}$ be a transversal of $N$ in $M$ and set
$$\Delta=Lx_1+Lx_2+\cdots+Lx_k.$$
Then, $\Delta$ is a domain with $\dim_L\Delta\leq k$, so $\Delta$ is a division ring that is finite dimensional over its center. It is clear that $\Delta$ contains $F$ and $M$, so $D=\Delta$ and $[D:F]<\infty$.
Next, we suppose that $N$ is a non-abelian solvable group with derived series of length $s\geq1$. Then we have such a series
$$1=N^{(s)}\unlhd N^{(s-1)}\unlhd N^{(s-2)}\unlhd\cdots\unlhd N'\unlhd N \unlhd M.$$
If we set $A=N^{(s-2)}$, then $A$ is a non-abelian metabelian normal subgroup of $M$. By the same arguments as above, we conclude that $F(A)$ is normalized by $M$ and we have $M\subseteq N_G(F(A)^*)\subseteq G$. By the maximality of $M$ in $G$, either $N_G(F(A)^*)=M$ or $N_G(F(A)^*)=G$. If the first case occurs, then $G\cap F(A)^*$ is a subnormal subgroup of $F(A)^*$ contained in $M$. Since $M$ is solvable-by-finite, so is $G\cap F(A)^*$. By Lemma \ref{lemma_2.3}, $A\subseteq G\cap F(A)^*$ is abelian, a contradiction. We may therefore assume that $N_G(F(A))=G$, which says that $F(A)$ is normalized by $G$. In view of Stuth's theorem, we have $F(A)=D$. From this we conclude that $Z(A)=F^*\cap A$ and $F=C_D(A)$. Set $H=N_{D^*}(A)$, $B=C_A(A')$, $K=F(Z(B))$, $H_1=H\cap K^*$, and $T$ to be the maximal periodic normal subgroup of $B$. Then $H_1$ is an abelian group, and $T$ is a characteristic subgroup of $B$ and hence of $A$. In view of Lemma \ref{lemma_2.4}, we have three possible cases:
\textit{Case 1:} $T$ is not abelian.
Since $T$ is normal in $M$, we conclude that $M\subseteq N_G(F(T)^*)\subseteq G$. By the maximality of $M$ in $G$, either $M = N_G(F(T)^*)$ or $G= N_G(F(T)^*)$. The first case implies that $F(T)^*\cap G$ is subnormal in $F(T)^*$ contained in $M$. Again by Lemma~ \ref{lemma_2.3}, it follows that $T\subseteq F(T)\cap G$ is abelian, a contradiction. Thus, we may assume that $G= N_G(F(T)^*)$, which implies that $F(T)=D$ by Stuth's theorem. By Lemma~ \ref{lemma_2.2}, $T$ is locally finite. In view of Lemma \ref{lemm_2.1}, we conclude that $D=F(T)=F[T]$ is a locally finite division ring. Since $M$ is solvable-by-finite, it contains no non-cyclic free subgroups. In view of \cite[Theorem 3.1]{hai-khanh}, it follows $[D:F]<\infty$ and $M$ is abelian-by-finite.
\textit{Case 2:} $T$ is abelian and contains an element $x$ of order $4$ not in the center of $B=C_A(A')$.
It is clear that $x$ is not contained in $F$. Because $x$ is of finite order, the field $F(x)$ is algebraic over $F$. Since $\left\langle x\right\rangle$ is a $2$-primary component of $T$, it is a characteristic subgroup of $T$ (see the proof of \cite[Theorem 1.1, p. 132]{wehrfritz_07}). Consequently, $\left\langle x\right\rangle$ is a normal subgroup of $M$. Thus, all elements of the set $x^M:=\{m^{-1}xm\vert m\in M\}\subseteq F(x)$ have the same minimal polynomial over $F$. This implies $|x^M|<\infty$, so $x$ is an $FC$-element, and consequently, $[M:C_M(x)]<\infty$. Setting $C=Core_M(C_M(x))$, then $C\unlhd M$ and $[M:C]$ is finite. Since $M$ normalizes $F(C)$, we have $M\subseteq N_G(F(C)^*) \subseteq G$. By the maximality of $M$ in $G$, either $N_G(F(C)^*)=M$ or $N_G(F(C)^*)=G$. The last case implies that $F(C)=D$, and consequently, $x\in F$, a contradiction. Thus, we may assume that $N_G(F(C)^*)=M$. From this, we conclude that $G\cap F(C)^*$ is a subnormal subgroup of $F(C)^*$ which is contained in $M$. Thus, $C\subseteq G\cap F(C)^*$ is contained in the center of $F(C)$ by \cite[14.4.4]{scott}. Therefore, $C$ is an abelian normal subgroup of finite index in $M$. By the same arguments used in the first paragraph we conclude that $[D:F]<\infty$.
\textit{Case 3:} $H=AH_1$.
Since $A'\subseteq H_1\cap A$, we have $H/H_1\cong A/A\cap H_1$ is abelian, and hence $H'\subseteq H_1$. Since $H_1$ is abelian, $H'$ is abelian too. Moreover, $M\subseteq H$, it follows that $M'$ is also abelian. In other words, $M$ is a metabelian group, and the conclusions follow from \cite[Theorem 3.3]{hai-tu}.
\end{Proof}
Let $D$ be a division ring, and $G$ be a subnormal subgroup of $D^*$. It was showed in \cite[Theorem 3.3]{hai-tu} that if $G$ contains a non-abelian metabelian maximal subgroup, then $D$ is cyclic of prime degree. The following theorem generalizes this phenomenon.
\begin{theorem} \label{theorem_3.2}
Let $D$ be a division ring with center $F$, and $G$ be a subnormal subgroup of $D^*$. If $M$ is a non-abelian solvable maximal subgroup of $G$, then the following conditions hold:
\begin{enumerate}[(i)]
\item There exists a maximal subfield $K$ of $D$ such that $K/F$ is a finite Galois extension with $\mathrm{Gal}(K/F)\cong M/K^*\cap G\cong \mathbb{Z}_p$ for some prime $p$, and $[D:F]=p^2$.
\item The subgroup $K^*\cap G$ is the $FC$-center. Also, $K^*\cap G$ is the Fitting subgroup of $M$. Furthermore, for any $x\in M\setminus K$, we have $x^p\in F$ and $D=F[M]=\bigoplus_{i=1}^pKx^i$.
\end{enumerate}
\end{theorem}
\begin{Proof}
By Proposition \ref{proposition_3.1}, it follows that $[D:F]<\infty$. Since $M$ is solvable, it contains no non-cyclic free subgroups. In view of \cite[Theorem 3.4]{hai-tu}, we have $F[M]=D$, there exists a maximal subfield $K$ of $D$ containing $F$ such that $K/F$ is a Galois extension, $N_G(K^*)=M$, $K^*\cap G$ is the Fitting normal subgroup of $M$ and it is the $FC$-center, and $M/K^*\cap G\cong{\rm Gal}(K/F)$ is a finite simple group of order $[K:F]$. Since $M/K^*\cap G$ is solvable and simple, one has $M/K^*\cap G\cong{\rm Gal}(K/F)\cong \mathbb{Z}_p$, for some prime number $p$. Therefore, $[K:F]=p$ and $[D:F]=p^2$. For any $x\in M\backslash K$, if $x^p\not\in F$, then by the fact that $F[M]=D$, we conclude that $C_M(x^p)\ne M$. Moreover, since $x^p\in K^*\cap G$, it follows that $\left\langle x,K^*\cap G\right\rangle \leq C_M(x^p)$. In other words, $C_M(x^p)$ is a subgroup of $M$ strictly containing $K^*\cap G$. Because $M/K^*\cap G$ is simple, we have $C_M(x^p)= M$, a contradiction. Therefore $x^p\in F$. Furthermore, since $x^p\in K$ and $[D:K]_r=p$, we conclude $D=\bigoplus_{i=1}^{p}Kx^i$.
\end{Proof}
Also, the authors in \cite{nassab14} showed that if $D$ is an infinite division ring, then $D^*$ contains no polycyclic-by-finite maximal subgroups. In the following corollary, we will see that every subnormal subgroup of $D^*$ does not contain non-abelian polycyclic-by-finite maximal subgroups.
\begin{corollary}
Let $D$ be a division ring with center $F$, $G$ be a subnormal subgroup of $D^*$, and $M$ be a non-abelian maximal subgroup of $G$. Then $M$ cannot be finitely generated solvable-by-finite. In particular, $M$ cannot be polycyclic-by-finite.
\end{corollary}
\begin{Proof}
Suppose that $M$ is solvable-by-finite. Then by Proposition \ref{proposition_3.1}, we conclude that $[D:F]<\infty$. In view of \cite[Corollary 3]{mah2000}, it follows that $M$ is not finitely generated. The rest of the corollary is clear.
\end{Proof}
\begin{theorem} \label{theorem_3.4}
Let $D$ be a non-commutative locally finite division ring with center $F$, and $G$ be a subnormal subgroup of ${\rm GL}_n(D)$, $n\geq 1$. If $M$ is a non-abelian solvable maximal subgroup of $G$, then $n=1$ and all conclusions of Theorem \ref{theorem_3.2} hold.
\end{theorem}
\begin{Proof}
By \cite[Theorem 3.1]{hai-khanh}, there exists a maximal subfield $K$ of ${\rm M}_n(D)$ containing $F$ such that $K^*\cap G$ is a normal subgroup of $M$ and $M/K^*\cap G$ is a finite simple group of order $[K:F]$. Since $M/K^*\cap G$ is solvable and simple, we conclude $M/K^*\cap G\cong \mathbb{Z}_p$, for some prime number $p$. It follows that $[K:F]=p$ and $[M_n(D):F]=p^2$, from which we have $n=1$. Finally, all conclusions follow from Theorem \ref{theorem_3.2}.
\end{Proof}
\begin{lemma}\label{lemma_3.5}
Let $R$ be a ring, and $G$ be a subgroup of $R^*$. Assume that $F$ is a central subfield of $R$ and that $A$ is a maximal abelian subgroup of $G$ such that $K=F[A]$ is normalized by $G$. Then $F[G]=\oplus _{g \in T}{Kg}$ for every transversal $T$ of $A$ in $G$.
\end{lemma}
\begin{Proof}
For the proof of this lemma, we use the similar techniques as in the proof of \cite[Lemma 3.1]{dorbidi2011}.
Since $K$ is normalized by $G$, it follows that $F[G]=\sum\nolimits_{g \in T}{Kg}$ for every transversal $T$ of $A$ in $G$. Therefore, it suffices to prove that every finite subset $\{g_1,g_2,\dots,g_n\}\subseteq T$ is linearly independent over $K$. Assume by contrary that there exists such a non-trivial relation
$$k_1g_1+k_2g_2+\cdots+k_ng_n=0.$$
Clearly, we can suppose that all $k_i$ are non-zero, and that $n$ is minimal. If $n=1$, then there is nothing to prove, so we can suppose $n>1$. Since the cosets $Ag_1$ and $Ag_2$ are disjoint, we have $g_1^{-1}g_2\not\in A=C_G(A)$. So, there exists an element $x\in A$ such that $g_1^{-1}g_2x\ne xg_1^{-1}g_2$. For each $1\leq i\leq n$, if we set $x_i=g_ixg_i^{-1}$, then $x_1\ne x_2$. Since $G$ normalizes $K$, it follows $x_i\in K$ for all $1\leq i\leq n$. Now, we have
$$(k_1g_1+\cdots+k_ng_n)x-x_1(k_1g_1+\cdots+k_ng_n)= 0.$$
By definition, we have $x_ig_i=g_ix$, and $x$, $x_i\in K$ for all $i$. Recall that $K=F[A]$ is commutative, so from the last equality
$$\left( {{k_1}{g_1}x + {k_2}{g_2}x + \cdots + {k_n}{g_n}x} \right) - \left( {{x_1}{k_1}{g_1} + {x_1}{k_2}{g_2} + \cdots + {x_1}{k_n}{g_n}} \right) = 0,$$
it follows
$$\left( {{k_1}{x_1}{g_1} + {k_2}{x_2}{g_2} + \cdots + {k_n}{x_n}{g_n}} \right) - \left( {{k_1}{x_1}{g_1} + {k_2}{x_1}{g_2} + \cdots + {k_n}{x_1}{g_n}} \right) = 0.$$
Consequently, we get
$$\left( {{x_2} - {x_1}} \right){k_2}{g_2} + \cdots + \left( {{x_n} - {x_1}} \right){k_n}{g_n} = 0,$$
which is a non-trivial relation with less than $n$ summands because $x_1\ne x_2$, a contradiction. Therefore, $T$ is linearly independent over $K$.
\end{Proof}
\begin{remark}\label{rem.subnormal}
In view of \cite[Theorem 11]{mah98}, if $D$ is a division ring with at least five elements and $n\geq 2$, then any non-central subnormal subgroup of ${\rm GL}_n(D)$ contains ${\rm SL}_n(D)$ and hence is normal.
\end{remark}
\begin{theorem} \label{theorem_3.6}
Let $D$ be a non-commutative division ring with center $F$, and $G$ be a subnormal subgroup of ${\rm GL}_n(D)$, $n\geq2$. Assume additionally that $F$ contains at least five elements. If $M$ is a solvable maximal subgroup of $G$, then $M$ is abelian.
\end{theorem}
\begin{Proof}
If $G\subseteq F$, then there is nothing to prove. Thus, we may assume that $G$ is non-central, hence ${\rm SL}_n(D)\subseteq G$ and $G$ is normal in ${\rm GL}_n(D)$ by Remark \ref{rem.subnormal}. Setting $R=F[M]$, then $M\subseteq R^*\cap G \subseteq G$. By the maximality of $M$ in $G$, either $R^*\cap G=M$ or $G\subseteq R^*$. We need to consider two possible cases:
\textit{Case 1:} $R^*\cap G=M$.
The normality of $G$ implies that $M$ is a normal subgroup of $R^*$. If $M$ is reducible, then by \cite[Lemma 1]{kiani_07}, it contains a copy of $D^*$. It follows that $D^*$ is solvable, and hence it is commutative, a contradiction. We may therefore assume that $M$ is irreducible. Then $R$ is a prime ring by \cite[1.1.14]{shirvani-wehrfritz}. So, in view of \cite[Theorem 2]{lanski_81}, either $M\subseteq Z(R)$ or $R$ is a domain. If the first case occurs, then we are done. Now, suppose that $R$ is a domain. By \cite[Corollary 24]{wehrfritz_89}, we conclude that $R$ is a Goldie ring, and thus $R$ is an Ore domain. Let $\Delta_1$ be the skew field of fractions of $R$, which is contained in ${\rm M}_n(D)$ by \cite[5.7.8]{shirvani-wehrfritz}. Since $M\subseteq \Delta_1^*\cap G\subseteq G$, either $G\subseteq\Delta_1^*$ or $M=\Delta_1^*\cap G$. If the first case occurs, then $\Delta_1$ contains $F[{\rm SL}_n(D)]$. Thus, if $G\subseteq\Delta_1^*$, then by the Cartan-Brauer-Hua Theorem for the matrix ring (see e.g. \cite[Theorem D]{dorbidi2011}), one has $\Delta_1={\rm M}_n(D)$, which is impossible since $n\geq 2$. Thus the second case must occur, i.e., $M=\Delta_1^*\cap G$, which yields that $M$ is normal in $\Delta_1^*$. Since $M$ is solvable, it is contained in $Z(\Delta_1)$ by \cite[14.4.4]{scott}, so $M$ is abelian.
\textit{Case 2:} $G\subseteq R^*$.
In this case, Remark \ref{rem.subnormal} yields ${\rm SL}_n(D)\subseteq R^*$. Thus, by the Cartan-Brauer-Hua Theorem for the matrix ring, one has $R=F[M]={\rm M}_n(D)$. It follows by \cite[Theorem A]{wehrfritz86} that $M$ is abelian-by-locally finite. Let $A$ be a maximal abelian normal subgroup of $M$ such that $M/A$ is locally finite. Then \cite[1.2.12]{shirvani-wehrfritz} says that $F[A]$ is a semisimple artinian ring. The Wedderburn-Artin Theorem implies that
$$ F[A] \cong {\rm M}_{n_1}(D_1)\times {\rm M}_{n_2}(D_2)\times\cdots\times {\rm M}_{n_s}(D_s),$$
where $D_i$ are division $F$-algebras, $1\leq i\leq s$. Since $F[A]$ is abelian, $n_i = 1$ and $K_i:=D_i=Z(D_i)$ are fields that contain $F$ for all $i$. Therefore,
$$F[A]\cong K_1\times K_2\times\cdots\times K_s.$$
If $M$ is imprimitive, then by \cite[Lemma 2.6]{hai-khanh}, we conclude that $M$ contains a copy of ${\rm SL}_r(D)$ for some $r\geq 1$. This fact cannot occur: if $r>1$, then ${\rm SL}_r(D)$ is unsolvable; if $r=1$, then $D'$ is solvable and hence $D$ is commutative, a contradiction. Thus, $M$ is primitive, and \cite[Proposition 3.3]{dorbidi2011} implies that $F[A]$ is an integral domain, so $s=1$. It follows that $K:=F[A]$ is a subfield of ${\rm M}_n(D)$ containing $F$. Again by \cite[Proposition 3.3]{dorbidi2011}, we conclude that $$L:=C_{{\rm M}_n(D)}(K)=C_{{\rm M}_n(D)}(A)\cong {\rm M}_m(\Delta_2)$$ for some division $F$-algebra $\Delta_2$. Since $M$ normalizes $K$, it also normalizes $L$. Therefore, we have $M\subseteq N_G(L^*)\subseteq G$. By the maximality of $M$ in $G$, either $M= N_G(L^*)$ or $G=N_G(L^*)$. The last case implies that $L^*\cap G$ is normal in ${\rm GL}_n(D)$. By Remark \ref{rem.subnormal}, either $L^*\cap G\subseteq F$ or ${\rm SL}_n(D)\subseteq L^*\cap G$. If the first case occurs, then $A\subseteq F$ because $A$ is contained in $L^*\cap G$. If the second case occurs, then by the Cartan-Brauer-Hua Theorem for the matrix ring, one has $L={\rm M}_n(D)$. It follows that $K=F[A]\subseteq F$, which also implies that $A\subseteq F$. Thus, in both cases we have $A\subseteq F$. Consequently, $M/M\cap F^*$ is locally finite, and hence $D$ is a locally finite division ring by Lemma \ref{lemm_2.1}. If $M$ is non-abelian, then by Theorem \ref{theorem_3.4}, we conclude that $n=1$, a contradiction. Therefore $M$ is abelian in this case. Now, we consider the case $M= N_G(L^*)$, from which we have $L^*\cap G \subseteq M$. In other words, $L^*\cap G$ is a solvable normal subgroup of ${\rm GL}_m(\Delta_2)$.
From this, we may conclude that $L^*\cap G$ is abelian: if $m>1$, then in view of Remark \ref{rem.subnormal}, one has $L^*\cap G\subseteq Z(\Delta_2)$ or ${\rm SL}_m(\Delta_2)\subseteq L^*\cap G$, but the latter cannot happen since ${\rm SL}_m(\Delta_2)$ is unsolvable; if $m=1$ then $L=\Delta_2$, and according to \cite[14.4.4]{scott}, we conclude that $L^*\cap G\subseteq Z(\Delta_2)$. In short, we have $L^*\cap G$ is an abelian normal subgroup of $M$ and $M/L^*\cap G$ is locally finite. By the maximality of $A$ in $M$, one has $A=L^*\cap G$. Because we are in the case $L^*\cap G \subseteq M$, it follows that $L^*\cap G=L^*\cap M$. Consequently, $$A=L^*\cap M=C_{{\rm GL}_n(D)}(A)\cap M =C_M(A),$$ which means $A$ is a maximal abelian subgroup of $M$.
By Lemma \ref{lemma_3.5}, $F[M]=\oplus _{m \in T}{Km}$ for some transversal $T$ of $A$ in $M$. Thus, for any $x\in L$, there exist $k_1,k_2,\dots,k_t\in K$ and $m_1,m_2,\dots,m_t\in T$ such that $x=k_1m_1+k_2m_2+\cdots+k_tm_t$. Take an arbitrary element $a\in A$, since $xa=ax$, it follows that
$$k_1m_1a+k_2m_2a+\cdots+k_tm_ta=ak_1m_1+ak_2m_2+\cdots+ak_tm_t.$$
By the normality of $A$ in $M$, there exist $a_i\in A$ such that $m_ia=a_im_i$ for all $1\leq i\leq t$. Moreover, we have $a$ and $a_i$'s are in $K$ which is a field, the equality implies
$$k_1a_1m_1+k_2a_2m_2+\cdots+k_ta_tm_t=k_1am_1+k_2am_2+\cdots+k_tam_t,$$ from which it follows that
$$ (k_1a_1-k_1a)m_1+(k_2a_2-k_2a)m_2+\cdots+(k_ta_t-k_ta)m_t=0.$$
Since $\{m_1,m_2,\dots,m_t\}$ is linearly independent over $K$, one has $a=a_1=\cdots=a_t$. Consequently, $m_ia=am_i$ for all $a\in A$, and thus $m_i\in C_M(A)=A$ for all $1\leq i\leq t$. This means $x\in K$, and hence $L=K$ and $K$ is a maximal subfield of ${\rm M}_n(D)$.
Next, we prove that $M/A$ is simple. Suppose that $N$ is an arbitrary normal subgroup of $M$ properly containing $A$. Note that by the maximality of $A$ in $M$, we conclude that $N$ is non-abelian. We claim that $Q:=F[N]={\rm M}_n(D)$. Indeed, since $N$ is normal in $M$, we have $M\subseteq N_G(Q^*)\subseteq G$, and hence either $N_G(Q^*)=M$ or $N_G(Q^*)=G$. First, we suppose the former case occurs. Then $Q^*\cap G\subseteq M$, hence $Q^*\cap G$ is a solvable normal subgroup of $Q^*$. In view of \cite[Proposition 3.3]{dorbidi2011}, $Q$ is a prime ring. It follows by \cite[Theorem 2]{lanski_81} that either $Q^*\cap G\subseteq Z(Q)$ or $Q$ is a domain. If the first case occurs, then $N\subseteq Q^*\cap G$ is abelian, which contradicts the choice of $N$. If $Q$ is a domain, then by Goldie's theorem, it is an Ore domain. Let $\Delta_3$ be the skew field of fractions of $Q$, which is contained in ${\rm M}_n(D)$ by \cite[5.7.8]{shirvani-wehrfritz}. Because $M$ normalizes $Q$, it also normalizes $\Delta_3$, from which we have $M\subseteq N_G(\Delta_3^*)\subseteq G$. Again by the maximality of $M$ in $G$, either $N_G(\Delta_3^*)=M$ or $N_G(\Delta_3^*)=G$. The first case implies that $\Delta_3^*\cap G$ is a solvable normal subgroup of $\Delta_3^*$. Consequently, $N\subseteq\Delta_3^*\cap G$ is abelian by \cite[14.4.4]{scott}, a contradiction. If $N_G(\Delta_3^*)=G$, then $\Delta_3={\rm M}_n(D)$ by the Cartan-Brauer-Hua Theorem for the matrix ring, which is impossible since $n\geq2$. Therefore, the case $N_G(Q^*)=M$ cannot occur. Next, we consider the case $N_G(Q^*)=G$. In this case we have $Q^*\cap G \unlhd {\rm GL}_n(D)$, and hence either $Q^*\cap G\subseteq F$ or ${\rm SL}_n(D)\subseteq Q^*\cap G$ by Remark \ref{rem.subnormal}. The first case cannot occur since $Q^*\cap G$ contains $N$, which is non-abelian. Therefore, we have ${\rm SL}_n(D)\subseteq Q^*$. By the Cartan-Brauer-Hua theorem for the matrix ring, we conclude $Q={\rm M}_n(D)$ as claimed.
In other words, we have $F[N]=F[M]={\rm M}_n(D)$.
For any $m\in M\subseteq F[N]$, there exist $f_1,f_2,\dots,f_s\in F$ and $n_1,n_2,\dots,n_s\in N$ such that
$$m=f_1n_1+f_2n_2+\cdots+f_sn_s.$$
Let $H=\left\langle n_1,n_2,\dots,n_s \right\rangle $ be the subgroup of $N$ generated by $n_1,n_2,\dots,n_s$. Set $B=AH$ and $S=F[B]$. Recall that $A$ is a maximal abelian subgroup of $M$. Thus, if $B$ is abelian, then $A=B$ and hence $H\subseteq A$. Consequently, $m\in F[A]=K$, from which it follows that $m\in K^*\cap M=A\subseteq N$. Now, assume that $B$ is non-abelian, and we will prove that $m$ also belongs to $N$ in this case. Since $M/A$ is locally finite, $B/A$ is finite. Let $\{x_1,\ldots,x_k\}$ be a transversal of $A$ in $B$. The maximality of $A$ in $M$ implies that $A$ is a maximal abelian subgroup of $B$, and that $A$ is also normal in $B$. By Lemma \ref{lemma_3.5},
$$S=Kx_1\oplus Kx_2\oplus\cdots\oplus Kx_k,$$
which says that $S$ is an artinian ring. Since $C_{{\rm M}_n(D)}(A)=C_{{\rm M}_n(D)}(K)=L$ is a field, in view of \cite[Proposition 3.3]{dorbidi2011}, we conclude that $A$ is irreducible. Because $B$ contains $A$, by definition, it is irreducible too. It follows by \cite[1.1.14]{shirvani-wehrfritz} that $S$ is a prime ring. Now, $S$ is both prime and artinian, so it is simple and $S\cong {\rm M}_{n_0}(\Delta_0)$ for some division $F$-algebra $\Delta_0$. If we set $F_0=Z(\Delta_0)$, then $Z(S)=F_0$. Since $B$ is abelian-by-finite, the group ring $FB$ is a PI-ring by \cite[Lemma 11, p. 176]{passman_77}. Thus, as a hommomorphic image of $FB$, the ring $S=F[B]$ is also a PI-ring. By Kaplansky's theorem (\cite[Theorem 3.4, p. 193]{passman_77}), we conclude that $[S:F_0]<\infty$. Since $K$ is a maximal subfield of ${\rm M}_n(D)$, it is also maximal in $S$. From this, we conclude that $F_0\subseteq C_S(K)= K$, and that $K$ is a finite extension field over $F_0$.
Recall that $A$ is normal in $B$, so for any $b\in B$, the mapping $\theta_b:K\to K$ given by $\theta_b(x)=bxb^{-1}$ is well defined. It is clear that $\theta_b$ is an $F_0$-automorphism of $K$. Thus, the mapping $$\psi:B\to {\rm Gal}(K/F_0)$$ defined by $\psi(b)=\theta_b$ is a group homomorphism with $$\mathrm{ker}\psi=C_{S^*}(K^*)\cap B=C_{S^*}(A)\cap B=C_B(A)=A.$$ Since $F_0[B]=S$, it follows that $C_S(B)=F_0$. Therefore, the fixed field of $\psi(B)$ is $F_0$, and hence $K/F_0$ is a Galois extension. By the fundamental theorem of Galois theory, one has $\psi$ is a surjective homomorphism. Hence, $ B/A\cong {\rm Gal}(K/F_0)$.
Setting $M_0=M\cap S^*$, then $m\in M_0$, $B\subseteq M_0$, and $F_0[M_0]=F_0[B]=S$. The conditions $F_0\subseteq K$ and $F\subseteq F_0$ implies that $K=F[A]=F_0[A]$. It is clear that $A$ is a maximal abelian subgroup of $M_0$, and that $A$ is also normal in $M_0$. If $M_0/A$ is infinite, then there exists an infinite transversal $T$ of $A$ in $M_0$ such that $S=F_0[M_0]=\oplus_{m\in T}Km$ by Lemma \ref{lemma_3.5}. It follows that $[S:K]=\infty$, a contradiction. Therefore, $M_0/A$ must be finite. Replacing $B$ by $M_0$ in the preceding paragraph, we also conclude that $ M_0/A\cong {\rm Gal}(K/F_0)$. Consequently, $B/A\cong{\rm Gal}(K/F_0)\cong M_0/A$. The conditions $B\subseteq M_0$ and $B/A \cong M_0/A$ imply $B=M_0$. Hence, $m\in M_0= B\subseteq N$. Since $m$ was chosen arbitrarily, it follows that $M=N$, which implies the simplicity of $M/A$. Since $M/A$ is simple and solvable, one has $M/A\cong \mathbb{Z}_p$, for some prime number $p$. By Lemma \ref{lemma_3.5}, it follows $\dim_K{\rm M}_n(D)=|M/A|=p$, which forces $n=1$, a contradiction.
\end{Proof}
Now, we are ready to state the main result of this note, which gives, in particular, a positive answer to Conjecture \ref{conj:1} in the non-abelian case.
\begin{theorem} \label{theorem_3.7}
Let $D$ be a non-commutative division ring with center $F$, $G$ a subnormal subgroup of ${\rm GL}_n(D)$. Assume additionally that $F$ contains at least five elements if $n>1$. If $M$ is a non-abelian solvable maximal subgroup of $G$, then $n=1$ and the following conditions hold:
\begin{enumerate}[(i)]
\item There exists a maximal subfield $K$ of $D$ such that $K/F$ is a finite Galois extension with $\mathrm{Gal}(K/F)\cong M/K^*\cap G\cong \mathbb{Z}_p$ for some prime $p$, and $[D:F]=p^2$.
\item The subgroup $K^*\cap G$ is the $FC$-center. Also, $K^*\cap G$ is the Fitting subgroup of $M$. Furthermore, for any $x\in M\setminus K$, we have $x^p\in F$ and $D=F[M]=\bigoplus_{i=1}^pKx^i$.
\end{enumerate}
\end{theorem}
\begin{Proof}
Combining Theorem \ref{theorem_3.2} and Theorem \ref{theorem_3.6}, we get the result.
\end{Proof}
\end{document} |
\begin{document}
\date{}
\title{On the heat kernel of a class of fourth order operators in two dimensions:
sharp Gaussian estimates and short time asymptotics}
\parindent=0pt
\parskip=8pt
\begin{abstract}
We consider a class of fourth order uniformly elliptic operators in planar Euclidean domains
and study the associated heat kernel. For operators with $L^{\infty}$ coefficients we obtain Gaussian estimates with best constants, while for operators with constant coefficients we obtain short time asymptotic estimates. The novelty of this work is that we do not assume that the associated symbol is strongly convex. The short time asymptotics reveal a behavior which is qualitatively different from that of the strongly convex case.
\end{abstract}
\noindent
{\bf Keywords:} higher order parabolic equations; heat kernel estimates; short time asymptotics
\noindent
{\bf 2010 Mathematics Subject Classification:} 35K25, 35E05, 35B40
\section{Introduction}
Let $\Omega$ be a planar domain and let
\[
Hu = \partial_{x_1}^2 \big( \alpha(x) \partial_{x_1}^2 u \big) +2\partial_{x_1}\partial_{x_2} \big( \beta(x) \partial_{x_1}\partial_{x_2} u \big)
+ \partial_{x_2}^2 \big( \gamma(x) \partial_{x_2}^2 u \big)
\]
be a self-adjoint, fourth-order uniformly elliptic operator in divergence form on $\Omega$ with $L^{\infty}$ coefficients
satisfying Dirichlet boundary conditions on $\partial\Omega$. It has been proved by Davies \cite{davies1} that $H$ has a heat kernel
$G(x,x',t)$ which satisfies the Gaussian-type estimate,
\begin{equation}
|G(x,x',t)|\leq c_1 t^{-\frac{1}{2}}\exp\Big(-c_2\frac{|x-x'|^{4/3}}{t^{1/3}}+c_3t\Big),
\label{in0}
\end{equation}
for some positive constants $c_1$, $c_2$, $c_3$ and all $t>0$ and $x,x'\in\Omega$.
The problem of finding the sharp value of the exponential constant $c_2$ is related to replacing the Euclidean distance $|x-x'|$ by an appropriate distance $d(x,x')$ that is suitably adapted to the operator $H$ and, more precisely, to its symbol
\[
A(x,\xi) =\alpha(x) \xi_1^4 +2\beta(x) \xi_1^2 \xi_2^2 +\gamma(x)\xi_2^4 \; , \qquad x\in\Omega \, , \;\; \xi\in{\mathbb{R}}^2 \, .
\]
In the article \cite{ep}, and for constant coefficient operators in ${\mathbb{R}}^n$ which satisfy suitable assumptions, the asymptotic formula
\begin{equation}
G(x,x',t)\sim h(x-x')^{-2/3} t^{-1/3}\exp\Big(- \frac{3\sqrt[3]{2}}{16} \frac{p_*(x-x')^{4/3}}{t^{1/3}}\Big)
\cos\Big(-\frac{3\sqrt{3}\sqrt[3]{2}}{16} \frac{p_*(x-x')^{4/3}}{t^{1/3}} -\frac{\pi}{3}\Big),
\label{in3}
\end{equation}
was established as $t\to 0+$; here $h$ is a positively homogeneous function of degree one and $p_*$ is the Finsler metric defined by
\begin{equation}
p_*(\xi) = \max_{\eta \in{\mathbb{R}}^2\setminus\{0\}} \frac{ \eta\cdot \xi}{ A(\eta)^{1/4}}.
\label{plo}
\end{equation}
An analogous asymptotic formula has been obtained in \cite{tintarev} in the more general case of operators with variable smooth coefficients;
in this case the relevant distance is the (geodesic) Finsler distance $d_{p_*}(x,x')$ induced by the Finsler metric with length element $p_*(x,\xi)$, the latter being defined similarly to (\ref{plo}), with the additional dependence on $x$.
A sharp version of the Gaussian estimate (\ref{in0}) was established in \cite{b2001} where it was proved that
\begin{equation}
|G(x,x',t)|\leq c_{\epsilon} t^{-\frac{1}{2}}\exp\Big\{-\big( \frac{3\sqrt[3]{2}}{16} -D -\epsilon\big) \frac{d_M(x,x')^{4/3}}{t^{1/3}}+
c_{\epsilon,M}t\Big\},
\label{in2}
\end{equation}
for arbitrary $\epsilon$ and $M$ positive. Here $D\geq 0$ is a constant that is related to the regularity of the coefficients and $d_M(x,x')$, $M>0$, is a family of Finsler-type distances on $\Omega$ which
is monotone increasing and converges as $M\to +\infty$ to a limit Finsler-type distance $d(x,x')$ closely related to $d_{p_*}(x,x')$ but not equal to it; see also Subsection \ref{nss}.
A fundamental assumption for both (\ref{in3}) and (\ref{in2}) is the {\em strong convexity} of the symbol $A(x,\xi)$ of the operator $H$.
The notion of strong convexity was introduced in \cite{ep} where short time asymptotics were obtained not only for the operator described above but more generally for a constant coefficient operator of order $2m$ acting on functions on ${\mathbb{R}}^n$.
In the context of the present article and for an operator with constant coefficients, strong convexity of the symbol $A(\xi)$ amounts to
\begin{equation}
\label{sc1}
0< \beta < 3 \sqrt{\alpha\gamma} \, .
\end{equation}
We note however that in \cite{b2001} (where the coefficients $\alpha,\beta,\gamma$ are functions) the term strong convexity was also used for the slightly more general situation
where
\begin{equation}
\label{sc2}
0\leq \beta(x) \leq 3 \sqrt{\alpha(x)\gamma(x)} \, , \qquad x\in \Omega.
\end{equation}
In other words, while for short time asymptotics the strict inequality was necessary, for Gaussian estimates equality is allowed.
Our aim in this work is to extend both (\ref{in3}) and (\ref{in2}) to the case of non-strongly convex symbols.
Hence in Theorem 1, which extends the Gaussian estimate (\ref{in2}),
condition (\ref{sc2}) is not valid, while in Theorem 2, which extends the short time asymptotics (\ref{in3}), condition (\ref{sc1}) is not valid.
In Theorem \ref{thm1} we obtain a Gaussian estimate involving the Finsler-type distances $d_M(\cdot , \cdot)$ and a constant $\sigma_*$ that depends on the range of the function
\[
Q(x)=\frac{\beta(x)}{\sqrt{\alpha(x)\gamma(x)}} \, .
\]
So the strongly convex case corresponds to $Q$ taking values in $[0,3]$ but here we allow $Q$ to take any value in $(-1, +\infty)$. It is worth noting that while in the strongly convex case we have $\sigma_* =3\sqrt[3]{2} /16$, in the general case
$\sigma_*$ can take a continuous range of values.
Our approach follows the main ideas of \cite{b2001} and in particular makes use of Davies' exponential perturbation method.
However technical difficulties arise due to the existence of three different regimes for the function $Q(x)$,
namely $(-1,0]$, $[0,3]$ and $[3,+\infty)$. Each regime must be handled differently,
and it must be shown that the matching at $Q=0$ and $Q=3$ does not cause any problems.
In the second part of the paper we extend the short time asymptotic estimates of \cite{ep} to operators with non-strongly convex symbol.
As in \cite{ep}, we consider constant coefficient operators
acting on ${\mathbb{R}}^2$, so the heat kernel (also referred to as the Green's function) is given by
\[
G(x,t)=\frac{1}{(2\pi)^2}\int_{{\mathbb{R}}^2}e^{i\,\xi\cdot\,x -A(\xi) t}\,d\xi,\qquad x\in{\mathbb{R}}^2\, ,\; t>0 .
\]
The asymptotic estimates are contained in Theorem \ref{thm2} and the proof uses the steepest descent method.
For technical reasons we only consider specific choices of the point $x\in{\mathbb{R}}^2$; we comment further upon this before the statement of Theorem \ref{thm2},
but note that the asymptotic formulae obtained are enough to demonstrate the sharpness of the exponential constant of Theorem \ref{thm1}.
Anyway, these asymptotic estimates are of independent interest, in particular because they reveal a behavior that is qualitatively different from that of the strongly convex case.
In the case $0< Q<3$ studied in \cite{ep} the Green function oscillates around the horizontal axis. As it turns out, when
$ Q<0$ or $ Q>3$ the Green function remains positive for small times. The borderline
cases $ Q=0$ and $ Q=3$ are particularly interesting. In these two cases the asymptotic expression
involves oscillations that touch the horizontal axis at their lowest points (see the diagrams at the end of the article). This is due to a bifurcation phenomenon that takes place at $Q=0$ and $Q=3$.
At these values of $ Q$ there is a change in the branches of saddle points that contribute to the asymptotic behavior of the integral. While for each $ Q\neq 0,3$ there are two contributing points,
for each of the values $ Q=0$ and $ Q= 3$ there are four such points.
At the end of the article we present numerical computations that illustrate the asymptotic estimates. For the sake of completeness we have also included an appendix with the proof of Evgrafov and Postnikov in the strongly convex case $0<Q<3$.
We close this introduction with one remark. As pointed out in \cite{ep}, for a fourth order operator in two dimensions
strong convexity is equivalent to convexity. Nevertheless we have chosen not to replace the term `strongly convex' by `convex' in order to
emphasize the importance of strong convexity in the general case of an operator of order $2m$ acting in ${\mathbb{R}}^n$ (considered in both \cite{ep} and \cite{b2001}).
\section{Heat kernel estimates}
\subsection{Setting and statement of Theorem \ref{thm1}}
Let $\Omega\subset{\mathbb{R}}^2$ be open and connected. We consider a differential operator $H$ on $L^2(\Omega)$ (complex-valued functions) given formally by
\[
Hu(x)=\partial_{x_1}^2\big(\alpha(x)\partial_{x_1}^2u\big)+2\partial^2_{x_1x_2}(\beta(x)\partial^2_{x_1x_2}u)+\partial_{x_2}^2\big(\gamma(x)\partial_{x_2}^2u\big),
\]
where $\alpha$, $\beta$ and $\gamma$ are functions in $L^{\infty}(\Omega)$. In case $\Omega\neq {\mathbb{R}}^2$ we impose
Dirichlet boundary conditions on $\partial\Omega$.
The operator $H$ is defined by means of the quadratic form
\[
Q(u)=\int_{\Omega}\big\{\alpha(x)|u_{x_1x_1}|^2+2\beta(x)|u_{x_1x_2}|^2+\gamma(x)|u_{x_2x_2}|^2\big\}\,dx,
\]
defined on $H^2_0(\Omega)$. We assume that $H$ is uniformly elliptic, that is the functions $\alpha$ and $\gamma$ are positive and bounded away from zero and also
\[
\inf_{x\in\Omega} \frac{\beta(x)}{\sqrt{\alpha(x)\gamma(x)}} > -1 \, .
\]
The form $Q$ is then closed and we define the operator $H$ to be the self-adjoint operator associated to it. As mentioned in the introduction, the operator $H$ has a heat kernel $G(x,x',t)$ which satisfies the Gaussian estimate (\ref{in0}).
To state the main result of this section we need to introduce some more definitions.
We define the class of real-valued functions
\[
{\cal E} =\{\varphi\in C^2(\Omega):\;\|D^\alpha\varphi\|_\infty<+\infty\,,\;0\leq|\alpha|\leq2\},
\]
and the subclass
\[
{\cal E}_{A,M} =\{\varphi\in{\cal E} \, :\;A(y,\nabla\varphi(y))\leq1\, , \;
y\in\Omega \, , \; \mbox{ and }\;\|D^\alpha\varphi\|_{\infty}\leq M\,,\;|\alpha|=2\}.
\]
We then define a distance $d_M(\cdot,\cdot)$ on $\Omega$ by
\[
d_M(x,x')=\sup\big\{\varphi(x')-\varphi(x)\,:\;\;\varphi\in{\cal E}_{A,M}\big\}.
\]
It is not difficult to see that as $M\to +\infty$ this converges to the Finsler-type distance
\begin{equation}
d(x,x') = \sup \{ \varphi(x') -\varphi(x) \; : \; \varphi\in{\rm Lip}(\Omega) , \;\; A( y ,\nabla\varphi(y))\leq 1 \, , \;\; y\in\Omega\}.
\label{fd}
\end{equation}
As it turns out, there holds $d(x,x')\leq d_{p_*}(x,x')$ and the two distances in general are not equal. Still, there are cases where equality is valid and this shows in particular that the best constant for Gaussian estimates is the same for both distances.
This is further discussed in Subsection \ref{nss}.
We next define the functions
\[
Q(x)=\frac{\beta(x)}{\sqrt{\alpha(x)\gamma(x)}},\qquad\quad x\in\Omega,
\]
\[
k(x)=\tarr{8\frac{1-Q(x)}{(1+Q(x))^2},}{\mbox{ if }-1<Q(x)<0,}{8,}{\mbox{ if }0\leq Q(x)\leq3,}{Q(x)^2-1,}{\mbox{ if }Q(x)>3,}
\]
and
\begin{equation}
\sigma(x)=\frac{3}{4}\cdot\Big(\frac{1}{4k(x)}\Big)^{1/3}= \tarr{ \frac{3}{2\cdot 4^{4/3}} \frac{ (1+Q(x))^{2/3}}{ (1-Q(x))^{1/3}},}
{\mbox{ if }-1<Q(x)<0,}{\frac{3}{8\cdot 4^{1/3}},}{\mbox{ if }0\leq Q(x)\leq3,}{ \frac{3}{4^{4/3}} (Q(x)^2-1)^{-1/3} ,}{\mbox{ if }Q(x)>3,}
\label{sx}
\end{equation}
We set
\[
k^*=\sup_{x\in\Omega}k(x) \, , \quad \mbox{ and }
\sigma_*= \inf_{x\in\Omega}\sigma(x) = \frac{3}{4}\cdot\Big(\frac{1}{4k^*}\Big)^{1/3}.
\]
Finally, we denote by $D$ the distance in $L^{\infty}(\Omega)$ of the functions $\alpha(x),\beta(x),\gamma(x)$ from the space of all Lipschitz functions,
\begin{equation}
D=\max\big\{d_{L^{\infty}}(\alpha,{\rm Lip}(\Omega))\,,\;d_{L^{\infty}}(\beta,{\rm Lip}(\Omega))\,,\;d_{L^{\infty}}(\gamma,{\rm Lip}(\Omega))\big\}.
\label{D}
\end{equation}
The main result of this section is the following
\begin{theorem}
For all $\epsilon\in(0,1)$ and all $M$ large there exist $c_\epsilon,c_{\epsilon,M}<\infty$ such that
\begin{equation}
|G(x,x',t)|\leq c_\epsilon t^{-1/2}\exp\Big\{-(\sigma_*-cD-\epsilon)\frac{d_M(x,x')^{4/3}}{t^{1/3}}+c_{\epsilon,M}t\Big\},
\label{eq:mainthm}
\end{equation}
for all $x,x'\in\Omega$ and $t>0$.
\label{thm1}
\end{theorem}
It will follow from the results of Section \ref{sec:asympt} that the constant $\sigma_*$ is sharp.
\subsection{Proof of Theorem \ref{thm1}}
We first establish some auxiliary inequalities related to the symbol $A(x,\xi)$ of the operator $H$. Since these are pointwise inequalities with respect to $x\in\Omega$, we assume for simplicity that the coefficients are constant and therefore the symbol is
\[
A(\xi)=\alpha\xi_1^4+2\beta\xi_1^2\xi_2^2+\gamma\xi_2^4,\qquad\xi\in{\mathbb{R}}^2,
\]
where $\alpha,\beta,\gamma\in{\mathbb{R}}$. By ellipticity we have $\alpha,\gamma>0$ and $Q:=\beta/\sqrt{\alpha\gamma}>-1$.
We shall need to consider the symbol also as a function of two complex variables, that is we set
\[
A(z)=\alpha z_1^4+2\beta z_1^2 z_2^2+\gamma z_2^4,\qquad z=(z_1,z_2)\in{\mathbb{C}}^2 \, .
\]
\begin{lemma}
There holds
\begin{equation}
\label{eq1}
{\rm Re}\;\,A(\xi+i\eta)\geq-kA(\eta),\quad\quad \xi,\,\eta\in{\mathbb{R}}^2,
\end{equation}
where the constant $k$ is given by
\[
k=\tarr{8\frac{1-Q}{(1+Q)^2},}{\mbox{ if }\;-1<Q<0,}{8,}{\mbox{ if }\;0\leq Q\leq 3,}{Q^2-1,}{\mbox{ if }\;Q>3.}
\]
\label{lem1}
\end{lemma}
{\em Proof.} We first compute
\begin{eqnarray}
{\rm Re}\;\,A(\xi+i\eta)&=& \alpha (\xi_1^4 -6\xi_1^2\eta_1^2 +\eta_1^4 ) +2\beta\Big(\xi_1^2\xi_2^2
-\xi_1^2\eta_2^2-\xi_2^2\eta_1^2-4\xi_1\xi_2\eta_1\eta_2+\eta_1^2\eta_2^2\Big) \nonumber\\
&& + \gamma (\xi_2^4 -6\xi_2^2\eta_2^2+\eta_2^4 ).
\label{expansion}
\end{eqnarray}
We now distinguish the three cases.
(i) $-1<Q<0$. Using (\ref{expansion}) we see by a direct computation that
\begin{eqnarray}
&&{\rm Re}\;\,A(\xi+i\eta)+8\frac{1-Q}{(1+Q)^2}A(\eta)\nonumber\\
&=&(Q+1)\Big\{\alpha\Big(\xi_1^2-\frac{3-Q}{1+Q}\eta_1^2 \Big)^2+\gamma\Big(\xi_2^2-\frac{3-Q}{1+Q}\eta_2^2\Big)^2\Big\}
-Q\big(\alpha^{1/2}\xi_1^2-\gamma^{1/2}\xi_2^2\big)^2 \label{relation1}\\
&&-2Q\big(\alpha^{1/2}\xi_1\eta_1+\gamma^{1/2}\xi_2\eta_2\big)^2-2Q\alpha^{1/2}\gamma^{1/2}(\xi_1\eta_2+\xi_2\eta_1)^2-Q\Big(\frac{3-Q}{1+Q}\Big)^2 \big(\alpha^{1/2}\eta_1^2-\gamma^{1/2}\eta_2^2\big)^2 \! ,\nonumber
\end{eqnarray}
and (\ref{eq1}) follows.
(ii) $0\leq Q\leq3$. Similarly it may be verified that
\begin{eqnarray}
{\rm Re}\;\,A(\xi+i\eta)+8A(\eta)&=&\frac{Q}{3}\Big\{\alpha^{1/2}(\xi_1^2-3\eta_1^2)+\gamma^{1/2}(\xi_2^2-3\eta_2^2)\Big\}^2+\frac{4Q}{3}\alpha^{1/2}\gamma^{1/2}(\xi_1\xi_2-3\eta_1\eta_2)^2\nonumber\\
&&+\frac{3-Q}{3}\Big\{\alpha(\xi_1^2-3\eta_1^2)^2+\gamma(\xi_2^2-3\eta_2^2)^2\Big\},\label{relation2}
\end{eqnarray}
and (\ref{eq1}) again follows.
(iii) $Q>3$. In this case we have
\begin{eqnarray}
&&\hspace{-1.5cm}{\rm Re}\;\,A(\xi+i\eta)+(Q^2-1)A(\eta)\nonumber\\
&=&2(Q-3)\big(\alpha^{1/2}\xi_1\eta_1-\gamma^{1/2}\xi_2\eta_2\big)^2 +\Big\{\alpha^{1/2}(\xi_1^2-Q\eta_1^2)+\gamma^{1/2}(\xi_2^2-Q\eta_2^2)\Big\}^2 \label{relation3}\\
&&+2(Q-1)\alpha^{1/2}\gamma^{1/2}\Big(\xi_1\xi_2-\frac{Q+3}{Q-1}\eta_1\eta_2\Big)^2+2\frac{(Q-3)(Q+1)(Q^2+3)}{Q -1}\alpha^{1/2}\gamma^{1/2}\eta_1^2\eta_2^2, \nonumber
\end{eqnarray}
and (\ref{eq1}) follows once again. $
\Box$
Given $\psi\in{\cal E}$ the (multiplication) operator $e^{\psi}$ leaves the Sobolev space $H_0^2(\Omega)$ invariant so one can define a quadratic form $Q_{\psi}$ on $H^2_0(\Omega)$ by
\[
Q_{\psi}(u)=Q(e^{\psi}u,e^{-\psi}u),
\]
where
\[
Q(u,v)= \int_{\Omega}\big\{\alpha(x)u_{x_1x_1}\overline{v}_{x_1x_1}+2\begin{equation}ta(x)u_{x_1x_2}\overline{v}_{x_1x_2}
+\gamma(x)u_{x_2x_2}\overline{v}_{x_2x_2}\big\}\,dx
\]
is the sesquilinear form associated to $Q(\cdot)$.
Expanding the various terms of $Q_{\psi}(u)$ (cf. (\ref{pat}) below) we find that the highest order terms
coincide with those of $Q(u)$ and standard interpolation inequalities (cf. \cite[Lemma 2]{davies1}) then give
\begin{equation}
|Q(u)-Q_{\psi}(u)|\leq\epsilon Q(u)+c_{\epsilon}\{\|\psi\|_{W^{2,\infty}}+\|\psi\|_{W^{2,\infty}}^{4}\}\|u\|_2^2,
\label{fil}
\end{equation}
for all $\epsilon\in(0,1)$ and $u\in H^2_0(\Omega)$.
The proof of Theorem \ref{thm1} makes essential use of the following result which is Proposition 2 of \cite{bd}:
\begin{lemma}
Let $\psi\in{\cal E} $ be given and let $\tilde k >0$ be such that
\[
{\rm Re}\,Q_{\psi}(u)\geq - \tilde{k} \,\|u\|_2^2,
\]
for $u\in C_c^{\infty}(\Omega)$. Then for any $\delta\in(0,1)$ there exists a constant $c_\delta$ such that
\[
|G(x,y,t)|\leq c_{\delta} t^{-1/2}\exp\{\psi(x)-\psi(y)+(1+\delta) \tilde{k} t\},
\]
for all $x,y\in\Omega$ and all $t>0$.
\label{lem:jde}
\end{lemma}
Given $\varphi\in{\cal E}$ and $\lambda>0$ we have
\begin{eqnarray}
Q_{\lambda\varphi}(u)&=&\int_{\Omega}\Big[\alpha(x)(e^{\lambda\varphi}u)_{x_1x_1}
(e^{-\lambda\varphi}\overline{u})_{x_1x_1}+2\beta(x)(e^{\lambda\varphi}u)_{x_1x_2}(e^{-\lambda\varphi}\overline{u})_{x_1x_2}\nonumber\\
&&\quad\quad+\gamma(x)(e^{\lambda\varphi}u)_{x_2x_2}(e^{-\lambda\varphi}\overline{u})_{x_2x_2} \Big] \,dx.
\label{pat}
\end{eqnarray}
Using Leibniz's rule to expand the second partial derivatives the exponentials $e^{\lambda\varphi}$ and $e^{-\lambda\varphi}$ cancel and we conclude that $Q_{\lambda\varphi}(u)$ is a linear combination of terms of the form
\begin{equation}
\label{terms}
\lambda^s\int_{\Omega}b_{s\gamma\delta}(x)D^{\gamma}u\,D^{\delta}\overline{u}\,dx,
\end{equation}
(multi-index notation) where each function $b_{s\gamma\delta}(x)$ is a product of one of the functions $\alpha(x)$, $\beta(x)$, $\gamma(x)$ and first or second order derivatives of $\varphi$. For any such term we have $s+|\gamma+\delta|\leq4$.
{\bf Definition.} We denote by ${\cal L}$ the space of (finite) linear combinations of terms of the form (\ref{terms})
with $s+|\gamma+\delta| <4$.
We shall see later the terms in ${\cal L}$ are in a certain sense negligible.
We next define the quadratic form
\begin{eqnarray*}
Q_{1,\lambda\varphi}(u)\!&=&\int_{\Omega}\Big\{\lambda^4\big[\alpha(x)\varphi_{x_1}^4+2\beta(x)\varphi_{x_1}^2\varphi_{x_2}^2+\gamma(x)\varphi_{x_2}^4\big]|u|^2\\
&&\quad+\lambda^2\Big\{\alpha(x)\varphi_{x_1}^2(u\overline{u}_{x_1x_1}+u_{x_1x_1}\overline{u} -4|u_{x_1}|^2)\\
&&\quad+2\beta(x)\big[\varphi_{x_1}\varphi_{x_2}(u\overline{u}_{x_1x_2}+u_{x_1x_2}\overline{u}-u_{x_1}\overline{u}_{x_2}-u_{x_2}\overline{u}_{x_1})-(\varphi_{x_2}^2|u_{x_1}|^2+\varphi_{x_1}^2|u_{x_2}|^2)\big]\\
&&\quad+\gamma(x)\varphi_{x_2}^2(u\overline{u}_{x_2x_2}+u_{x_2x_2}\overline{u}-4|u_{x_2}|^2)\Big\}\\
&&\quad +\alpha(x)|u_{x_1x_1}|^2+2\beta(x)|u_{x_1x_2}|^2+\gamma(x)|u_{x_2x_2}|^2\Big\}\,dx.
\end{eqnarray*}
It can be easily seen that $Q_{1,\lambda\varphi}(\cdot)$ contains precisely those terms of the form (\ref{terms}) from the expansion of $Q_{\lambda\varphi}(\cdot)$ for which we have $s+|\gamma+\delta|=4$. Hence we have
\begin{lemma}
The difference $Q_{\lambda\varphi}(\cdot)-Q_{1,\lambda\varphi}(\cdot)$ belongs to ${\cal L}$.
\label{lem2}
\end{lemma}
The symbol of the operator $H$ is
\[
A(x,z)=\alpha(x)z_1^4+2\beta(x)z_1^2z_2^2+\gamma(x)z_2^4,\qquad x\in\Omega,\quad z\in{\mathbb{C}}^2\,,
\]
and the polar symbol is defined as
\[
A(x,z,z')=\alpha(x)z_1^2z_1'^2+2\beta(x)z_1z_2z_1'z_2'+\gamma(x)z_2^2z_2'^2,\qquad x\in\Omega,\quad z,\,z'\in{\mathbb{C}}^2.
\]
For $x\in\Omega$ and $\xi,\xi',\eta\in{\mathbb{R}}^2$ we set
\[
S(x,\xi,\xi',\eta)={\rm Re}\,A(x,\xi+i\eta,\xi'+i\eta)+k(x)A(x,\eta).
\]
Given $\varphi\in{\cal E}$ and $\lambda\in{\mathbb{R}}$ we define the quadratic form $S_{\lambda\varphi}$ on $H^2_0(\Omega)$ by
\[
S_{\lambda\varphi}(u)=\frac{1}{(2\pi)^{2}}\iiint_{\Omega\times{\mathbb{R}}^2\times{\mathbb{R}}^2}S(x,\xi,\xi',\lambda\nabla\varphi)e^{i(\xi-\xi')\cdot x}\hat{u}(\xi)\overline{\hat{u}(\xi')}\,d\xi\,d\xi'\,dx.
\]
\begin{lemma}
There holds
\[
Q_{1,\lambda\varphi}(u)+\int_{\Omega} k(x)A(x,\lambda\nabla\varphi)|u|^2dx=S_{\lambda\varphi}(u),
\]
for all $\varphi\in{\cal E}$, $\lambda>0$ and $u\in H^2_0(\Omega)$.
\label{lem:4}
\end{lemma}
{\em Proof.} For the proof one simply uses the relation $D^{\alpha}u(x) =(2\pi)^{-1}\int_{{\mathbb{R}}^2}(i\xi)^{\alpha}e^{ix\cdot\xi}\hat{u}(\xi)d\xi$
for the various terms that appear in $Q_{1,\lambda\varphi}$. Since a very similar proof has been given in \cite{b2001} we omit further details (the fact that $k(x)$ is not constant in our case is not a problem and strong convexity is not relevant here). $
\Box$
We now define for each $x\in\Omega$ a quadratic form $\Gamma(x , \cdot)$ in ${\mathbb{C}}^6$ by
\begin{eqnarray*}
&&\Gamma(x,p)=\\
&&\hspace{-0.5cm}=\left\{\begin{array}{lll}{(Q+1)|p_1|^2+(Q+1)|p_2|^2-Q|p_3|^2-2Q|p_4|^2-}&{}&{}\\[0.1cm]
{\hspace{3cm}-2Q|p_5|^2-\frac{Q(3-Q)^2}{(1+Q)^2}|p_6|^2,}&&{\hspace{-0.3cm}\mbox{if }-1<Q(x)<0,}\\[0.3cm]
{\frac{3-Q}{3}|p_1|^2+\frac{3-Q}{3}|p_2|^2+\frac{Q}{3}|p_1+p_2|^2+\frac{4Q}{3}|p_3|^2,}&{}&
{\hspace{-0.3cm}\mbox{if }0\leq Q(x)\leq3,}\\[0.3cm]
{2(Q-3)|p_1|^2+|p_2|^2 +2(Q-1)|p_3|^2+2\frac{Q-3}{Q-1}(Q+1)(Q^2+3)|p_4|^2,}&&{\hspace{-0.3cm}\mbox{if }Q(x)>3.}
\end{array}
\right.
\end{eqnarray*}
for any $p=(p_1,\ldots,p_6)\in{\mathbb{C}}^6$.
Clearly $\Gamma(x , \cdot)$ is positive semidefinite for each $x\in\Omega$. We denote by $\Gamma(x,\cdot,\cdot)$ the corresponding sesquilinear form in ${\mathbb{C}}^6$, that is $\Gamma(x, p ,q)$ is given by a formula similar to the one above with each $|p_k|^2$ being replaced by $p_k\overline{q_k}$.
Next, for any $x\in\Omega$ and $\xi,\eta\in{\mathbb{R}}^2$ we define a vector $p_{x,\xi,\eta}\in{\mathbb{R}}^6$ by
\begin{eqnarray*}
&&p_{x,\xi,\eta}=\\
&& \left\{
\begin{array}{l}
{\Big(\alpha^{1/2}[\xi_1^2-\frac{3-Q}{1+Q}\eta_1^2],\,\gamma^{1/2}[\xi_2^2-\frac{3-Q}{1+Q}\eta_2^2],\,\alpha^{1/2}\xi_1^2-\gamma^{1/2}\xi_2^2,\,\alpha^{1/2}\xi_1\eta_1+\gamma^{1/2}\xi_2\eta_2,}\\
{\qquad \alpha^{1/4}\gamma^{1/4}(\xi_1\eta_2+\xi_2\eta_1),\,\alpha^{1/2}\eta_1^2-\gamma^{1/2}\eta_2^2\Big),}
\hspace{3.3cm}{\mbox{ if } -1<Q(x)<0,} \\[0.2cm]
{\Big(\alpha^{1/2}[\xi_1^2-3\eta_1^2],\,\gamma^{1/2}[\xi_2^2-3\eta_2^2],\,\alpha^{1/4}\gamma^{1/4}[\xi_1\xi_2-3\eta_1\eta_2],\,0,\,0,\,0\Big),}\hspace{0.5cm}{\mbox{ if } 0\leq Q(x)\leq3,} \\[0.2cm]
{\Big(\alpha^{1/2}\xi_1\eta_1-\gamma^{1/2}\xi_2\eta_2,\,\alpha^{1/2}(\xi_1^2-Q \eta_1^2)+\gamma^{1/2}(\xi_2^2-Q\eta_2^2),}\\
{\qquad\alpha^{1/4}\gamma^{1/4}[\xi_1\xi_2-\frac{Q+3}{Q-1}\eta_1\eta_2],\,\alpha^{1/4}\gamma^{1/4}\eta_1\eta_2,\,0,\,0\Big),}
\hspace{2.45cm} {\mbox{ if }Q(x)>3.}
\end{array}
\right.
\end{eqnarray*}
A crucial property of the form $\Gamma(x,\cdot)$ and the vectors $p_{x,\xi,\eta}$ is that
\begin{equation}
S(x;\xi,\xi,\eta)=\Gamma(x,p_{x,\xi,\eta},p_{x,\xi,\eta}),
\label{s:g}
\end{equation}
for all $x\in\Omega$ and $\xi,\eta\in{\mathbb{R}}^2$; this is an immediate consequence of relations (\ref{relation1}), (\ref{relation2})
and (\ref{relation3}), for each of the three cases respectively.
We next define a quadratic form $\Gamma_{\lambda\varphi}(\cdot)$ on $H^2_0(\Omega)$ by
\[
\Gamma_{\lambda\varphi}(u)=\frac{1}{(2\pi)^{2}}\iiint_{\Omega\times{\mathbb{R}}^2\times{\mathbb{R}}^2}\Gamma(x, \, p_{x,\xi,\lambda\nabla\varphi},p_{x,\xi',\lambda\nabla\varphi})e^{i(\xi-\xi')\cdot x}\hat{u}(\xi)\overline{\hat{u}(\xi')}\,d\xi\,d\xi'\,dx.
\]
We then have
\begin{lemma}
Assume that the functions $\alpha(x),\beta(x),\gamma(x)$ are Lipschitz continuous. Then the difference $S_{\lambda\varphi}(\cdot)-\Gamma_{\lambda\varphi}(\cdot)$ belongs to ${\cal L}$.
\label{lem3}
\end{lemma}
{\em Proof.} We consider the difference
\[
S(x,\xi,\xi',\eta)-\Gamma(x,p_{x,\xi,\eta},p_{x,\xi',\eta}),
\]
and we group together terms that have the property that if we set $\xi'=\xi$ then they are similar as monomials of the variables
$\xi$ and $\eta$. Due to ({\rm Re}\;f{s:g}) one can use integration by parts to conclude that the total contribution of each such
group belongs to ${\cal L}$. We shall illustrate this for one particular group, the one consisting of terms
which for $\xi=\xi'$ involve the term $\xi_1^2\eta_1^2$.
The terms of this group from $S(x,\xi,\xi',\eta)$ add up to
\[
-\alpha(x)\eta_1^2(\xi_1^2+\xi_1'^2+4\xi_1\xi_1').
\]
The corresponding terms of $\Gamma(x,p_{x,\xi,\eta},p_{x,\xi',\eta})$ are
\[
\tarr{\alpha(x)\eta_1^2\big[(Q(x)-3)(\xi_1^2+\xi_1'^2)-2Q(x)\xi_1\xi_1'\big],}{\mbox{ if }Q(x)<0,}
{-3\alpha(x)\eta_1^2(\xi_1^2+\xi_1'^2),}{\mbox{ if }0\leq Q(x)\leq3,}
{\alpha(x)\eta_1^2\big[-Q(x)(\xi_1^2+\xi_1'^2)+2(Q(x)-3)\xi_1\xi_1'\big],}{\mbox{ if }Q(x)>3.}
\]
Hence the difference of these terms in $S(x,\xi,\xi',\eta)-\Gamma(x,p_{x,\xi,\eta},p_{x,\xi',\eta})$ is
\[
\tarr{\alpha(x)\eta_1^2\big[(2-Q(x))(\xi_1^2+\xi_1'^2)+(2Q(x)-4)\xi_1\xi_1'\big],}{\mbox{ if }Q(x)<0,}
{\alpha(x)\eta_1^2\big[2(\xi_1^2+\xi_1'^2)-4\xi_1\xi_1'\big],}{\mbox{ if }0\leq Q(x)\leq3,}
{\alpha(x)\eta_1^2\big[(Q(x)-1)(\xi_1^2+\xi_1'^2)+(2-2Q(x))\xi_1\xi_1'\big],}{\mbox{ if } Q(x)>3.}
\]
This can also be written as $\alpha(x)\eta_1^2 R(x)(\xi_1^2+\xi_1'^2-2\xi_1\xi_1')$ where
\[
R(x)=\tarr{2-Q(x),}{\mbox{ if }Q(x)<0,}{2,}{\mbox{ if }0\leq Q(x)\leq3,}{Q(x)-1,}{\mbox{ if }Q(x)>3.}
\]
Inserting this in the triple integral and recalling that $\eta=\lambda\nabla\varphi$ we obtain that the contribution of the above terms in the difference $S_{\lambda\varphi}(u)-\Gamma_{\lambda\varphi}(u)$ is
\begin{eqnarray*}
&&(2\pi)^{-2}\iiint_{\Omega\times{\mathbb{R}}^2\times{\mathbb{R}}^2}\alpha(x)R(x)(\xi_1^2+\xi_1'^2-2\xi_1\xi_1')\lambda^2\varphi_{x_1}^2e^{i(\xi-\xi')\cdot x}\hat{u}(\xi)\overline{\hat{u}(\xi')}\,d\xi\,d\xi'\,dx\\
&=&\lambda^2\int_{\Omega}\alpha(x)R(x)\varphi_{x_1}^2(-u_{x_1x_1}\overline{u}-u\overline{u}_{x_1x_1}-2|u_{x_1}|^2)dx\\
&=&-\lambda^2\int_{\Omega}\alpha(x)R(x)\varphi_{x_1}^2( u_{x_1}\overline{u}+u\overline{u}_{x_1})_{x_1}dx.
\end{eqnarray*}
Since the function $\alpha(x)R(x)\varphi_{x_1}^2$ is Lipschitz continuous we can integrate by parts and conclude that the last integral belongs to ${\cal L}$. Similar considerations are valid for the other groups; we omit further details. $
\Box$
\begin{lemma}
Assume that the functions $\alpha(x),\beta(x),\gamma(x)$ are Lipschitz continuous. Let $M>0$ be given. Then for any $\varphi\in{\cal E}_{A,M}$ and $\lambda>0$ we have
\[
{\rm Re}\;\,Q_{\lambda\varphi}(u)\geq-k^*\lambda^4\,\|u\|_2^2+T(u),
\]
for a form $T\in{\cal L}$ and all $u\in H^2_0(\Omega)$.
\label{lemma6}
\end{lemma}
{\em Proof.} The fact that $\varphi\in{\cal E}_{A,M}$ implies that $A(x,\nabla\varphi(x))\leq1$ for all $x\in\Omega$. Hence using Lemmas {\rm Re}\;f{lem2}, {\rm Re}\;f{lem:4} and {\rm Re}\;f{lem3} we obtain
\begin{equation}an
{\rm Re}\;\,Q_{\labelmbda\varphi}(u)&=&-\int_\Omega k(x)A(x,\labelmbda\nabla\varphi)\,|u|^2\,dx+\Gamma_{\labelmbda\varphi}(u)+T(u)\\
&\geq&-k^*\labelmbda^4\int_\Omega|u|^2\,dx+\Gamma_{\labelmbda\varphi}(u)+T(u),
\end{equation}an
for some form $T\in{\cal L}$ and all $u\in H^2_0(\Omega)$. Moreover
\begin{eqnarray*}
\Gamma_{\lambda\varphi}(u)&=&\frac{1}{(2\pi)^{2}}\iiint_{\Omega\times{\mathbb{R}}^2\times{\mathbb{R}}^2}\Gamma(x, \, p_{x,\xi,\lambda\nabla\varphi},p_{x,\xi',\lambda\nabla\varphi})e^{i(\xi-\xi')\cdot x}\hat{u}(\xi)\overline{\hat{u}(\xi')}\,d\xi\,d\xi'\,dx\\
&=&\frac{1}{(2\pi)^{2}}\int_{\Omega}\Gamma\Big(x , \,\int_{{\mathbb{R}}^2}e^{i\xi\cdot x}\hat{u}(\xi)p_{x,\xi,\lambda\nabla\varphi}d\xi,\int_{{\mathbb{R}}^2}e^{i\xi'\cdot x}\hat{u}(\xi')p_{x,\xi',\lambda\nabla\varphi}d\xi'\Big)\,dx\\
&\geq&0,
\end{eqnarray*}
by the positive definiteness of $\Gamma$; the result follows.$
\Box$
We can now prove Theorem \ref{thm1}. We first consider the case where the coefficients of $H$ are Lipschitz continuous. For the general case
we shall then use the fact that Lemma \ref{lem:jde} is stable under $L^{\infty}$ perturbation of the coefficients.
{\bf\em Proof of Theorem \ref{thm1}} {\em Part 1.} We assume that the functions $\alpha(x),\beta(x),\gamma(x)$ are Lipschitz continuous. We claim that for any $\epsilon$ and $M$ positive there exists $c_{\epsilon,M}$ such that
\begin{equation}
{\rm Re}\,Q_{\lambda\varphi}(u)\geq-\Big\{(k^*+\epsilon)\lambda^4+c_{\epsilon,M}(1+\lambda^3)\Big\}\|u\|_2^2,
\label{claim}
\end{equation}
for all $\lambda>0$ and $\varphi\in{\cal E}_{A,M}$.
To prove this we first note (cf. \cite[Lemma 7]{b2001}) that any form $T\in{\cal L}$ satisfies
\[
|T(u)|\leq\epsilon Q(u)+c_{\epsilon}(1+\lambda^3)\,\|u\|_2^2,
\]
for all $\epsilon\in(0,1)$, $\lambda>0$ and $u\in H^2_0(\Omega)$. Hence Lemma \ref{lemma6} implies
\begin{equation}
\label{gui1}
{\rm Re}\,Q_{\lambda\varphi}(u)\geq-\Big\{k^*\lambda^4+c_{\epsilon,M}(1+\lambda^3)\Big\}\|u\|_2^2-\epsilon Q(u).
\end{equation}
Now, from (\ref{fil}) we have that for any $\psi\in{\cal E}$ there holds
\[
\big| Q(u)-Q_{\psi}(u) \big|\leq\frac{1}{2}Q(u)+c\{\|\psi\|_{W^{2,\infty}}+\|\psi\|_{W^{2,\infty}}^4\}\|u\|_2^2.
\]
Taking $\psi=\lambda\varphi$ where $\lambda>0$ and $\varphi\in{\cal E}_{A,M}$ we thus obtain
\begin{equation}
\big| Q(u)-Q_{\lambda\varphi}(u) \big| \leq\frac{1}{2}Q(u)+c_M(\lambda+\lambda^4)\|u\|_2^2.
\label{fil1}
\end{equation}
Now, the coefficients of $\lambda^4$ in the expansion of $Q_{\lambda\varphi}$ only involve first derivatives of $\varphi$. Since $|\nabla\varphi|\leq c$ for all $\varphi\in{\cal E}_{A,M}$, (\ref{fil1}) can be improved to
\[
\big|Q(u)-Q_{\lambda\varphi}(u) \big|\leq\frac{1}{2}Q(u)+\big\{c_M(\lambda+\lambda^3)+c\lambda^4\big\}\|u\|_2^2,
\]
which in turn implies
\begin{equation}
Q(u)\leq\, 2{\rm Re}\,Q_{\lambda\varphi}(u)+\big\{c_M(\lambda+\lambda^3)+c\lambda^4\big\}\|u\|_2^2.
\label{last}
\end{equation}
Let $u\in H^2_0(\Omega)$ be given. If ${\rm Re}\, Q_{\lambda\varphi}(u)\geq0$ then (\ref{claim}) is obviously true. If not we then have from (\ref{gui1})
and (\ref{last})
\begin{eqnarray*}
{\rm Re}\,Q_{\lambda\varphi}(u)&\geq&-\Big\{k^*\lambda^4+c_{\epsilon}(1+\lambda^3)\Big\}\|u\|_2^2-2\epsilon\, {\rm Re}\, Q_{\lambda\varphi}(u)-\epsilon\big\{c_M(\lambda+\lambda^3)+c\lambda^4\big\}\|u\|_2^2\\
&\geq&-\Big\{(k^*+c\epsilon)\lambda^4+c_{\epsilon}(1+\lambda^3)+\epsilon\big\{c_M(\lambda+\lambda^3)+c\lambda^4\big\}\Big\}\|u\|_2^2,
\end{eqnarray*}
and (\ref{claim}) again follows; hence the claim has been proved.
We complete the standard argument; Lemma \ref{lem:jde} and (\ref{claim}) imply
\[
|G(x,x',t)|<c_\epsilon t^{-1/2}\exp\Big\{\lambda[\varphi(x)-\varphi(x')]+(1+\epsilon)\big\{(k^*+\epsilon)\lambda^4+c_{\epsilon,M}(1+\lambda^3)\big\}\,t\Big\},
\]
for all $\epsilon\in(0,1)$. Optimizing over $\varphi\in{\cal E}_{A,M}$ yields
\[
|G(x,x',t)|<c_\epsilon t^{-1/2}\exp\Big\{-\lambda d_M(x,x')+(1+\epsilon)\{(k^*+\epsilon)\lambda^4+c_{\epsilon,M}(1+\lambda^3)\}\,t\Big\} .
\]
Finally choosing
$
\lambda=[d_M(x,x')/(4k^*t)]^{1/3},
$
we have
\[
-\lambda d_M(x,x')+k^*\lambda^4t =-\sigma_*\frac{d_M(x,x')^{4/3}}{t^{1/3}},
\]
and (\ref{eq:mainthm}) follows.
{\em Part 2.} We now consider the general case where the functions $\alpha(x),\beta(x),\gamma(x)$ are not Lipschitz continuous.
Then there exist Lipschitz functions $\tilde\alpha(x),\tilde\beta(x),\tilde\gamma(x)$ such that (cf. (\ref{D}))
\[
\max\{\|\alpha-\tilde\alpha\|_{\infty},\;\|\beta-\tilde\beta\|_{\infty},\;\|\gamma-\tilde\gamma\|_{\infty}\}<2D.
\]
We assume that $D$ is small enough so that the corresponding operator $\tilde H$ is elliptic; we shall use a tilde to denote the various entities associated to $\tilde H$.
Given $\varphi\in{\cal E}_{\tilde{A},M}$ and $\lambda>0$ it follows from the first part of the proof that
\begin{equation}
\label{gui5}
{\rm Re}\,\tilde{Q}_{\lambda\varphi}(u)\geq-\Big\{\tilde{k}^*\lambda^4+c_{\epsilon}(1+\lambda^3)\Big\}\|u\|_2^2-\epsilon Q(u).
\end{equation}
Moreover it is easily seen that
\begin{equation}
|k^*-\tilde{k^{*}}| \leq cD\;,\qquad \big| Q_{\lambda\varphi}(u)-\tilde{Q}_{\lambda\varphi}(u) \big| \leq cD\big\{Q(u)+\lambda^4\|u\|_2^2\big\}.
\label{gui6}
\end{equation}
From (\ref{gui5}) and (\ref{gui6}) we obtain
\[
{\rm Re}\,Q_{\lambda\varphi}(u)\geq-\Big\{(k^*+cD)\lambda^4+c_{\epsilon}(1+\lambda^3)\Big\}\|u\|_2^2-\epsilon Q(u).
\]
As in Part 1, this leads to a Gaussian estimate involving the constant $\sigma_*-cD$ and the distance $\tilde{d}_M(x,x')$. To replace
$\tilde{d}_M(x,x')$ by $d_M(x,x')$ we note that there exists $c>0$ such that if $\varphi\in{\cal E}_{\tilde{A},M}$ then $(1+cD)^{-1}\varphi\in {\cal E}_{A,M}$. This implies that $\tilde{d}_M(x,x')\geq(1+cD)^{-1}d_{M}(x,x')$, which completes the proof of the theorem.$
\Box$
\section{Short time asymptotics}
\label{sec:asympt}
In this section we study the short time asymptotic behavior of the Green function $G(x,t)$ of the constant coefficient equation
\begin{equation}
u_t=-\big( \partial_{x_1}^4+2\beta\partial_{x_1}^2\partial_{x_2}^2+\partial_{x_2}^4\big)u,\;\qquad x\in{\mathbb{R}}^2\;,\quad t>0.
\label{me}
\end{equation}
(The slightly more general case where we have $\alpha\partial_{x_1}^4+2\beta\partial_{x_1}^2\partial_{x_2}^2+\gamma\partial_{x_2}^4$
is easily reduced to (\ref{me}).) The symbol of the elliptic operator is
\[
A(\xi) =\xi_1^4 +2\beta \xi_1^2\xi_2^2 +\xi_2^4 \; , \quad \xi \in{\mathbb{R}}^2,
\]
and it is strongly convex if and only if $0 < \beta < 3$.
Theorem \ref{thm2} below implies the sharpness of the constant $\sigma_*$ of Theorem \ref{thm1}, but it is interesting on its own.
As already mentioned, the behavior when $\beta\leq 0$ or $\beta\geq 3$ is qualitatively different from that of the case $0<\beta <3$
studied in \cite{ep}. The borderline cases $\beta=0,3$ are particularly interesting.
The Green's function for equation (\ref{me}) is given by
\begin{equation}
G(x,t)=\frac{1}{(2\pi)^2}\int_{{\mathbb{R}}^2}e^{i\,\xi\cdot\,x-t\,A(\xi)}\,d\xi,\qquad x\in{\mathbb{R}}^2\, ,\; t>0.
\label{gf}
\end{equation}
As already noted in the Introduction, we only consider specific points $x$: points lying on any coordinate axis when $\beta\geq 3$ and points lying on any main bisector when $\beta \leq 0$; due to symmetries this amounts to points of the form
$(s,0)$ and $(s,s)$ respectively. This choice is related to Lemma \ref{lem1}: in each of the two cases the respective point $\eta$ (i.e. $\eta =(s,0)$ or $\eta =(s,s)$) is a point for which there exists $\xi\in{\mathbb{R}}^2$ so that (\ref{eq1}) becomes an equality.
Moreover, for these points the explicit computation of the distance to the origin is possible; see also Subsection \ref{nss} below.
The main result of this section is the following
\begin{theorem}
For any $s>0$ the following asymptotic formulae are valid as $t\to 0+$:
\begin{eqnarray*}
& ({\rm i}) & \mbox{ If $-1<\beta<0$ and $x=(s,s)$ then} \\
&& G(x ,t) \sim \frac{1}{3^{1/2}\cdot 4^{1/3}\, \pi} \frac{(1-\beta)^{1/6}}{(3-\beta)^{1/2}(1+\beta)^{1/6}}s^{-2/3} t^{-1/3}
\exp\Big( -\frac{3}{ 4^{4/3}}\big( \frac{1+\beta}{1-\beta}\big)^{1/3}\frac{s^{4/3}}{t^{1/3}} \Big) \\[0.2cm]
& ({\rm ii}) & \mbox{ If $\beta=0$ and $x=(s,s)$ then} \\
&& G(x ,t) \sim \frac{1}{3 \cdot 4^{1/3}\, \pi} s^{-2/3} t^{-1/3}
\exp\Big( -\frac{3}{ 4^{4/3}} \frac{s^{4/3}}{t^{1/3}} \Big) \cdot
\Big( 1+ \cos\Big[ \frac{3\sqrt{3}}{ 4^{4/3}} \frac{s^{4/3}}{t^{1/3}} -\frac{\pi}{3} \Big] \Big)\\[0.2cm]
& ({\rm iii}) & \mbox{ If $\beta=3$ and $x=(s,0)$ then} \\
&& G (x,t )\sim
\frac{1}{3 \cdot 4^{1/3} \, \pi} s^{-2/3}t^{-1/3} \exp\Big( -\frac{3}{8\cdot 4^{1/3}}\frac{s^{4/3}}{t^{1/3}}\Big) \cdot
\Big( 1+ \cos\Big[ \frac{3\sqrt{3}}{8\cdot 4^{1/3}}\frac{s^{4/3}}{t^{1/3}} -\frac{\pi}{3} \Big] \Big) \\[0.2cm]
& ({\rm iv}) & \mbox{ If $\beta>3$ and $x=(s,0)$ then} \\
&& G(x,t) \sim \frac{1}{ 2^{7/6} \cdot 3^{1/2}\, \pi} \beta^{-1/2}(\beta^2-1)^{1/6}s^{-2/3} t^{-1/3}
\exp\Big( -\frac{3}{4^{4/3}} (\beta^2-1)^{-1/3}\frac{s^{4/3}}{t^{1/3}} \Big)
\end{eqnarray*}
\label{thm2}
\end{theorem}
{\bf Remark.} Clearly the notation $F(\lambda)\sim G(\lambda)$ cannot have here the usual meaning $F(\lambda) =G(\lambda)( 1+o(\lambda))$, as the function $G$ takes also the value zero.
By looking at the proof below it becomes clear that the actual meaning of
\[
F(\lambda ) \sim e^{A \lambda} \lambda^D \big[1+ \cos(B\lambda +C) \big] \; , \quad \mbox{ as }\lambda \to +\infty,
\]
is that
\[
F(\lambda ) = e^{A \lambda} \lambda^D \Big( [1+ \cos(B\lambda +C) ]+o(1) \Big) \; , \quad \mbox{ as }\lambda \to +\infty.
\]
\subsection{Some comments on the distance $d(x,x')$}
\label{nss}
Before proceeding with the proof of Theorem \ref{thm2} we make some comments on the distance $d(x,x')$ defined by (\ref{fd}). First, we recall that a Finsler metric on a domain $\Omega\subset{\mathbb{R}}^n$ is a map $p:\Omega \times {\mathbb{R}}^n \to {\mathbb{R}}_+$ whose regularity with respect to $x\in\Omega$ may vary and which has the following properties
\begin{eqnarray*}
&({\rm i}) & p(x,\xi)=0 \mbox{ if and only if }\xi =0 \\
&({\rm ii}) & p(x,\lambda\xi)= |\lambda| p(x,\xi) \; , \quad \lambda\in{\mathbb{R}} \\
& ({\rm iii}) & p(x,\xi) \mbox{ is convex with respect to }\xi
\end{eqnarray*}
Given a Finsler metric on $\Omega$ the dual metric $p_*$ is defined by
\begin{equation}
p_*(x,\eta) = \max_{\xi \neq 0} \frac{ \eta\cdot \xi}{ p(x,\xi)} \; , \qquad x\in\Omega \; , \;\;\; \eta\in{\mathbb{R}}^2 \, .
\label{pstar}
\end{equation}
This is also a Finsler metric and there holds $p_{**}=p$. Having a Finsler metric one can define lengths of paths and hence the (geodesic) distance between points.
We now return to our specific case. The map
\[
p(x, \xi) =A(x, \xi)^{1/4} \; , \quad x\in\Omega \, , \;\; \xi\in{\mathbb{R}}^2,
\]
satisfies properties (i) and (ii) above but not (iii). Nevertheless the dual metric $p_*$ can still be defined by (\ref{pstar}). Since it is convex (being the supremum of linear functions) it is a Finsler metric. Clearly $p_{**}(x,\xi)$ does not coincide with $p(x,\xi)$ in this case. Actually, there holds $p_{**}(x,\xi)\leq p(x,\xi)$; indeed it may be seen that the set $\{ \xi : p_{**}(x,\xi) \leq 1 \}$ is precisely the convex hull
of the set $\{ \xi : p(x,\xi) \leq 1\}$.
Now, the (geodesic) Finsler distance $d_{p_*}(x,x')$ induced by $p_*$ satisfies \cite[Lemma 1.3]{agmon}
\[
d_{p_*}(x,x') = \sup \{ \varphi(x') -\varphi(x) \; : \; \varphi\in{\rm Lip}(\Omega) , \;\; p_{**}( y ,\nabla\varphi(y))\leq 1 \, , \;\; \mbox{ a.e. } y\in\Omega\}.
\]
Since $p_{**}\leq p$ this implies $d(x,x')\leq d_{p_*}(x,x')$. We shall now see that this does not spoil the sharpness of the constant $\sigma_*$ of Theorem \ref{thm1}.
Let us restrict from now on our attention to the constant coefficient case.
By translation invariance we have $d(x,x') =d_0(x-x')$ where
\begin{equation}
d_0(x) = \sup \{ \varphi(x) \; : \; \varphi\in{\rm Lip}({\mathbb{R}}^2) , \;\; \varphi(0)=0 \; , \;\; A( y ,\nabla\varphi(y))\leq 1 \, , \;\; \mbox{ a.e. } y\in{\mathbb{R}}^2\}.
\label{fd0}
\end{equation}
We then have
\begin{equation}
d_0(x) =p_*(x) \; , \qquad x\in{\mathbb{R}}^2.
\label{lolo}
\end{equation}
Indeed, given a function $ \varphi$ as in (\ref{fd0}) we have
\[
\varphi(x) =\int_0^1 \frac{d}{dt} \varphi(tx) dt = \int_0^1 \nabla\varphi(tx)\cdot x \, dt \leq \int_0^1 A\big(\nabla\varphi(tx)\big)^{1/4} p_*(x) \, dt \leq p_*(x) \, ,
\]
hence $d_0(x)\leq p_*(x)$. For the converse, let $\xi\in{\mathbb{R}}^2\setminus\{0\}$ be given. The function
\[
\varphi(y) = \frac{\xi\cdot y}{ A(\xi)^{1/4} } \; \; , \qquad y\in{\mathbb{R}}^2,
\]
then satisfies $A(\nabla \varphi(y))=1$ and therefore can be used as a test function in (\ref{fd0}).
Hence
\[
\frac{\xi\cdot x}{ A(\xi)^{1/4}} =\varphi(x) \leq d_0(x),
\]
and maximizing over $\xi$ yields $p_*(x)\leq d_0(x)$.
Now, it is immediate from (\ref{pstar}) that
\begin{equation}
p_*(x) \geq \frac{|x|^2}{p(x)} \; , \qquad x\in{\mathbb{R}}^2\setminus\{0\}.
\label{di}
\end{equation}
We shall need to identify the points $x\in{\mathbb{R}}^2$ for which (\ref{di}) becomes an equality. By homogeneity it is enough to consider points of unit Euclidean length.
Let us write $e_{\phi}=(\cos\phi, \sin\phi)$. We are then seeking directions $\phi$ for which
\[
\frac{1}{ A(e_{\phi})^{1/4} } =\max_{\theta\in{\mathbb{R}}} \frac{ e_{\phi}\cdot e_{\theta}}{ A(e_{\theta})^{1/4} } .
\]
So let $\phi\in [0,2\pi]$ be fixed and set
\[
g(\theta) = \frac{ e_{\phi}\cdot e_{\theta}}{ A(e_{\theta})^{1/4} }= \frac{\cos(\phi-\theta)}{ \Big( 1 +\frac{\beta -1}{2}\sin^2 2\theta \Big)^{1/4}} .
\]
Then
\[
g'(\theta) = A(e_{\theta})^{-1/4} \sin (\theta -\phi) - \frac{\beta-1}{4} A(e_{\theta})^{-5/4} \cos(\theta-\phi) \, \sin 4\theta \, .
\]
It follows that $g'(\phi)=0$ if and only if $ \sin 4\phi =0$, i.e. if and only if $\phi$ is an integer multiple of $\pi/4$. This corresponds exactly to the points considered in Theorem \ref{thm2} and hence for these points inequality (\ref{di})
holds as an equality. In particular, recalling (\ref{lolo}) we have
\[
d_0(s,0) = s
\]
and
\[
d_0(s,s) = \frac{2s^2}{ (2s^4 +2\beta s^4)^{1/4}} = 2^{3/4} (1+\beta)^{-1/4}s \, .
\]
Suppose now that $\beta\geq 3$. We then have (cf. (\ref{sx})) $\sigma = 3 \cdot 4^{-4/3} (\beta^2-1)^{-1/3}$ (recall that $Q=\beta$ for equation (\ref{me})).
This proves the sharpness of the constant $\sigma_*$ in Theorem \ref{thm1} in the regime $Q\geq 3$.
Similarly, if $-1< \beta \leq 0$ then (cf. (\ref{sx}))
\[
\sigma = \frac{3}{2 \cdot 4^{4/3}} \frac{(1+\beta)^{2/3}}{(1-\beta)^{1/3}}
\]
and therefore
\[
\exp\Big( -\frac{3}{4^{1/3}}\big( \frac{1+\beta}{1-\beta}\big)^{1/3}\frac{s^{4/3}}{t^{1/3}} \Big) =\exp\Big(-\sigma_* \frac{d_0(s,s)^{4/3}}{t^{1/3}}\Big).
\]
Hence the constant $\sigma_*$ is sharp also in the regime $-1< Q\leq 0$.
\subsection{Proof of Theorem {\rm Re}\;f{thm2}}
Changing variables in (\ref{gf}) by $\xi=(4t)^{-1/3} \eta$ we obtain
\begin{equation}
G(x,t)=\frac{1}{(2\pi)^2 (4t)^{2/3}}\, F\Big( \frac{1}{(4t)^{1/3}}\Big),
\label{cv}
\end{equation}
where
\[
F(\lambda)=\int_{{\mathbb{R}}^2}e^{ \lambda\big( i\, x\cdot\,\xi-\frac{1}{4}A(\xi) \big)}\,d\xi .
\]
To find the asymptotic behavior of $F(\lambda)$ as $\lambda\to +\infty$ we shall use the method of steepest descent. So we shall consider the complex analytic function of two variables, $z=(z_1,z_2)$,
\[
\phi(z)=i\,x\cdot z-\frac{1}{4}A(z) =i(x_1z_1 + x_2z_2) -\frac{1}{4}(z_1^4+2\beta z_1^2z_2^2 +z_2^4),
\]
and shall use Cauchy's theorem for functions of two variables to suitably deform ${\mathbb{R}}^2\subset{\mathbb{C}}^2$ to some other surface in ${\mathbb{C}}^2$ that will contain the saddle points of $\phi$ that actually contribute to the asymptotic behavior of $F(\lambda)$.
For our purposes it is enough to consider deformations that are parallel transports by a point in $i{\mathbb{R}}^2$. Indeed, it easily follows by Cauchy's theorem that for any $\eta_0\in{\mathbb{R}}^2$ we have
\[
F(\lambda) =\int_{{\mathbb{R}}^2 +i\eta_0} e^{\lambda \phi(z)}dz.
\]
The main issue is to identify the relevant saddle points and (hence) the vector $\eta_0$. What is of importance here is the real part
${\rm Re}\, \phi(z)$ -- also called the height of $\phi(z)$.
The relevant saddle points are not necessarily those of the largest height, but rather, they are those for which there exists a deformation such that the largest height on it is attained at those points.
Concerning the notation, we shall write each $z\in{\mathbb{C}}^2$ as $z=(z_1,z_2)$ but also as
$z =\xi + i\eta$ with $\xi =(\xi_1,\xi_2)\in{\mathbb{R}}^2$ and $\eta =(\eta_1,\eta_2)\in{\mathbb{R}}^2$. Finally we note that it is enough to prove the asymptotic formulae
in case $s=1$, since the general case then follows from the relation
\[
G(s\, x, t) =\frac{1}{s^2} \, G(x, \frac{t}{s^4}) \; , \qquad t,s>0 \, , \;\; x\in{\mathbb{R}}^2.
\]
\subsubsection{The case $-1 < \beta\leq 0$}
In this case we have $x=(1,1)$. Two saddle points that are relevant are the points
\[
z_0^{\pm} = \pm \xi_0 +i\eta_0
\]
where
\[
\xi_0 = \frac{1}{2} \frac{(3-\beta)^{1/2}}{(1+\beta)^{1/6}(1-\beta)^{1/3}} (1,-1) \; ,
\qquad\quad
\eta_0 = \frac{1}{2} \Big( \frac{1+\beta}{1-\beta} \Big)^{1/3} (1,1) \, .
\]
We deform ${\mathbb{R}}^2$ by $i\eta_0$ and have
\[
F(\lambda) =\int_{{\mathbb{R}}^2+i\eta_0} e^{\lambda\phi(z)}dz \, .
\]
{\bf Case 1. $-1<\beta<0$.} In this case the saddle points that contribute are precisely the points $z_0^{\pm}$.
We claim that
\begin{equation}
{\rm Re}\, \phi(z) \leq {\rm Re}\, \phi(z_0^+) \; , \qquad z\in{\mathbb{R}}^2+i\eta_0,
\label{fi1}
\end{equation}
with equality exactly at the points $z_0^{\pm}$. To prove (\ref{fi1}) we note that it is equivalently written as
\[
{\rm Re}\, A(z) \geq {\rm Re}\, A(z_0^+) = -\Big( \frac {1+\beta}{1-\beta} \Big)^{1/3} ,
\]
so it is enough to establish that
\[
{\rm Re}\, A(\xi +i\eta_0) + \Big( \frac {1+\beta}{1-\beta} \Big)^{1/3} \geq 0 \; , \qquad \xi\in{\mathbb{R}}^2.
\]
This is indeed true, as a direct computation shows that
\begin{eqnarray}
{\rm Re}\, A(\xi +i\eta_0) + \Big( \frac {1+\beta}{1-\beta} \Big)^{1/3} & =& -\beta(\xi_1^2 -\xi_2^2)^2 +
(\beta +1) \bigg[ \Big( \xi_1^2 - \frac{3-\beta}{4(1+\beta)^{1/3}(1-\beta)^{2/3}}\Big)^2 \nonumber \\
&& + \Big( \xi_2^2 - \frac{3-\beta}{4(1+\beta)^{1/3}(1-\beta)^{2/3}}\Big)^2 \bigg] -\beta \Big( \frac{ 1+\beta}{1-\beta} \Big)^{2/3} (\xi_1+\xi_2)^2 \nonumber \\
&\geq & 0,
\label{la2}
\end{eqnarray}
[This is a scaled version of (\ref{relation1}) for $\eta =(1,1)$.]
Clearly equality holds only for the points $\xi=\pm \xi_0$, and these correspond to the points $z_0^{\pm}$.
Hence the claim has been proved.
This implies (see \cite[Criterion 1, page 15]{ep}) that the points $z_0^{\pm}$ are precisely those that contribute to the asymptotic behavior of $F(\lambda)$ as $\lambda\to +\infty$. Now, it is easy to see that for any $\delta>0$ the integrals
\[
\int_{D(z_0^{\pm} ,\delta) +i\eta_0}e^{\lambda \phi(z)}dz
\]
are complex conjugate of each other, hence the total contribution of these two points is equal to twice the real part of the contribution of $z_0^{+}$. Since these saddle
points are non-degenerate, the contribution of $z_0^+$ is given by the formula (see \cite[equation (3.6)]{ep} or \cite[equation (1.61)]{f})
\[
{\rm contr}(z_0^+) = \frac{2\pi}{\lambda} \big( {\rm det} (\phi_{z_iz_j})\big|_{z=z_0^+} \big) ^{-1/2} e^{\lambda \phi(z_0^+)}.
\]
We have
\[
\phi(z_0^+) =-\frac{3}{4}\Big( \frac{1+\beta}{1-\beta}\Big)^{1/3} \; , \qquad
{\rm det} (\phi_{z_iz_j})|_{z=z_0^+} = \frac{3(3-\beta)(1+\beta)^{1/3}}{(1-\beta)^{1/3}},
\]
hence combining the above we conclude that
\begin{equation}
F(\lambda)\sim \frac{4\pi}{\lambda} \frac{(1-\beta)^{1/6}}{\sqrt{3} (3-\beta)^{1/2}(1+\beta)^{1/6}}
\exp\Big( - \frac{3}{4}\big( \frac{1+\beta}{1-\beta} \big)^{1/3} \lambda \Big) , \qquad
\mbox{ as }\lambda \to +\infty.
\label{888}
\end{equation}
Recalling (\ref{cv}) concludes the proof in this case.
{\bf Case 2. $\beta=0$.} In this case $G(x,t)$ is the square of a one-dimensional integral; we prefer however to use the two-dimensional
approach because the setting is already prepared, but also because we believe that this conveys better the essential issues involved.
Relation (\ref{la2}) is also valid for $\beta=0$ in which case it is written
\[
{\rm Re}\, A(\xi +i\eta_0) + 1 =
\big( \xi_1^2 - \frac{3}{4}\big)^2 + \big( \xi_2^2 - \frac{3}{4}\big)^2 \geq 0 \, .
\]
The points $z_0^{\pm}$ considered above are saddle points also for $\beta=0$. The same computations as above are valid hence their contribution is (cf. (\ref{888}))
\[
{\rm contr}(z_0^+)+{\rm contr}(z_0^-) = \frac{4\pi}{3\lambda} \exp\big( -\frac{3}{4}\lambda\big).
\]
However in this case there are two more saddle points of $\phi$ that lie on ${\mathbb{R}}^2+i\eta_0$ and that must be considered, namely the points
\[
z_*^{\pm} =\pm\frac{\sqrt{3}}{2}(1,1) + i\eta_0 \, .
\]
For these points we find
\[
\phi(z_*^{\pm}) = -\frac{3}{4}\pm\frac{3\sqrt{3}}{4} i \; , \qquad {\rm det} (\phi_{z_iz_j})|_{z=z_*^{\pm}} = 9e^{2\pi i/3},
\]
and thus obtain the contribution
\[
{\rm contr}(z_*^+)+{\rm contr}(z_*^-) = \frac{4\pi}{3\lambda} \exp\big( -\frac{3}{4}\lambda\big)
\cos\Big( \frac{3\sqrt{3}}{4} \lambda -\frac{\pi}{3}\Big).
\]
Adding the contributions we arrive at
\begin{equation}
F(\lambda) \sim \frac{4\pi}{3\lambda} \exp\big( -\frac{3}{4}\lambda\big) \Big( 1+
\cos\Big( \frac{3\sqrt{3}}{4} \lambda -\frac{\pi}{3}\Big) \Big) ,
\label{last1}
\end{equation}
which concludes the proof by means of (\ref{cv}). $
\Box$
\subsubsection{The case $\beta\geq 3$}
In this case we have $x=(1,0)$. Two saddle points that are relevant in this case are the points
\[
z_0^{\pm}=(\beta^2-1)^{-1/3}[(0,\pm\sqrt{\beta})+i(1,0)] = \pm \xi_0 +i\eta_0 \, .
\]
As before, we have
\[
F(\lambda) =\int_{{\mathbb{R}}^2+i\eta_0} e^{\lambda\phi(z)}dz \, .
\]
{\bf Case 1. $\beta>3$.} In this case the relevant saddle points are precisely the points $z_0^{\pm}$. This will follow if we prove that
\begin{equation}
{\rm Re}\, \phi(z) \leq {\rm Re}\, \phi(z_0^+) \; , \qquad z\in{\mathbb{R}}^2+i\eta_0,
\label{fi}
\end{equation}
with equality exactly at the points $z_0^{\pm}$. To prove (\ref{fi}) we note that it is equivalently written as
\[
{\rm Re}\, A(z) \geq {\rm Re}\, A(z_0^+) = -(\beta^2-1)^{-1/3},
\]
so it is enough to establish that
\[
{\rm Re}\, A(\xi +i\eta_0) + (\beta^2-1)^{-1/3} \geq 0 \; , \qquad \xi\in{\mathbb{R}}^2.
\]
This is indeed true, as a direct computation shows that
\begin{eqnarray}
&& \hspace{-1.5cm}{\rm Re}\, A(\xi +i\eta_0) + (\beta^2-1)^{-1/3} \nonumber \\
&& =\Big(\xi_1^2 + \xi_2^2-\beta(\beta^2-1)^{-2/3} \Big)^2 +2(\beta-1)\xi_1^2\xi_2^2 + 2(\beta-3)(\beta^2-1)^{-2/3} \xi_1^2 \geq 0 .
\label{la}
\end{eqnarray}
[This is a scaled version of (\ref{relation3}) for $\eta =(1,0)$.] Equality holds only for the points
\[
\xi_0^{\pm} =\pm \big(0, \sqrt{\beta}(\beta^2-1)^{-1/3} \big)
\]
which correspond to the points $z_0^{\pm}$.
The two contributions are again complex conjugate of each other. We use again the relation
\[
{\rm contr}(z_0^+) = \frac{2\pi}{\lambda} \big( {\rm det} (\phi_{z_iz_j})\big|_{z=z_0^+} \big) ^{-1/2} e^{\lambda \phi(z_0^+)} ,
\]
and since
\[
\phi(z_0^+) = -\frac{3}{4}(\beta^2 -1)^{-1/3} \; , \qquad\quad {\rm det} (\phi_{z_iz_j})\big|_{z=z_0^+} =6\beta(\beta^2-1)^{-1/3},
\]
combining the above we obtain
\begin{equation}
F(\lambda) \sim \frac{4\pi}{\lambda} (6\beta)^{-1/2} (\beta^2-1)^{1/6} \exp\Big(-\frac{3}{4}(\beta^2-1)^{-1/3} \lambda\Big) \; , \qquad \mbox{ as }\lambda\to +\infty .
\label{that}
\end{equation}
The proof is concluded by using (\ref{cv}).
{\bf Case 2. $ \beta=3$.} Inequality (\ref{la}) is also true for $\beta=3$, in which case it takes the form
\[
{\rm Re}\, A(\xi +i\eta_0) + \frac{1}{2}
=\big(\xi_1^2 + \xi_2^2- \frac{3}{4} \big)^2 +4 \xi_1^2\xi_2^2 \geq 0 \, .
\]
In this case equality holds not only at the points $\xi_0^{\pm}$ but also at the points
\[
\xi_*^{\pm} =\pm \big( \frac{\sqrt{3}}{2} \, , \, 0 \big) .
\]
The corresponding points in ${\mathbb{C}}^2$ are the points
\[
z_*^{\pm} =\xi_*^{\pm} +i\eta_0.
\]
As before, the combined contribution of the points
$ z_*^{\pm}$ is twice the real part of the contribution of $ z_*^{+}$. We find
\[
\phi(z_*^+) =-\frac{3}{8} +\frac{3\sqrt{3}}{8} i \; , \qquad {\rm det} (\phi_{z_iz_j})|_{z=z_*^+} =9 e^{ 2 \pi i/3},
\]
hence using the same formula as above we obtain
\[
{\rm contr}(z_*^+)+{\rm contr}(z_*^-) = \frac{4\pi}{3\lambda} \exp \big( -\frac{3}{8} \lambda \big)
\cos\big( \frac{3\sqrt{3}}{8}\lambda +\frac{\pi}{3} \big).
\]
The contribution of the first two points $z_0^{\pm}$ is given by (\ref{that}) (for $\beta=3$); adding the two contributions we conclude that
\begin{equation}
F(\lambda) \sim \frac{4\pi}{3\lambda} \exp \big( -\frac{3}{8} \lambda \big)
\Big( 1+ \cos\big[ \frac{3\sqrt{3}}{8}\lambda -\frac{\pi}{3} \big] \Big) \;\; , \quad \mbox{ as }\lambda \to +\infty.
\label{that1}
\end{equation}
The proof is concluded by recalling (\ref{cv}). $
\Box$
The estimates (\ref{888}), (\ref{last1}), (\ref{that}) and (\ref{that1}) obtained in the proof above all have the form $F(\lambda)\sim G(\lambda)$ for some explicitly given function $G(\lambda)$.
In each of the diagrams below we have plotted the numerically computed graph of $F(\lambda)e^{\sigma \lambda}$
(blue, dashed) against the function $G(\lambda)e^{\sigma \lambda}$ (red, continuous),
where $\sigma$ is the positive constant in the exponential term of
$G(\lambda)$. We note that in the case $\beta=4$ the convergence is slower, but more detailed computations are in line with the difference being
of order $O(1/\lambda^2)$.
\begin{figure*}[h]
\centering
\begin{minipage}{.5\textwidth}
\centering
\includegraphics[width=1\linewidth]{minuspointfive.pdf}
\caption{$\beta=-0.5$, $x=(1,1)$}
\end{minipage}
\begin{minipage}{.5\textwidth}
\centering
\includegraphics[width=1\linewidth]{zero.pdf}
\caption{$\beta=0$, $x=(1,1)$}
\end{minipage}
\end{figure*}
\begin{figure*}[h]
\centering
\begin{minipage}{.5\textwidth}
\centering
\includegraphics[width=1\linewidth]{three.pdf}
\caption{$\beta=3$, $x=(1,0)$}
\end{minipage}
\begin{minipage}{.5\textwidth}
\centering
\includegraphics[width=1\linewidth]{four.pdf}
\caption{$\beta=4$, $x=(1,0)$}
\end{minipage}
\end{figure*}
{\bf \Large Appendix}
In this appendix we present the proof of Evgrafov and Postnikov \cite{ep} for the asymptotic behavior of $G(x,t)$ in the strongly convex case $0<\beta< 3$
and for any $x\in{\mathbb{R}}^2$, $x\neq 0$.
We note that the article \cite{ep} deals with the general equation of an operator of order $2m$ acting in ${\mathbb{R}}^d$.
To find the contributing saddle points we first note that by the strict convexity of the symbol there exists a unique $q=q(x)\in{\mathbb{R}}^2$ such that
\[
\frac{1}{4}\nabla A(q) =x.
\]
Then a point $z=\alpha q\in {\mathbb{C}}^2$ is a critical point for $\phi$ if and only if
$\alpha^3 =i$. We shall use two of these points, the points
\[
z_*^{\pm} = \big( \pm \frac{\sqrt{3}}{2} +\frac{1}{2} i \big) q =: \pm \xi_0 +i\eta_0 \, .
\]
As in the proof of Theorem \ref{thm2} we change domain of integration from ${\mathbb{R}}^2$ to ${\mathbb{R}}^2+i\eta_0$ and the crucial property is that
\[
{\rm Re}\, \phi(z) \leq {\rm Re}\, \phi(z_0) \; , \qquad \mbox{ for all }z\in{\mathbb{R}}^2 +i\eta_0,
\]
with equality only at the points $z_*^{\pm}$.
To prove this we note that it is equivalent to
\[
{\rm Re}\; A(\xi +i\eta_0 ) \geq {\rm Re}\; A(\xi_0 + i\eta_0) \, , \qquad \xi\in{\mathbb{R}}^2,
\]
with equality only for $\xi=\pm \xi_0$.
With $\alpha$ as above, i.e. $\alpha =\pm \sqrt{3}/2 +i/2$, we compute
\[
{\rm Re}\; A(\xi_0 + i\eta_0) = {\rm Re}\; A(\alpha q) = {\rm Re}\; (\alpha^4) A(q) = -\frac{1}{2}A(q) =-8A(\eta_0),
\]
so we need to prove that
\[
{\rm Re}\; A(\xi +i\eta_0 ) + 8A(\eta_0)\geq 0 , \qquad \xi\in{\mathbb{R}}^2,
\]
with equality at $\xi=\pm\xi_0$.
This is indeed true since for any $\eta=(\eta_1,\eta_2)$ we have
\begin{eqnarray*}
{\rm Re} \, A(\xi+i \eta)+8 A(\eta)&=&\frac{\beta}{3}\Big\{ (\xi_1^2-3\eta_1^2) + (\xi_2^2-3\eta_2^2) \Big\}^2
+\frac{4\beta}{3} (\xi_1\xi_2-3\eta_1\eta_2)^2 \nonumber \\
&& +\frac{3-\beta}{3}\Big\{ (\xi_1^2-3\eta_1^2)^2+ (\xi_2^2-3\eta_2^2)^2\Big\} \\
&\geq & 0.
\end{eqnarray*}
Hence the asymptotic behavior will indeed result precisely from the points $z_*^{\pm}$. To compute it we first note that
\[
\phi(z_*^{\pm}) = \frac{3}{4} e^{\pm \frac{2\pi i}{3}}A(q) =-\frac{3}{8}A(q) \pm \frac{3\sqrt{3}}{8}A(q)\, i.
\]
We also have
\[
{\rm det} (\phi_{z_iz_j})|_{z=z_*^+} = h(x)^{\frac{4}{3}} e^{\frac{2\pi i}{3}}
\]
where the function $h$ is positively homogeneous of degree one. Hence
\[
{\rm contr}(z_*^+) =\frac{2\pi}{\labelmbda} h(x)^{-\frac{2}{3}} e^{-\frac{\pi i}{3}}
\exp\Big(-\frac{3}{8}A(q)\labelmbda\Big) \exp\Big(\frac{3\sqrt{3}}{8}A(q)\, i\Big).
\]
The contribution of $z_*^-$ is the complex conjugate of that of $z_*^+$ and adding the two contributions we obtain that
\begin{equation}
F(\labelmbda) \sim \frac{4\pi}{\labelmbda} h(x)^{-2/3}
\exp\Big(-\frac{3}{8}A(q)\labelmbda\Big) \cos\Big(\frac{3\sqrt{3}}{8}A(q) -\frac{\pi}{3} \Big)
\label{finfin}
\end{equation}
We claim that $A(q)=d_0(x)^{4/3}$. Indeed by ({\rm Re}\;f{lolo}) we have
\[
d_0(x) =p_*(x)
= \sup_{\xi}\frac{x\cdot \xi}{A(\xi)^{1/4}}
\geq \frac{x\cdot q}{A(q)^{1/4}}
= \frac{\frac{1}{4} \nabla A(q)\cdot q}{A(q)^{1/4}}
=A(q)^{3/4}
\]
The reverse inequality follows by noting that the supremum is attained at $\xi=q$.
Substituting $A(q)=d_0(x)^{4/3}$ in (\ref{finfin}) and using (\ref{cv}) we finally conclude that, as $t\to 0+$,
\[
G(x,t) \sim \frac{2^{1/3}}{\pi}
h(x)^{-2/3} t^{-1/3}\exp\Big(-\frac{3}{8\cdot 4^{1/3}} \frac{d_0(x)^{\frac{4}{3}}}{t^{\frac{1}{3}}} \Big)
\cos\Big(\frac{3\sqrt{3}}{8\cdot 4^{1/3}} \frac{d_0(x)^{\frac{4}{3}}}{t^{\frac{1}{3}}} -\frac{\pi}{3}\Big) .
\]
\medskip

{\bf Acknowledgment.} We thank Leonid Parnovski for useful suggestions and Gregory Kounadis for helping us with Matlab. We also thank the referee for crucial comments which led to a substantial improvement of
Section~\ref{sec:asympt}.
\begin{thebibliography}{RRR}
\bibitem{agmon} S. Agmon, {\em Lectures on exponential decay of solutions of second-order elliptic equations,} Mathematical Notes, Princeton University Press, 1982
\bibitem{b2001} G. Barbatis, {\em Explicit estimates on the fundamental solution of higher-order parabolic equations with measurable coefficients}, J. Differential Equations {\bf 174} (2001), 442-463
\bibitem{bd} G. Barbatis, E.B. Davies, {\em Sharp bounds on heat kernels of higher order uniformly elliptic operators}, J. Operator Theory {\bf 36} (1996), 179-198
\bibitem{davies1} E.B. Davies, {\em Uniformly elliptic operators with measurable coefficients},
J. Funct. Anal. {\bf 132} (1995), 141-169
\bibitem{ep} M.A. Evgrafov, M.M. Postnikov, {\em Asymptotic behavior of Green's functions for parabolic and
elliptic equations with constant coefficients}, Math. USSR Sbornik {\bf 11} (1970), 1-24
\bibitem{f}M.V. Fedoryuk, {\em Asymptotic methods in analysis,} in Encyclopaedia of Mathematical Sciences, vol13
(ed. R.V. Gamkrelidze), Springer 1986
\bibitem{tintarev} K. Tintarev, {\em Short time asymptotics for fundamental solutions of higher order parabolic equations},
Comm. Partial Differential Equations {\bf 7} (1982), 371-391
\end{thebibliography}
\end{document}
\begin{document}
\title{Linear optics schemes for entanglement distribution\\with realistic single-photon sources}
\author{Miko{\l}aj Lasota}
\email{Corresponding author. E-mail: miklas@fizyka.umk.pl}
\affiliation{Faculty of Physics, Astronomy and Applied Informatics, Nicolaus Copernicus University, Grudziadzka~5, 87-100~Toru\'{n}, Poland}
\author{Czes{\l}aw Radzewicz}
\author{Konrad Banaszek}
\affiliation{Faculty of Physics, University of Warsaw, Ho\.{z}a~69, 00-681~Warsaw, Poland}
\author{Rob Thew}
\affiliation{Group of Applied Physics, University of Geneva, 1211 Geneva 4, Switzerland}
\begin{abstract}
We study the operation of linear optics schemes for entanglement distribution based on nonlocal photon subtraction when input states, produced by imperfect single-photon sources, exhibit both vacuum and multiphoton contributions. Two models for realistic photon statistics with radically different properties of the multiphoton ``tail'' are considered. The first model assumes occasional emission of double photons and linear attenuation, while the second one is motivated by heralded sources utilizing spontaneous parametric down-conversion. We find conditions for the photon statistics that guarantee generation of entanglement in the relevant qubit subspaces and compare it with classicality criteria. We also quantify the amount of entanglement that can be produced with imperfect single-photon sources, optimized over setup parameters, using as a measure entanglement of formation. Finally, we discuss verification of the generated entanglement by testing Bell's inequalities. The analysis is carried out for two schemes. The first one is the well-established one-photon scheme, which produces a photon in a delocalized superposition state between two nodes, each of them fed with one single photon at the input. As the second scheme, we introduce and analyze a linear-optics analog of the robust scheme based on interfering two Stokes photons emitted by atomic ensembles, which does not require phase stability between the nodes.
\end{abstract}
\pacs{42.50.Ex, 03.67.Hk, 42.50.Ar, 03.67.Bg}
\maketitle
\section{Introduction}
\label{Sec:Introduction}
One of the grand challenges in emerging quantum technologies is the distribution of entanglement over long distances, which would significantly enhance the feasible range of quantum key distribution \cite{GisiRiboRMP2002,ScarBechRMP2009} and other quantum communication protocols. Loss and other decoherence mechanisms, inevitably affecting long-haul transmission of quantum systems, e.g., light in optical fibers, dramatically attenuate available nonclassical correlations with an increasing distance. Presently, the most promising solution to this problem is an architecture based on a sequence of quantum repeaters placed at regular intervals over the distance to be covered \cite{BriDurPRL1998}. First, entanglement is generated between quantum memories located at adjacent nodes, which can be done more efficiently owing to shorter separation. In the second stage, entanglement swapping operations \cite{BennBrasPRL1993,ZukoZeilPRL1993} performed on quantum memories at individual nodes create long-distance entanglement between the end stations.
A natural choice for the physical implementation of quantum repeaters are atomic ensembles \cite{LukinRMP2003} or solid-state systems for storing quantum superpositions, with optical interconnects to facilitate transfers of quantum states \cite{SangSimonRMP2011}.
An essential ingredient in quantum repeater architectures is a scheme to distribute entanglement between adjacent nodes. Proposed designs are based on Raman scattering in atomic ensembles \cite{DuanLukinNAT2001,JiangTay07,ZhaoChen07,ChenZhao07,SangSimon08}, photon pair sources combined with quantum memories \cite{SimondeRied07}, as well as on a combination of multiple single-photon sources, memories, linear optics, and conditional photodetection \cite{SangSimon07}. In the last case, the underlying idea is to perform nonlocal photon subtraction \cite{DaknaAnhut97,KimPark05,Kim08}, which creates, in a heralded, loss-tolerant way, entangled states of excitations stored in quantum memories. The single photons at the input are a nonclassical resource which allows for the generation of entanglement.
The purpose of this paper is to analyze imperfections of single-photon sources that can be tolerated in the operation of linear-optics schemes for entanglement distribution based on nonlocal photon subtraction. Our analysis is based on two examples. The first one is the original proposal by Sangouard {\em et al.} \cite{SangSimon07} to prepare a superposition of two distant quantum memories sharing a single excitation. As the second example, we introduce a linear optics version of the scheme based on two-photon interference of Stokes photons emitted from atomic ensembles \cite{ChenZhao07,ZhaoChen07} and study its robustness against source imperfections.
Linear optics schemes for entanglement distribution are sensitive to both vacuum and multiphoton contributions in the input photon statistics. We will consider here two models of the photon number distribution for sources used to generate entanglement. The first model is a statistical mixture of up to two photons, that includes both non-ideal photon preparation and the possibility of double photon emission. The second model is motivated by heralded single-photon sources based on spontaneous parametric down-conversion \cite{CastellettoScholten}. Their typically low success rates could be improved in principle through the construction of multiplexed arrays \cite{MigdallBranningPRA2002,ShapiroWongOL2007,CollinsXiongNCOMM2013}. The down-conversion model of photon statistics exhibits a relatively long multiphoton ``tail'' vanishing more slowly than the thermal distribution with an increasing photon number. Considering two models with radically different properties in the multiphoton sector will allow us to assess whether the specifics of the multiphoton contribution may have a noticeable impact on the entanglement distribution scheme. The actual statistics of sources that are currently being developed \cite{ShieldsNPHOT2007,EisamanFanRSI2011} can be expected to interpolate between the two extreme models studied here.
To characterize the effects of imperfections, we quantify the entanglement generated in the relevant qubit subspaces with the help of entanglement of formation \cite{HillWoot97,Woot98}. This measure can be computed for a pair of two-level systems in a straightforward manner, providing an upper bound for distillable entanglement \cite{HoroHoroRMP2009}. We also give simple threshold criteria for the photon statistics necessary for entanglement generation at all. It is interesting to discuss these criteria in the context of nonclassicality of input light: clearly, if the sources exhibited Poissonian statistics, with optional classical excess noise, no entanglement generation would be possible, as a setup based on such sources, linear optics, and standard photodetectors would admit an entirely semiclassical description \cite{MandelJOSA1977}. We find that the non-classicality condition and the criteria for entanglement generation are not equivalent.
As the last aspect, we discuss the possibility to verify successful entanglement generation by demonstrating a violation of Bell's inequalities \cite{Bell87}. For the single-photon superposition generated between two quantum memories, we use the Clauser-Horne inequality \cite{ClauHorne74} applied to unbalanced homodyning measurements of phase-space quasiprobability distribution functions \cite{BanaWodPRL1999}. In this approach, noncommuting observables are realized with the help of phase-space displacements introduced by superposing the signal field with a coherent reference on an unbalanced beam splitter \cite{WallenVog96,BanaWod96}. In the second example, when the quantum memory at each mode contains an excitation prepared in a superposition of two modes, we can use the standard Clauser-Horne-Shimony-Holt (CHSH) inequality \cite{CHSH69}, although care needs to be taken to account correctly for multiphoton terms.
This paper is organized as follows. First, in Sec.~\ref{Sec:Subtracting} we present a general theoretical description of linear-optics schemes for nonlocal photon subtraction. Simplified formulas in the high-loss regime that are most relevant in practical scenarios are derived. Section III introduces two models of photon statistics that will be used to study the effects of source imperfections. The generic one-photon scheme is described in Sec.~\ref{Sec:TwoMemories}, where a threshold criterion for the photon statistics to warrant entanglement generation is also derived. A quantitative characterization of entanglement produced in this scheme and requirements to violate phase-space Bell's inequalities are presented in Sec.~\ref{Sec:OnePhotonEntanglement}. A linear optics two-photon scheme robust against phase instabilities is introduced in Sec.~\ref{Sec:FourMemories}. Its properties are discussed quantitatively in Sec.~\ref{TwoPhotonEntanglement}.
Finally, Sec.~\ref{Sec:Conclusions} concludes the work.
\section{Nonlocal photon subtraction}
\label{Sec:Subtracting}
A number of linear optics schemes for entanglement distribution via nonlocal photon subtraction can be described using a general setup depicted in Fig.~\ref{fig:figure1}(a). An array of $M$ sources emits single photons in well-defined modes represented by respective annihilation operators $\hat{a}_i$, $i=1,\ldots, M$. The sources are divided into two groups located at adjacent nodes. At each node, the photons are directed to beam splitters with identical power transmissions $T$. The transmitted beams are sent to an intermediary site, where they are combined using a linear optical circuit characterized by a certain unitary $M \times M$ matrix $\mathbf{U}$, whose outputs are monitored by an array of single-photon detectors. We assume here that losses affecting all the modes between the beam splitters and the intermediary site, as well as the efficiencies of the detectors, are uniform. The beams reflected off the beam splitters placed after single-photon sources are mapped locally onto quantum memories present at the nodes. Light stored in quantum memories is retained for further processing only if the detectors at the intermediary site produce certain sequences of clicks, heralding that entanglement between memories has been successfully generated. In subsequent stages, entanglement swapping operations can be used to extend the range of entanglement to more distant nodes.
\begin{figure}
\caption{(Color online) A general scheme for nonlocal photon subtraction that includes schemes for entanglement distribution studied in this work. The light emitted by an array of single-photon sources located on the left of the diagram is sent to beam splitters with identical power transmission coefficients $T$. Reflected beams are mapped onto local quantum memories, while transmitted beams travel to an intermediary site, where they enter a linear optics circuit described by a unitary matrix ${\mathbf U}$.}
\label{fig:figure1}
\end{figure}
Let us denote by $\hat{b}_i$, $i=1,\ldots, M$, annihilation operators of modes after the transformation $\mathbf{U}$. Their relation to the input operators $\hat{a}_i$ can be written compactly as
\begin{equation}
\begin{pmatrix} \hat{b}_1 \\ \hat{b}_2 \\ \vdots \\ \hat{b}_M \end{pmatrix} = \mathbf{U}
\begin{pmatrix} \hat{a}_1 \\ \hat{a}_2 \\ \vdots \\ \hat{a}_M \end{pmatrix}.
\end{equation}
Suppose that the detectors produce a specific sequence of clicks described by a vector ${\mathbf k} = (k_1, k_2, \ldots, k_M)$. The transformation between the input state $\hat{\varrho}_{\text{in}}$ and the unnormalized conditional output state
${\hat{\varrho}^{({\mathbf k})}}_{\text{out}}$ generated at the reflected ports of the beam splitters is given by a map:
\begin{equation}
{\hat{\varrho}^{({\mathbf k})}}_{\text{out}} = \sum_{{\mathbf n}} p({\bf k}|{\bf n}) \hat{L}_{\mathbf n} \hat{\varrho}_{\text{in}} \hat{L}_{\mathbf n}^\dagger. \label{Eq:rho(k)out}
\end{equation}
Here $\hat{L}_{\mathbf n}$ are Kraus operators corresponding to the subtraction of $n_i$ photons from the mode $\hat{b}_i$, $i=1,\ldots, M$
\cite{ChuangLeungPRA1997,WasiBanaPRA2007},
\begin{equation}
\hat{L}_{\bf n} = \bigotimes_{i=1}^{M} \sqrt{\frac{T^{n_i}}{n_i!}} (\sqrt{1-T})^{\hat{b}^\dagger_i \hat{b}_i} \hat{b}_i^{n_i}
\label{Eq:Kraus}
\end{equation}
and we have denoted $\mathbf{n} = (n_1, n_2, \ldots, n_M)$. Further, $p({\bf k}|{\bf n})$ is the conditional probability of producing a click sequence ${\bf k}$ on the detectors, provided that $n_1, \ldots, n_M$ photons have been subtracted from the modes $\hat{b}_1 , \ldots, \hat{b}_M$. Its form depends on the specific detection scheme used in the setup.
Let us denote by $\zeta$ the detection efficiency, assumed to be identical for all the detectors. As the transformation $\mathbf{U}$ is linear, the parameter $\zeta$ can also account for losses between the nodes and the intermediary site, following the assumption that losses are identical for all the modes involved. If photon-number-resolving detectors are used at the intermediary site, the conditional probability $p({\bf k}|{\bf n})$ is given by a multimode binomial distribution
\begin{equation}
p({\bf k}|{\bf n}) = \prod_{i=1}^{M} { n_i \choose k_i } \zeta^{k_i} (1-\zeta)^{n_i - k_i}
\end{equation}
where we have used convention ${n \choose k } = 0$ for $n < k$. Throughout this paper we will focus our attention on the second relevant case, when binary detectors are used at the intermediary site. Then each $k_i$ can take only two values, $0$ or $1$, depending on whether the respective detector has registered no photons or at least one, and the conditional probability distribution $p({\bf k}|{\bf n})$ takes the form
\begin{equation}
\label{Eq:CondProbBinary}
p({\bf k}|{\bf n}) = \prod_{i=1}^{M} [k_i + (1-2 k_i)(1-\zeta)^{n_i}].
\end{equation}
In a realistic scenario losses between the nodes and the intermediary site are significant and consequently $\zeta \ll 1$. In this regime, in Eq.~(\ref{Eq:CondProbBinary}) we can approximate $(1-\zeta)^{n_i} \approx 1-\zeta n_i$, provided that the photon numbers at the setup input are not too high. Taking the leading order in $\zeta$ of factors appearing under the product in Eq.~(\ref{Eq:CondProbBinary}), which is $1$ for $k_i=0$ and $\zeta n_i$ for $k_i=1$, gives a simplified expression for the conditional probability distribution for $\zeta \ll 1$,
\begin{equation}
\label{Eq:CondProbBinaryLowEta}
p({\bf k}|{\bf n}) = \zeta^{K} \prod_{i=1}^{M} (1-k_i + k_i n_i)
\end{equation}
where $K= \sum_{i=1}^{M} k_i$ is the total number of clicks produced by the binary detectors. The above formula gives an explicit power scaling in the efficiency $\zeta$.
The principle of operation for entanglement distribution based on nonlocal photon subtraction is typically discussed in the limit of very high reflection of produced photons to quantum memories, corresponding to $T \rightarrow 0$. In this case, we can approximate
$(\sqrt{1-T})^{\hat{b}^\dagger_i \hat{b}_i} \approx 1$ in Eq.~(\ref{Eq:Kraus}), assuming that contributions from higher photon numbers at the input are sufficiently small \cite{SekatskiJPB2012}. Then the conditional state is given by
\begin{multline}
{\hat{\varrho}^{({\mathbf k})}}_{\text{out}} = \sum_{{\mathbf n}} p({\bf k}|{\bf n})
\frac{T^{n_1 + \cdots + n_M}}{n_1 ! \cdots n_M!} \\
\times \hat{b}_1^{n_1} \cdots \hat{b}_M^{n_M} \hat{\varrho}_{\text{in}} (\hat{b}_1^\dagger)^{n_1} \cdots (\hat{b}_M^\dagger)^{n_M}.
\label{eq:KrausForSmallT}
\end{multline}
When $T\rightarrow 0$, the summation over $\mathbf{n}$ in the above formula can be truncated to the lowest value of the total photon number
$n_1 + \cdots + n_M$ that gives a nonzero contribution to a click sequence $\mathbf{k}$. This yields a simple power scaling in $T$.
In the following, we will consider the physical regime when $\zeta \ll 1$, as otherwise formulas become overly complicated. We will use the assumption $T \ll 1$ mainly for illustrative purposes. We will carry out numerical calculations for arbitrary $T$ using the general expression given in Eq.~(\ref{Eq:Kraus}) in order to optimize the amount of generated entanglement.
\section{Imperfect photon sources}
\label{Sec:Sources}
We will consider in this paper two models for photon number distribution produced by an imperfect source. The most elementary model, which includes both non-unit preparation efficiency and multiphoton effects, is a statistical mixture of up to two photons, $p_0 \proj{0} + p_1 \proj{1} + p_2 \proj{2}$, where the three probabilities $p_0, p_1$, and $p_2$, satisfy the normalization constraint $p_0 + p_1 + p_2 = 1$. The main advantage of this elementary model is computational simplicity.
It will be convenient to introduce a physically motivated parametrization for the photon number distribution in the above model. Namely, we will assume that the statistics are generated by a source producing a single photon with a probability $1-\epsilon$, while a double photon emission occurs with a probability $\epsilon$, and that the output is subject to linear losses characterized by a power transmission coefficient $\eta$. The explicit expressions for photon number probabilities in this double-emission model are
\begin{subequations}
\label{Eq:UptoTwoPhotons}
\begin{align}
p_0 & = (1-\eta)(1-\eta\epsilon), \\
p_1 & = \eta + \eta(1-2\eta)\epsilon, \\
\label{Eq:UptoTwoPhotonsp2}
p_2 & = \eta^2 \epsilon.
\end{align}
\end{subequations}
The second model is motivated by heralded sources based on nondegenerate spontaneous parametric down-conversion \cite{CastellettoScholten}, when photons within produced pairs can be separated by polarization, frequency, or the emission direction. If the process involves only one pair of field modes for the signal and the idler beams, the probability of generating simultaneously $n$ pairs scales as $r^n$, where $r$ depends on the strength of the nonlinear process.
Suppose that the idler beam is monitored by a binary heralding detector with a very low efficiency. In this case, the conditional probability that the detector clicks scales linearly with the number of incident idler photons, which is a special case of Eq.~(\ref{Eq:CondProbBinaryLowEta}) for $M=1$. Consequently, the probability of producing $n$ photons in the heralded signal beam is given, after normalization, by $(1-r)^2 n r^{n-1}$. This statistics features a ``tail'' for $n > 2$, which vanishes more slowly than Poissonian and thermal distributions with increasing $n$. If the signal beam experiences losses characterized by a power transmission coefficient $\eta$, the photon statistics of the source is given by
\begin{align}
p_m & = (1-r)^2 \sum_{n=m}^{\infty} { n \choose m} \eta^m (1-\eta)^{n-m} n r^{n-1} \nonumber \\
& =\frac{(1-r)^2 r^{m-1} \eta^m [m+(1-\eta) r]}{[1-(1-\eta)r]^{m+2}}.
\label{eq:probdistribution}
\end{align}
This distribution describes the second, down-conversion model used in our analysis.
It is easy to check that when $r \ll 1$, expanding Eq.~(\ref{eq:probdistribution}) for $m=0,1,2$ up to the linear term in $r$ yields Eq.~(\ref{Eq:UptoTwoPhotons}) with $r=\epsilon/2$. Furthermore, we verified numerically that for $\eta \ge 0.834$ the overall multiphoton probability $\sum_{m=2}^{\infty} p_m$ for the down-conversion model is slightly smaller than the two-photon probability given by Eq.~(\ref{Eq:UptoTwoPhotonsp2}), with $\epsilon = 2r < 0.5$. Consequently, we can compare the performance of entanglement distribution schemes for the two models of the photon statistics with their parameters, identified as $r=\epsilon/2$, to estimate the effects of the actual shape of the multiphoton ``tail'' produced by an imperfect photon source.
The parameter $\eta$, common to both the models of the photon statistics, can be used to account for attenuation along the optical path from a photon source to a respective quantum memory, as well as linear losses associated with a mapping onto the memory and its subsequent readout. This parameter critically affects the quality of the generated entanglement. In contrast, the primary effect of losses along the path from the transmitted output ports of beam-splitters $T$ to detectors at the intermediary site, characterized by the transmission $\zeta$ introduced in Sec.~\ref{Sec:Subtracting}, is the reduction of the rate of heralding events.
\section{One-photon scheme}
\label{Sec:TwoMemories}
The simplest scheme for generating entanglement between two nodes, shown in Fig.~\ref{fig:figure2}, has been proposed by Sangouard \emph{et al.} in \cite{SangSimon07}. Each node has one single-photon source and a memory. We will label the corresponding modes with indices $a_1$ and $a_2$ for the two nodes. The beams sent to the intermediary site are interfered on a balanced $50/50$ beam splitter described by a unitary transformation
\begin{equation}
\mathbf{U} = \frac{1}{\sqrt{2}} \begin{pmatrix} 1 & 1 \\ 1 & -1 \end{pmatrix}.
\end{equation}
We will restrict our attention here to the regime $\zeta \ll 1$ and binary detectors monitoring the two output ports of the beam splitter. A calculation based on Eqs.~(\ref{Eq:rho(k)out}) and (\ref{Eq:CondProbBinaryLowEta}) shows that if perfect single-photon sources are used in the setup with the initial state $\hat{\varrho}_{\text{in}} = \proj{1_{a_1} 1_{a_2}}$, a single detector count implies generation of one of two conditional states, denoted here with the superscript $\pm$:
\begin{equation}
\label{Eq:rhooutpmOnePhoton}
\hat{\varrho}_{\text{out}}^{(\pm)} = \zeta T[ (1-T) \proj{\psi_\pm} + T \proj{\text{vac}}]
\end{equation}
where the first component is a maximally entangled state obtained by non-local photon subtraction,
\begin{align}
\label{Eq:psipm}
\ket{\psi_\pm} & = \frac{1}{\sqrt{2}} (\hat{a}_1 \pm \hat{a}_2) \ket{1_{a_1} 1_{a_2}} \nonumber \\
&= \frac{1}{\sqrt{2}} (\ket{0_{a_1} 1_{a_2}} \pm \ket{1_{a_1} 0_{a_2}})
\end{align}
with the sign of the superposition depending
on the detector that clicked, and $\ket{\text{vac}}$ denotes the vacuum state. In the limit $T\rightarrow 0$ the relative weight of the vacuum component with respect to $\ket{\psi_\pm}$ becomes negligible. This results in production of the ideal maximally entangled state, albeit with a diminishing overall success probability scaling as the product $\zeta T$.
\begin{figure}
\caption{(Color online) A scheme for producing a single photon in a delocalized superposition state between two quantum memories, originally proposed in Ref.~\cite{SangSimon07}.}
\label{fig:figure2}
\end{figure}
If the photon sources are imperfect with their statistics described by identical photon number distributions $p_m$, the initial state of the modes $a_1$ and $a_2$ is given in the general case by a density matrix
\begin{equation}
\hat{\varrho}_{\text{in}} = \sum_{m_1, m_2=0}^{\infty} p_{m_1} p_{m_2} \proj[{a_1}]{m_1} \otimes \proj[{a_2}]{m_2}.
\label{eq:rhoin}
\end{equation}
Assuming that binary detectors are used at the intermediary site,
we will be interested in the conditional states $\hat{\varrho}^{({\mathbf k})}$ defined in Eq.~(\ref{Eq:rho(k)out}) for ${\mathbf k} = (1,0)$ and
${\mathbf k} = (0,1)$. These states are identical up to a trivial $\pi$ phase shift performed on one of the modes, which of course does not change the amount of the generated entanglement. Following the notation introduced in Eq.~(\ref{Eq:rhooutpmOnePhoton}), we will label them with a superscript $\pm$ in lieu of ${\mathbf k}$.
Let us first analyze the entanglement present in the qubit subspace spanned by the zero- and one-photon Fock state for each of the stored modes
when imperfect single-photon sources are used. This characteristic is contained in the projected memory state, denoted here with a prime,
\begin{equation}
\mbox{${\hat{\varrho}'}$}^{(\pm)}_{\text{out}} = ( \hat{\Pi}_{a_1} \otimes \hat{\Pi}_{a_2}) \hat{\varrho}^{(\pm)}_{\text{out}}
( \hat{\Pi}_{a_1} \otimes \hat{\Pi}_{a_2})
\end{equation}
where the projection operators are
\begin{equation}
\hat{\Pi}_{\mu} = \proj[\mu]{0} + \proj[\mu]{1}, \quad \mu=a_1, a_2.
\label{eq:projection1ph}
\end{equation}
In the two-mode subspace spanned by $\ket{0_{a_1} 0_{a_2}}$, $\ket{0_{a_1} 1_{a_2}}$, $\ket{1_{a_1} 0_{a_2}}$, $\ket{1_{a_1} 1_{a_2}}$, the projected density matrix is given by
\begin{equation}
\mbox{${\hat{\varrho}'}$}^{(\pm)}_{\text{out}}
= \begin{pmatrix} \varrho_{00} & 0 & 0 & 0 \\ 0 & \varrho_{01} & c & 0 \\ 0 & c^\ast & \varrho_{10} & 0 \\ 0 & 0 & 0 & \varrho_{11}
\end{pmatrix},
\label{eq:rhoprim1ph}
\end{equation}
where the only non-zero elements in the limit $T\ll 1$ take the form
\begin{subequations}
\begin{align}
{\varrho}_{00} & = \zeta Tp_0p_1, \\
{\varrho}_{01} = {\varrho}_{10} & = \zeta T\left(p_0p_2+\frac{p_1^2}{2}\right), \\
\varrho_{11} & = 2 \zeta T p_1p_2, \\
c & = \pm \frac{1}{2} \zeta T p_1^2.
\end{align}
\end{subequations}
The positive partial transposition (PPT) criterion \cite{Peres96,Horodeckis96} implies that the state $\mbox{${\hat{\varrho}'}$}^{(\pm)}_{\text{out}}$ is entangled if and only if
\begin{equation}
|c|^2>\varrho_{00}\varrho_{11}. \label{eq:PPT}
\end{equation}
This inequality translates into a simple condition for the photon statistics:
\begin{equation}
\label{Eq:EntanglementGenIneq}
p_1 ^2 > 8 p_0 p_2
\end{equation}
that ensures generation of entanglement in the limit $T \ll 1$.
It is interesting to compare the above inequality with properties of the photon number distribution for classical light states, i.e., coherent states and their statistical mixtures. In the latter case, the weight of an $n$-photon term can be expressed as
\begin{equation}
p_n = \left\langle \frac{{\cal I}^n}{n!} e^{-{\cal I}} \right\rangle,
\end{equation}
where ${\cal I}$ is the mean photon number of a coherent state and angular brackets $\langle \ldots \rangle$ denote a classical statistical average over ${\cal I}$. Using the Schwarz inequality $\langle XY \rangle^2 \le \langle X^2 \rangle \langle Y^2 \rangle$ for $X = e^{-{\cal I}/2}$ and $Y = {\cal I}e^{-{\cal I}/2}$ yields
\begin{equation}
p_1^2 \le 2 p_0 p_2.
\label{Eq:CoherentMixtures}
\end{equation}
It is worth noting a gap between the above classicality condition and the parameter region characterized by Eq.~(\ref{Eq:EntanglementGenIneq}) for which photon sources are capable of producing entanglement in the one-photon scheme considered here.
The condition for entanglement generation retains a relatively compact form for an arbitrary $T$ if the photon sources produce statistical mixtures of up to two photons. Using more general expressions for the elements of the density matrix $\mbox{${\hat{\varrho}'}$}^{(\pm)}_{\text{out}}$ in this case, the entanglement criterion given in Eq.~(\ref{eq:PPT}) can be generalized to
\begin{multline}
p_1^2\left(\frac{p_1^2}{4}-2p_0p_2\right) > 4 T^4 p_2^4 + 8 T^3 p_1 p_2^3 \\
+ 4 T^2 p_2^2 (p_1^2 +2 p_0 p_2) + 8 T p_0 p_1 p_2^2. \label{eq:PPTgeneral}
\end{multline}
It is seen that the right-hand side of the inequality is a fourth-order polynomial in $T$ with non-negative coefficients.
Consequently, the resulting condition on photon statistics becomes the most relaxed in the limit $T\rightarrow 0$, when it can be written simply as
Eq.~(\ref{Eq:EntanglementGenIneq}). We also verified by a direct calculation that if the conditional probability distribution $p({\bf k}|{\bf n})$ is taken in the general form given by Eq.~(\ref{Eq:CondProbBinary}) that is valid outside the regime $\zeta \ll 1$, then the right-hand side of Eq.~(\ref{eq:PPTgeneral}) also has the form of a polynomial in $T$ with non-negative coefficients. Consequently, the condition presented in Eq.~(\ref{Eq:EntanglementGenIneq}) is least restrictive over arbitrary values of $\zeta$ and $T$.
In Figs.~\ref{fig:figure3}(a) and \ref{fig:figure3}(b) we depict with thick solid lines (red online) the condition for entanglement generation given in Eq.~(\ref{Eq:EntanglementGenIneq}) in terms of parametrizations of the two models of photon statistics discussed in Sec.~\ref{Sec:Sources}. We will present a more quantitative characterization of the generated entanglement in the next section.
\begin{figure*}
\caption{(Color online) The effective entanglement of formation $E$ produced in the subspace spanned by zero- and one-photon Fock states for the input photon statistics (a) described by a mixture of up to two photons defined in Eq.~(\ref{Eq:UptoTwoPhotons}) and (b) given by the down-conversion model of Eq.~(\ref{eq:probdistribution}).}
\label{fig:figure3}
\end{figure*}
\section{One-photon entanglement}
\label{Sec:OnePhotonEntanglement}
Let us now discuss in more detail the amount of entanglement produced in the one-photon scheme and the possibility to verify its successful generation using Bell's inequalities.
\begin{figure*}
\caption{The minimum value of the Clauser-Horne combination ${\cal CH}$.}
\label{fig:figure4}
\end{figure*}
As an entanglement measure, we choose the entanglement of formation $E_F$, which is defined as the number of maximally entangled states needed to prepare an ensemble of pure states representing a given mixed state, minimized over all such ensembles. For a normalized two-qubit state $\hat{\varrho}$ the entanglement of formation is given explicitly by \cite{HillWoot97,Woot98}
\begin{equation}
E_F(\hat\varrho)=H\left({\textstyle\frac{1}{2}} \bigl( 1 +\sqrt{1-[C(\hat\varrho)]^2}\bigr)\right),
\end{equation}
where $H(x)= -x \log_2 x - (1-x) \log_2 (1-x)$ denotes binary entropy and $C(\hat\varrho)$ is the so-called concurrence of the state $\hat\varrho$, which can be computed in a straightforward manner. The effective amount of entanglement which includes the nonunit probability of generating successfully the desired state is
$2 \textrm{Tr} (\mbox{${\hat{\varrho}'}$}^{(\pm)}_{\text{out}}) E_F ( \mbox{${\hat{\varrho}'}$}^{(\pm)}_{\text{out}} /
\textrm{Tr} (\mbox{${\hat{\varrho}'}$}^{(\pm)}_{\text{out}})) $, where the factor $2$ comes from two possible types of detection events denoted in Sec.~\ref{Sec:TwoMemories} as $\pm$. In the limit $\zeta \ll 1$ considered here, this expression is linear in $\zeta$. In order to factor out the effects of transmission losses and finite detection efficiency at the intermediary site, we will compute the rescaled quantity
\begin{equation}
E = \frac{2}{\zeta} \textrm{Tr} (\mbox{${\hat{\varrho}'}$}^{(\pm)}_{\text{out}}) E_F \left( \frac{\mbox{${\hat{\varrho}'}$}^{(\pm)}_{\text{out}} }{
\textrm{Tr} (\mbox{${\hat{\varrho}'}$}^{(\pm)}_{\text{out}})}\right)
\end{equation}
which is a function of the beam-splitter transmission $T$ and the photon statistics $\{ p_m\}$. To optimize the produced entanglement, for each set of the parameters characterizing the photon statistics we carried out maximization over the transmission $T$. The results are depicted in Figs.~\ref{fig:figure3}(a) and \ref{fig:figure3}(b), and the optimal values of $T$ are shown in Figs.~\ref{fig:figure3}(c) and \ref{fig:figure3}(d), respectively. It is seen that although the threshold condition given in Eq.~(\ref{Eq:EntanglementGenIneq}) is relatively relaxed, a substantial amount of entanglement is produced only for a small region of parameters close to the ideal case of perfect single-photon sources. In this region, the difference between the two models of the photon statistics becomes minor. For practical photon sources based on heralded parametric down-conversion, the realistic values of $r$ would be in the range $10^{-2}$ -- $10^{-1}$. We considered here a broader range of $r$ to characterize performance of other sources featuring a large multiphoton probability distributed over a ``tail'' extending to high photon numbers.
In order to reach rates calculated above in processing and utilizing produced entanglement, one needs to be able to perform a broad range of local operations on quantum memories storing noisy entangled states.
An alternative question is whether it would be possible to verify directly the presence of entanglement via, e.g., violation of Bell's inequalities. When measurements are based on photon-counting detectors, the one-photon state $\ket{\psi_\pm}$ defined in Eq.~(\ref{Eq:psipm}) is not sufficient on its own to demonstrate correlations incompatible with local hidden variable theories \cite{SinglePhotonNonlocality}. However, if in addition a local phase reference is available, one can implement noncommuting measurements whose results violate Bell's inequalities. We will consider here a scheme that relies on applying a phase-space displacement by mixing the field with a strong coherent field on an unbalanced beam splitter and counting photons in the transmitted beam \cite{BanaWodPRL1999}. For binary detectors, in the asymptotic limit of unit beam-splitter transmission, this realizes a projection on a coherent state whose amplitude is given by the displacement introduced by the reference field. When the measurement is performed on fields released from the memories, the probability of a joint no-count event is therefore given by
\begin{equation}
\label{Eq:Qalphabeta}
Q(\alpha,\beta) = \frac{1}{\textrm{Tr} (\hat{\varrho}^{(\pm)}_{\text{out}})} \bra{\alpha_{a_1} \beta_{a_2}} \hat{\varrho}^{(\pm)}_{\text{out}}
\ket{\alpha_{a_1} \beta_{a_2}}
\end{equation}
and has the interpretation, up to a multiplicative constant, of the two-mode $Q$ function for the normalized state $\hat{\varrho}^{(\pm)}_{\text{out}}$. The marginal no-count probabilities for individual detectors are given by an expression
\begin{equation}
\label{Eq:Qalpha}
Q_{a_1} (\alpha) = \frac{1}{\textrm{Tr} (\hat{\varrho}^{(\pm)}_{\text{out}})} \bra{\alpha_{a_1} } \textrm{Tr}_{a_2}( \hat{\varrho}^{(\pm)}_{\text{out}} )
\ket{\alpha_{a_1} }
\end{equation}
for mode $a_1$ and an analogous expression for mode $a_2$.
If two alternative coherent displacements $\alpha$ or $\alpha'$ are applied to mode $a_1$ and $\beta$ or $\beta'$ to mode $a_2$, measurements of joint and marginal probabilities can be used to evaluate the Clauser-Horne (CH) combination \cite{ClauHorne74},
\begin{multline}
{\cal CH} = Q(\alpha,\beta) + Q(\alpha', \beta) + Q(\alpha, \beta') - Q(\alpha',\beta') \\
- Q_{a_1}(\alpha) - Q_{a_2}(\beta)
\end{multline}
which for local hidden variable theories is bounded between
\begin{equation}
-1 \le {\cal CH} \le 0.
\end{equation}
In Fig.~\ref{fig:figure4} we depict the minimum value of the combination obtained by optimization over displacements $\alpha, \alpha', \beta$, and $\beta'$ for the two models of photon statistics introduced in Sec.~\ref{Sec:Sources}. The output density matrix $\hat{\varrho}^{(\pm)}_{\text{out}}$ has been taken in the limit $\zeta \ll 1$ and $T\ll 1$. The photon statistics $p_m$ based on the down-conversion model has been truncated at $m=3$, which reduces the trace of the input density matrix $\hat{\varrho}_{\text{in}}$ by less than $0.1\%$ within the relevant parameter region. We verified that including the photon statistics up to $m=6$ for the optimal displacements does not noticeably change the value of the CH combination.
It is seen that a violation of Bell's inequalities places stringent requirements on the photon statistics, much stricter than the generation of entanglement in the zero-one photon sector. Although the differences between the two-photon model and the down-conversion model for the photon statistics are rather minor, the two-photon model gives a slightly larger region over which a violation of Bell's inequality can be observed if the parameters are identified as $\epsilon = 2r$. It is also worth noting that because the phase-space displacement is a linear operation, the parameter $\eta$ characterizing the photon statistics can also include the nonunit efficiency of detectors used to implement the coherent-state projections described in Eqs.~(\ref{Eq:Qalphabeta}) and (\ref{Eq:Qalpha}).
\section{Two-photon scheme}
\label{Sec:FourMemories}
In order to prepare a pair of photons entangled with respect to a modal degree of freedom such as polarization, one could repeat the scheme described in Sec.~\ref{Sec:TwoMemories} twice and postselect the output on the presence of one excitation at each node. In this section we present and analyze a two-photon scheme that, starting from four photons, directly prepares two-photon entanglement without resorting to postselection. The scheme is a linear optics analog of the proposal based on generating twin excitations in atomic ensembles \cite{ChenZhao07,ZhaoChen07}. Its principle of operation exploits two-photon interference, thus avoiding the need for interferometric stability between the nodes.
\begin{figure}
\caption{(Color online) A scheme to generate a two-photon polarization-entangled state. Two horizontally polarized photons emitted by sources $S_1$ and $S_3$ and two vertically polarized photons emitted by sources $S_2$ and $S_4$ are partly reflected using beam splitters $T$ to quantum memories $M_1, \ldots , M_4$. The transmitted beams are combined and interfered using polarizing beam splitters (PBS) and half-wave plates (HWP), with outputs monitored by heralding detectors $D_1, \ldots , D_4$.}
\label{fig:figure5}
\end{figure}
The scheme is shown in Fig.~\ref{fig:figure5}. Each node comprises two modes described by annihilation operators $\hat{a}_1$ and $\hat{a}_2$ for node $A$ and $\hat{a}_3$ and $\hat{a}_4$ for node $B$. It will be convenient to think of the odd-numbered modes as polarized horizontally and even-numbered modes as polarized vertically. The photons emitted by sources $S_1, \ldots, S_4$ are transmitted through beam splitters characterized by power transmission $T$, with the reflected beams stored in quantum memories $M_1, \ldots, M_4$. The pairs of modes at each node are combined into a single path using polarizing beam splitters and sent to the intermediary site where two-photon interference is realized. First, the polarizations of the two beams are rotated by $45^\circ$ using half-wave plates which realize the transformation
\begin{equation}
\hat{a}_1 \rightarrow \frac{1}{\sqrt{2}}(\hat{a}_1 + \hat{a}_2), \quad
\hat{a}_2 \rightarrow \frac{1}{\sqrt{2}}(\hat{a}_1 - \hat{a}_2)
\end{equation}
and analogously for the modes $\hat{a}_3$ and $\hat{a}_4$. Then the polarization components are recombined in the rectilinear basis on a polarizing beam splitter which transmits horizontal polarization and reflects vertical polarization. The output beams are transmitted through half-wave plates rotating polarization by $45^\circ$ and separated into horizontal and vertical components, which are monitored by detectors $D_1, \ldots, D_4$. This linear optics network implements the following transformation of the modes $\hat{a}_1 , \ldots , \hat{a}_4$:
\begin{eqnarray}
\mathbf{U}=\frac{1}{2}\left(
\begin{array}{cccc}
1 & 1 & 1 & -1 \\
1 & 1 & -1 & 1 \\
1 & -1 & 1 & 1 \\
-1 & 1 & 1 & 1
\end{array}\right).
\end{eqnarray}
In the case of perfect single-photon sources, entanglement is generated between single-photon subspaces of pairs of modes $\hat{a}_1, \hat{a}_2$ and $\hat{a}_3, \hat{a}_4$, encoding polarization qubits that are stored in quantum memories $M_1, M_2$ and $M_3, M_4$.
It is convenient to denote the basis states for the two qubits as
\begin{align}
\label{Eq:qubitbases}
\ket[A]{h} & = \ket{1_{a_1} 0_{a_2}}, & \ket[B]{h} & = \ket{1_{a_3} 0_{a_4}} \nonumber \\
\ket[A]{v} & = \ket{0_{a_1} 1_{a_2}}, & \ket[B]{v} & = \ket{0_{a_3} 1_{a_4}}
\end{align}
and define the projection operators
\begin{equation}
\hat{\Pi}_{\nu} = \proj[\nu]{h} + \proj[\nu]{v}, \quad \nu=A, B.
\label{eq:projection2ph}
\end{equation}
We retain events when exactly one of the detectors $D_1, D_2$ and one of the detectors $D_3, D_4$ click. If the single-photon sources are ideal, sequences ${\bf k} = (k_1, k_2, k_3, k_4) = (1,0,1,0)$ and ${\bf k} = (0,1,0,1)$ generate a state
\begin{multline}
{\hat{\varrho}^{({\mathbf k})}}_{\text{out}} = {\textstyle\frac{1}{2}} \zeta^2 T^2 \{ (1-T)^2 \proj{\Phi_+} \\
+ {\textstyle\frac{1}{2}} T(1-T) [\hat{\Pi}_{A} \otimes \proj{0_{a_3} 0_{a_4}} \\ + \proj{0_{a_1} 0_{a_2}} \otimes \hat{\Pi}_{B}]
+ T^2 \proj{\text{vac}} \},
\end{multline}
where
\begin{align}
\ket{\Phi_+} & = \frac{1}{\sqrt{2}} (\hat{a}_2\hat{a}_4 + \hat{a}_1\hat{a}_3)\ket{1_{a_1} 1_{a_2} 1_{a_3} 1_{a_4}}\nonumber \\
& = \frac{1}{\sqrt{2}} ( \ket[A]{h}\ket[B]{h} + \ket[A]{v}\ket[B]{v})
\end{align}
is the maximally entangled two-photon state.
The remaining two combinations of clicks, i.e., ${\bf k} = (1,0,0,1)$ and ${\bf k} = (0,1,1,0)$, generate an analogous state with $\ket{\Phi_+}$ replaced by
\begin{equation}
\ket{\Psi_+} = \frac{1}{\sqrt{2}} ( \ket[A]{h}\ket[B]{v} + \ket[A]{v}\ket[B]{h} ).
\end{equation}
Analogously to the one-photon scheme, this state can be converted into $\ket{\Phi_+}$ by a local unitary transformation and we can restrict our attention to only one type of events.
\section{Two-photon entanglement}
\label{TwoPhotonEntanglement}
Let us now characterize entanglement generated using the scheme described in the preceding section. We will be primarily interested in the subspace spanned by the tensor products $\ket[A]{h}\ket[B]{h}$, $\ket[A]{h}\ket[B]{v}$, $\ket[A]{v}\ket[B]{h}$, and $\ket[A]{v}\ket[B]{v}$ of the qubit states defined in Eq.~(\ref{Eq:qubitbases}). The un-normalized two-photon state in this subspace can be written as
\begin{align}
\mbox{${\hat{\varrho}'}$}^{({\bf k})}_{\text{out}} & = ( \hat{\Pi}_{A} \otimes \hat{\Pi}_{B}) \hat{\varrho}^{({\bf k})}_{\text{out}}
( \hat{\Pi}_{A} \otimes \hat{\Pi}_{B}) \nonumber \\
& = \begin{pmatrix} \varrho_{hh} & 0 & 0 & c \\ 0 & \varrho_{hv} & 0 & 0 \\ 0 & 0 & \varrho_{vh} & 0 \\ c^\ast & 0 & 0 & \varrho_{vv}
\end{pmatrix},
\label{eq:rhoprim2ph}
\end{align}
where the only nonzero elements are given explicitly in the limit $\zeta\ll 1$ and $T\ll 1$ by
\begin{subequations}
\begin{align}
{\varrho}_{hh} = {\varrho}_{vv} & = {\textstyle\frac{1}{4}} \zeta^2T^2 (p_1^4+p_0p_1^2p_2+4p_0^2p_2^2+3p_0^2p_1p_3 ) \\
{\varrho}_{hv} = {\varrho}_{vh} & = {\textstyle\frac{1}{4}} \zeta^2T^2 (5p_0p_1^2p_2+3p_0^2p_1p_3 )\\
c & = {\textstyle\frac{1}{4}} \zeta^2T^2 p_1^4
\end{align}
\end{subequations}
The state $\mbox{${\hat{\varrho}'}$}^{({\bf k})}_{\text{out}}$ is entangled if and only if the PPT criterion is violated, $|c|^2 > \varrho_{hv} \varrho_{vh}$, which
expressed in terms of the photon statistics takes the form $p_1^3 > 5 p_0 p_1 p_2 + 3 p_0^2 p_3$. When $p_3=0$, this inequality reduces to
\begin{equation}
\label{Eq:Threshold2Photon}
p_1^2 > 5 p_0 p_2,
\end{equation}
which is weaker than the criterion for the one-photon scheme given in Eq.~(\ref{Eq:EntanglementGenIneq}), but still leaves a gap compared to the classicality condition derived in Eq.~(\ref{Eq:CoherentMixtures}).
\begin{figure*}
\caption{The effective entanglement of formation defined in Eq.~(\protect\ref{Eq:EffEF2photon}) for the two models of photon statistics.}
\label{fig:figure6}
\end{figure*}
Analogously to the one-photon scheme, we quantify the amount of entanglement produced in the qubit sector using the rescaled entanglement of formation:
\begin{equation}
\label{Eq:EffEF2photon}
E = \frac{4}{\zeta^2} \textrm{Tr} (\mbox{${\hat{\varrho}'}$}^{({\bf k})}_{\text{out}}) E_F \left( \frac{\mbox{${\hat{\varrho}'}$}^{({\bf k})}_{\text{out}} }{\textrm{Tr} (\mbox{${\hat{\varrho}'}$}^{({\bf k})}_{\text{out}})}\right).
\end{equation}
In the front multiplicative factor, the numerator $4$ stems from four relevant combinations of detector counts, while the denominator $\zeta^2$ is a consequence of the quadratic scaling of the scheme with the efficiency of heralding detectors, as in the present case two photons need to reach the intermediary site and be detected there.
The results of numerical optimization of the effective entanglement of formation $E$ defined in Eq.~(\ref{Eq:EffEF2photon}) for the two models of photon statistics over the beam-splitter transmission $T$ in the limit $\zeta \ll 1$ are shown in Fig.~\ref{fig:figure6}. For nearly optimal sources the effective entanglement is lower compared to the one-photon scheme. This is qualitatively understood, as in the current case twice as many photons need to be routed correctly to heralding detectors through the linear optics circuit and the qubit prepared at each node is encoded in two orthogonally polarized states of a single photon rather than a pair of zero- and one-photon Fock states. It is worth noting that the region of parameters where entanglement can be generated is larger than in Fig.~\ref{fig:figure4}, consistent with the weaker threshold condition derived in Eq.~(\ref{Eq:Threshold2Photon}).
Two-photon entanglement can be used to test Bell's inequalities with photon-counting detectors, without auxiliary reference fields. We will consider here the standard CHSH inequality \cite{CHSH69} for correlations between photon polarizations when the pairs of modes carrying the qubits are separated on polarizing beam splitters and detected with two detectors at each node. In the present case, when imperfect photon sources are used, care needs to be taken to correctly include multiphoton terms present in the conditional states $\hat{\varrho}^{({\bf k})}_{\text{out}}$.
\begin{figure*}
\caption{The Clauser-Horne-Shimony-Holt combination ${\cal CHSH}$ for the double-emission model (a) and the down-conversion model (b) of the photon statistics.}
\label{fig:figure8}
\end{figure*}
Rotating the polarization basis of the qubit $A$ by an angle $\theta_A$ with the help of a half-wave plate is described by a linear transformation of the annihilation operators,
\begin{align}
\hat{a}_1 & \rightarrow \hat{a}_1 \cos{\theta_A} + \hat{a}_2 \sin{\theta_A} \nonumber \\
\hat{a}_2 & \rightarrow \hat{a}_1 \sin{\theta_A} - \hat{a}_2 \cos{\theta_A}
\end{align}
which induces a certain unitary transformation $\hat{U}_A (\theta_A)$ for the modes $\hat{a}_1$ and $\hat{a}_2$. Similarly, rotation of the polarization basis for the modes $\hat{a}_3$ and $\hat{a}_4$ is described by a unitary $\hat{U}_B (\theta_B)$. The polarization-rotated four-mode state after normalization reads
\begin{equation}
\hat{\varrho}(\theta_A, \theta_B) = \frac{[\hat{U}_A(\theta_A) \otimes \hat{U}_B(\theta_B)]
\hat{\varrho}^{({\bf k})}_{\text{out}} [\hat{U}_A^\dagger(\theta_A) \otimes \hat{U}_B^\dagger (\theta_B)]}{\textrm{Tr} \hat{\varrho}^{({\bf k})}_{\text{out}}}.
\end{equation}
We will assume that the output beams are monitored using binary detectors. Events when neither or both detectors clicked at one node are considered as inconclusive and carry no contribution to polarization correlation functions, but they are included in the overall normalization in order to avoid the detection loophole. The probability of a coincidence between detectors monitoring rotated beams $\hat{a}_1$ and $\hat{a}_3$ is given by
\begin{multline}
P_{13}(\theta_A, \theta_B) \\ = \sum_{k,l=1}^{\infty} \bra{k_{a_1} 0_{a_2} l_{a_3} 0_{a_4}} \hat{\varrho}(\theta_A, \theta_B)
\ket{k_{a_1} 0_{a_2} l_{a_3} 0_{a_4}},
\end{multline}
and analogously for the remaining three combinations of coincidences between the nodes described by probabilities $P_{14}(\theta_A, \theta_B)$,
$P_{23}(\theta_A, \theta_B)$, and $P_{24}(\theta_A, \theta_B)$. The polarization correlation function for given settings of polarizing beam splitters $\theta_A,\theta_B$ is expressed in terms of these probabilities as
\begin{multline}
J(\theta_A,\theta_B) = P_{13}(\theta_A, \theta_B) - P_{14}(\theta_A, \theta_B) \\ - P_{23}(\theta_A, \theta_B) +P_{24}(\theta_A, \theta_B).
\end{multline}
The CHSH combination,
\begin{equation}
\mathcal{CHSH}=J(\theta_A,\theta_B)+J(\theta_A',\theta_B)+J(\theta_A,\theta_B')-J(\theta_A',\theta_B'),
\end{equation}
satisfies for local hidden variable theories the inequality
\begin{equation}
-2 \le \mathcal{CHSH} \le 2.
\end{equation}
In Fig.~\ref{fig:figure8} we depict the CHSH combination for the standard choice of angles $\theta_A=0$, $\theta_A' = -\pi/4$, and $\theta_B'=-\theta_B = 3\pi/8$ in the regime $T\ll 1$ and $\zeta \ll 1$, assuming the double-emission model (a) and the down-conversion model (b) of the photon statistics. In the latter case, up to $m=4$ photons have been taken into account in calculations. We verified that the results do not change noticeably within the resolution of the graphs if numerical optimization over the angles $\theta_A$, $\theta_A'$, $\theta_B$, $\theta_B'$ is performed, with the down-conversion model truncated at $m=3$. The regions where a significant violation of Bell's inequalities is possible are similar to the one-photon case, although the difference between the two models for the photon statistics is now more pronounced. This can be attributed to the deleterious effects of multiphoton terms in the input photon statistics which generate double counts at one node, thus lowering the value of the correlation function $J(\theta_A,\theta_B)$.
\section{Conclusions}
\label{Sec:Conclusions}
We studied the performance of elementary linear-optics schemes for entanglement distribution based on imperfect single-photon sources, linear optics, and heralding detectors. The underlying principle of nonlocal photon subtraction permits preparation of one-photon entanglement, where a single photon is prepared in a delocalized superposition state, and two-photon entanglement, where two photons located at different nodes are entangled in a modal degree of freedom such as polarization. Two models of photon statistics describing imperfect photon sources were considered: the first one assumes occasional double emission, and the second one describes heralded sources based on spontaneous parametric down-conversion, with a relatively long multiphoton ``tail''. Other types of photon sources can be expected to exhibit statistics that lie between these two extreme models.
We analyzed sensitivity to photon source imperfections of entanglement generated between photon-number qubits in the one-photon scheme and polarization qubits in the two-photon scheme. Although nonseparable states are produced for a relatively broad range of parameters, a substantial amount of entanglement is obtained only for inputs close to ideal single photons. This analysis assumed implicitly that a wide range of operations can be implemented locally to process and distill noisy entanglement created in respective qubit subspaces. Distilled maximally entangled states between adjacent nodes could be used as a resource for standard entanglement swapping operations to extend the range of entanglement. In this strategy, the entanglement measure analyzed in this paper can be used as an indicator of overheads resulting from the use of imperfect resources that should be included in the standard analysis of the performance of quantum repeaters \cite{SangSimon07,SimondeRied07}. More generally,
an important task is to develop feasible and efficient methods for extending the range of entanglement in realistic quantum repeater architectures and to analyze the effects of imperfections beyond an elementary link \cite{1404.7183}.
A complementary question is whether the generated bipartite state can be used ``as is'' to test Bell's inequalities. For the one-photon scheme, auxiliary coherent reference beams are needed to implement noncommuting measurements based on photon counting, while in the two-photon case polarization measurements are sufficient. A construction of an elementary link for entanglement distribution with high-fidelity quantum memories should in principle permit a loophole-free violation of Bell's inequalities in a regime when only a lossy optical channel is available between the nodes. If this is the primary objective, one could consider generation of nonmaximally entangled states, which may be more robust for an imperfect read out of quantum memories \cite{EberhardPRA1992}.
We expect that this detailed study on the statistics of sources will be of great practical benefit with an increasing number of experimental systems studied to develop quantum repeater links, as well as from a more fundamental perspective, to perform loophole-free tests of Bell's inequalities.
\end{document}
\begin{document}
\title{Bounds and optimisation of orbital angular momentum bandwidths within parametric down-conversion systems}
\author{F.~M.~Miatto\inst{1}, D.~Giovannini\inst{2}, J.~Romero\inst{2}, S.~Franke-Arnold\inst{2}, S.~M.~Barnett\inst{1}, M.~J.~Padgett\inst{2}}
\institute{SUPA and Department of Physics, University of Strathclyde, Glasgow G4 0NG, Scotland, U.K.\and School of Physics and Astronomy, SUPA, University of Glasgow, Glasgow G12 8QQ, U.K.}
\abstract{
The measurement of high-dimensional entangled states of orbital angular momentum prepared by spontaneous parametric down-conversion can be considered in two separate stages: a generation stage and a detection stage. Given a certain number of generated modes, the number of measured modes is determined by the measurement apparatus. We derive a simple relationship between the generation and detection parameters and the number of measured entangled modes.}
\authorrunning{F. Miatto et al.}
\titlerunning{Bounds of OAM bandwidths within PDC Systems}
\date{Received: date / Revised version: \today}
\maketitle
\section{Introduction}
Entangled states are a distinctive feature of quantum mechanics. Their use can lead to important technological advances in communication, security and, ultimately, computing \cite{Neilsen}. Entanglement in a high-dimensional Hilbert space means a high effective number of entangled modes that can be used to achieve a high shared information \cite{Molina-Terriza:2008}. It is therefore of great importance to choose the proper basis in which to detect entangled modes. The Schmidt basis is that which yields the maximum shared information \cite{BarnettQI}. For most entangled states, however, the elements of the Schmidt basis cannot readily be measured, perhaps because the size of the components in the detection apparatus does not match any possible detection mode \cite{Miatto:2011b}.
It is well-known that light can carry orbital angular momentum (OAM) and that this property is associated with a helical phase front \cite{AllenOAM} (and papers reprinted therein). Optical modes carrying OAM include the Laguerre-Gaussian modes \cite{Siegman} and also the Bessel beams.
Of central interest to us in this paper is the fact that photon pairs produced in spontaneous parametric down-conversion (SPDC) are naturally entangled in their OAM \cite{MairNat01,Sonja:2002,Torres:2003}. One clear manifestation of this entanglement is the existence of an EPR paradox for
the OAM and its conjugate quantum variable, the azimuthal angle \cite{Leach:2010,BarnettPRA90,Pors-Miatto:2011}.
The OAM is conserved in the down-conversion process and hence for a Gaussian ($\ell = 0$) pump, the
OAM of the signal and idler fields are perfectly anticorrelated. There are also correlations
in the radial direction (as quantified, for the Laguerre-Gaussian modes, by a radial index $p$) \cite{Miatto:2011} but these will not concern us in this paper. Our central concern will be the number of entangled lowest order ($p=0$) Laguerre-Gaussian modes generated in a down-conversion experiment. The typical setup that we consider is a type-I or type-II, degenerate SPDC setup. We work in the regime of undepleted pump and we neglect possible anisotropies of the down-converted beams.
We find that, for any given set of generation parameters (pump waist $w_p$, wavelength $\lambda$, crystal length $L$) the detection apparatus can be prepared in a way that maximises the measured number of entangled modes and that two important parameters are $\gamma$, the ratio of the width of the pump beam to the width of the detection modes, and $L_R$, the length of the crystal normalised to the Rayleigh range of the pump beam:
\begin{equation}
\gamma_{s,i} = \frac{w_p}{w_{s,i}} \quad \mathrm{and} \quad L_R=\frac{L}{z_R}\, .
\label{LRdef}
\end{equation}
Here the Rayleigh range is $z_R=\frac{\pi w_p^2}{\lambda}$.
In this paper we assume that the signal and idler modes have the same width so that $w_s = w_i$ and $\gamma_s = \gamma_i = \gamma$.
The precise calculation of $w_{s,i}$ depends upon the details of the detection system. Our analysis can be applied if the back-projected detection mode size, $w_{s,i}$, is approximately $\ell$-independent over the range of OAM of interest, and if the modes with $p\neq0$ couple only weakly with the fundamental mode of the fibre that carries the signal to the coincidence counter.
We investigate the $L_R$ dependence of the OAM bandwidth, while recognising that many experiments operate in a regime where $L_R\ll1$ \cite{MairNat01,Jack:2010,Oemrawsingh2005,DadaNP11,kwiat2005}.
In the short crystal limit and near to collinearity the familiar sinc phase can be dropped \cite{Saleh}.
One can then obtain an analytical form for the down-converted state \cite{Miatto:2011,BenninkPRA10} and its extension to non-Gaussian pump beams \cite{YaoNJP11}. Our aim in this paper is to go beyond these existing analyses and to explore regimes in which the sinc phase matching term becomes significant, which leads to the exact analytical expression \eqref{analytic} and to the characterisation of the detection parameters. We present both an analytical treatment and also a simple geometrical argument for our results.
The second section of the paper specifies the definitions of the various bandwidths which are used. The third section contains the analytical approach to calculate the projection amplitudes. The fourth section contains the geometrical approach to calculate a simple formula that gives the measurement bandwidth. The fifth section contains the interpretation of the results and the conclusions.
\section{Definition of bandwidths}
For a distribution of probabilities, in our case for the OAM of the signal or idler photon in SPDC,
we can define a number of statistical measures. For high-dimensional entanglement we require as many modes as possible
to contribute to the state and, moreover, for these to contribute strongly, that is to have a significant probability.
A simple and convenient measure of this quantity is the Schmidt number \cite{LawPRL04,PorsPRL:2008}:
\begin{align}
K(\{p_i\}):=\frac{1}{\sum_ip_i^2}\, ,
\end{align}
where the probabilities $\{p_i\}$ are, in our case, those for each of the OAM modes.
The measure $K$ gives the effective number of contributing modes and hence the effective dimensionality of the system.
In experiments, it is typical to quote the full-width at half maximum (FWHM) as the measure of the bandwidth so as to include only modes that are well above the noise floor. FWHM should not be confused with $K$. For simple, symmetrical and single-peaked probability distributions, the Schmidt number provides a convenient measure of the bandwidth. The precise relationship between the Schmidt number and the FWHM depends upon the detailed shape of the distribution but typical of our systems is that $K$ exceeds the FWHM, see figure \ref{example}. For a distribution like this we can define an effective
range of modes contributing to the state ranging from $\ell_\mathrm{max}$ to $\ell_\mathrm{min}=-\ell_\mathrm{max}$ such that $K=1+2|\ell_\mathrm{max}|$.
The \emph{generation bandwidth} is the effective number of entangled modes generated in the SPDC process. As it does not depend on the detection apparatus, it is a function only of the crystal length and of the size of the pump beam, combined into the quantity $L_R$, defined in eq.~\eqref{LRdef}. This bandwidth can be thought of as the dimensionality of the entanglement in OAM and can be calculated through the Schmidt decomposition of the SPDC state \cite{Miatto:2011b}. More on the generation bandwidth is detailed in its derivation, in section 4.1.
The \emph{measurement bandwidth} represents the number of modes that a detector will measure in an experiment and depends on both the generated modes and on the overlap of these with the detection modes. In doing so, we need to consider the optics used to image the light onto the detectors and any restriction arising from this, such as a restriction to $p=0$
Laguerre-Gaussian modes. The overlap between the generated modes and the back-projected detection modes needs to be maintained both in the image plane and in the far field plane of the crystal: a setup with high overlap in the image plane may still suffer from low overlap in the far field or vice versa and this would translate into a decreased modal sensitivity. This overlap requirement has a central role in the derivation of eq. \eqref{Lgen}, which is based on the argument that the angular spread of a generated mode cannot exceed the natural spread of the down-conversion cone. In the next sections we will define an image plane bandwidth and a far field bandwidth and, as we shall show, there is a natural way of combining the two. This geometrical result is strongly supported by the more complicated analytic result, which we evaluate numerically for a comparison in figure 3.
\section{Analytical treatment}
A direct calculation of the measurement bandwidth needs to consider the overlap between the SPDC state and a pair of joint detection modes \cite{Torres:2003,Miatto:2011}. This yields a series of complex measurement amplitudes $\{C_\ell\}$ where $\ell$ labels each value of the OAM that was measured. The measured Schmidt number (or the measurement bandwidth) is therefore given by the measure $K$ applied to the set of projection probabilities
\begin{align}
K(\{P_\ell\}),\quad\mathrm{where}\quad P_\ell=|C_\ell|^2.
\label{MB}
\end{align}
We seek to evaluate this quantity for a Gaussian pump laser, taking full account of the sinc phase-matching term. In this way we extend the regime of validity of earlier calculations.
We consider the measurement modes for the signal and idler fields to be a pair of Laguerre-Gaussian modes.
The LG modes are characterised by two integers $\ell$ and $p$ and a real positive number $w$, which represent the OAM quantum number, the radial quantum number and the Gaussian modal width, respectively. For simplicity, we set $p=0$, which limits
our analysis to modes with a single bright ring in the transverse plane. Many of our experiments are designed to detect $p=0$ modes with a higher efficiency, moreover, than higher-order modes. We note however, that modes with non-zero
$p$ are produced in the SPDC process \cite{Miatto:2011} and, indeed, it is these that make it possible
to observe entanglement of three-dimensional vortex knots in SPDC \cite{RomeroPRL11}.
The SPDC wave function $\psi(\mathbf{q}_i,\mathbf{q}_s)$, in momentum space, is written in the following way, where the subscripts $s$ and $i$ refer to signal and idler modes \cite{Torres:2003}:
\begin{align}
\psi(\mathbf{q}_i,\mathbf{q}_s)=Ne^{-\frac{w_p^2}{4}|\mathbf{q}_i+\mathbf{q}_s|^2}\mathrm{sinc}\left(\frac{L}{4k_p}|\mathbf{q}_i-\mathbf{q}_s|^2\right) \, .
\end{align}
Here $\mathbf{q}$ is the transverse component of the momentum vector $\mathbf{k}$, $w_p$ is the pump width, $L$ is the crystal thickness, $k_p$ is the wave vector of the pump. The first term corresponds to the transverse wavevector
components of the pump, while the second term represents the phase-matching imposed on the down-conversion process by
the nonlinear crystal.
We consider each detection mode to be an LG mode with radial quantum number $p=0$. In polar coordinates ($\rho,\varphi$) in momentum space it has the form
\begin{align}
LG_\ell(\rho,\varphi)=\sqrt{\frac{w^2}{2\pi|\ell|!}}\left(\frac{\rho w}{\sqrt2}\right)^{|\ell|}e^{-\frac{\rho^2w^2}{4}}e^{i\ell\varphi}.
\end{align}
The projection amplitude is therefore calculated by evaluating the overlap integral of $\psi$ with two LG modes of opposite OAM (because of angular momentum conservation) \cite{MairNat01,Sonja:2002,Torres:2003}. The result is found to be
\begin{align}
C_\ell^{L_R,\gamma}=\frac{\mathcal{N}}{L_R} \left(\frac{2\gamma^2}{1+2\gamma^2}\right)^{|\ell|}\left[\xi^{|\ell|+1}\Phi_\ell^{L_R,\gamma}-\Phi_{\ell}^{0,\gamma}\right] \, .
\label{analytic}
\end{align}
We note that the first term in brackets corresponds to that obtained previously \cite{Miatto:2011}, specialised to equal signal and idler widths and $p=0$ modes. Here the function $\Phi_{\ell}^{L_R,\gamma}$ is the Lerch transcendent function of order $(1,|\ell|+1)$ and argument $-2\gamma^2\xi$ \cite{watson1995treatise}:
\begin{align}
\Phi_{\ell}^{L_R,\gamma}=\Phi(-2\gamma^2\xi,1,|\ell|+1),\qquad \xi=\frac{i+L_R}{i-2\gamma^2L_R}.
\end{align}
Note that $\xi=1$ for $L_R=0$.
Once $L_R$ and $\gamma$ are specified, the amplitudes $C^{L_R,\gamma}_\ell$ are to be used in eq.~\eqref{MB}, in order to calculate the measurement bandwidth. The dependence of the projection amplitudes on a transcendent function makes further analytical calculation difficult, and a numerical approach has to be employed. However, as the tails of the distribution of projection probabilities have a slow decay and therefore an effect on the width even at high $|\ell|$, the numerical approach is slow, if an accurate result is sought.
\begin{figure}
\caption{Distribution of the projection probabilities $P_\ell$ over the OAM values $\ell$, for $L_R=0.001$ and $\gamma=2$.}
\label{fig:figure1}
\end{figure}
In figure \ref{fig:figure1} we give the probabilities for the angular momentum values $\ell$ for $L_R=0.001$ and $\gamma=2$. In this parameter range
existing analytical expressions provide an excellent approximation \cite{Torres:2003,Miatto:2011}.
\section{Geometrical argument}
In this section we find an upper (and therefore lower) bound for the generated OAM values, and for the measured OAM values. The measurement bandwidth that we calculate from such bounds matches the analytic result of the previous section and therefore allows us to avoid calculating numerically the distribution of projection probabilities.
\begin{figure}
\caption{The relation between $\alpha$ and $\Delta k_z$ sets a natural upper bound to $\alpha$ for near-collinear emission.}
\label{triangle}
\end{figure}
The phase-matching efficiency of the down-conversion process depends upon the axial mismatch $\Delta k_z$ in wave vectors of the pump, signal and idler fields, and it is given by $\mathrm{sinc}^2\left(L\Delta k_z/2\right)$. When optimised for degenerate, near-collinear phase-matching, the signal and idler output is obtained over a narrow range of angles, $\alpha$, for which $L\Delta k_z \lesssim\pi$. With reference to figure \ref{triangle}, for small $\alpha$ (which corresponds to being near to collinearity) we can write
\begin{align}
\Delta k_z\simeq\frac{\alpha^2k_p}{2}.
\end{align}
It follows, therefore, that the allowed values of $\alpha$ are bounded from above:
\begin{align}
\alpha^2\lesssim\frac{2\pi}{k_pL}.
\end{align}
For Laguerre-Gaussian modes, in the paraxial regime, we can define an effective local wavevector associated with the gradient of the phase. The helical form of the wavefronts gives rise to an angular spreading of these such that at a distance $r$ from the mode axis, the angular spread is $\beta\simeq\ell/(kr)$ \cite{PadgettOC95}, which can be interpreted as the local spreading angle from the optical axis.
The natural restriction on $\alpha$ imposed by the phase matching therefore sets a limit $\beta\lesssim\alpha$ on the efficiency of production of the OAM carrying beams, imposing a restriction on the generated OAM bandwidth. Such restriction is a natural consequence of the fact that a generated mode cannot be more divergent than the down-conversion cone. The relation $\beta\lesssim\alpha$, using the definitions and bounds given above for $\beta$ and $\alpha$, can be rewritten as
\begin{align}
\ell\lesssim r\sqrt{\frac{\pi k_p}{2L}} \, ,
\label{Lgen}
\end{align}
where we have made the approximation that $k_{s,i} \approx k_p/2$.
This relation is the starting point to calculate the generation bandwidth and for the analysis in the far field of the image plane of the crystal.
\subsection{Generation bandwidth}
The beam size can be no bigger than that of the pump beam, i.e. $r\lesssim w_p$. Applying this bound to eq.~\eqref{Lgen} we obtain an upper bound for the generated OAM value:
\begin{align}
\ell_{\mathrm{gen}}\lesssim w_p\sqrt{\frac{\pi k_p}{2L}}=\sqrt{\frac{\pi }{L_R}}.
\end{align}
It follows, therefore, that the generation bandwidth is
\begin{align}
K_\mathrm{gen}=1+2\sqrt{\frac{\pi}{L_R}}.
\label{kgendef}
\end{align}
This number represents the effective number of entangled OAM modes generated by the source obtained by removing the $p=0$ restriction (as we are applying such restriction only to the measurement bandwidth). Equivalently, it can be thought of as the bandwidth obtained by removing the restriction on $\gamma$, i.e. if one does detect $p=0$, but with any $\gamma$. This way of thinking about $K_\mathrm{gen}$ can be helpful, as it relates to a measurement scheme. The relation between $K_\mathrm{gen}$ and the total Schmidt number $K$ or its azimuthal part $K_\mathrm{az}$ \cite{VanExter2006} is not straightforward, because $K_\mathrm{gen}$ can be thought of in terms of a measurement with any value of $\gamma$.
\subsection{Image plane bandwidth}
As anticipated in section 2, to calculate the measurement bandwidth we need to consider the overlap of the generated field with the detection modes in the image plane of the crystal and in its far field. Intuitively, a detection system which has a good overlap in the image plane, but that detects light that only comes from a narrow spread of directions would restrict the measured bandwidth. A similar restriction would occur for one that has a good overlap with the typical incoming angles of LG beams, but that has a poor overlap with the intensity in the image plane. It is clear that in order to optimise a detection system, both these quantities have to be taken into account.
To calculate the overlap in the image plane it suffices to note that a $p=0$ Laguerre-Gaussian mode with OAM number $\ell$ and width $w$ has its maximum intensity at a radius
\begin{align}
r=w\sqrt{\frac{\ell}{2}}.
\end{align}
For efficient conversion of pump to signal and idler we require that the pump, signal and idler beams should all overlap, giving a restriction on the maximum size of the down-converted beams ($r_{s,i}\lesssim w_p$) and hence an upper bound to the value of OAM in the plane of the crystal corresponding to
\begin{align}
r_{s,i}=w_{s,i}\sqrt{\frac{\ell}{2}}\lesssim w_p.
\label{NFrestriction}
\end{align}
In terms of $\gamma$, this gives an upper bound of the value of the OAM in the plane of the crystal:
\begin{align}
\ell_\mathrm{ip}\lesssim2\gamma^2
\label{detband}
\end{align}
and hence an image plane bandwidth
\begin{align}
K_\mathrm{ip}=1+4\gamma^2 \, .
\end{align}
\subsection{Far field bandwidth}
It is clear that in the far field of the plane of the crystal, instead of a real space argument, we need to use the angular relationship $\beta\lesssim\alpha$, expressed in \eqref{Lgen}, where we apply the restriction for the maximum width of the detection modes given in \eqref{NFrestriction}:
\begin{align}
\ell\lesssim w_{s,i}\sqrt{\frac{\ell}{2}} \sqrt{\frac{\pi k_p}{2L}}.
\end{align}
From which, replacing $w_{s,i}$ with $w_p/\gamma$, we obtain an upper bound of the value of the OAM in the far field of the plane of the crystal:
\begin{align}
\ell_\mathrm{FF}\lesssim\frac{\pi}{2\gamma^2L_R}
\label{resgen}
\end{align}
and therefore a far field bandwidth
\begin{align}
K_\mathrm{FF}=1+\frac{\pi}{\gamma^2L_R}.
\end{align}
\subsection{Measurement bandwidth}
If $K_{\mathrm{ip}}$ and $K_{\mathrm{FF}}$ are very different from each other, the resulting measurement bandwidth is given by the smaller of the two. For cases where the bandwidths are similar it is sensible to combine them. The convolution of two normal distributions of widths $k$ and $k'$ gives a normal distribution of width $(k^{-2}+k'^{-2})^{-1/2}$. Similarly, we can get an estimate of the total measurement bandwidth by considering the convolution of two normal distributions of widths $K_\mathrm{ip}$ and $K_\mathrm{FF}$. The bandwidth of the resulting distribution is
\begin{align}
K&= \left(K_\mathrm{ip}^{-2}+K_\mathrm{FF}^{-2}\right)^{-1/2}\nonumber\\
&=\left(\left(1+4\gamma^2\right)^{-2}+\left(1+\frac{\pi}{\gamma^2L_R}\right)^{-2}\right)^{-1/2}.
\label{milesmath}
\end{align}
\section{Analysis of the results}
For a comparison between the analytic and geometric arguments, we calculate the width of the distribution given by the modulus squared of the coefficients in \eqref{analytic} and compare it to \eqref{milesmath}. In figure \ref{thegraph} we plot the two bandwidths as functions of $L_R$ for $\gamma=3$, $\gamma=5$ and $\gamma=7$. The solid curves (red online) represent the measurement bandwidth calculated from the numerical evaluation of the analytical model. The dashed curves (green online) are the same bandwidths calculated with our geometrical argument. The uppermost solid line (blue online) is the generation bandwidth. Note that to achieve high dimensional entanglement the crystal length should be a small fraction of the Rayleigh range.
\begin{figure}
\caption{Measurement and generation bandwidths as functions of $L_R$, for $\gamma=3$, $\gamma=5$ and $\gamma=7$.}
\label{thegraph}
\end{figure}
We see that the geometrical argument is in excellent agreement with the numerical evaluation of our analytical result. The effect of increasing $\gamma$ yields a higher measurement bandwidth for very small values of $L_R$, but for large enough values of $\gamma$ and for fixed $L_R$, the measurement bandwidth eventually drops. Therefore it reaches a maximum value for a particular crystal length. Under all conditions the measurement bandwidth never reaches that of the generation bandwidth, because we are restricting the measurement to modes with $p=0$. Note, however, that the full generation bandwidth does
not arise explicitly from additional values of the OAM but rather from entanglement in the radial quantum number $p$.
Differentiation of eq.~\eqref{milesmath} with respect to $\gamma$ gives an estimate of the value of $\gamma$ corresponding to the highest measurement bandwidth for a given $L_R$. In this way we find
\begin{align}
\gamma_{\mathrm{opt}}\approx\sqrt[4] {\frac{\pi}{4L_R}}.
\label{gammaopt}
\end{align}
It is worth noting that for such value of $\gamma$ we have that $K_\mathrm{ip}=K_\mathrm{FF}=K_\mathrm{gen}$, where $K_\mathrm{gen}$ is defined in \eqref{kgendef}. Therefore in the optimal case we have $K=K_\mathrm{gen}/\sqrt{2}$.
We define short crystal lengths as $L_R\ll\pi/(4\gamma^4)$, for which the generation bandwidth is large, meaning that the measurement bandwidth is dominated by the image plane overlap of the detection modes with the pump. This gives a measurement bandwidth of
\begin{align}
K\approx K_{\mathrm{ip}}=1+4\gamma^2.
\end{align}
Note that this short crystal limit is characterised by an independence of $K$ on the crystal length. In fact, it can be seen in figure \ref{thegraph} that the leftmost part of the measurement bandwidth curves is flat (for the $\gamma=7$ curve this is not visible in this plot, but the slope of eq.~\eqref{milesmath} near the origin is zero for any $\gamma$), and that the range of values of $L_R$ over which they stay flat is inversely proportional to $\gamma^4$.
For much longer crystals, $L_R\gg\pi/(4\gamma^4)$, the measurement bandwidth, as modified by the limiting overlap in the far field, becomes dominant, giving
\begin{align}
K\approx K_{\mathrm{FF}}=1+\frac{\pi}{L_R \gamma^2}.
\end{align}
In figure \ref{kgamma} we plot three different curves, that describe the value of the measurement bandwidth as a function of $\gamma$, for three different values of $L_R$. Note that for each choice of $L_R$ there is always an optimal value of $\gamma$ which maximises $K$, and it corresponds to the optimal value given in \eqref{gammaopt}.
\begin{figure}
\caption{Measurement bandwidth as a function of $\gamma$, for three different values of $L_R$.}
\label{kgamma}
\end{figure}
It is not an easy matter to determine the requisite parameters for existing experiments. Most of our own experiments, however,
correspond to values of $\gamma$ in the range $1.5$ up to about $4$. In order to achieve higher degrees of entanglement in
OAM, corresponding to larger Schmidt number, our analyses suggest that it would be desirable to press towards higher values of $\gamma$.
\section{Conclusions}
We have shown that two parameters determine the OAM bandwidth for entangled states produced by parametric down-conversion. These parameters are the ratio of the widths of pump and detection modes $\gamma=w_p/w_{s,i}$, and the crystal thickness normalised to the Rayleigh range of the pump $L_R=L/z_r$.
A simple geometrical argument approximates the analytical results extremely well and allows us to suggest what needs
to be adjusted in order to enhance the dimensionality of the entanglement. We have restricted our analysis to a detection system that is sensitive to the LG $p=0$ modes only. It is for this reason that the measurement bandwidth can never reach that of the generation bandwidth for any combination of parameters. It is possible, however, to identify an optimum value of $\gamma$ to maximise the measurement bandwidth for any normalised crystal length $L_R$.
\end{document} |
\begin{document}
\keywords{selection principle, Helly's theorem, metric space, regulated function,
pointwise convergence, weak convergence, approximate variation.}
\mathclass{Primary 26A45, 40A30; Secondary 54E35, 26A48.}
\thanks{The article was prepared within the framework of the Academic Fund Program
at the National Research University Higher School of Economics (HSE) in 2017--2018
(grant~no.\,17-01-0050) and by the Russian Academic Excellence Project ``5--100''.}
\abbrevauthors{V.\,V.~Chistyakov}
\abbrevtitle{Approximate variation}
\title{The approximate variation to pointwise selection principles}
\author{Vyacheslav V.~Chistyakov}
\address{Department of Informatics, Mathematics and Computer Science\\
National Research University Higher School of Economics\\
Bol'shaya Pech{\"e}rskaya Street 25/12\\
Nizhny Novgorod 603155, Russian Federation\\
E-mail: vchistyakov@hse.ru, czeslaw@mail.ru}
\maketitledis
\tableofcontents
\begin{abstract}
Let $T\subset\Rb$, $M$ be a metric space with metric $d$, and $M^T$ be the set of all
functions mapping $T$ into $M$. Given $f\in M^T$, we study the properties of the
\emph{approximate variation\/} $\{V_\vep(f)\}_{\vep>0}$, where $V_\vep(f)$ is the
greatest lower bound of Jordan variations $V(g)$ of functions $g\in M^T$ such that
$d(f(t),g(t))\le\vep$ for all $t\in T$. The notion of $\vep$-variation $V_\vep(f)$ was
introduced by Fra{\v n}kov{\'a} [Math.\ Bohem.\ 116 (1991), 20--59] for intervals
$T=[a,b]$ in $\Rb$ and $M=\Rb^N$ and extended to the general case by Chistyakov
and Chistyakova [Studia Math.\ 238 (2017), 37--57]. We prove directly the following basic
pointwise selection principle: {\it If a sequence of functions $\{f_j\}_{j=1}^\infty$ from
$M^T$ is such that the closure in $M$ of the set $\{f_j(t):j\in\Nb\}$ is compact for all
$t\in T$ and\/ $\limsup_{j\to\infty}V_\vep(f_j)$ is finite for all\/ $\vep>0$, then it contains
a subsequence, which converges pointwise on $T$ to a bounded regulated function
$f\in M^T$.} We establish several variants of this result for sequences of regulated and
nonregulated functions, for functions with values in reflexive separable Banach spaces,
for the almost everywhere convergence and weak pointwise convergence of extracted
subsequences, and comment on the necessity of assumptions in the selection principles.
The sharpness of all assertions is illustrated by examples.
\end{abstract}
\makeabstract
\chapter{Introduction} \label{s:intro}
A pointwise selection principle is a statement which asserts that under certain specified
assumptions on a given sequence of functions $f_j:T\to M$ ($j\in\Nb$), their domain $T$
and range $M$, the sequence admits a subsequence converging in (the topology of)
$M$ pointwise (=everywhere) on the set $T$; in other words, this is a compactness
theorem in the topology of pointwise convergence. Our intention here is twofold: first,
to draw attention to a conjunction of pointwise selection principles and characterizations of
regulated functions (cf.\ also \cite{waterman80}) and, second, to exhibit the main goal
of this paper.
To be specific, we let $T=I=[a,b]$ be a closed interval in $\Rb$ and $M=\Rb$ and
denote by: $\Rb^I$ the set of \emph{all\/} functions mapping $I$ into $\Rb$, $\Mon(I)$
the set of \emph{monotone\/} functions, $\BV(I)$ the set of functions of
\emph{bounded\/} (Jordan) \emph{variation}, and $\Reg(I)$ the set of
\emph{regulated\/} functions from $\Rb^I$. Recall that $f\in\Rb^I$ is regulated
provided the left limit $f(t-0)\in\Rb$ exists at each point $a<t\le b$ and the right limit
$f(t+0)\in\Rb$ exists at each point $a\le t<b$. Clearly,
$\Mon(I)\subset\BV(I)\subset\Reg(I)$, and it is well known that each function from
$\Reg(I)$ is bounded, has a finite or countable set of discontinuity points, and is the
uniform limit of a sequence of step functions on~$I$. Scalar- (and vector-) valued
regulated functions are of importance in various branches of analysis, e.g., the theory
of convergence of Fourier series, stochastic processes, Riemann- and Lebesgue-Stieltjes
integrals, generalized ordinary differential equations, impulse controls, modular analysis
(\cite{Aumann}, \cite{MMS}, \cite{Dieu}, \cite{GNW}, \cite{Hild}, \cite{Loja}, \cite{Nat},
\cite{Rao}, \cite{Rudin}, \cite{Saks}, \cite{Schwabik}, \cite{Schwartz}).
In order for a sequence of functions $\{f_j\}\subset\Rb^I$ to have a pointwise convergent
subsequence, it is quite natural, by virtue of Bolzano-Weierstrass' theorem (viz., a bounded
sequence in $\Rb$ admits a convergent subsequence), that $\{f_j\}$ should be
\emph{pointwise bounded\/} (i.e., $\sup_{j\in\Nb}|f_j(t)|<\infty$ for all $t\in I$).
However, a pointwise (or even uniformly) bounded sequence $\{f_j\}\subset\Rb^I$
need not have a pointwise convergent subsequence: a traditional example is the sequence
$f_j(t)=\sin(jt)$ for $j\in\Nb$ and $t\in I=[0,2\pi]$ (see Remark~\ref{r:sinjt} below).
So, additional assumptions on $\{f_j\}$ are to be imposed.
The historically first pointwise selection principles are due to Helly \cite{Helly}:
\emph{a uniformly bounded sequence $\{f_j\}\subset\Mon(I)$ contains a pointwise
convergent subsequence\/} (whose pointwise limit belongs to $\Mon(I)$). This theorem,
a selection principle for monotone functions, is based on and extends
Bolzano-Weierstrass' theorem and implies one more Helly's selection principle for functions of
bounded variation (in \eq{e:bVfj} below, $V(f)$ denotes the Jordan variation of
$f\in\Rb^I$): \emph{a pointwise bounded sequence $\{f_j\}\subset\Rb^I$ satisfying
\begin{equation} \label{e:bVfj}
\sup_{j\in\Nb}V(f_j)<\infty
\end{equation}
contains a pointwise convergent subsequence\/} (with the pointwise limit from
$\BV(I)$). Note that condition \eq{e:bVfj} of uniform boundedness of variations may be
replaced by a (seemingly) more general condition $\limsup_{j\to\infty}V(f_j)<\infty$.
It is well known that Helly's selection principles play a significant role in analysis (e.g.,
\cite{Hild}, \cite{Nat}, \cite{Saks}).
A vast literature already exists concerning generalizations of
Helly's principles for various classes of functions (\cite{Barbu}--\cite{JMS},
\cite{JDCS}--\cite{MatTr}, \cite{MMS}, \cite{Studia17}--\cite{waterman80},
\cite{Dudley}--\cite{Gnilka}, \cite{Megre}, \cite{MuOr}, \cite{Schramm},
\cite{IzVUZ}--\cite{Wat76}, and references therein) and their applications
(\cite{Barbu}, \cite{Sovae}, \cite{MMS}, \cite{JMAA2019}, \cite{JFA05}--\cite{Studia02},
\cite{Dudley}, \cite{GNW}, \cite{Hermes}, \cite{Schwabik}).
We recall some of these generalizations, which are relevant for our purposes.
Let $\varphi:[0,\infty)\to[0,\infty)$ be a nondecreasing continuous function such that
$\varphi(0)=0$, $\varphi(u)>0$ for $u>0$, and $\varphi(u)\to\infty$ as $u\to\infty$.
We say that $f\in\Rb^I$ is \emph{of bounded $\varphi$-variation\/} on~$I$ (in the sense
of Wiener and Young) and write $f\in\BV_{\!\varphi}(I)$ if the following quantity, called the
\emph{$\varphi$-variation\/} of $f$, is finite:
\begin{equation*}
V_\varphi(f)=\sup\,\biggl\{\sum_{i=1}^n\varphi\bigl(|f(I_i)|\bigr):
\mbox{$n\in\Nb$ and $\{I_i\}_1^n\prec I$}\biggr\},
\end{equation*}
where the notation $\{I_i\}_1^n\prec I$ stands for a non-ordered collection of $n$
non-overlapping intervals $I_i=[a_i,b_i]\subset I$ and $|f(I_i)|=|f(b_i)-f(a_i)|$,
$i=1,\dots,n$. \label{p:nco}
(In particular, if $\varphi(u)\!=\!u$, we have $V_\varphi(f)=V(f)$.)
It was shown by Musielak and Orlicz \cite{MuOr} that $\BV_{\!\varphi}(I)\subset\Reg(I)$,
and if $\varphi$ is additionally convex and $\varphi'(0)\equiv\lim_{u\to+0}\varphi(u)/u=0$,
then $\BV(I)$ is a proper subset of $\BV_{\!\varphi}(I)$. Goff\-man, Moran and
Waterman \cite{GMW} characterized the set $\Reg(I)$ as follows: if $f\!\in\!\Reg(I)$ and
\mbox{$\min\{f(t-0),f(t+0)\}\!\le\! f(t)\!\le\!\max\{f(t-0),f(t+0)\}$} at each point
$t\in I$ of discontinuity of $f$, then there is a convex function $\varphi$ (as above) with
$\varphi'(0)=0$ such that $f\in\BV_{\!\varphi}(I)$. A generalization of Helly's theorem
for BV functions, the so called \emph{Helly-type selection principle}, was established in
\cite{MuOr}, where condition \eq{e:bVfj} was replaced by
$\sup_{j\in\Nb}V_\varphi(f_j)<\infty$.
One more Helly-type selection principle is due to Waterman \cite{Wat76}, who replaced
condition \eq{e:bVfj} by $\sup_{j\in\Nb}V_\Lambda(f_j)<\infty$, where
$V_\Lambda(f)$ is the Waterman \emph{$\Lambda$-variation\/} of $f\in\Rb^I$
defined by (\cite{Wat72})
\begin{equation*}
V_\Lambda(f)=\sup\,\biggl\{\sum_{i=1}^n\frac{|f(I_i)|}{\lambda_i}:
\mbox{$n\in\Nb$ and $\{I_i\}_1^n\prec I$}\biggr\};
\end{equation*}
here $\Lambda=\{\lambda_i\}_{i=1}^\infty$ is a \emph{Waterman sequence}, i.e.,
$\Lambda\subset(0,\infty)$ is nondecreasing, unbounded and
$\sum_{i=1}^\infty1/\lambda_i=\infty$. (Formally, $V_\Lambda(f)=V(f)$ for
$\lambda_i=1$,~$i\in\Nb$.) For the set $\Lambda\BV(I)=\{f\in\Rb^I:
V_\Lambda(f)<\infty\}$ of functions \emph{of $\Lambda$-bounded variation},
Waterman \cite{Wat72} showed that $\Lambda\BV(I)\subset\Reg(I)$ and $\BV(I)$ is a
proper subset of $\Lambda\BV(I)$. Perlman \cite{Perl} proved that
$\BV(I)=\bigcap_\Lambda\Lambda\BV(I)$ and obtained the following characterization of
regulated functions: $\Reg(I)=\bigcup_\Lambda\Lambda\BV(I)$, where the intersection
above and the union are taken over all Waterman sequences~$\Lambda$
(but not over any countable collection).
Taking into account that the sets $\Mon(I)$, $\BV(I)$, $\BV_{\!\varphi}(I)$, and
$\Lambda\BV(I)$ are contained in $\Reg(I)$, Helly's selection principles and
their generalizations alluded to above are compactness theorems in the class of
regulated functions.
In the literature, there are characterizations of the set $\Reg(I)$, which do not rely on
notions of bounded (or generalized bounded) variations of any kind. One of them was
given by Chanturiya (\cite{Chan74}, \cite{Chan75}) in the form
$\Reg(I)=\{f\in\Rb^I:\nu_n(f)=o(n)\}$, where E.~Landau's small `$o$' means, as usual,
that $o(n)/n\to0$ as $n\to\infty$, and the sequence $\{\nu_n(f)\}_{n=1}^\infty
\subset[0,\infty]$, called the \emph{modulus of variation\/} of $f$, is defined by
(\cite{Chan74}, cf.\ also \cite[Section 11.3.7]{GNW})
\begin{equation*}
\nu_n(f)=\sup\,\biggl\{\sum_{i=1}^n|f(I_i)|:\{I_i\}_1^n\prec I\biggr\},\quad n\in\Nb.
\end{equation*}
Note that $\nu_n(f)\le V(f)$ for all $n\in\Nb$ and $\nu_n(f)\to V(f)$ as $n\to\infty$.
The author (\cite{JMAA05}, \cite{Ischia}) replaced condition \eq{e:bVfj} by
(a very weak one)
\begin{equation} \label{e:nunfj}
\limsup_{j\to\infty}\nu_n(f_j)=o(n)
\end{equation}
and obtained a Helly-type pointwise selection principle (in which the pointwise limit of the
extracted subsequence of $\{f_j\}$ belongs to $\Reg(I)$ and) which contains, as
particular cases, all the above Helly-type selection principles and many others
(\cite{DAN06}, \cite{MatTr}, \cite{Studia17}). Assumption \eq{e:nunfj} is applicable
to sequences of nonregulated functions, so the corresponding Helly-type pointwise
selection principle under \eq{e:nunfj} is already outside the scope of regulated functions.
To see this, let $\mathcal{D}\in\Rb^I$ be the Dirichlet function on $I=[0,1]$ (i.e., $\mathcal{D}(t)=1$ if
$t\in I$ is rational, and $\mathcal{D}(t)=0$ otherwise) and $f_j(t)=\mathcal{D}(t)/j$ for $j\in\Nb$ and
$t\in I$. We have $f_j\notin\Reg(I)$ and $\nu_n(f_j)=n/j$ for all $j,n\in\Nb$, and so,
\eq{e:nunfj} is satisfied while \eq{e:bVfj} is not (for any kinds of generalized variations
including $V_\vfi$ and $V_\Lambda$).
A special feature of condition \eq{e:nunfj} is that, for $f\in\Reg(I)$, it is
\emph{necessary\/} for the uniform convergence of $\{f_j\}$ to $f$, and
`almost necessary' for the pointwise convergence of $\{f_j\}$ to~$f$---note that this is
not at all the case for (uniform) conditions of the form~\eq{e:bVfj}.
Dudley and Norvai{\v s}a \cite[Part~III, Section~2]{Dudley} presented the following
characterization of regulated functions:
\mbox{$\Reg(I)=\{f\in\Rb^I:N_\vep(f)\!<\!\infty\,\,\forall\,\vep\!>\!0\}$},
where the (untitled) quantity $N_\vep(f)\in\{0\}\cup\Nb\cup\{\infty\}$ for $f\in\Rb^I$
is given by
\begin{equation*}
N_\vep(f)=\sup\,\Bigl\{n\in\Nb:\mbox{$\exists\,\{I_i\}_1^n\prec I$ such that
$\displaystyle\min_{1\le i\le n}|f(I_i)|>\vep$}\Bigr\},\quad\vep>0
\end{equation*}
(with $\sup\varnothing=0$). They established a Helly-type pointwise selection principle
in the class $\Reg(I)$ by replacing \eq{e:bVfj} with $\sup_{j\in\Nb}N_\vep(f_j)<\infty$
for all \mbox{$\vep>0$}. In a series of papers by the author, Maniscalco and Tretyachenko
(\cite[Chap\-ter~5]{MMS}, \cite{waterman80}, \cite{IzVUZ}, \cite{MZ08}), it was shown
that we get a more powerful selection principle (outside the scope of regulated functions)
if \eq{e:bVfj} is replaced by
\begin{equation} \label{e:Nefj}
\limsup_{j\to\infty}N_\vep(f_j)<\infty\quad\,\,\mbox{for \,all}\quad\,\,\vep>0.
\end{equation}
If we let the sequence of nonregulated functions $f_j(t)=\mathcal{D}(t)/j$ be as above, we find
$N_\vep(f_j)=\infty$ if $j<1/\vep$ and $N_\vep(f_j)=0$ if $j\ge1/\vep$, and so, condition
\eq{e:Nefj} is satisfied. Moreover, \eq{e:Nefj} is \emph{necessary\/} for the uniform
convergence and `almost necessary' for the pointwise convergence of $\{f_j\}$ to
$f\in\Reg(I)$. A comparison of different Helly-type pointwise selection principles is presented
in \cite{JMAA05}--\cite{MatTr}, \cite{MMS}, \cite{Studia17}, \cite{Manisc}.
Essential for the present paper, one more characterization of regulated functions is due to
Fra{\v n}kov{\'a} \cite{Fr}:
\mbox{$\Reg(I)=\{f\in\Rb^I:V_\vep(f)<\infty\,\,\forall\,\vep>0\}$}, where the
\emph{$\vep$-variation\/} $V_\vep(f)$ of $f\in\Rb^I$ is defined by
(\cite[Definition~3.2]{Fr})
\begin{equation*}
V_\vep(f)=\inf\,\bigl\{V(g):\mbox{$g\in\BV(I)$ and $|f(t)-g(t)|\le\vep$
$\forall\,t\in I$}\bigr\},\quad\vep>0
\end{equation*}
(with $\inf\varnothing=\infty$). She established a Helly-type selection principle in the class
$\Reg(I)$ under the assumption of uniform boundedness of $\vep$-variations
$\sup_{j\in\Nb}V_\vep(f_j)\!<\!\infty$ for all $\vep\!>\!0$
in place of \eq{e:bVfj}. However, following the `philosophy' of \eq{e:nunfj} and
\eq{e:Nefj}, a weaker condition, replacing \eq{e:bVfj}, is of the~form
\begin{equation} \label{e:Vee}
\limsup_{j\to\infty}V_\vep(f_j)<\infty\quad\,\,\mbox{for \,all}\quad\,\,\vep>0.
\end{equation}
Making use of \eq{e:Vee}, the author and Chistyakova \cite{Studia17} proved a
Helly-type pointwise selection principle outside the scope of regulated functions by
showing that \eq{e:Vee} implies \eq{e:nunfj}. If the sequence $f_j(t)=\displaystylec(t)/j$ is as
above, we get $V_\vep(f_j)=\infty$ if $j<1/(2\vep)$ and $V_\vep(f_j)=0$ if
$j\ge1/(2\vep)$, and so, \eq{e:Vee} is fulfilled while the uniform $\vep$-variations
are unbounded for $0<\vep<1/2$.
In this paper, we present a direct proof of a Helly-type pointwise selection principle
under \eq{e:Vee}, not relying on \eq{e:nunfj}, and show that condition \eq{e:Vee} is
necessary for the uniform convergence and `almost necessary' for the pointwise
convergence of $\{f_j\}$ to $f\in\Reg(I)$ (cf. Remark~\ref{r:neces} below).
All the above pointwise selection principles are based on the Helly selection theorem for
monotone functions. A different kind of a pointwise selection principle, basing on
Ramsey's theorem from formal logic \cite{Ramsey}, was given by Schrader~
\cite{Schrader}. In order to recall it, we introduce a notation: given a sign-changing
function $f\in\Rb^I$, we denote by $\mathcal{P}(f)$ the set of all finite collections
of points $\{t_i\}_{i=1}^n\subset I$ with $n\in\Nb$ such that $t_1<t_2<\dots<t_n$
and either $(-1)^if(t_i)>0$ for all $i=1,\dots,n$, or $(-1)^if(t_i)<0$ for all $i=1,\dots,n$,
or $(-1)^if(t_i)=0$ for all $i=1,\dots,n$. The quantity
\begin{equation*}
\mathcal{T}(f)=\sup\,\biggl\{\sum_{i=1}^n|f(t_i)|:\mbox{$n\in\Nb$ and
$\{t_i\}_{i=1}^n\in\mathcal{P}(f)$}\biggr\}
\end{equation*}
is said to be Schrader's \emph{oscillation\/} of $f$ on $I$; if $f$ is nonnegative on $I$
or $f$ is nonpositive on $I$, we set $\mathcal{T}(f)=\sup_{t\in I}|f(t)|$. Schrader
proved that \emph{if $\{f_j\}\subset\Rb^I$ is such that
$\sup_{j,k\in\Nb}\mathcal{T}(f_j-f_k)<\infty$, then $\{f_j\}$ contains a subsequence,
which converges everywhere on~$I$.} This is an \emph{irregular\/} pointwise
selection principle in the sense that, although the sequence $\{f_j\}$ satisfying
Schrader's condition is pointwise bounded on $I$, we cannot infer any `regularity'
properties of the (pointwise) limit function (e.g., it may be applied to the sequence
$f_j(t)=(-1)^j\mathcal{D}(t)$ for $j\in\Nb$ and $t\in[0,1]$). Maniscalco \cite{Manisc} proved
that Schrader's assumption and condition \eq{e:nunfj} are independent (in the sense that
they produce different pointwise selection principles). Extensions of Schrader's result
are presented in \cite{JMAA08}, \cite{waterman80}, \cite{JMAA13}, \cite{Piazza}.
One of the goals of this paper is to obtain irregular pointwise selection principles in
terms of Fra{\v n}kov{\'a}'s $\vep$-variations $V_\vep(f)$ (Section~\ref{ss:irreg}).
This paper is a thorough self-contained study of the \emph{approximate variation},
i.e., the family $\{V_\vep(f)\}_{\vep>0}$ for functions $f:T\to M$ mapping a nonempty
subset $T$ of $\Rb$ into a metric space $(M,d)$. We develop a number of pointwise
(and almost everywhere) selection principles, including irregular ones, for sequences of
functions with values in metric spaces, normed spaces and reflexive separable Banach
spaces. All assertions and their sharpness are illustrated by concrete examples.
The plan of the exposition can be clearly seen from the Contents. Finally, it is to be noted
that, besides powerful selection principles, based on $\vep$-variations, the notion of
approximate variation gives a nice and highly nontrivial example of a \emph{metric
modular\/} in the sense of the author (\cite{DAN06-2}, \cite{NA05}, \cite{MMS}), or
a classical \emph{modular} in the sense of Musielak-Orlicz (\cite{Mus}, \cite{MuOr-2})
if $(M,\|\cdot\|)$ is a normed linear space. Results corresponding to the modular aspects
of the approximate variation will be published elsewhere.
\chapter{The approximate variation and its properties} \label{s:appv}
\section{Notation and terminology}
We begin by introducing notations and the terminology which will be used throughout
this paper.
Let $T$ be a nonempty set (in the sequel, $T\subset\Rb$), $(M,d)$ be a metric space
with metric $d$, and $M^T$ be the set of all functions $f:T\to M$ mapping $T$
into~$M$. The set $M^T$ is equipped with the (extended-valued)
\emph{uniform metric}
\begin{equation*}
d_{\infty,T}(f,g)=\sup_{t\in T}\,d(f(t),g(t)),\quad\,\,f,g\in M^T.
\end{equation*}
The letter $c$ stands, as a rule, for a \emph{constant\/} function $c\in M^T$
(sometimes identified with $c\in M$).
The \emph{oscillation\/} of a function $f\in M^T$ on the set $T$ is the quantity%
\footnote{The notation for the oscillation $|f(T)|$ should not be confused with the
notation for the increment $|f(I_i)|=|f(b_i)-f(a_i)|$ from p.~\pageref{p:nco}, the latter
being used only in the Introduction.}
\begin{equation*}
|f(T)|\equiv|f(T)|_d=\sup_{s,t\in T}d(f(s),f(t))\in[0,\infty],
\end{equation*}
also known as the \emph{diameter of the image\/} $f(T)=\{f(t):t\in T\}\subset M$.
We denote by $\Bd(T;M)=\{f\in M^T:|f(T)|<\infty\}$ the set of all
\emph{bounded functions\/} from $T$ into~$M$.
Given $f,g\in M^T$ and $s,t\in T$, by the triangle inequality for $d$, we find
\begin{equation} \label{e:1_1}
d_{\infty,T}(f,g)\le|f(T)|+d(f(t),g(t))+|g(T)|
\end{equation}
and
\begin{equation} \label{e:10}
d(f(s),f(t))\le d(g(s),g(t))+2d_{\infty,T}(f,g);
\end{equation}
the definition of the oscillation and inequality \eq{e:10} imply
\begin{equation} \label{e:1s2}
|f(T)|\le|g(T)|+2d_{\infty,T}(f,g).
\end{equation}
Clearly (by \eq{e:1_1} and \eq{e:1s2}), $d_{\infty,T}(f,g)<\infty$ for all
$f,g\in\Bd(T;M)$ and, for any \emph{constant\/} function $c\in M^T$,
$\Bd(T;M)=\{f\in M^T:d_{\infty,T}(f,c)<\infty\}$.
For a sequence of functions $\{f_j\}\equiv\{f_j\}_{j=1}^\infty\subset M^T$ and
$f\in M^T$, we write:
(a) $f_j\to f$ on $T$ to denote the \emph{pointwise\/} (=\,\emph{everywhere\/})
\emph{convergence\/} of $\{f_j\}$ to $f$ (that is, $\displaystyle\lim_{j\to\infty}d(f_j(t),f(t))=0$
for all $t\in T$);
(b) $f_j\rightrightarrows f$ on $T$ to denote the \emph{uniform convergence\/} of $\{f_j\}$ to $f$:
$\displaystyle\lim_{j\to\infty}d_{\infty,T}(f_j,f)=0$.
(Clearly, (b) implies (a), but not vice versa.)
Recall that a sequence of functions $\{f_j\}\subset M^T$ is said to be \emph{\pw\ \rc\/}
on $T$ provided the closure in $M$ of the set $\{f_j(t):j\in\Nb\}$
is compact for all $t\in T$.
From now on, we suppose that $T$ is a (nonempty) subset of the reals~$\Rb$.
The (Jordan) \emph{variation\/} of $f\in M^T$ is the quantity (e.g.,
\cite[Chapter~4, Section~9]{Schwartz})
\begin{equation*}
V(f,T)=\sup_P\sum_{i=1}^md(f(t_i),f(t_{i-1}))\in[0,\infty],
\end{equation*}
where the supremum is taken over all partitions $P$ of $T$, i.e., $m\in\Nb$ and
$P=\{t_i\}_{i=0}^m\subset T$ such that $t_{i-1}\le t_i$ for all $i=1,2,\dots,m$.
We denote by $\BV(T;M)=\{f\in M^T:V(f,T)<\infty\}$ the set of all
\emph{functions of bounded variation\/} from $T$ into~$M$.
The following four basic properties of the functional $V$ are well-known.
Given $f\in M^T$, we have:
\begin{itemize} \label{p:V}
\item[(V.1)] $|f(T)|\le V(f,T)$ (and so, $\BV(T;M)\subset\Bd(T;M)$);
\item[(V.2)] $V(f,T)=V(f,T\cap(-\infty,t])+V(f,T\cap[t,\infty))$ for all $t\in T$
(\emph{additivity\/} of $V$ in the second variable, cf.~
\cite{Var}, \cite{JDCS}, \cite{Schwartz});
\item[(V.3)] if $\{f_j\}\subset M^T$ and $f_j\to f$ on $T$, then
$V(f,T)\le\liminf_{j\to\infty}V(f_j,T)$ (sequential \emph{lower semicontinuity\/} of
$V$ in the first variable, cf.~\cite{JDCS}, \cite{MatSb});
\item[(V.4)] a \pw\ \rc\ sequence of functions $\{f_j\}\subset M^T$ satisfying condition
$\sup_{j\in\Nb}V(f_j,T)<\infty$ contains a subsequence, which converges \pw\
on $T$ to a function $f\in\BV(T;M)$ (Helly-type \emph{pointwise
selection principle}, cf.~\cite{JMAA}, \cite{Sovae}).
\end{itemize}
In what follows, the letter $I$ denotes a closed interval $I=[a,b]$ with the endpoints
$a,b\in\Rb$, $a<b$.
Now, we recall the notion of a regulated function (introduced in \cite{Aumann} for real
valued functions). We say (\cite{JMAA05}) that a function $f\in M^I$ is \emph{regulated}
\label{p:reg} (or \emph{proper}, or \emph{simple\/}) and write $f\in\Reg(I;M)$ if it
satisfies the Cauchy condition at every point of $I=[a,b]$, i.e., $d(f(s),f(t))\to0$ as
$I\ni s,t\to\tau-0$ for each $a<\tau\le b$, and $d(f(s),f(t))\to0$ as
$I\ni s,t\to\tau'+0$ for each $a\le\tau'<b$. It is well-known (e.g.,
\cite{Var}, \cite{JMAA05}, \cite{Schwartz}) that
\begin{equation*}
\BV(I;M)\subset\Reg(I;M)\subset\Bd(I;M),
\end{equation*}
the set $\Reg(I;M)$ of all regulated functions is closed with respect to the uniform
convergence, and the pair $(\Reg(I;M),d_{\infty,I})$ is a complete metric space provided
$(M,d)$ is complete (see also \cite[Theorem~2]{Studia17} for some generalization).
Furthermore, if $(M,d)$ is complete, then, by Cauchy's criterion, we have:
$f\in\Reg(I;M)$ if and only if the left limit $f(\tau-0)\in M$ exists at each
point $a<\tau\le b$ (meaning that $d(f(t),f(\tau-0))\to0$ as $I\ni t\to\tau-0$),
and the right limit $f(\tau'+0)\in M$ exists at each point $a\le\tau'<b$
(i.e., $d(f(t),f(\tau'+0))\to0$ as $I\ni t\to\tau'+0$).
Regulated functions can be uniformly approximated by step functions (see \eq{e:stR}) as
follows. Recall that $f\in M^I$ is said to be a \emph{step function\/} (in symbols,
$f\in\mbox{\rm St}(I;M)$) provided, for some $m\in\Nb$, there exists a partition
$a=t_0<t_1<t_2<\dots<t_{m-1}<t_m=b$ of $I=[a,b]$ such that $f$ takes a
constant value on each (open) interval $(t_{i-1},t_i)$, $i=1,2,\dots,m$. Clearly,
\begin{equation} \label{e:StBV}
\mbox{\rm St}(I;M)\subset\BV(I;M).
\end{equation}
Furthermore (cf.\ \cite[(7.6.1)]{Dieu}), we have
\begin{equation} \label{e:stR}
\Reg(I;M)=\{f\in M^I:\mbox{$\exists\,\{f_j\}\subset\mbox{\rm St}(I;M)$
such that $f_j\rightrightarrows f$ on $I$}\}
\end{equation}
(if, in addition, $f\in\BV(I;M)$, then $\{f_j\}\subset\mbox{\rm St}(I;M)$
can be chosen such that $f_j\rightrightarrows f$ on $I$ and $V(f_j,I)\le V(f,I)$ for all $j\in\Nb$,
cf.~\cite[Section~1.27]{Var}).
\section{Definition of the approximate variation} \label{ss:dav}
\begin{definition} \label{def:av}
The \emph{approximate variation\/} of a function $f\!\in\! M^T$ is the one-parameter
family $\{V_\vep(f,T)\}_{\vep>0}$ of \emph{$\vep$-variations\/} defined,
for each $\vep>0$,~by
\begin{equation} \label{e:av}
V_\vep(f,T)=\inf\,\{V(g,T):\mbox{$g\in\BV(T;M)$ and $d_{\infty,T}(f,g)\le\vep$}\}
\end{equation}
(with the convention that $\inf\es=\infty$).
\end{definition}
The notion of $\vep$-variation, which plays a crucial role in this paper, is originally due
to Fra{\v n}kov{\'a} \cite[Definition~3.2]{Fr} for $T=I=[a,b]$ and $M=\Rb^N$. It was
also considered and extended in \cite[Sections~4, 6]{Studia17} to any $T\subset\Rb$ and
metric space $(M,d)$, and \cite{JMAA17} for metric space valued functions of two variables.
A few comments concerning Definition~\ref{def:av} are in order. Sometimes it is
convenient to rewrite \eq{e:av} as $V_\vep(f,T)=\inf\{V(g,T):g\in G_{\vep,T}(f)\}$,
where
\begin{equation*}
G_{\vep,T}(f)=\{g\in\BV(T;M):d_{\infty,T}(f,g)\le\vep\}.
\end{equation*}
So, we obtain the value $V_\vep(f,T)$ if we ``minimize'' the lower semicontinuous
functional $g\mapsto V(g,T)$ over the metric subspace $G_{\vep,T}(f)$ of $\BV(T;M)$.
Clearly, $V_\vep(f,T)\in[0,\infty]$, and the value $V_\vep(f,T)$ does not change if
we replace condition $g\in\mbox{\rm BV}(T;M)$ at the right-hand side of \eq{e:av}
by less restrictive conditions $g\in M^T$ or $g\in\Bd(T;M)$.
Condition $V_\vep(f,T)=\infty$ simply means that $G_{\vep,T}(f)=\es$, i.e.,
\begin{equation} \label{e:besk}
\mbox{$V(g,T)=\infty$ \,for all \,$g\in M^T$ \,such that \,$d_{\infty,T}(f,g)\le\vep$.}
\end{equation}
The finiteness of $V_\vep(f,T)$ is equivalent to the following: for any number
$\eta>V_\vep(f,T)$ there is a function $g\in\BV(T;M)$, depending on $\vep$ and $\eta$,
such that $d_{\infty,T}(f,g)\le\vep$ and $V_\vep(f,T)\le V(g,T)\le\eta$.
Given $k\in\Nb$, setting $\eta=V_\vep(f,T)+(1/k)$, we find that
there is $g_k^\vep\in\BV(T;M)$ such that
\begin{equation} \label{e:fin}
d_{\infty,T}(f,g_k^\vep)\le\vep\quad\mbox{and}\quad
V_\vep(f,T)\le V(g_k^\vep,T)\le V_\vep(f,T)+(1/k);
\end{equation}
in particular, \eq{e:fin} implies $V_\vep(f,T)=\lim_{k\to\infty}V(g_k^\vep,T)$.
Given $\vep>0$, condition $V_\vep(f,T)=0$ is characterized as follows (cf.~\eq{e:fin}):
\begin{equation} \label{e:zer}
\mbox{$\exists\,\{g_k\}\!\subset\!\BV(T;M)$ such that
$\displaystyle\sup_{k\in\Nb}d_{\infty,T}(f,g_k)\!\le\!\vep$ and
$\displaystyle\lim_{k\to\infty}\!V(g_k,T)\!=\!0$.}
\end{equation}
In particular, if $g_k=c$ is a constant function on $T$ for all $k\in\Nb$, we have:
\begin{equation} \label{e:ze1}
\mbox{if \,$d_{\infty,T}(f,c)\le\vep$, \,then \,$V_\vep(f,T)=0$.}
\end{equation}
This is the case when $|f(T)|\le\vep$; more explicitly, \eq{e:ze1} implies
\begin{equation} \label{e:zero}
\mbox{if \,$\vep>0$ \,and \,$|f(T)|\le\vep$, \,then \,$V_\vep(f,T)=0$.}
\end{equation}
In fact, fixing $t_0\in T$, we may define a constant function by $c(t)=f(t_0)$
for all $t\in T$, so that $d_{\infty,T}(f,c)\le|f(T)|\le\vep$.
The lower bound $|f(T)|$ for $\vep$ in \eq{e:zero} can be refined provided $f\in M^T$
satisfies certain additional assumptions. By \eq{e:1s2}, $|f(T)|\le2d_{\infty,T}(f,c)$ for
every constant function $c\in M^T$.
Now, if $|f(T)|=2d_{\infty,T}(f,c)$ for some $c$, we have:
\begin{equation} \label{e:zero2}
\mbox{if \,$\vep>0$ \,and \,$\vep\ge|f(T)|/2$, \,then \,$V_\vep(f,T)=0$.}
\end{equation}
To see this, note that $d_{\infty,T}(f,c)=|f(T)|/2\le\vep$ and apply \eq{e:ze1}.
The number $|f(T)|/2$ in \eq{e:zero2} is the \emph{best possible\/} lower bound
for $\vep$, for which we may have $V_\vep(f,T)=0$; in fact, by Lemma~\ref{l:71}(b)
(see below), if $V_\vep(f,T)=0$, then $|f(T)|\le2\vep$, i.e., $\vep\ge|f(T)|/2$.
In other words, if $0<\vep<|f(T)|/2$, then $V_\vep(f,T)\ne0$.
To present an example of condition $|f(T)|=2d_{\infty,T}(f,c)$, suppose $f\in M^T$
has only two values, i.e., $f(T)=\{x,y\}$ for some $x,y\in M$, $x\ne y$. Then,
the mentioned condition is of the form
\begin{equation} \label{e:2max}
d(x,y)=2\max\{d(x,c),d(y,c)\}\quad\mbox{for \,some}\quad c\in M.
\end{equation}
Condition \eq{e:2max} is satisfied for such $f$ if, for instance, $(M,\|\cdot\|)$
is a \emph{normed linear space\/} over $\mathbb{K}=\Rb$ or $\mathbb{C}$
(always equipped) with the \emph{induced metric\/} $d(u,v)=\|u-v\|$, $u,v\in M$.
\label{p:nls}
In fact, we may set $c(t)=c=(x+y)/2$, $t\in T$. Note that \eq{e:2max} is concerned
with a certain form of `convexity' of metric space $(M,d)$ (cf.~
\cite[Example~1]{Studia17}).
If $f(T)=\{x,y,z\}$, condition $|f(T)|=2d_{\infty,T}(f,c)$ is of the form
\begin{equation*}
\max\{d(x,y),d(x,z),d(y,z)\}=2\max\{d(x,c),d(y,c),d(z,c)\}.
\end{equation*}
Some elementary properties of $\vep$-variation(s) of $f\in M^T$ are gathered in
\begin{lemma} \label{l:ele}
{\rm(a)} The function $\vep\mapsto V_\vep(f,T):(0,\infty)\to[0,\infty]$ is
{\sl nonincreasing}, and so, the following inequalities hold\/
{\rm(}for one-sided limits{\rm):}
\begin{equation} \label{e:osli}
\mbox{$V_{\vep+0}(f,T)\le V_\vep(f,T)\le V_{\vep-0}(f,T)$ \,in \,$[0,\infty]$
\,for \,all \,$\vep>0$.}
\end{equation}
\par\hspace{-14pt}
{\rm(b)} If $\es\ne T_1\subset T_2\subset T$, then $V_\vep(f,T_1)\le V_\vep(f,T_2)$
\,for \,all \,$\vep>0$.
\end{lemma}
\proof
(a) Let $0<\vep_1<\vep_2$. Since $d_{\infty,T}(f,g)\le\vep_1$ implies
$d_{\infty,T}(f,g)\le\vep_2$ for $g\in M^T$, we get
$G_{\vep_1,T}(f)\subset G_{\vep_2,T}(f)$, and so, by \eq{e:av},
$V_{\vep_2}(f,T)\le V_{\vep_1}(f,T)$.
\smallbreak
(b) Given $g\in M^T$, $T_1\subset T_2$ implies
$d_{\infty,T_1}(f,g)\le d_{\infty,T_2}(f,g)$. So, for any $\vep>0$,
$G_{\vep,T_2}(f)\subset G_{\vep,T_1}(f)$, which, by \eq{e:av}, yields
$V_\vep(f,T_1)\le V_\vep(f,T_2)$.
\sq
\section{Variants of the approximate variation} \label{ss:vav}
Here we consider two modifications of the notion of approximate variation.
The first one is obtained if we replace the nonstrict inequality $\le\vep$ in \eq{e:av} by
the strict inequality $<\vep$; namely, given $f\in M^T$ and $\vep>0$, we set
\begin{equation} \label{e:avm1}
V_\vep'(f,T)=\inf\,\{V(g,T):\mbox{$g\in\BV(T;M)$ such that $d_{\infty,T}(f,g)<\vep$}\}
\end{equation}
($\inf\es=\infty$). Clearly, Lemma~\ref{l:ele} holds for $V_\vep'(f,T)$. More
specific properties of $V_\vep'(f,T)$ are exposed in the following
\begin{proposition} \label{pr:1}
Given $f\in M^T$, we have\/{\rm:}
\begin{itemize}
\setlength{\itemsep}{0.0pt plus 0.5pt minus 0.25pt}
\item[{\rm(a)}] the function $\vep\mapsto V_\vep'(f,T)$, mapping $(0,\infty)$ into
$[0,\infty]$, is continuous from the left on $(0,\infty);$
\item[{\rm(b)}] inequalities $V_{\vep_1}'(f,T)\le V_{\vep+0}'(f,T)\le V_\vep(f,T)
\le V_{\vep-0}(f,T)\le V_\vep'(f,T)$ hold for all\/ $0<\vep<\vep_1$.
\end{itemize}
\end{proposition}
\proof
(a) In view of \eq{e:osli} for $V_\vep'(f,T)$, given $\vep>0$, it suffices to show that
$V_{\vep-0}'(f,T)\le V_\vep'(f,T)$ provided $V_\vep'(f,T)<\infty$. By \eq{e:avm1},
for any number $\eta>V_\vep'(f,T)$ there is $g=g_{\vep,\eta}\in\BV(T;M)$ such that
$d_{\infty,T}(f,g)<\vep$ and $V(g,T)\le\eta$. If a number $\vep'$ is such that
$d_{\infty,T}(f,g)<\vep'<\vep$, then \eq{e:avm1}
implies $V_{\vep'}'(f,T)\le V(g,T)\le\eta$. Passing to the limit as $\vep'\to\vep-0$,
we get $V_{\vep-0}'(f,T)\le\eta$ for all $\eta>V_\vep'(f,T)$, and so,
$V_{\vep-0}'(f,T)\le V_\vep'(f,T)$.
(b) To prove the first inequality, we note that $V_{\vep_1}'(f,T)\le V_{\vep'}'(f,T)$ for
all $\vep'$ with $\vep<\vep'<\vep_1$. It remains to pass to the limit as $\vep'\to\vep+0$.
For the second inequality, let $g\in G_{\vep,T}(f)$, i.e., $g\in\BV(T;M)$ and
$d_{\infty,T}(f,g)\le\vep$. Then, for any number $\vep'$ such that $\vep<\vep'$,
by virtue of \eq{e:avm1}, $V_{\vep'}'(f,T)\le V(g,T)$, and so, as $\vep'\to\vep+0$,
$V_{\vep+0}'(f,T)\le V(g,T)$. Taking the infimum over all $g\in G_{\vep,T}(f)$,
we obtain the second inequality.
The third inequality is a consequence of \eq{e:osli}.
Since $\{g\in\BV(T;M):d_{\infty,T}(f,g)<\vep\}\subset G_{\vep,T}(f)$, we have
$V_\vep(f,T)\le V_\vep'(f,T)$. Replacing $\vep$ by $\vep'$ with $0<\vep'<\vep$,
we get $V_{\vep'}(f,T)\le V_{\vep'}'(f,T)$, and so, passing to the limit as $\vep'\to\vep-0$
and taking into account item (a) above, we arrive at the fourth inequality.
\sq
In contrast to Proposition~\ref{pr:1}(a), it will be shown in Lemma~\ref{l:proper}(a) that
the function $\vep\mapsto V_\vep(f,T)$ is continuous from the right on $(0,\infty)$
\emph{only\/} under the additional assumption on the metric space $(M,d)$
(to be \emph{proper}).
In the case when $T=I=[a,b]$, the second variant of the approximate variation is
obtained if we replace the set of functions of bounded variation $\BV(I;M)$ in
\eq{e:av} by the set of step functions $\mbox{\rm St}(I;M)$: given $f\in M^I$,
\begin{equation} \label{e:avm2}
V_\vep^s(f,I)=\inf\,\{V(g,I):\mbox{$g\in\mbox{\rm St}(I;M)$ and
$d_{\infty,I}(f,g)\le\vep$}\},\quad\vep>0
\end{equation}
($\inf\es=\infty$). Clearly, $V_\vep^s(f,I)$ has the properties from Lemma~\ref{l:ele}.
\begin{proposition} \label{pr:2}
$V_{\vep+0}^s(f,I)\le V_\vep(f,I)\le V_\vep^s(f,I)$ for all $f\in M^I$ and $\vep>0$.
\end{proposition}
\proof
By \eq{e:StBV},
$\{g\in\mbox{\rm St}(I;M):d_{\infty,I}(f,g)\le\vep\}\subset G_{\vep,I}(f)$,
and so, \eq{e:av} and \eq{e:avm2} imply the right-hand side inequality.
In order to prove the left-hand side inequality, we may assume that $V_\vep(f,I)<\infty$.
By \eq{e:av}, for any $\eta>V_\vep(f,I)$ there is $g=g_{\vep,\eta}\in\BV(I;M)$
such that $d_{\infty,I}(f,g)\le\vep$ and $V(g,I)\le\eta$. Since
$g\in\BV(I;M)\subset\Reg(I;M)$, by virtue of \eq{e:stR}, there is a sequence
$\{g_j\}\subset\mbox{\rm St}(I;M)$ such that $g_j\rightrightarrows g$ on~$I$ and
$V(g_j,I)\le V(g,I)$ for all natural~$j$. Hence $\limsup_{j\to\infty}V(g_j,I)\le V(g,I)$
and, by property (V.3) (p.~\pageref{p:V}), $V(g,I)\le\liminf_{j\to\infty}V(g_j,I)$,
and so, $\lim_{j\to\infty}V(g_j,I)=V(g,I)$. Now, let $\vep'>0$ be arbitrary. Then,
there is $j_1=j_1(\vep')\in\Nb$ such that $V(g_j,I)\le V(g,I)+\vep'$ for all $j\ge j_1$,
and, since $g_j\rightrightarrows g$ on $I$, there is $j_2=j_2(\vep')\in\Nb$ such that
$d_{\infty,I}(g_j,g)\le\vep'$ for all $j\ge j_2$. Noting that, for all $j\ge\max\{j_1,j_2\}$,
$g_j\in\mbox{\rm St}(I;M)$ and
\begin{equation*}
d_{\infty,I}(f,g_j)\le d_{\infty,I}(f,g)+d_{\infty,I}(g,g_j)\le\vep+\vep',
\end{equation*}
by the definition \eq{e:avm2} of $V_\vep^s(f,I)$, we get
\begin{equation*}
V_{\vep+\vep'}^s(f,I)\le V(g_j,I)\le V(g,I)+\vep'\le\eta+\vep'.
\end{equation*}
Passing to the limit as $\vep'\to+0$, we find $V_{\vep+0}^s(f,I)\le\eta$ for all
$\eta>V_\vep(f,I)$, and so, $V_{\vep+0}^s(f,I)\le V_\vep(f,I)$.
\sq
Propositions \ref{pr:1}(b) and \ref{pr:2} show that the quantities $V_\vep'(f,T)$ and
$V_\vep^s(f,I)$ are somehow `equivalent' to $V_\vep(f,T)$, so their theories will no longer
be developed in the sequel, and the theory of $V_\vep(f,T)$ is sufficient for our purposes.
\section{Properties of the approximate variation} \label{ss:pro}
In order to effectively calculate the approximate variation of a function, we need more
of its properties. Item (a) in the next lemma justifies the term `approximate variation',
introduced in Definition~\ref{def:av}.
\begin{lemma} \label{l:71}
Given $f\in M^T$, we have\/{\rm:}\par
\begin{itemize}
\item[{\rm(a)}] $\lim_{\vep\to+0}V_\vep(f,T)=\sup_{\vep>0}V_\vep(f,T)=V(f,T);$
\item[{\rm(b)}] $|f(T)|\le V_\vep(f,T)+2\vep$ \,for all \,$\vep>0;$
\item[{\rm(c)}] $|f(T)|=\infty$ {\rm(}i.e., $f\!\notin\!\Bd(T;M)${\rm)}
if and only if \,$V_\vep(f,T)\!=\!\infty$ for all $\vep\!>\!0;$
\item[{\rm(d)}] $\inf_{\vep>0}(V_\vep(f,T)+\vep)\le|f(T)|\le
\inf_{\vep>0}(V_\vep(f,T)+2\vep);$
\item[{\rm(e)}] $|f(T)|=0$ {\rm(}i.e.,\,$f$ is constant{\rm)}
if and only if\, $V_\vep(f,T)\!=\!0$ for all $\vep\!>\!0;$
\item[{\rm(f)}] if \,$0<\vep<|f(T)|$, then
\,$\max\{0,|f(T)|-2\vep\}\le V_\vep(f,T)\le V(f,T)$.
\end{itemize}
\end{lemma}
\proof
(a) By Lemma~\ref{l:ele}(a), $C\equiv\lim_{\vep\to+0}V_\vep(f,T)
=\sup_{\vep>0}V_\vep(f,T)$ is well-defined in $[0,\infty]$. First, we assume that
$f\in\BV(T;M)$. Since $f\in G_{\vep,T}(f)$ for all $\vep>0$, definition \eq{e:av} implies
$V_\vep(f,T)\le V(f,T)$ for all $\vep>0$, and so, $C\le V(f,T)<\infty$. Now, we prove
that $V(f,T)\le C$. By definition of $C$,
for every $\eta>0$, there is $\delta=\delta(\eta)>0$
such that $V_\vep(f,T)<C+\eta$ for all $\vep\in(0,\delta)$. Let
$\{\vep_k\}_{k=1}^\infty\subset(0,\delta)$ be such that $\vep_k\to0$ as $k\to\infty$.
For every $k\in\Nb$, the inequality $V_{\vep_k}(f,T)<C+\eta$ and definition \eq{e:av} imply the existence
of $g_k\in\BV(T;M)$ such that $d_{\infty,T}(f,g_k)\le\vep_k$ and $V(g_k,T)\le C+\eta$.
Since $\vep_k\to0$, $g_k\rightrightarrows f$ on $T$, and so, property (V.3) on p.\,\pageref{p:V} yields
\begin{equation*}
V(f,T)\le\liminf_{k\to\infty}V(g_k,T)\le C+\eta\quad\mbox{for \,all}\quad\eta>0,
\end{equation*}
whence $V(f,T)\le C<\infty$. Thus, $C$ and $V(f,T)$ are finite or not simultaneously,
and $C=V(f,T)$, which establishes (a).
(b) The inequality is clear if $V_\vep(f,T)=\infty$, so we assume that $V_\vep(f,T)$ is
finite. By definition \eq{e:av}, for every $\eta>V_\vep(f,T)$ there is
$g=g_\eta\in\BV(T;M)$ such that $\mbox{$d_{\infty,T}(f,g)\le\vep$}$ and
$V(g,T)\le\eta$. Inequality \eq{e:1s2} and property (V.1) on p.\,\pageref{p:V} imply
\begin{equation*}
|f(T)|\le|g(T)|+2d_{\infty,T}(f,g)\le V(g,T)+2\vep\le\eta+2\vep.
\end{equation*}
It remains to take into account the arbitrariness of $\eta>V_\vep(f,T)$.
(c) The necessity is a consequence of item (b). To prove the sufficiency, assume, on the
contrary, that $|f(T)|\!<\!\infty$. Then, by \eq{e:zero}, for any \mbox{$\vep\!>\!|f(T)|$},
we have $V_\vep(f,T)=0$, which contradicts the assumption $V_\vep(f,T)=\infty$.
(d) The right-hand side inequality is equivalent to item (b). To establish the left-hand
side inequality, we note that if $|f(T)|<\infty$ and $\vep>|f(T)|$, then, by \eq{e:zero},
$V_\vep(f,T)=0$, and so,
\begin{equation*}
|f(T)|=\inf_{\vep>|f(T)|}\vep=\inf_{\vep>|f(T)|}(V_\vep(f,T)+\vep)
\ge\inf_{\vep>0}(V_\vep(f,T)+\vep).
\end{equation*}
Now, if $|f(T)|=\infty$, then, by item (c), $V_\vep(f,T)+\vep=\infty$ for all $\vep>0$,
and so, $\inf_{\vep>0}(V_\vep(f,T)+\vep)=\infty$.
(e) ($\Rightarrow\!$) Since $f$ is constant on $T$, $f\in\BV(T;M)$ and
$d_{\infty,T}(f,f)=0<\vep$, and so, definition \eq{e:av} implies
$0\le V_\vep(f,T)\le V(f,T)=0$.
($\!\Leftarrow$) By virtue of item (d), if $V_\vep(f,T)=0$ for all $\vep>0$, then
$|f(T)|=0$.
(f) We may assume that $f\in\Bd(T;M)$. By item (a), $V_\vep(f,T)\le V(f,T)$, and
by item (b), $|f(T)|-2\vep\le V_\vep(f,T)$ for all $0<\vep<|f(T)|/2$. It is also clear
that $0\le V_\vep(f,T)$ for all $|f(T)|/2\le\vep<|f(T)|$.
\sq
\begin{remark} \label{r:8one} \rm
By \eq{e:zero} and Lemma~\ref{l:71}(c),\,(e), the $\vep$-variation $V_\vep(f,T)$,
initially defined for all $\vep>0$ and $f\in M^T$, is \emph{completely characterized\/}
whenever $\vep>0$ and $f\in\Bd(T;M)$ are such that $0<\vep<|f(T)|$.
The sharpness of assertions in Lemma~\ref{l:71}(b), (d) is presented in
Example~\ref{ex:gDf}(b), (c), (d) on pp.~\pageref{p:L71b}--\pageref{p:36d}
(for (a),\,(b),\,(f), see Example~\ref{exa:t}).
\end{remark}
In order to get the first feeling of the approximate variation, we present an example
(which later on will be generalized, cf.\ Example~\ref{ex:1}).
\begin{example} \label{exa:t} \rm
Let $f:T=[0,1]\to[0,1]$ be given by $f(t)=t$. We are going to evaluate $V_\vep(f,T)$,
$\vep>0$. Since $|f(T)|=1$, by \eq{e:zero}, $V_\vep(f,T)=0$ for all $\vep\ge1=|f(T)|$.
Moreover, if $c(t)\equiv1/2$ on $T$, then $|f(t)-c(t)|\le1/2$ for all $t\in T$, and so,
$V_\vep(f,T)=0$ for all $\vep\ge1/2$. Now, suppose $0<\vep<1/2$. By Lemma~
\ref{l:71}(f), $V_\vep(f,T)\ge1-2\vep$. To establish the reverse inequality, define
$g:T\to\Rb$ by $g(t)=(1-2\vep)t+\vep$, $0\le t\le1$ (draw the graph on the plane).
Clearly, $g$ is increasing on $[0,1]$ and, for all $t\in[0,1]$,
\begin{equation*}
f(t)-\vep=t-\vep=t-2\vep+\vep\le g(t)=t-2\vep t+\vep\le t+\vep=f(t)+\vep,
\end{equation*}
i.e., $d_{\infty,T}(f,g)=\sup_{t\in T}|f(t)-g(t)|\le\vep$. It follows that
\begin{equation*}
V(g,T)=g(1)-g(0)=(1-2\vep+\vep)-\vep=1-2\vep,
\end{equation*}
and so, by definition \eq{e:av}, we get $V_\vep(f,T)\le V(g,T)=1-2\vep$. Thus,
\begin{equation*}
\mbox{if \,$f(t)=t$, \,then}\,\,\, V_\vep(f,[0,1])=\left\{
\begin{tabular}{ccr}
$\!\!1-2\vep$ & \mbox{if} & $0<\vep<1/2$,\\[2pt]
$\!\!0$ & \mbox{if} & $\vep\ge1/2$.
\end{tabular}\right.
\end{equation*}
\end{example}
\begin{lemma}[semi-additivity of the approximate variation] \label{l:mor}
Given $f\in M^T$, $\vep>0$, $t\in T$, if\/ $T_1=T\cap(-\infty,t]$ and\/
$T_2=T\cap[t,\infty)$, then we have\/{\rm:}
\begin{equation*}
V_\vep(f,T_1)+V_\vep(f,T_2)\le V_\vep(f,T)\le V_\vep(f,T_1)+V_\vep(f,T_2)+2\vep.
\end{equation*}
\end{lemma}
\proof
1. First, we prove the left-hand side inequality. We may assume that
$V_\vep(f,T)<\infty$ (otherwise, the inequality is obvious). By definition \eq{e:av},
given $\eta>V_\vep(f,T)$, there is $g=g_\eta\in\BV(T;M)$ such that
$d_{\infty,T}(f,g)\le\vep$ and $V(g,T)\le\eta$. We set $g_1(s)=g(s)$ for all $s\in T_1$
and $g_2(s)=g(s)$ for all $s\in T_2$, and note that $g_1(t)=g(t)=g_2(t)$. Since,
for $i=1,2$, we have $d_{\infty,T_i}(f,g_i)\le d_{\infty,T}(f,g)\le\vep$ and
$g_i\in\BV(T_i;M)$, by \eq{e:av}, we find $V_\vep(f,T_i)\le V(g_i,T_i)$, and so,
the additivity property (V.2) of $V$ (p.~\pageref{p:V})~implies
\begin{align*}
V_\vep(f,T_1)+V_\vep(f,T_2)&\le V(g_1,T_1)+V(g_2,T_2)=
V(g,T_1)+V(g,T_2)\\[2pt]
&=V(g,T)\le\eta\quad\mbox{for \,all}\quad\eta>V_\vep(f,T).
\end{align*}
This establishes the left-hand side inequality.
2. Now, we prove the right-hand side inequality. We may assume that $V_\vep(f,T_1)$
and $V_\vep(f,T_2)$ are finite (otherwise, our inequality becomes $\infty=\infty$).
We may also assume that $T\cap(-\infty,t)\ne\es$ and $T\cap(t,\infty)\ne\es$
(for, otherwise, we have $T_1=T\cap(-\infty,t]=\{t\}$ and $T_2=T$, or
\mbox{$T_2=T\cap[t,\infty)=\{t\}$} and $T_1=T$, respectively, and the inequality
is clear). By definition \eq{e:av}, for $i=1,2$, given $\eta_i>V_\vep(f,T_i)$, there
exists $g_i\in\BV(T_i;M)$ such that $d_{\infty,T_i}(f,g_i)\le\vep$ and $V(g_i,T_i)\le\eta_i$.
Given $u\in M$ (to be specified below), we define $g\in\BV(T;M)$ by
\begin{equation*}
\mbox{$g(s)\!=\!g_1(s)$ if $s\in T\!\cap\!(-\infty,t)$, $g(t)\!=\!u$, and $g(s)\!=\!g_2(s)$
if $s\in T\!\cap\!(t,\infty)$.}
\end{equation*}
Arguing with partitions of $T_i$ for $i=1,2$ (see step~3 below) and applying the
triangle inequality for $d$, we get
\begin{equation} \label{e:nin1}
V(g,T_i)\le V(g_i,T_i)+d(g(t),g_i(t))\le\eta_i+d(u,g_i(t)).
\end{equation}
By the additivity (V.2) of $V$, we find
\begin{equation} \label{e:nin2}
V(g,T)=V(g,T_1)+V(g,T_2)\le\eta_1+d(u,g_1(t))+\eta_2+d(u,g_2(t)).
\end{equation}
Now, we set $u=g_1(t)$ (by symmetry, we may set $u=g_2(t)$ as well). Since
$g=g_1$ on $T_1=T\cap(-\infty,t]$ and $g=g_2$ on $T\cap(t,\infty)\subset T_2$, we get
\begin{equation} \label{e:nin3}
d_{\infty,T}(f,g)\le\max\{d_{\infty,T_1}(f,g_1),d_{\infty,T_2}(f,g_2)\}\le\vep.
\end{equation}
Noting that (cf.~\eq{e:nin2})
\begin{align*}
d(u,g_2(t))&=d(g_1(t),g_2(t))\le d(g_1(t),f(t))+d(f(t),g_2(t))\\[2pt]
&\le d_{\infty,T_1}(g_1,f)+d_{\infty,T_2}(f,g_2)\le\vep+\vep=2\vep,
\end{align*}
we conclude from \eq{e:av}, \eq{e:nin3} and \eq{e:nin2} that
\begin{equation*}
V_\vep(f,T)\le V(g,T)\le\eta_1+\eta_2+2\vep.
\end{equation*}
The arbitrariness of numbers $\eta_1\!>\!V_\vep(f,T_1)$ and $\eta_2\!>\!V_\vep(f,T_2)$
proves the desired inequality.
3. \emph{Proof of\/ \eq{e:nin1}} for $i=1$ (the case $i=2$ is similar). Let
$\{t_k\}_{k=0}^m\subset T_1$ be a partition of $T_1$, i.e.,
$t_0<t_1<\dots<t_{m-1}<t_m=t$. Since $g(s)=g_1(s)$ for $s\in T$, $s<t$, we have:
\begin{align*}
\sum_{k=1}^md(g(t_k),g(t_{k-1}))&=\sum_{k=1}^{m-1}d(g(t_k),g(t_{k-1}))
+d(g(t_m),g(t_{m-1}))\\
&=\sum_{k=1}^{m-1}d(g_1(t_k),g_1(t_{k-1}))+d(g_1(t_m),g_1(t_{m-1}))\\
&\qquad+d(g(t_m),g(t_{m-1}))-d(g_1(t_m),g_1(t_{m-1}))\\[4pt]
&\le V(g_1,T_1)+|d(g(t),g_1(t_{m-1}))-d(g_1(t),g_1(t_{m-1}))|\\[4pt]
&\le V(g_1,T_1)+d(g(t),g_1(t)),
\end{align*}
where the last inequality is due to the triangle inequality for $d$. Taking the supremum
over all partitions of $T_1$, we obtain the left-hand side inequality in \eq{e:nin1}
for $i=1$.
\sq
\begin{remark} \label{r:ifo}
The informative part of Lemma~\ref{l:mor} concerns the case when $f\in\Bd(T;M)$
and $0<\vep<|f(T)|$; in fact, if $\vep\ge|f(T)|$, then $\vep\ge|f(T_1)|$ and
$\vep\ge|f(T_2)|$, and so, by \eq{e:zero},
$V_\vep(f,T)=V_\vep(f,T_1)=V_\vep(f,T_2)=0$. The sharpness of the inequalities
in Lemma~\ref{l:mor} is shown in Example~\ref{ex:2}.
\end{remark}
Interestingly, the approximate variation characterizes regulated functions. The following
assertion is Fra{\v n}kov{\'a}'s result \cite[Proposition~3.4]{Fr} rewritten from $I=[a,b]$
and $M=\Rb^N$ to the case of an arbitrary metric space $(M,d)$ (which was
announced in \cite[equality~(4.2)]{Studia17}).
\begin{lemma} \label{l:Regc}
$\Reg(I;M)=\{f\in M^I:\mbox{$V_\vep(f,I)<\infty$ for all $\vep>0$}\}$.
\end{lemma}
\proof
($\subset$) If $f\!\in\!\Reg(I;M)$, then, by \eq{e:stR}, given $\vep\!>\!0$, there is
$g_\vep\!\in\!\mbox{\rm St}(I;M)$ such that $d_{\infty,I}(f,g_\vep)\le\vep$. Since
$g_\vep\in\BV(I;M)$, definition \eq{e:av} implies $V_\vep(f,I)\le V(g_\vep,I)<\infty$.
($\supset$) Suppose $f\in M^I$ and $V_\vep(f,I)<\infty$ for all $\vep>0$. Given
$a<\tau\le b$, let us show that $d(f(s),f(t))\to0$ as $s,t\to\tau-0$ (the arguments for
$a\le\tau'<b$ and the limit as $s,t\to\tau'+0$ are similar). Let $\vep>0$ be arbitrary.
We define the \emph{$\vep$-variation function\/} by $\vfi_\vep(t)=V_\vep(f,[a,t])$,
$t\in I$. By Lemma~\ref{l:ele}(b),
$0\le\vfi_\vep(s)\le\vfi_\vep(t)\le V_\vep(f,I)<\infty$ for all $s,t\in I$, $s\le t$, i.e.,
$\vfi_\vep:I\to[0,\infty)$ is bounded and nondecreasing, and so, the left limit
$\lim_{t\to\tau-0}\vfi_\vep(t)$ exists in $[0,\infty)$. Hence, there is
$\delta=\delta(\vep)\in(0,\tau-a]$ such that $|\vfi_\vep(t)-\vfi_\vep(s)|<\vep$
for all $s,t\in[\tau-\delta,\tau)$. Now, let $s,t\in[\tau-\delta,\tau)$, $s\le t$, be
arbitrary. Lemma~\ref{l:mor} (with $T_1=[a,s]$, $T_2=[s,t]$ and $T=[a,t]$) implies
$V_\vep(f,[s,t])\le\vfi_\vep(t)-\vfi_\vep(s)<\vep$. By the definition of $V_\vep(f,[s,t])$,
there is $g=g_\vep\in\BV([s,t];M)$ such that $d_{\infty,[s,t]}(f,g)\le\vep$ and
$V(g,[s,t])\le\vep$. Thus, by virtue of \eq{e:10},
\begin{equation*}
d(f(s),f(t))\le d(g(s),g(t))+2d_{\infty,[s,t]}(f,g)\le V(g,[s,t])+2\vep\le3\vep.
\end{equation*}
This completes the proof that $\lim_{I\ni s,t\to\tau-0}d(f(s),f(t))=0$.
\sq
\begin{remark} \label{r:indir}
We presented a direct proof of assertion $(\supset)$ in Lemma~\ref{l:Regc}. Indirectly,
we may argue as in \cite[Proposition~3.4]{Fr} as follows. Since, for each $k\in\Nb$,
$V_{1/k}(f,I)<\infty$, by definition \eq{e:av}, there is $g_k\in\BV(I;M)$ such that
$d_{\infty,I}(f,g_k)\le1/k$ (and $V(g_k,I)\le V_{1/k}(f,I)+(1/k)$). Noting that
$g_k\rightrightarrows f$ on~$I$, each $g_k\in\Reg(I;M)$, and $\Reg(I;M)$ is closed with respect
to the uniform convergence, we get $f\in\Reg(I;M)$. An illustration of
Lemma~\ref{l:Regc} is presented in Examples~\ref{ex:1} and \ref{ex:gDf}.
\end{remark}
Now we study the approximate variation in its interplay with the uniform convergence
of sequences of functions (see also Examples~\ref{ex:rieq}--\ref{ex:ucbw}).
\begin{lemma} \label{l:uc}
Suppose $f\in M^T$, $\{f_j\}\subset M^T$ and $f_j\rightrightarrows f$ on $T$. We have\/{\rm:}
\begin{itemize}
\setlength{\itemsep}{0.0pt plus 0.5pt minus 0.25pt}
\item[{\rm(a)}]
$\displaystyle\!\! V_{\vep+0}(f,T)\!\le\!\liminf_{j\to\infty}V_\vep(f_j,T)\!\le\!
\limsup_{j\to\infty}V_\vep(f_j,T)\!\le\! V_{\vep-0}(f,T)$ for all \mbox{$\vep\!>\!0;$}
\item[{\rm(b)}] $\!\!$if $V_\vep(f_j,T)\!<\!\infty$ for all $\vep\!>\!0$ and $j\!\in\!\Nb$,
then $V_\vep(f,T)\!<\!\infty$ for all $\vep\!>\!0$.
\end{itemize}
\end{lemma}
\proof
(a) Only the first and the last inequalities are to be verified.
1. In order to prove the first inequality, we may assume (passing to a suitable
subsequence of $\{f_j\}$ if necessary) that the right-hand side (i.e., the $\liminf$)
is equal to $C\equiv\lim_{j\to\infty}V_\vep(f_j,T)<\infty$. Suppose $\eta>0$ is given
arbitrarily. Then, there is $j_0=j_0(\eta)\in\Nb$ such that $V_\vep(f_j,T)\le C+\eta$
for all $j\ge j_0$. By the definition of $V_\vep(f_j,T)$, for every $j\ge j_0$ there is
$g_j=g_{j,\eta}\in\BV(T;M)$ such that $d_{\infty,T}(f_j,g_j)\le\vep$ and
$V(g_j,T)\le V_\vep(f_j,T)+\eta$. Since $f_j\rightrightarrows f$ on $T$, we have
$d_{\infty,T}(f_j,f)\to0$ as $j\to\infty$, and so, there is $j_1=j_1(\eta)\in\Nb$ such that
$d_{\infty,T}(f_j,f)\le\eta$ for all $j\ge j_1$. Noting that
\begin{equation*}
d_{\infty}(f,g_j)\le d_{\infty}(f,f_j)+d_{\infty}(f_j,g_j)\le\eta+\vep\quad
\mbox{for all $j\ge\max\{j_0,j_1\}$,}
\end{equation*}
we find, by virtue of definition \eq{e:av},
\begin{equation*}
V_{\eta+\vep}(f,T)\le V(g_j,T)\le V_\vep(f_j,T)+\eta\le(C+\eta)+\eta=C+2\eta.
\end{equation*}
Passing to the limit as $\eta\to+0$, we arrive at $V_{\vep+0}(f,T)\le C$, which
was to be proved.
2. To establish the last inequality, with no loss of generality we may assume that
$V_{\vep-0}(f,T)<\infty$. Given $\eta>0$, there is $\delta=\delta(\eta,\vep)\in(0,\vep)$
such that if $\vep'\in[\vep-\delta,\vep)$, we have
$V_{\vep'}(f,T)\le V_{\vep-0}(f,T)+\eta$. Since $f_j\rightrightarrows f$ on $T$, given
$\vep-\delta\le\vep'<\vep$, there is $j_0=j_0(\vep',\vep)\in\Nb$ such that
$d_{\infty,T}(f_j,f)\le\vep-\vep'$ for all $j\ge j_0$. By the definition of $V_{\vep'}(f,T)$,
for every $j\in\Nb$ we find $g_j=g_{j,\vep'}\in\BV(T;M)$ such that
$d_{\infty,T}(f,g_j)\le\vep'$ and
\begin{equation*}
V_{\vep'}(f,T)\le V(g_j,T)\le V_{\vep'}(f,T)+(1/j),
\end{equation*}
so that $\lim_{j\to\infty}V(g_j,T)=V_{\vep'}(f,T)$. Noting that, for all $j\ge j_0$,
\begin{equation*}
d_{\infty,T}(f_j,g_j)\le d_{\infty,T}(f_j,f)+d_{\infty,T}(f,g_j)\le(\vep-\vep')+\vep'=\vep,
\end{equation*}
we find from \eq{e:av} that $V_\vep(f_j,T)\le V(g_j,T)$ for all $j\ge j_0$. It follows that
\begin{equation*}
\limsup_{j\to\infty}V_\vep(f_j,T)\le\lim_{j\to\infty}V(g_j,T)=V_{\vep'}(f,T)
\le V_{\vep-0}(f,T)+\eta.
\end{equation*}
It remains to take into account the arbitrariness of $\eta>0$.
(b) Let $\vep>0$ and $0<\vep'<\vep$. Given $j\in\Nb$, since $V_{\vep'}(f_j,T)<\infty$,
by definition \eq{e:av}, there is $g_j\in\BV(T;M)$ such that $d_{\infty,T}(f_j,g_j)\le\vep'$
and $V(g_j,T)\le V_{\vep'}(f_j,T)+1$. Since $f_j\rightrightarrows f$ on $T$, there is
$j_0=j_0(\vep-\vep')\in\Nb$ such that $d_{\infty,T}(f_{j_0},f)\le\vep-\vep'$.
Noting that
\begin{equation*}
d_{\infty,T}(f,g_{j_0})\le d_{\infty,T}(f,f_{j_0})+d_{\infty,T}(f_{j_0},g_{j_0})
\le(\vep-\vep')+\vep'=\vep,
\end{equation*}
we get, by \eq{e:av}, $V_\vep(f,T)\le V(g_{j_0},T)\le V_{\vep'}(f_{j_0},T)+1<\infty$.
\sq
\begin{lemma}[change of variable in the approximate variation] \label{l:chvar}
If $T\subset\Rb$, $\vfi:T\to\Rb$ is a {\sl strictly monotone} function and
$f:\vfi(T)\to M$, then
\footnote{Here, as usual, $\vfi(T)=\{\vfi(t):t\in T\}$ is the image of $T$ under $\vfi$,
and $f\circ\vfi$ is the composed function of $\vfi:T\to\Rb$ and $f:\vfi(T)\to M$ given
by $(f\circ\vfi)(t)=f(\vfi(t))$, $t\in T$.}
\begin{equation*}
V_\vep(f,\vfi(T))=V_\vep(f\circ\vfi,T)\quad\mbox{for \,all \,\,$\vep>0$.}
\end{equation*}
\end{lemma}
\proof
We need the following `change of variable' formula for Jordan's variation (cf.\
\cite[Theorem~2.20]{Var}, \cite[Proposition~2.1(V4)]{MatSb}):
if $T\subset\Rb$, $\vfi:T\to\Rb$
is a (not necessarily strictly) \emph{monotone} function and $g:\vfi(T)\to M$, then
\begin{equation} \label{e:chava}
V(g,\vfi(T))=V(g\circ\vfi,T).
\end{equation}
($\ge$) Suppose $V_\vep(f,\vfi(T))\!<\!\infty$. By definition \eq{e:av}, for every
$\eta\!>\!V_\vep(f,\vfi(T))$ there is $g\in\BV(\vfi(T);M)$ such that
$d_{\infty,\vfi(T)}(f,g)\le\vep$ and $V(g,\vfi(T))\le\eta$. We have $g\circ\vfi\in M^T$,
\begin{equation} \label{e:444}
d_{\infty,T}(f\circ\vfi,g\circ\vfi)=d_{\infty,\vfi(T)}(f,g)\le\vep,
\end{equation}
and, by \eq{e:chava}, $V(g\circ\vfi,T)=V(g,\vfi(T))\le\eta$. Thus,
by \eq{e:av} and \eq{e:444},
\begin{equation*}
\mbox{$V_\vep(f\circ\vfi,T)\le V(g\circ\vfi,T)\le\eta$ \,\,\,for all
\,\,$\eta>V_\vep(f,\vfi(T))$,}
\end{equation*}
and so,
$V_\vep(f\circ\vfi,T)\le V_\vep(f,\vfi(T))<\infty$.
($\le$) Now, suppose $V_\vep(f\circ\vfi,T)<\infty$. Then, for every
$\eta>V_\vep(f\circ\vfi,T)$ there exists $g\in\BV(T;M)$ such that
$d_{\infty,T}(f\circ\vfi,g)\le\vep$ and $V(g,T)\le\eta$. Denote by
$\vfi^{-1}:\vfi(T)\to T$ the inverse function of $\vfi$. Clearly, $\vfi^{-1}$ is
strictly monotone on $\vfi(T)$ in the same sense as $\vfi$ on $T$. Setting
$g_1=g\circ\vfi^{-1}$, we find $g_1:\vfi(T)\to M$ and, by \eq{e:444},
\begin{align*}
d_{\infty,\vfi(T)}(f,g_1)&=d_{\infty,\vfi(T)}\bigl((f\circ\vfi)\circ\vfi^{-1},
g\circ\vfi^{-1}\bigr)\\[3pt]
&=d_{\infty,\vfi^{-1}(\vfi(T))}(f\circ\vfi,g)=d_{\infty,T}(f\circ\vfi,g)\le\vep.
\end{align*}
Furthermore, by \eq{e:chava},
\begin{equation*}
V(g_1,\vfi(T))=V(g\circ\vfi^{-1},\vfi(T))=V(g,\vfi^{-1}(\vfi(T)))=V(g,T)\le\eta.
\end{equation*}
Thus, $V_\vep(f,\vfi(T))\le V(g_1,\vfi(T))\le\eta$ for all $\eta>V_\vep(f\circ\vfi,T)$,
which implies the inequality $V_\vep(f,\vfi(T))\le V_\vep(f\circ\vfi,T)<\infty$.
\sq
Lemma~\ref{l:chvar} will be applied in Example~\ref{ex:midp}
(cf.~Case $\al>1$ on p.~\pageref{p:a>1}).
Under additional assumptions on the metric space $(M,d)$, we get three more
properties of the approximate variation. Recall that $(M,d)$ is called \emph{proper\/}
(or has the \emph{Heine-Borel property\/}) if all \emph{closed bounded\/} subsets
of $M$ are \emph{compact}. \label{p:properms} For instance, if $(M,\|\cdot\|)$ is a
\emph{finite-dimensional\/} normed linear space with induced metric $d$
(cf.~p.~\pageref{p:nls}), then $(M,d)$ is a proper metric space.
Note that a proper metric space is complete. In fact, if $\{x_j\}_{j=1}^\infty$ is a Cauchy
sequence in $M$, then it is bounded and, since $M$ is proper, the set $\{x_j:j\in\Nb\}$
is \rc\ in~$M$. Hence a subsequence of $\{x_j\}_{j=1}^\infty$ converges in $M$ to an
element $x\in M$. Now, since $\{x_j\}_{j=1}^\infty$ is Cauchy, we get $x_j\to x$ as
$j\to\infty$, which proves the completeness of~$M$.
\begin{lemma} \label{l:proper}
Let $(M,d)$ be a {\sl proper} metric space and $f\in M^T$. We have\/{\rm:}
\begin{itemize}
\setlength{\itemsep}{0.0pt plus 0.5pt minus 0.25pt}
\item[{\rm(a)}] the function $\vep\mapsto V_\vep(f,T)$ is continuous from the right
on $(0,\infty);$
\item[{\rm(b)}] given $\vep>0$, $V_\vep(f,T)<\infty$ if and only if
$V_\vep(f,T)=V(g,T)$ for some function $g=g_\vep\in G_{\vep,T}(f)$ {\rm(}i.e.,
the infimum in\/ \eq{e:av} is attained, and so, becomes the minimum{\rm);}
\item[{\rm(c)}] if\/ $\{f_j\}\subset M^T$ and $f_j\to f$ on $T$, then
$V_\vep(f,T)\le\liminf_{j\to\infty}V_\vep(f_j,T)$ for all $\vep>0$.
\end{itemize}
\end{lemma}
\proof
(a) By virtue of \eq{e:osli}, it suffices to show that $V_\vep(f,T)\le V_{\vep+0}(f,T)$
provided $V_{\vep+0}(f,T)$ is finite. In fact, given
$\eta\!>\!V_{\vep+0}(f,T)\!=\!\lim_{\vep'\to\vep+0}V_{\vep'}(f,T)$,
there is \mbox{$\delta\!=\!\delta(\eta)\!>\!0$} such that $\eta>V_{\vep'}(f,T)$
for all $\vep'$ with
$\vep<\vep'\le\vep+\delta$. Let $\{\vep_k\}_{k=1}^\infty$ be a sequence such that
$\vep<\vep_k\le\vep+\delta$ for all $k\in\Nb$ and $\vep_k\to\vep$ as $k\to\infty$.
Given $k\in\Nb$, setting $\vep'=\vep_k$, we find $\eta>V_{\vep_k}(f,T)$, and so,
by definition \eq{e:av}, there is $g_k\in\BV(T;M)$ (also depending on $\eta$) such that
\begin{equation} \label{e:tfo}
d_{\infty,T}(f,g_k)\le\vep_k\quad\mbox{and}\quad V(g_k,T)\le\eta.
\end{equation}
By the first inequality in \eq{e:tfo}, the sequence $\{g_k\}$ is pointwise bounded on $T$,
because, given $t\in T$, by the triangle inequality for $d$, we have
\begin{align}
d(g_k(t),g_j(t))&\le d(g_k(t),f(t))+d(f(t),g_j(t)) \nonumber\\[3pt]
&\le d_{\infty,T}(g_k,f)+d_{\infty,T}(f,g_j) \label{e:tipb} \\[3pt]
&\le\vep_k+\vep_j\le2(\vep+\delta)\quad\mbox{for all}\quad k,j\in\Nb,\nonumber
\end{align}
and since $(M,d)$ is \emph{proper}, the sequence $\{g_k\}$ is \pw\ \rc\ on~$T$.
So, the second inequality in \eq{e:tfo} and the Helly-type selection principle in
$\BV(T;M)$ (which is property (V.4) on~p.~\pageref{p:V}) imply the existence of a
subsequence of $\{g_k\}$, again denoted by $\{g_k\}$ (and the corresponding
subsequence of $\{\vep_k\}$---again by~$\{\vep_k\}$), and a function $g\in\BV(T;M)$
such that $g_k\to g$ pointwise on~$T$. Noting that, by \eq{e:tfo},
\begin{equation} \label{e:kove}
d_{\infty,T}(f,g)\le\liminf_{k\to\infty}d_{\infty,T}(f,g_k)\le\lim_{k\to\infty}\vep_k=\vep
\end{equation}
and, by the lower semicontinuity of $V$ (property (V.3) on p.~\pageref{p:V}),
\begin{equation} \label{e:mke}
V(g,T)\le\liminf_{k\to\infty}V(g_k,T)\le\eta,
\end{equation}
we find, from definition \eq{e:av}, that $V_\vep(f,T)\le V(g,T)\le\eta$. It remains
to take into account the arbitrariness of $\eta>V_{\vep+0}(f,T)$.
Items (b) and (c) were essentially established in \cite{Fr} for $T=[a,b]$ and $M=\Rb^N$
as Propositions 3.5 and 3.6, respectively. For the sake of completeness, we present the
proofs of (b) and (c) in our more general situation.
(b) The sufficiency ($\!\Leftarrow$) is clear. Now we establish the necessity
($\Rightarrow\!$). By definition \eq{e:av}, given $k\in\Nb$, there is $g_k\in\BV(T;M)$
such that
\begin{equation} \label{e:25on}
d_{\infty,T}(f,g_k)\le\vep\quad\mbox{and}\quad
V_\vep(f,T)\le V(g_k,T)\le V_\vep(f,T)+(1/k).
\end{equation}
From \eq{e:tipb} and \eq{e:25on}, we find $d(g_k(t),g_j(t))\le2\vep$ for all
$k,j\in\Nb$ and~$t\in T$, and so, the sequence $\{g_k\}$ is \pw\ bounded on $T$,
and since $(M,d)$ is \emph{proper}, $\{g_k\}$ is \pw\ \rc\ on~$T$. Moreover,
by \eq{e:25on}, $\sup_{k\in\Nb}V(g_k,T)\le V_\vep(f,T)+1<\infty$.
By the Helly-type selection principle (V.4) in $\BV(T;M)$, there are a subsequence of
$\{g_k\}$, again denoted by $\{g_k\}$, and a function $g\in\BV(T;M)$ such that
$g_k\to g$ on $T$. As in \eq{e:kove}, we get $d_{\infty,T}(f,g)\le\vep$, and so,
\eq{e:av}, \eq{e:mke} and \eq{e:25on} yield
\begin{equation*}
V_\vep(f,T)\le V(g,T)\le\lim_{k\to\infty}V(g_k,T)=V_\vep(f,T).
\end{equation*}
(c) Passing to a subsequence of $\{f_j\}$ (if necessary), we may assume that the
right-hand side of the inequality in (c) is equal to $C_\vep=\lim_{j\to\infty}V_\vep(f_j,T)$
and finite. Given $\eta>C_\vep$, there is $j_0=j_0(\eta,\vep)\in\Nb$ such that
$\eta>V_\vep(f_j,T)$ for all $j\ge j_0$. For every $j\ge j_0$, by the definition of
$V_\vep(f_j,T)$, there is $g_j\in\BV(T;M)$ such that
\begin{equation} \label{e:t25}
d_{\infty,T}(f_j,g_j)\le\vep\quad\mbox{and}\quad V(g_j,T)\le\eta.
\end{equation}
Since $f_j\to f$ \pw\ on $T$, $\{f_j\}$ is \pw\ \rc\ on $T$, and so, $\{f_j\}$ is \pw\
bounded on $T$, i.e., $B(t)=\sup_{j,k\in\Nb}d(f_j(t),f_k(t))<\infty$ for all $t\in T$.
By the triangle inequality for $d$ and \eq{e:t25}, given $j,k\ge j_0$ and $t\in T$, we have
\begin{align*}
d(g_j(t),g_k(t))&\le d(g_j(t),f_j(t))+d(f_j(t),f_k(t))+d(f_k(t),g_k(t))\\[3pt]
&\le d_{\infty,T}(g_j,f_j)+B(t)+d_{\infty,T}(f_k,g_k)\le B(t)+2\vep.
\end{align*}
This implies that the sequence $\{g_j\}_{j=j_0}^\infty$ is \pw\ bounded on $T$, and
since $(M,d)$ is \emph{proper}, it is \pw\ \rc\ on~$T$. It follows from \eq{e:t25} that
$\sup_{j\ge j_0}V(g_j,T)$ does not exceed $\eta<\infty$,
and so, by the Helly-type selection principle (V.4)
in $\BV(T;M)$, there are a subsequence $\{g_{j_p}\}_{p=1}^\infty$ of
$\{g_j\}_{j=j_0}^\infty$ and a function $g\in\BV(T;M)$ such that $g_{j_p}\to g$ \pw\
on~$T$ as $p\to\infty$. Since $f_{j_p}\to f$ \pw\ on $T$ as $p\to\infty$, we find,
from \eq{e:t25} and property (V.3) on p.~\pageref{p:V}, that
\begin{equation*}
d_{\infty,T}(f,g)\le\liminf_{p\to\infty}d_{\infty,T}(f_{j_p},g_{j_p})\le\vep
\end{equation*}
and
\begin{equation*}
V(g,T)\le\liminf_{p\to\infty}V(g_{j_p},T)\le\eta.
\end{equation*}
Now, definition \eq{e:av} implies $V_\vep(f,T)\le V(g,T)\le\eta$ for all $\eta>C_\vep$,
and so, $V_\vep(f,T)\le C_\vep=\lim_{j\to\infty}V_\vep(f_j,T)$, which was to be proved.
\sq
\begin{remark}
The inequality in Lemma~\ref{l:proper}(c) agrees with the left-hand side inequality in
Lemma~\ref{l:uc}(a): in fact, if $(M,d)$ is \emph{proper}, $\{f_j\}\subset M^T$ and
$f_j\rightrightarrows f$ on $T$, then, by Lemma~\ref{l:proper}(a),
\begin{equation*}
V_\vep(f,T)=V_{\vep+0}(f,T)\le\liminf_{j\to\infty}V_\vep(f_j,T),\quad\vep>0.
\end{equation*}
The properness of $(M,d)$ in Lemma~\ref{l:proper} is essential:
item (a) is illustrated in Example~\ref{ex:gDf}(e) on p.~\pageref{p:rico},
(b)---in Example~\ref{ex:ims1}, and (c)---in Example~\ref{ex:ims2}.
\end{remark}
\chapter{Examples of approximate variations} \label{s:exav}
This chapter is devoted to various examples of approximate variations. In particular,
it is shown that all assertions in Section~\ref{ss:pro} are sharp.
\section{Functions with values in a normed linear space}
\begin{example} \label{ex:1} \rm
Let $T\subset\Rb$ and $(M,\|\cdot\|)$ be a normed linear space (cf.~p.~\pageref{p:nls}).
We have $d_{\infty,T}(f,g)=\|f-g\|_{\infty,T}$, $f,g\in M^T$, where the
\emph{uniform norm\/} on $M^T$ is given by
\begin{equation*}
\|f\|_{\infty,T}=\sup_{t\in T}\|f(t)\|,\quad\,\,f\in M^T.
\end{equation*}
We are going to estimate (and/or evaluate) the approximate variation
$\{V_\vep(f,T)\}_{\vep>0}$ for the function $f:T\to M$ defined, for $x,y\in M$, $x\ne0$,
by
\begin{equation} \label{e:fxy}
\mbox{$f(t)=\vfi(t)x+y$, \,$t\in T$, \,\,where \,$\vfi\in\BV(T;\Rb)$ is
\emph{nonconstant}.}
\end{equation}
To begin with, recall that $0<|\vfi(T)|\le V(\vfi,T)<\infty$ and
\begin{equation*}
|\vfi(T)|=\sup_{s,t\in T}|\vfi(s)-\vfi(t)|=\mbox{\rm s}(\vfi)-\mbox{\rm i}(\vfi),
\end{equation*}
where $\mbox{\rm s}(\vfi)\!\equiv\!\mbox{\rm s}(\vfi,T)\!=\!\sup_{t\in T}\vfi(t)$ and
$\mbox{i}(\vfi)\!\equiv\!\mbox{\rm i}(\vfi,T)\!=\!\inf_{t\in T}\vfi(t)$. Moreover,
\begin{equation} \label{e:sif}
\biggl|\vfi(t)-\frac{\mbox{\rm i}(\vfi)+\mbox{\rm s}(\vfi)}2\biggr|\le
\frac{\mbox{\rm s}(\vfi)-\mbox{\rm i}(\vfi)}2=\frac{|\vfi(T)|}2\quad\,
\mbox{for all \,\,$t\in T$.}
\end{equation}
In fact, given $t\in T$, we have $\mbox{\rm i}(\vfi)\le\vfi(t)\le\mbox{\rm s}(\vfi)$,
and so, subtracting $(\mbox{\rm i}(\vfi)+\mbox{\rm s}(\vfi))/2$ from
both sides, we get
\begin{align*}
-\frac{|\vfi(T)|}2=\frac{\mbox{\rm i}(\vfi)-\mbox{\rm s}(\vfi)}2&=
\mbox{\rm i}(\vfi)-\frac{\mbox{\rm i}(\vfi)+\mbox{\rm s}(\vfi)}2\le\\
&\le\vfi(t)-\frac{\mbox{\rm i}(\vfi)+\mbox{\rm s}(\vfi)}2\le\\
&\le\mbox{\rm s}(\vfi)-\frac{\mbox{\rm i}(\vfi)+\mbox{\rm s}(\vfi)}2=
\frac{\mbox{\rm s}(\vfi)-\mbox{\rm i}(\vfi)}2=\frac{|\vfi(T)|}2.
\end{align*}
Since $|f(T)|=|\vfi(T)|\!\cdot\!\|x\|$, by \eq{e:zero}, $V_\vep(f,T)=0$ for all
$\vep\ge|\vfi(T)|\!\cdot\!\|x\|$. Furthermore, if
$c\equiv c(t)=(\mbox{\rm i}(\vfi)+\mbox{\rm s}(\vfi))(x/2)+y$, $t\in T$, then $c$
is a constant function on $T$ and, by \eq{e:sif}, we have
\begin{equation*}
\|f(t)-c\|=\biggl|\vfi(t)-\frac{\mbox{\rm i}(\vfi)+\mbox{\rm s}(\vfi)}2\biggr|\!\cdot\!\|x\|
\le\frac{|\vfi(T)|}2\!\cdot\!\|x\|\quad\,\,\mbox{for all \,\,$t\in T$,}
\end{equation*}
i.e., $\|f-c\|_{\infty,T}\le|\vfi(T)|\!\cdot\!\|x\|/2$. By \eq{e:ze1}, we find
\begin{equation} \label{e:ov2}
V_\vep(f,T)=0\quad\mbox{for \,all}\quad\vep\ge|\vfi(T)|\!\cdot\!\|x\|/2.
\end{equation}
Now, assume that $0<\vep<|\vfi(T)|\!\cdot\!\|x\|/2$. Lemma~\ref{l:71}(f) implies
\begin{equation} \label{e:tvd}
V_\vep(f,T)\ge|f(T)|-2\vep=|\vfi(T)|\!\cdot\!\|x\|-2\vep.
\end{equation}
Define the function $g\in M^T$ by
\begin{align}
g(t)&=\biggl[\biggl(1-\frac{2\vep}{V(\vfi,T)\|x\|}\biggr)\vfi(t)+
\frac{(\mbox{\rm i}(\vfi)+\mbox{\rm s}(\vfi))\,\vep}{V(\vfi,T)\|x\|}\biggr]x+y=
\label{e:gt1}\\[4pt]
&=\vfi(t)x-\frac{2\vep}{V(\vfi,T)\|x\|}\biggl(\vfi(t)-
\frac{\mbox{\rm i}(\vfi)+\mbox{\rm s}(\vfi)}2\biggr)x+y,\quad t\in T.
\label{e:gt2}
\end{align}
Note that since $|\vfi(T)|\!\le\! V(\vfi,T)$, the assumption on $\vep$ gives
$\vep\!<\!V(\vfi,T)\|x\|/2$, and so, $0<2\vep/(V(\vfi,T)\|x\|)<1$. Given $t\in T$,
\eq{e:gt2} and \eq{e:sif} imply
\begin{equation*}
\|f(t)\!-\!g(t)\|\!=\!\frac{2\vep}{V(\vfi,T)\|x\|}\!\cdot\!
\biggl|\vfi(t)-\frac{\mbox{\rm i}(\vfi)+\mbox{\rm s}(\vfi)}2\biggr|\!\cdot\!\|x\|
\le\frac{2\vep}{V(\vfi,T)}\!\cdot\!\frac{|\vfi(T)|}{2}\le\vep,
\end{equation*}
and so, $\|f-g\|_{\infty,T}\le\vep$. By \eq{e:gt1}, we find
\begin{equation*}
V(g,T)=\biggl(1-\frac{2\vep}{V(\vfi,T)\|x\|}\biggr)V(\vfi,T)\|x\|=V(\vfi,T)\|x\|-2\vep.
\end{equation*}
Hence, by definition \eq{e:av}, $V_\vep(f,T)\le V(g,T)=V(\vfi,T)\|x\|-2\vep$.
From here and \eq{e:tvd} we conclude that
\begin{equation} \label{e:trdo}
|\vfi(T)|\cdot\|x\|-2\vep\le V_\vep(f,T)\le V(\vfi,T)\|x\|-2\vep
\,\,\,\mbox{if}\,\,\,0\!<\!\vep\!<\!|\vfi(T)|\cdot\|x\|/2.
\end{equation}
In particular, if $\vfi\in\Rb^T$ is (nonconstant and) \emph{monotone}, then
$V(\vfi,T)=|\vfi(T)|$, and so, \eq{e:trdo} yields
\begin{equation} \label{e:mntn}
\mbox{if \,\,$0\!<\!\vep\!<\!|\vfi(T)|\!\cdot\!\|x\|/2$, \,then
\,\,$V_\vep(f,T)=|\vfi(T)|\cdot\|x\|-2\vep$.}
\end{equation}
Clearly, Example~\ref{exa:t} is a particular case of \eq{e:mntn} and \eq{e:ov2} with
$T=[0,1]$, $M=\Rb$, $x=1$, $y=0$, and $\vfi(t)=t$, $t\in T$.
However, if $\vfi$ from \eq{e:fxy} is nonmonotone, both inequalities \eq{e:trdo} may be
strict (cf.~Remark~\ref{r:siq}). Note also that assertion \eq{e:mntn} implies the classical
Helly selection principle for monotone functions (cf.\ Remark~\ref{r:cHp}).
\end{example}
\begin{example} \label{ex:2} \rm
Here we show that the inequalities in Lemma~\ref{l:mor} are sharp and may be strict.
In fact, letting $\vfi(t)=t$, $t\in T=[0,1]$, and $y=0$ in \eq{e:fxy}, and setting
$T_1=[0,\frac12]$ and $T_2=[\frac12,1]$, we get, by virtue of \eq{e:mntn}
and \eq{e:ov2},
\begin{equation*}
V_\vep(f,T)=\left\{
\begin{tabular}{ccr}
$\!\!\|x\|-2\vep$ & \mbox{if} & $0<\vep<\frac12\|x\|$,\\[3pt]
$\!\!0$ & \mbox{if} & $\vep\ge\frac12\|x\|$,
\end{tabular}\right.
\end{equation*}
and, for $i=1,2$,
\begin{equation*}
V_\vep(f,T_i)=\left\{
\begin{tabular}{ccr}
$\!\!\frac12\|x\|-2\vep$ & \mbox{if} & $0<\vep<\frac14\|x\|$,\\[3pt]
$\!\!0$ & \mbox{if} & $\vep\ge\frac14\|x\|$.
\end{tabular}\right.
\end{equation*}
It remains, in Lemma~\ref{l:mor}, to consider the cases:
(a) $0<\vep<\frac14\|x\|$, (b) $\frac14\|x\|\le\vep<\frac12\|x\|$,
and (c) $\vep\ge\frac12\|x\|$. Explicitly, we have, in place of
\begin{align*}
\mbox{$V_\vep(f,T_1)+V_\vep(f,T_2)$}&\le\mbox{$V_\vep(f,T)$}\le
\mbox{$V_\vep(f,T_1)+V_\vep(f,T_2)+2\vep:$}\\[8pt]
\mbox{(a) $(\frac12\|x\|\!-\!2\vep)\!+\!(\frac12\|x\|\!-\!2\vep)$}&
<\mbox{$\|x\|\!-\!2\vep$}=
\mbox{$(\frac12\|x\|\!-\!2\vep)\!+\!(\frac12\|x\|\!-\!2\vep)+2\vep;$}\\[2pt]
\mbox{(b)\quad $0+0$}&<\mbox{$\|x\|\!-\!2\vep$}\le\mbox{$0+0+2\vep;$}\\[2pt]
\mbox{(c)\quad $0+0$}&=\mbox{\quad\quad$\!0\,$\quad}<
\mbox{$0+0+2\vep.$}
\end{align*}
\end{example}
\begin{example} \label{ex:thr} \rm
Let $\tau\in I=[a,b]$, $(M,d)$ be a metric space, and $x,y\in M$, $x\ne y$.
Define $f\in M^I$ by
\begin{equation} \label{e:ftau}
f(t)\equiv f_\tau(t)=\left\{
\begin{tabular}{ccl}
$\!\!x$ & \mbox{if} & $t=\tau$,\\[2pt]
$\!\!y$ & \mbox{if} & $t\in I$, $t\ne\tau$.
\end{tabular}\right.
\end{equation}
Clearly, $|f(I)|=d(x,y)$, $V(f,I)=d(x,y)$ if $\tau\in\{a,b\}$, and \mbox{$V(f,I)=2d(x,y)$}
if \mbox{$a\!<\!\tau\!<\!b$}. By \eq{e:zero}, we get $V_\vep(f,I)=0$
for all $\vep\ge d(x,y)$.
Lemma~\ref{l:71}(f) provides the following inequalities for $0<\vep<d(x,y)$:
(a) if $\tau=a$ or $\tau=b$, then
\begin{align*}
d(x,y)-2\vep&\le V_\vep(f,I)\le d(x,y)\quad\mbox{if}\quad
0<\vep<\textstyle\frac12d(x,y),\\[3pt]
0&\le V_\vep(f,I)\le d(x,y)\quad\mbox{if}\quad\textstyle\frac12d(x,y)\le\vep<d(x,y);
\end{align*}
(b) if $a<\tau<b$, then
\begin{align*}
d(x,y)-2\vep&\le V_\vep(f,I)\le 2d(x,y)\quad\mbox{if}\quad
0<\vep<\textstyle\frac12d(x,y),\\[3pt]
0&\le V_\vep(f,I)\le 2d(x,y)\quad\mbox{if}\quad\textstyle\frac12d(x,y)\le\vep<d(x,y).
\end{align*}
Under additional assumptions on the metric space $(M,d)$, the values $V_\vep(f,I)$ for
$0<\vep<d(x,y)$ can be given more exactly. To see this, we consider two cases
(A) and (B) below.
(A) Let $M=\{x,y\}$ be the two-point set with metric $d$ and $0\!<\!\vep\!<\!d(x,y)$.
Since $f(t)=x$ or $f(t)=y$ for all $t\in I$, we have: if $g\in M^I$ and
$d_{\infty,I}(f,g)\le\vep$, then $g=f$ on $I$, i.e., $G_{\vep,I}(f)=\{f\}$. Thus,
$V_\vep(f,I)=V(f,I)$, and so,
\begin{align*}
V_\vep(f,I)&=d(x,y)\quad\,\,\,\mbox{if}\quad \tau\in\{a,b\},\\[3pt]
V_\vep(f,I)&=2d(x,y)\quad \mbox{if}\quad a<\tau<b.
\end{align*}
(B) Let $(M,\|\cdot\|)$ be a normed linear space with induced metric $d$ and
$0<\vep<d(x,y)=\|x-y\|$. By \eq{e:zero2}, $V_\vep(f,I)=0$ for all
$\vep\ge\frac12\|x-y\|$. We assert that if $0<\vep<\frac12\|x-y\|$, then
\begin{align}
V_\vep(f,I)&=\|x-y\|-2\vep\qquad\,\mbox{if}\quad\tau\in\{a,b\},\label{e:tab}\\[3pt]
V_\vep(f,I)&=2(\|x-y\|-2\vep)\quad\mbox{if}\quad a<\tau<b.\label{e:ntab}
\end{align}
In order to establish these equalities, we first note that the function $f$ from
\eq{e:ftau} can be expressed as (cf.~\eq{e:fxy})
\begin{equation} \label{e:ftex}
\mbox{$f(t)\!=\!\vfi(t)(x-y)+y$, where $\vfi(t)\!\equiv\!\vfi_\tau(t)\!=\!\left\{
\begin{tabular}{ccl}
$\!\!1$ & \mbox{if} & $t=\tau$,\\[2pt]
$\!\!0$ & \mbox{if} & $t\ne\tau$,
\end{tabular}\right.$ \,$t\in I$.}
\end{equation}
\emph{Proof of}~\eq{e:tab}. If $\tau\in\{a,b\}$, then $\vfi$ is monotone on $I$ with
$\mbox{\rm i}(\vfi)\!=\!0$, $\mbox{\rm s}(\vfi)\!=\!1$, and $V(\vfi,I)\!=\!|\vfi(I)|\!=\!1$.
Now, \eq{e:tab} follows from \eq{e:ftex}~and~\eq{e:mntn}.
\smallbreak
Note that function $g$ from \eq{e:gt1}, used in obtaining \eq{e:tab}, is of the form
\begin{equation*}
g(t)=\biggl[\biggl(1-\frac{2\vep}{\|x-y\|\,}\biggr)\vfi(t)+
\frac{\vep}{\|x-y\|}\biggr](x-y)+y,\quad t\in I,
\end{equation*}
i.e., if $\mbox{\rm e}_{x,y}=(x-y)/\|x-y\|$ is the \emph{unit vector\/}
(`directed from $y$ to $x$'), then
\begin{equation} \label{e:unve}
g(\tau)=x-\vep\mbox{\rm e}_{x,y}\quad\mbox{and}\quad
g(t)=y+\vep\mbox{\rm e}_{x,y},\,\,\,t\in I\setminus\{\tau\}.
\end{equation}
This implies $\|f-g\|_{\infty,I}=\vep$ (for all $\tau\in I$), and we have,
for $\tau\in\{a,b\}$,
\begin{equation} \label{e:Vgta}
V(g,I)=|g(I)|=\|(x-\vep\mbox{\rm e}_{x,y})-(y+\vep\mbox{\rm e}_{x,y})\|
=\|x-y\|-2\vep.
\end{equation}
\emph{Proof of}~\eq{e:ntab}. Suppose $a<\tau<b$ and $0<\vep<\frac12\|x-y\|$.
First, consider an arbitrary function $g\in M^I$ such that
$\|f-g\|_{\infty,I}=\sup_{t\in I}\|f(t)-g(t)\|\le\vep$. Since $P=\{a,\tau,b\}$ is a
partition of $I$, by virtue of \eq{e:10} and \eq{e:ftau}, we get
\begin{align}
V(g,I)&\ge\|g(\tau)-g(a)\|+\|g(b)-g(\tau)\|\nonumber\\[3pt]
&\ge(\|f(\tau)-f(a)\|-2\vep)+(\|f(b)-f(\tau)\|-2\vep)\label{e:ggff2e}\\[3pt]
&=2(\|x-y\|-2\vep).\nonumber
\end{align}
Due to the arbitrariness of $g$ as above, \eq{e:av} implies
$V_\vep(f,I)\ge2(\|x-y\|-2\vep)$. Now, for the function $g$ from \eq{e:unve},
the additivity of $V$ and \eq{e:Vgta} yield
\begin{equation*}
V(g,I)\!=\!V(g,[a,\tau])+V(g,[\tau,b])\!=\!|g([a,\tau])|+|g([\tau,b])|
\!=\!2(\|x-y\|-2\vep),
\end{equation*}
and so, $V_\vep(f,I)\!\le\! V(g,I)\!=\!2(\|x-y\|-2\vep)$. This completes the proof of~\eq{e:ntab}.
\end{example}
\begin{remark} \label{r:siq}
If $\vfi$ from \eq{e:fxy} is nonmonotone, inequalities in \eq{e:trdo} may be
\emph{strict}. In fact, supposing $a<\tau<b$, we find that the function $\vfi=\vfi_\tau$
from \eq{e:ftex} is not monotone, $|\vfi(I)|=1$ and $V(\vfi,I)=2$, and so, by
\eq{e:ntab}, inequalities \eq{e:trdo} for function $f$ from \eq{e:ftex} are of the form:
\begin{equation*}
\|x-y\|-2\vep<V_\vep(f,I)=2(\|x-y\|-2\vep)<2\|x-y\|-2\vep
\end{equation*}
if $0<\vep<\frac12\|x-y\|$.
\end{remark}
\begin{example} \label{ex:midp}
Let $I=[a,b]$, $a<\tau<b$, $(M,\|\cdot\|)$ be a normed linear space, $x,y\in M$,
$x\ne y$, and $\al\in\Rb$. Define $f\in M^I$ by
\begin{equation} \label{e:alp}
\mbox{$f(t)\!=\!x$ if $a\le t<\tau$, \,$f(\tau)\!=\!(1\!-\!\al)x\!+\!\al y$,
and $f(t)\!=\!y$ if $\tau<t\le b$.}
\end{equation}
We are going to evaluate the approximate variation $\{V_\vep(f,I)\}_{\vep>0}$
for all $\al\!\in\!\Rb$.
For this, we consider three possibilities: $0\le\al\le1$, $\al<0$, and $\al>1$.
\emph{Case\/ $0\le\al\le1$}. We assert that (independently of $\al\in[0,1]$)
\begin{equation} \label{e:veo}
V_\vep(f,I)=\left\{
\begin{tabular}{ccr}
$\!\!\|x-y\|-2\vep$ & \mbox{if} & $0<\vep<\textstyle\frac12\|x-y\|$,\\[3pt]
$\!\!0$ & \mbox{if} & $\vep\ge\textstyle\frac12\|x-y\|$.
\end{tabular}\right.
\end{equation}
To see this, we note that $f$ can be represented in the form \eq{e:fxy}:
\begin{equation*}
f(t)\!=\!\vfi(t)(x-y)+(1-\al)x+\al y\!\quad\mbox{with}\!\quad\vfi(t)=\left\{
\begin{tabular}{ccc}
$\!\!\al$ & \mbox{if} & $a\le t<\tau$,\\[2pt]
$\!\!0$ & \mbox{if} & $t=\tau$,\\[2pt]
$\!\!\al\!-\!1$ & \mbox{if} & $\tau<t\le b$.
\end{tabular}\right.
\end{equation*}
Since $\al\in[0,1]$, $\vfi$ is nonincreasing on $I$ and $|\vfi(I)|=|\al-(\al-1)|=1$.
Hence, \eq{e:mntn} implies the first line in \eq{e:veo}. The second line in \eq{e:veo} is
a consequence of \eq{e:ov2}.
\emph{Case\/ $\al<0$}. The resulting form of $V_\vep(f,I)$ is given by \eq{e:cas1},
\eq{e:cas2} and \eq{e:buda}.
Now we turn to their proofs. We set $x_\al=(1-\al)x+\al y$ in \eq{e:alp} and note that
\begin{align}
x_\al-x&=(-\al)(x-y)=(-\al)\|x-y\|\mbox{\rm e}_{x,y},\label{e:xax}\\[3pt]
x_\al-y&=(1-\al)(x-y)=(1-\al)\|x-y\|\mbox{\rm e}_{x,y},\label{e:xay}
\end{align}
where $\mbox{\rm e}_{x,y}=(x-y)/\|x-y\|$.
Let us evaluate $|f(I)|$ and $V(f,I)$. Since $1-\al>-\al$, and $\al<0$ implies
$1-\al>1$, by \eq{e:xax} and \eq{e:xay}, $\|x_\al-y\|>\|x_\al-x\|$ and
$\|x_\al-y\|>\|x-y\|$, and since $f$ assumes only values $x$, $x_\al$, and $y$,
\begin{equation*}
|f(I)|=\|x_\al-y\|=(1-\al)\|x-y\|.
\end{equation*}
For $V(f,I)$, by the additivity (V.2) of $V$, \eq{e:xax} and \eq{e:xay}, we find
\begin{align*}
V(f,I)&=V(f,[a,\tau])+V(f,[\tau,b])=|f([a,\tau])|+|f([\tau,b])|\\[3pt]
&=\|f(\tau)-f(a)\|+\|f(b)-f(\tau)\|=\|x_\al-x\|+\|y-x_\al\|\\[3pt]
&=(-\al)\|x-y\|+(1-\al)\|x-y\|=(1-2\al)\|x-y\|.
\end{align*}
Setting $c=c(t)=\frac12(x_\al+y)$ for all $t\in I$, we get, by \eq{e:xay},
\begin{equation*}
\|x_\al-c\|=\|y-c\|=\textstyle\frac12\|x_\al-y\|=\frac12(1-\al)\|x-y\|=\frac12|f(I)|,
\end{equation*}
and
\begin{align*}
\|x-c\|&=\textstyle\|x-\frac12(x_\al+y)\|=\frac12\|(x\!-\!x_\al)+(x\!-\!y)\|
=\frac12\|\al(x\!-\!y)+(x\!-\!y)\|\\[4pt]
&=\textstyle\frac12|\al+1|\!\cdot\!\|x\!-\!y\|\le\frac12(1+|\al|)\|x\!-\!y\|
\stackrel{{\scriptscriptstyle(\al<0)}}{=}
\frac12(1\!-\!\al)\|x\!-\!y\|=\frac12|f(I)|.
\end{align*}
Hence $\|f-c\|_{\infty,I}\le\frac12|f(I)|$, and it follows from \eq{e:ze1} that
\begin{equation} \label{e:buda}
V_\vep(f,I)=0\quad\mbox{if}\quad\vep\ge\textstyle\frac12|f(I)|
=\frac{1-\al}2\|x-y\|.
\end{equation}
It remains to consider the case when $0<\vep<\frac{1-\al}2\|x-y\|$, which we split
into two subcases:
\begin{equation*}
\mbox{(I) $0<\vep<\frac{(-\al)}2\|x-y\|$, and
(II) $\frac{(-\al)}2\|x-y\|\le\vep<\frac{1-\al}2\|x-y\|$.}
\end{equation*}
\emph{Subcase\/}~(I). First, given $g\in M^I$ with $\|f-g\|_{\infty,I}\le\vep$, since
$P=\{a,\tau,b\}$ is a partition of $I$, applying \eq{e:ggff2e}, we get
\begin{align*}
V(g,I)&\ge(\|f(\tau)-f(a)\|-2\vep)+(\|f(b)-f(\tau)\|-2\vep)\\[3pt]
&=((-\al)\|x-y\|-2\vep)+((1-\al)\|x-y\|-2\vep)\\[3pt]
&=(1-2\al)\|x-y\|-4\vep,
\end{align*}
and so, by \eq{e:av}, $V_\vep(f,I)\ge(1-2\al)\|x-y\|-4\vep$. Now, we define a concrete
(=`test') function $g\in M^I$ by the rule:
\begin{align}
g(t)&=x+\vep\mbox{\rm e}_{x,y}\quad\mbox{if}\quad a\le t<\tau,\nonumber\\[3pt]
g(\tau)&=x_\al-\vep\mbox{\rm e}_{x,y},\label{e:gco}\\[3pt]
g(t)&=y+\vep\mbox{\rm e}_{x,y}\quad\mbox{if}\quad \tau<t\le b.\nonumber
\end{align}
Clearly, by \eq{e:alp} and \eq{e:gco}, $\|f-g\|_{\infty,I}=\vep$. Furthermore,
\begin{align*}
V(g,I)&=\|g(\tau)-g(a)\|+\|g(\tau)-g(b)\|\\[3pt]
&=\|(x_\al-x)-2\vep\mbox{\rm e}_{x,y}\|+\|(x_\al-y)-2\vep\mbox{\rm e}_{x,y}\|\\[3pt]
&=\Bigl\|(-\al)\|x-y\|\mbox{\rm e}_{x,y}-2\vep\mbox{\rm e}_{x,y}\Bigr\|
+\Bigl\|(1-\al)\|x-y\|\mbox{\rm e}_{x,y}-2\vep\mbox{\rm e}_{x,y}\Bigr\|\\[3pt]
&=\bigl|(-\al)\|x-y\|-2\vep\bigr|+\bigl|(1-\al)\|x-y\|-2\vep\bigr|.
\end{align*}
Assumption (I) implies $2\vep<(-\al)\|x-y\|<(1-\al)\|x-y\|$, so
\begin{equation*}
V(g,I)\!=\!((-\al)\|x-y\|-2\vep)+((1-\al)\|x-y\|-2\vep)\!=\!(1-2\al)\|x-y\|-4\vep.
\end{equation*}
By \eq{e:av}, $V_\vep(f,I)\le V(g,I)=(1-2\al)\|x-y\|-4\vep$. Thus,
\begin{equation} \label{e:cas1}
V_\vep(f,I)=(1-2\al)\|x-y\|-4\vep\quad\mbox{if}\quad
0<\vep<\textstyle\frac{(-\al)}2\|x-y\|.
\end{equation}
Note that, in agreement with Lemma~\ref{l:71}(a), $V_\vep(f,I)\to V(f,I)$ as
$\vep\to+0$.
\emph{Subcase\/}~(II). First, given $g\in M^I$ with $\|f-g\|_{\infty,I}\le\vep$,
by virtue of \eq{e:10} and \eq{e:xay}, we get
\begin{equation*}
V(g,I)\ge\|g(b)-g(\tau)\|\ge\|f(b)-f(\tau)\|-2\vep=(1-\al)\|x-y\|-2\vep,
\end{equation*}
and so, definition \eq{e:av} implies $V_\vep(f,I)\ge(1-\al)\|x-y\|-2\vep$. Now, define
a test function $g\in M^I$ by
\begin{equation} \label{e:gspe}
\mbox{$g(t)=x_\al-\vep\mbox{\rm e}_{x,y}$ \,if \,$a\le t\le\tau$, and
\,$g(t)=y+\vep\mbox{\rm e}_{x,y}$ \,if \,$\tau<t\le b$.}
\end{equation}
Let us show that $\|f-g\|_{\infty,I}\le\vep$. Clearly, by \eq{e:alp}, $\|f(t)-g(t)\|=\vep$
for all $\tau\le t\le b$. Now, suppose $a\le t<\tau$. We have, by \eq{e:xax},
\begin{align*}
\|f(t)-g(t)\|&=\|x-x_\al+\vep\mbox{\rm e}_{x,y}\|=
\Bigl\|\al\|x-y\|\mbox{\rm e}_{x,y}+\vep\mbox{\rm e}_{x,y}\Bigr\|\\[2pt]
&=\bigl|\al\|x-y\|+\vep\bigr|\equiv A_\al.
\end{align*}
Suppose first that $\al>-1$ (i.e., $x_\al$ is closer to $x$ than $x$ to $y$ in the sense that
$\|x_\al-x\|=(-\al)\|x-y\|<\|x-y\|$). Then $(-\al)<\frac12(1-\al)$, and so, for $\vep$
from subcase (II) we have either
\begin{equation*}
\mbox{(II${}_1$) $\frac{(-\al)}2\|x\!-\!y\|\le\vep\!<\!(-\al)\|x\!-\!y\|$, or
(II{$_2$}) $(-\al)\|x\!-\!y\|\le\vep\!<\!\frac{1-\al}2\|x\!-\!y\|$.}
\end{equation*}
In case (II${}_1$), $\al\|x-y\|+\vep<0$, which implies $A_\al=(-\al)\|x-y\|-\vep$.
Hence, the left-hand side inequality in (II${}_1$) gives $A_\al\le\vep$. In case (II${}_2$),
$\al\|x-y\|+\vep\ge0$, which implies $A_\al=\al\|x-y\|+\vep<\vep$ (because $\al<0$).
Now, suppose $\al\le-1$ (i.e., $\|x-y\|\le(-\al)\|x-y\|=\|x_\al-x\|$, which means that
$x_\al$ is farther from $x$ than $x$ from $y$), so that $\frac12(1-\al)\le(-\al)$.
In this case, assumption (II) implies only condition (II${}_1$), and so, as above,
$A_\al=(-\al)\|x-y\|-\vep\le\vep$. This completes the proof of $\|f-g\|_{\infty,I}\le\vep$.
For the variation $V(g,I)$ of function $g$ from \eq{e:gspe}, we have, by \eq{e:xay},
\begin{align*}
V(g,I)&=\|(x_\al-\vep\mbox{\rm e}_{x,y})-(y+\vep\mbox{\rm e}_{x,y})\|
=\|(x_\al-y)-2\vep\mbox{\rm e}_{x,y}\|\\[3pt]
&=\Bigl\|(1-\al)\|x-y\|\mbox{\rm e}_{x,y}-2\vep\mbox{\rm e}_{x,y}\Bigr\|
=(1-\al)\|x-y\|-2\vep.
\end{align*}
Hence $V_\vep(f,I)\le V(g,I)=(1-\al)\|x-y\|-2\vep$. Thus, we have shown that
\begin{equation} \label{e:cas2}
V_\vep(f,I)=(1-\al)\|x-y\|-2\vep\quad\mbox{if}\quad
\textstyle\frac{(-\al)}2\|x-y\|\le\vep<\frac{(1-\al)}2\|x-y\|.
\end{equation}
\emph{Case\/ $\al>1$}. \label{p:a>1}
We reduce this case to the case $\al<0$ and apply Lemma~\ref{l:chvar}.
Set $T=[a',b']$ with $a'=2\tau-b$ and $b'=2\tau-a$, so that $a'<\tau<b'$, and define
$\vfi:T\to\Rb$ by $\vfi(t)=2\tau-t$, $a'\le t\le b'$. Clearly, $\vfi$ is strictly decreasing
on $T$, $\vfi(T)=[a,b]=I$, and $\vfi(\tau)=\tau$. Let us show that the composed
function $f'=f\circ\vfi\in M^T$ is of the same form as \eq{e:alp}.
If $a'\le t<\tau$, then $\tau<\vfi(t)\le b$, and so, by \eq{e:alp},
$f'(t)=f(\vfi(t))=y$; if $t=\tau$, then $f'(\tau)=f(\vfi(\tau))=f(\tau)=x_\al$;
and if $\tau<t\le b'$, then $a\le\vfi(t)<\tau$, and so, $f'(t)=f(\vfi(t))=x$.
Setting $x'=y$, $y'=x$, and $\al'=1-\al$, we get $\al'<0$,
\begin{equation*}
\mbox{$f'(t)=x'$ \,if \,$a'\le t<\tau$,\quad $f'(t)=y'$ \,if \,$\tau<t\le b'$,}
\end{equation*}
and
\begin{equation*}
f'(\tau)=x_\al=(1-\al)x+\al y=\al'y'+(1-\al')x'=(1-\al')x'+\al'y'\equiv x'_{\al'}.
\end{equation*}
By Lemma~\ref{l:chvar}, given $\vep>0$,
\begin{equation*}
V_\vep(f,I)=V_\vep(f,[a,b])=V_\vep(f,\vfi(T))=V_\vep(f\circ\vfi,T)=V_\vep(f',[a',b']),
\end{equation*}
where, since $f'$ is of the form \eq{e:alp}, $V_\vep(f',[a',b'])$ is given by
\eq{e:cas1}, \eq{e:cas2} and \eq{e:buda} with $f$, $x$, $y$, and $\al$ replaced by
$f'$, $x'$, $y'$, and $\al'$, respectively. Noting that $\|x'-y'\|=\|x-y\|$, $1-\al'=\al$,
$1-2\al'=2\al-1$, and $(-\al')=\al-1$, we get, for $\al>1$:
\begin{equation*}
V_\vep(f,I)=\left\{
\begin{tabular}{ccr}
$\!\!(2\al-1)\|x-y\|-4\vep$ & \mbox{if} &
\mbox{$0<\,\vep<\frac{\al-1}2\|x-y\|$},\\[3pt]
$\!\!\al\|x-y\|-2\vep$ & \mbox{if} &
\mbox{$\frac{\al-1}2\|x-y\|\le\,\,\vep<\,\frac\al2\|x-y\|$,\,\,\,\,}\\[3pt]
$\!\!0$ & \mbox{if} & \mbox{$\,\,\vep\,\ge\,\frac\al2\|x-y\|$.\,\,\,}
\end{tabular}\right.
\end{equation*}
Finally, we note that, for $\al>1$, we have, by \eq{e:xax} and \eq{e:xay},
\begin{equation*}
V(f,I)\!=\!\|x-x_\al\|+\|x_\al-y\|\!=\!\al\|x-y\|+(\al-1)\|x-y\|\!=\!(2\al-1)\|x-y\|,
\end{equation*}
and so, $V_\vep(f,I)\to V(f,I)$ as $\vep\to+0$.
\end{example}
\section{The generalized Dirichlet function}
\begin{example}[generalized Dirichlet function] \label{ex:gDf} \rm
This is an illustration of Lem\-ma~\ref{l:Regc} illuminating several specific features
of the approximate variation.
(a) Let $T=I=[a,b]$, $(M,d)$ be a metric space, and $\Qb$ denote (as usual) the set
of all rational numbers. We set $I_1=I\cap\Qb$ and $I_2=I\setminus\Qb$. A function
$f\in M^I$ is said to be a \emph{generalized Dirichlet function\/} if $f\in\Bd(I;M)$ and
\begin{equation*}
\Delta f\equiv\Delta f(I_1,I_2)=\inf_{s\in I_1,t\in I_2}d(f(s),f(t))>0.
\end{equation*}
Clearly, $f\!\notin\!\Reg(I;M)$ (in fact, if, say, $a\!<\!\tau\!\le\! b$, then for all
\mbox{$\delta\!\in\!(0,\tau-a)$},
$s\in(\tau-\delta,\tau)\cap\Qb$ and $t\in(\tau-\delta,\tau)\setminus\Qb$, we have
$d(f(s),f(t))\ge\Delta f>0$).
Setting $|f(I_1,I_2)|=\sup_{s\in I_1,t\in I_2}d(f(s),f(t))$, we find
\begin{equation*}
|f(I_1,I_2)|\le|f(I_1)|+d(f(s_0),f(t_0))+|f(I_2)|,\quad s_0\in I_1,\,\,t_0\in I_2,
\end{equation*}
and
\begin{equation*}
0<\Delta f\le|f(I_1,I_2)|\le|f(I)|=\max\{|f(I_1)|,|f(I_2)|,|f(I_1,I_2)|\}.
\end{equation*}
Furthermore (cf.~Lemma~\ref{l:Regc}), we have
\begin{equation} \label{e:Dirin}
\mbox{$V_\vep(f,I)=\infty$ \,\,if \,\,$0<\vep<\Delta f/2$, \,and
\,\,$V_\vep(f,I)=0$ \,\,if \,\,$\vep\ge|f(I)|$;}
\end{equation}
the values of $V_\vep(f,I)$ for $\Delta f/2\le\vep<|f(I)|$ depend on (the structure of)
the metric space $(M,d)$ in general (see items (b), (c) and (d) below). The second
assertion in \eq{e:Dirin} is a consequence of \eq{e:zero}.
In order to prove the first assertion in \eq{e:Dirin}, we show that if $0<\vep<\Delta f/2$,
$g\in M^I$ and $d_{\infty,I}(f,g)\le\vep$, then $V(g,I)=\infty$ (cf.~\eq{e:besk}).
In fact, given $n\in\Nb$, let $P=\{t_i\}_{i=0}^{2n}$ be a partition of $I$ (i.e.,
$a\le t_0<t_1<t_2<\dots<t_{2n-1}<t_{2n}\le b$) such that
$\{t_{2i}\}_{i=0}^n\subset I_1$ and $\{t_{2i-1}\}_{i=1}^n\subset I_2$. Given
$i\in\{1,2,\dots,n\}$, by the triangle inequality for $d$, we have
\begin{align}
d(f(t_{2i}),f(t_{2i-1}))&\le d(f(t_{2i}),g(t_{2i}))\!+\!d(g(t_{2i}),g(t_{2i-1}))
\!+\!d(g(t_{2i-1}),f(t_{2i-1})) \nonumber\\[3pt]
&\le d_{\infty,I_1}(f,g)+d(g(t_{2i}),g(t_{2i-1}))+d_{\infty,I_2}(g,f) \nonumber\\[3pt]
&\le\vep+d(g(t_{2i}),g(t_{2i-1}))+\vep. \label{e:twe1}
\end{align}
It follows from the definition of $V(g,I)$ that
\begin{align}
V(g,I)&\ge\sum_{i=1}^{2n}d(g(t_i),g(t_{i-1}))\ge\sum_{i=1}^nd(g(t_{2i}),g(t_{2i-1}))
\nonumber\\
&\ge\sum_{i=1}^n\Bigl(d(f(t_{2i}),f(t_{2i-1}))-2\vep\Bigr)\ge (\Delta f-2\vep)n.
\label{e:twe2}
\end{align}
It remains to take into account the arbitrariness of $n\in\Nb$.
In a particular case of the classical \emph{Dirichlet function\/} $f=\Dc_{x,y}:I\to M$
defined, for $x,y\in M$, $x\ne y$, by
\begin{equation} \label{e:Dir}
\mbox{$\Dc_{x,y}(t)=x$ \,\,if \,\,$t\in I_1$, \,and
\,\,$\Dc_{x,y}(t)=y$ \,\,if \,\,$t\in I_2$,}
\end{equation}
we have $\Delta f=\Delta\Dc_{x,y}=d(x,y)$ and $|f(I)|=|\Dc_{x,y}(I_1,I_2)|=d(x,y)$,
and so, \eq{e:Dirin} assumes the form
(which was established in \cite[assertion~(4.4)]{Studia17}):
\begin{equation} \label{e:Dirass}
\mbox{$V_\vep(f,I)\!=\!\infty$ \,\,if \,\,$0\!<\!\vep\!<\!d(x,y)/2$, \,and
\,\,$V_\vep(f,I)\!=\!0$ \,\,if \,\,$\vep\!\ge\! d(x,y)$.}
\end{equation}
(b) \label{p:L71b} This example and items (c) and (d) below illustrate the sharpness of
assertions in Lemma~\ref{l:71}(b),\,(d). Let $(M,\|\cdot\|)$ be a normed linear space
with induced metric $d$ (cf.\ p.~\pageref{p:nls}) and $f=\Dc_{x,y}$ be the Dirichlet
function \eq{e:Dir}. Setting $c=c(t)=(x+y)/2$, $t\in I$, we find
\begin{equation*}
2d_{\infty,I}(f,c)=2\max\{\|x-c\|,\|y-c\|\}=\|x-y\|=d(x,y)=|f(I)|,
\end{equation*}
and so, by \eq{e:zero2} and \eq{e:2max}, the second equality in \eq{e:Dirass}
is refined as follows:
\begin{equation} \label{e:refi}
V_\vep(f,I)=0\quad\mbox{for all}\quad\vep\ge\frac{\|x-y\|}2=\frac{d(x,y)}2.
\end{equation}
This shows the sharpness of the inequality in Lemma~\ref{l:71}(b). Inequalities in
Lemma~\ref{l:71}(d) assume the form:
\begin{equation*}
\inf_{\vep>0}(V_\vep(f,I)+\vep)=\frac{\|x-y\|}2<|f(I)|=\|x-y\|=
\inf_{\vep>0}(V_\vep(f,I)+2\vep).
\end{equation*}
More generally, \eq{e:Dirass} and \eq{e:refi} hold for a complete and \emph{metrically
convex\/} (in the sense of K.~Menger \cite{Menger}) metric space $(M,d)$ (see
\cite[Example~1]{Studia17}).
(c) In the context of \eq{e:Dir}, assume that $M=\{x,y\}$ is the two-point set with
metric $d$. If $0<\vep<d(x,y)$, $g\in M^I$ and $d_{\infty,I}(f,g)\le\vep$, then
$g=f=\Dc_{x,y}$ on $I$, and so, $V(g,I)=\infty$. By \eq{e:besk}, the first assertion
in \eq{e:Dirass} can be expressed more exactly as $V_\vep(f,I)=\infty$ for all
$0<\vep<d(x,y)$. Now, (in)equalities in Lemma~\ref{l:71}(d) are of the form:
\begin{equation*}
\inf_{\vep>0}(V_\vep(f,I)+\vep)=d(x,y)=|f(I)|<2d(x,y)=
\inf_{\vep>0}(V_\vep(f,I)+2\vep).
\end{equation*}
(d) \label{p:L71d} Given $x,y\in\Rb$, $x\ne y$, and $0\le r\le|x-y|/2$, we set
\begin{equation*}
M_r=\Rb\setminus\bigl(\textstyle\frac12(x+y)-r,\frac12(x+y)+r\bigr)
\quad\mbox{and}\quad d(u,v)=|u-v|,\,\,u,v\in M_r.
\end{equation*}
Note that $(M_r,d)$ is a proper metric space (cf.~p.~\pageref{p:properms}). If
$f=\Dc_{x,y}:I\to M_r$ is the Dirichlet function \eq{e:Dir} on $I$, we claim that
\begin{equation} \label{e:Mr}
\mbox{$V_\vep(f,I)=\infty$ \,\,if \,\,$0<\vep<\frac12|x-y|+r$, \,and
\,$V_\vep(f,I)=0$ \,otherwise.}
\end{equation}
\emph{Proof of~\eq{e:Mr}}. Since $M_0=\Rb$, assertion \eq{e:Mr} for $r=0$ follows
from \eq{e:Dirass} and \eq{e:refi}. Now, suppose $r>0$. From \eq{e:Dirass}, we find
$V_\vep(f,I)=\infty$ if $0<\vep<\frac12|x-y|$, and $V_\vep(f,I)=0$ if $\vep\ge|x-y|$.
So, only the case when $\frac12|x-y|\le\vep<|x-y|$ is to be considered. We split this
case into two subcases:
\begin{equation*}
\mbox{(I) $\frac12|x-y|\le\vep<\frac12|x-y|+r$, and
(II) $\frac12|x-y|+r\le\vep<|x-y|$.}
\end{equation*}
\emph{Case}~(I). Let us show that if $g:I\to M_r$ and $d_{\infty,I}(f,g)\le\vep$, then
$V(g,I)=\infty$. Given $t\in I=I_1\cup I_2$, the inclusion $g(t)\in M_r$ is equivalent to
\begin{equation} \label{e:gtimr}
g(t)\le\textstyle\frac12(x+y)-r\quad\mbox{or}\quad g(t)\ge\textstyle\frac12(x+y)+r,
\end{equation}
and condition $d_{\infty,I}(f,g)=|f-g|_{\infty,I}\le\vep$ is equivalent to
\begin{equation} \label{e:difg}
\mbox{$|x-g(s)|\le\vep$ \,$\forall\,s\in I_1$, \,and \,$|y-g(t)|\le\vep$ \,$\forall\,t\in I_2$.}
\end{equation}
Due to the symmetry in $x$ and $y$ everywhere, we may assume that $x<y$.
Suppose $s\in I_1$. The first condition in \eq{e:difg} and assumption (I) imply
\begin{equation*}
x-\vep\le g(s)\le x+\vep<\textstyle x+\frac12|x-y|+r=x+\frac12(y-x)+r=\frac12(x+y)+r,
\end{equation*}
and so, by \eq{e:gtimr}, we find $g(s)\le\frac12(x+y)-r$. Note that, by (I),
\begin{equation*}
-\vep\le g(s)-x\le\textstyle\frac12(x+y)-r-x=\frac12(y-x)-r=\frac12|y-x|-r\le\vep-r<\vep.
\end{equation*}
Given $t\in I_2$, the second condition in \eq{e:difg} and assumption (I) yield
\begin{equation*}
y+\vep\ge g(t)\ge y-\vep>\textstyle y-\frac12|x-y|-r=y-\frac12(y-x)-r=\frac12(x+y)-r,
\end{equation*}
and so, by \eq{e:gtimr}, we get $g(t)\ge\frac12(x+y)+r$. Note also that, by (I),
\begin{equation*}
\vep\ge g(t)-y\ge\textstyle\frac12(x+y)+r-y=\frac12(x-y)+r=-\frac12|x-y|+r
\ge-\vep+r>-\vep.
\end{equation*}
Thus, we have shown that, given $s\in I_1$ and $t\in I_2$,
\begin{equation} \label{e:ggts}
g(t)-g(s)\ge\textstyle\frac12(x+y)+r-\bigl(\frac12(x+y)-r\bigr)=2r.
\end{equation}
Given $n\in\Nb$, let $\{t_i\}_{i=0}^{2n}$ be a partition of $I$ such that
$\{t_{2i}\}_{i=0}^n\subset I_1$ and $\{t_{2i-1}\}_{i=1}^n\subset I_2$. Taking into
account \eq{e:ggts} with $s=t_{2i}$ and $t=t_{2i-1}$, we get
\begin{equation*}
V(g,I)\ge\sum_{i=1}^{2n}|g(t_i)-g(t_{i-1})|\ge\sum_{i=1}^n\bigl(
g(t_{2i-1})-g(t_{2i})\bigr)\ge2rn.
\end{equation*}
\emph{Case}~(II). We set $c=c(t)=\vep+\min\{x,y\}$, $t\in I$; under our
assumption
$x<y$, we have $c=\vep+x$. Note that $c\in M_r$: in fact, (II) and $x<y$ imply
$\frac12(y-x)+r\le\vep<y-x$, and so, $\frac12(x+y)+r\le c=\vep+x<y$.
If $s\in I_1$, we find $|x-c(s)|=\vep$, and if $t\in I_2$, we get, by assumption~(II),
\begin{equation*}
|y-c(t)|=|y-x-\vep|=y-x-\vep\le|x-y|-\textstyle\frac12|x-y|-r\le\frac12|x-y|+r\le\vep.
\end{equation*}
It follows that (cf.~\eq{e:difg}) $d_{\infty,I}(f,c)\le\vep$, and since $c$ is constant
on $I$, we conclude from \eq{e:ze1} that $V_\vep(f,I)=0$. This completes
the proof of \eq{e:Mr}.
\sq
Two conclusions from \eq{e:Mr} are in order. First, given $0\le r\le\frac12|x-y|$ and
$\vep>0$, $V_\vep(f,I)=0$ if and only if $|f(I)|=|x-y|\le2\vep-2r$ (cf.\ \eq{e:zero}
and Lemma~\ref{l:71}(b)). Second, the inequalities in Lemma~\ref{l:71}(d) are
as follows:
\begin{align*}
\inf_{\vep>0}(V_\vep(f,I)+\vep)&=\textstyle\frac12|x-y|+r\le|f(I)|=|x-y|\\[2pt]
&\le|x-y|+2r=\inf_{\vep>0}(V_\vep(f,I)+2\vep).
\end{align*}
The inequalities at the left and at the right become equalities for $r=\frac12|x-y|$ and
$r=0$, respectively; otherwise, the mentioned inequalities are strict.
\label{p:36d}
(e) \label{p:rico} Let $x,y\in\Rb$, $x\ne y$, and $0\le r<|x-y|/2$. We set
\begin{equation*}
M_r=\Rb\setminus\bigl[\textstyle\frac12(x+y)-r,\frac12(x+y)+r\bigr]
\quad\mbox{and}\quad d(u,v)=|u-v|,\,\,u,v\in M_r.
\end{equation*}
Note that $(M_r,d)$ is an improper metric space. For the
Dirichlet function $f=\Dc_{x,y}:I\to M_r$ from \eq{e:Dir}, we have:
\begin{equation} \label{e:Mrim}
\mbox{$V_\vep(f,I)=\infty$ \,\,if \,\,$0<\vep\le\frac12|x-y|+r$, \,and
\,$V_\vep(f,I)=0$ \,otherwise.}
\end{equation}
Clearly, the function $\vep\mapsto V_\vep(f,I)$ is not continuous from the right at
$\vep=\frac12|x-y|+r$ (cf.\ Lemma~\ref{l:proper}(a)). The proof of \eq{e:Mrim}
follows the same lines as those of \eq{e:Mr}, so we present only the necessary
modifications. We split the case when $\frac12|x-y|\le\vep<|x-y|$ into two subcases:
\begin{equation*}
\mbox{(I) $\frac12|x-y|\le\vep\le\frac12|x-y|+r$, and
(II) $\frac12|x-y|+r<\vep<|x-y|$.}
\end{equation*}
\emph{Case}~(I). Given $g:I\to M_r$ with $d_{\infty,I}(f,g)\le\vep$, to see that
$V(g,I)=\infty$, we have \emph{strict\/} inequalities in \eq{e:gtimr}, conditions
\eq{e:difg}, and assume that $x<y$. If $s\in I_1$, then (as above)
$g(s)\le\frac12(x+y)+r$, and so, by (strict) \eq{e:gtimr}, $g(s)<\frac12(x+y)-r$.
If $t\in I_2$, then $g(t)\ge\frac12(x+y)-r$, and so, by \eq{e:gtimr},
$g(t)>\!\frac12(x+y)+r$. Thus, $g$ is discontinuous at every point of $I=I_1\cup I_2$,
and so, $V(g,I)=\infty$ (in fact, if, on the contrary, $g$ is continuous at, say,
a point $s\in I_1$, then the inequality $g(s)\!<\!\frac12(x+y)-r$ holds in a
neighbourhood~of~$s$, and since the neighbourhood contains an irrational point
$t\in I_2$, we get $g(t)>\!\frac12(x+y)+r$, which is a contradiction; recall also that
a $g\in\BV(I;\Rb)$ is continuous on $I$ except, possibly, on an at most countable subset
of $I$).
\emph{Case}~(II). It is to be noted only that $\frac12(x+y)+r<c=\vep+x<y$, and so,
$c\in M_r$; in fact, by (II) and assumption $x<y$, $\frac12(y-x)+r<\vep<y-x$. \qed
\end{example}
\section{Examples with convergent sequences} \label{ss:ecnv}
\begin{example} \label{ex:rieq} \rm
The left limit $V_{\vep-0}(f,T)$ in Lemma~\ref{l:uc}(a) cannot, in general, be replaced
by $V_\vep(f,T)$. To see this, we let $T=I$, $(M,\|\cdot\|)$ be a normed linear space,
$\{x_j\}$, $\{y_j\}\subset M$ be two sequences, $x,y\in M$, $x\ne y$, and
$x_j\to x$ and $y_j\to y$ in $M$ as $j\to\infty$. If $f_j=\Dc_{x_j,y_j}$, $j\in\Nb$,
and $f=\Dc_{x,y}$ are Dirichlet functions \eq{e:Dir} on $I$, then $f_j\rightrightarrows f$ on $I$,
which follows from
\begin{equation*}
\|f_j-f\|_{\infty,I}=\max\{\|x_j-x\|,\|y_j-y\|\}\to0\quad\mbox{as}\quad j\to\infty.
\end{equation*}
The values $V_\vep(f,I)$ are given by \eq{e:Dirass} and \eq{e:refi}, and, similarly,
if $j\in\Nb$,
\begin{equation} \label{e:steen}
\mbox{$V_\vep(f_j,I)=\infty$ \,if \,$0<\vep<\frac12\|x_j-y_j\|$, \,\,\,
$V_\vep(f_j,I)=0$ \,if \,$\vep\ge\frac12\|x_j-y_j\|$.}
\end{equation}
Setting $\vep=\frac12\|x-y\|$, $\al_j=1+(1/j)$, $x_j=\al_jx$ and $y_j=\al_jy$,
$j\in\Nb$, we find
\begin{equation*}
V_{\vep+0}(f,I)=V_\vep(f,I)=0<\infty=V_{\vep-0}(f,I),
\end{equation*}
whereas, since $\vep<\frac12\al_j\|x-y\|=\frac12\|x_j-y_j\|$ for all $j\in\Nb$,
\begin{equation*}
\mbox{$V_\vep(f_j,I)=\infty$ for all $j\in\Nb$, and so,
$\displaystyle\lim_{j\to\infty}V_\vep(f_j,I)=\infty$.}
\end{equation*}
\end{example}
\begin{example} \label{ex:voo} \rm
The right-hand side inequality in Lemma~\ref{l:uc}(a) may not hold if $\{f_j\}\subset M^T$
converges to $f\in M^T$ only \emph{pointwise\/} on $T$. To see this, suppose
$C\equiv\inf_{j\in\Nb}|f_j(T)|>0$ and $f=c$ (is a constant function) on~$T$. Given
$0<\vep<C/2$, Lemma~\ref{l:71}(f) implies
\begin{equation*}
\mbox{$V_\vep(f_j,T)\ge|f_j(T)|-2\vep\ge C-2\vep>0=V_\vep(c,T)=V_\vep(f,T)$,
\quad $j\in\Nb$.}
\end{equation*}
For instance, given a sequence $\{\tau_j\}\subset(a,b)\subset I=[a,b]$ such that
$\tau_j\to a$ as $j\to\infty$, and $x,y\in M$, $x\ne y$, defining $\{f_j\}\subset M^I$
(as in Example~\ref{ex:thr}) by $f_j(\tau_j)=x$ and $f_j(t)=y$ if
$t\in I\setminus\{\tau_j\}$, $j\in\Nb$, we have $C=d(x,y)>0$ and
$f_j\to c\equiv y$ \pw\ on~$I$.
The arguments above are not valid for the uniform convergence: in fact, if
$f_j\rightrightarrows f=c$ on $T$, then, by \eq{e:1s2}, $|f_j(T)|\le2d_{\infty,T}(f_j,c)\to0$ as
$j\to\infty$, and so, $C=0$.
\end{example}
\begin{example} \label{ex:ucbw} \rm
Lemma~\ref{l:uc}(b) is wrong for the \pw\ convergence \mbox{$f_j\to f$}.
To see this, let $T=I=[a,b]$, $(M,d)$ be a metric space,
$x,y\in M$, $x\ne y$, and, given $j\in\Nb$, define
$f_j\in M^I$ at $t\in I$ by: $f_j(t)=x$ if $j!t$ is integer, and $f_j(t)=y$ otherwise.
Each $f_j$ is a step function on $I$, so it is regulated and, hence, by Lemma~\ref{l:Regc},
$V_\vep(f_j,I)<\infty$ for all $\vep>0$. At the same time, the sequence $\{f_j\}$
converges (only) pointwise on $I$ to the Dirichlet function $f=\Dc_{x,y}$ (cf.~\eq{e:Dir}),
and so, by \eq{e:Dirass}, $V_\vep(f,I)=\infty$ for all $0<\vep<\frac12d(x,y)$.
\end{example}
\section{Examples with improper metric spaces} \label{ss:exims}
\pagebreak
\begin{example} \label{ex:ims1} \rm
This example is similar to Example~\ref{ex:gDf}(e) (p.~\pageref{p:rico}), but with
\emph{finite\/} values of $V_\vep(f,I)$. It shows that the assumption on the
\emph{proper\/} metric space $(M,d)$ in Lemma~\ref{l:proper}(b) is essential.
Let $x,y\in\Rb$, $x\ne y$, $M=\Rb\setminus\{\frac12(x+y)\}$ with metric
$d(u,v)=|u-v|$ for $u,v\in M$, $I=[a,b]$, $\tau=a$ or $\tau=b$, and $f\in M^I$ be
given by (cf.\ \eq{e:ftau}): $f(\tau)=x$ and $f(t)=y$ if $t\in I$, $t\ne\tau$.
We claim that (as in \eq{e:tab})
\begin{equation} \label{e:511}
V_\vep(f,I)=\left\{
\begin{tabular}{ccr}
$\!\!|x-y|-2\vep$ & \mbox{if} & $0<\vep<\frac12|x-y|$,\\[3pt]
$\!\!0$ & \mbox{if} & $\vep\ge\frac12|x-y|$.
\end{tabular}\right.
\end{equation}
In order to verify this, we note that $|f(I)|=|x-y|$, and so, by \eq{e:zero},
$V_\vep(f,I)=0$ for all $\vep\ge|x-y|$. We split the case $0<\vep<|x-y|$ into
\begin{equation*}
\mbox{(I) $0<\vep<\frac12|x-y|$; \,(II) $\vep=\frac12|x-y|$;
\,(III) $\frac12|x-y|<\vep<|x-y|$.}
\end{equation*}
Due to the symmetry (in $x$ and $y$), we may consider only the case $x<y$.
\emph{Case\/}~(I). Given $g\in M^I$ with $d_{\infty,I}(f,g)\le\vep$, inequality
\eq{e:10} implies
\begin{equation*}
V(g,I)\ge|g(t)-g(\tau)|\ge|f(t)-f(\tau)|-2\vep=|x-y|-2\vep\quad(t\ne\tau),
\end{equation*}
and so, by \eq{e:av}, $V_\vep(f,I)\ge|x-y|-2\vep$. Now, following \eq{e:unve}, we set
\begin{equation} \label{e:givep}
g_\vep(\tau)=x+\vep\quad\mbox{and}\quad g_\vep(t)=y-\vep\,\,\,\,\mbox{if}\,\,\,\,
t\in I\setminus\{\tau\}.
\end{equation}
We have $g_\vep:I\to M$, because assumption $0<\vep<\frac12(y-x)$ yields
\begin{equation*}
g_\vep(\tau)=x+\vep<x+\textstyle\frac12(y-x)=\frac12(x+y)
\end{equation*}
and, if $t\in I$, $t\ne\tau$,
\begin{equation*}
g_\vep(t)=y-\vep>y-\textstyle\frac12(y-x)=\frac12(x+y).
\end{equation*}
Moreover, $d_{\infty,I}(f,g_\vep)=\vep$ and
\begin{equation*}
V(g_\vep,I)=|g_\vep(I)|=|(y-\vep)-(x+\vep)|\stackrel{\mbox{\tiny(I)}}{=}
y-x-2\vep=|x-y|-2\vep.
\end{equation*}
Hence $V_\vep(f,I)\!\le\! V(g_\vep,I)\!=\!|x\!-\!y|\!-\!2\vep$.
This proves the upper line in \eq{e:511}.
\emph{Case\/}~(II). Here we rely on the full form of \eq{e:zer}. Let a sequence
$\{\vep_k\}_{k=1}^\infty$ be such that $0<\vep_k<\vep=\frac12|x-y|$ for all
$k\in\Nb$ and $\vep_k\to\vep$ as $k\to\infty$. We set $g_k=g_{\vep_k}$, $k\in\Nb$,
where $g_{\vep_k}$ is defined in \eq{e:givep} (with $\vep=\vep_k$). By Case~(I),
given $k\in\Nb$, $g_k\in\BV(I;M)$, $V(g_k,I)=|x-y|-2\vep_k$ and
$d_{\infty,I}(f,g_k)=\vep_k<\vep$. Since $V(g_k,I)\to0$ as $k\to\infty$, we conclude
from \eq{e:zer} that $V_\vep(f,I)=0$.
\emph{Case\/}~(III). We set $c(t)=\vep+\min\{x,y\}$, $t\in I$, and argue as in
Example \ref{ex:gDf}(e) (in~Case (II) for $r=0$). This gives $V_\vep(f,I)=0$, and
completes the proof of \eq{e:511}.
Clearly, the metric space $(M,d)$ in this example is \emph{not proper}. Let us show that
Lemma~\ref{l:proper}(b) is wrong. In fact, by contradiction, assume that there is
$g\in\BV(I;M)$ with $d_{\infty,I}(f,g)\le\vep=\frac12|x-y|$ such that
$V_\vep(f,I)=V(g,I)$. By \eq{e:511}, $V(g,I)=0$, and so, $g=c$ is a constant
function $c:I\to M$. From $d_{\infty,I}(f,c)\le\vep$, we find $|x-c|=|f(\tau)-c|\le\vep$,
and so (as above, $x<y$),
\begin{equation*}
c\le x+\vep=x+\textstyle\frac12(y-x)=\frac12(x+y),
\end{equation*}
and, if $t\ne\tau$, then $|y-c|=|f(t)-c|\le\vep$, which implies
\begin{equation*}
c\ge y-\vep=y-\textstyle\frac12(y-x)=\frac12(x+y).
\end{equation*}
Hence, $c=c(t)=\frac12(x+y)$, $t\in I$, but $g=c\notin M^I$, which is a contradiction.
\end{example}
\begin{example} \label{ex:ims2} \rm
Here we show that the assumption that the metric space $(M,d)$ is \emph{proper\/} in
Lemma~\ref{l:proper}(c) is essential.
Let $x,y\in\Rb$, $x\ne y$, $M=\Rb\setminus\{\frac12(x+y)\}$ with metric
$d(u,v)=|u-v|$, $u,v\in M$, $I=[0,1]$, and the sequence $\{f_j\}\subset M^I$ be
given by
\begin{equation} \label{e:seqfj}
f_j(t)=\left\{
\begin{tabular}{ccl}
$\!\!x$ & \mbox{if} & $j!t$ is integer,\\[3pt]
$\!\!y$ & & otherwise,
\end{tabular}\right.\,\,\quad t\in I,\,\,\,j\in\Nb.
\end{equation}
We claim that, for all $j\in\Nb$,
\begin{equation} \label{e:jfac}
V_\vep(f_j,I)=\left\{
\begin{tabular}{ccr}
$\!\!2\!\cdot\! j!\,(|x-y|-2\vep)$ & \mbox{if} & $0<\vep<\frac12|x-y|$,\\[3pt]
$\!\!0$ & \mbox{if} & $\vep\ge\frac12|x-y|$.
\end{tabular}\right.
\end{equation}
Suppose that we have already established \eq{e:jfac}. The sequence $\{f_j\}$ from
\eq{e:seqfj} converges pointwise on $I$ to the Dirichlet function $f=\Dc_{x,y}$ from
\eq{e:Dir}. Let $\vep=\frac12|x-y|$. By \eq{e:Mrim} with $r=0$, we have
$V_\vep(f,I)=\infty$, while, by \eq{e:jfac}, we get $V_\vep(f_j,I)=0$ for all $j\in\Nb$,
and so, $\lim_{j\to\infty}V_\vep(f_j,I)=0$. Thus, the properness of metric space
$(M,d)$ in Lemma~\ref{l:proper}(c) is indispensable.
\emph{Proof of\/}~\eq{e:jfac}.
In what follows, we fix $j\in\Nb$. By \eq{e:zero}, $V_\vep(f_j,I)=0$ for all
$\vep\ge|f_j(I)|=|x-y|$. Now, we consider cases (I)--(III) from Example~\ref{ex:ims1}.
\emph{Case\/}~(I). We set $t_k=k/j!$ (so that $f_j(t_k)=x$) for $k=0,1,\dots,j!$, and
$s_k=\frac12(t_{k-1}+t_k)=(k-\frac12)/j!$ (so that $f_j(s_k)=y$) for $k=1,2,\dots,j!$.
So, we have the following partition of the interval $I=[0,1]$:
\begin{equation} \label{e:partj}
0=t_0<s_1<t_1<s_2<t_2\dots<s_{j!-1}<t_{j!-1}<s_{j!}<t_{j!}=1.
\end{equation}
If $g\in M^I$ is arbitrary with $d_{\infty,I}(f_j,g)\le\vep$, then, applying \eq{e:10}, we get
\begin{align*}
V(g,I)&\ge\,\sum_{k=1}^{j!}\bigl(|g(t_k)-g(s_k)|+|g(s_k)-g(t_{k-1})|\bigr)\\
&\ge\,\sum_{k=1}^{j!}\bigl(|f_j(t_k)-f_j(s_k)|-2\vep+|f_j(s_k)-f_j(t_{k-1})|-2\vep\bigr)
\\[3pt]
&=\,2\!\cdot\!j!\,(|x-y|-2\vep),
\end{align*}
and so, by definition \eq{e:av}, $V_\vep(f_j,I)\ge2\!\cdot\!j!\,(|x-y|-2\vep)$.
Now, we define a test function $g_\vep$ on $I$ by (cf.~\eq{e:unve}): given $t\in I$,
\begin{equation} \label{e:geep}
\mbox{$g_\vep(t)\!=\!x\!-\!\vep\mbox{\rm e}_{x,y}$\,\,\,if\, $j!t$ is integer, and\,
$g_\vep(t)\!=\!y\!+\!\vep\mbox{\rm e}_{x,y}$\,\,\,otherwise,}
\end{equation}
where $\mbox{\rm e}_{x,y}=(x-y)/|x-y|$. Due to the symmetry in $x$ and $y$,
we may assume that $x<y$, and so, $g_\vep(t)=x+\vep$ if $j!t$ is integer, and
$g_\vep(t)=y-\vep$ otherwise, $t\in I$. We first note that $g_\vep:I\to M$; in fact,
if $j!t$ is integer, then
\begin{equation*}
g_\vep(t)=x+\vep<x+\textstyle\frac12|x-y|=x+\frac12(y-x)=\frac12(x+y),
\end{equation*}
and if $j!t$ is not integer, then
\begin{equation*}
g_\vep(t)=y-\vep>y-\textstyle\frac12|x-y|=y-\frac12(y-x)=\frac12(x+y).
\end{equation*}
Clearly, $d_{\infty,I}(f_j,g_\vep)=\vep$ and, by the additivity of $V$, for the partition
\eq{e:partj}, we find
\begin{align*}
V(g_\vep,I)&=\sum_{k=1}^{j!}\bigl(V(g_\vep,[t_{k-1},s_k])+V(g_\vep,[s_k,t_k])\bigr)\\
&=\sum_{k=1}^{j!}\bigl(|g_\vep(s_k)-g_\vep(t_{k-1})|+|g_\vep(t_k)-g_\vep(s_k)|\bigr)\\
&=\sum_{k=1}^{j!}\bigl(|(y-\vep)-(x+\vep)|+|(x+\vep)-(y-\vep)|\bigr)\\[3pt]
&=2\!\cdot\!j!\,(|x-y|-2\vep).
\end{align*}
Thus, $V_\vep(f_j,I)\le V(g_\vep,I)$, and this implies the upper line in \eq{e:jfac}.
\emph{Case\/}~(II). Let a sequence $\{\vep_k\}_{k=1}^\infty$ be such that
$0<\vep_k<\vep=\frac12|x-y|$, $k\in\Nb$, and $\vep_k\to\vep$ as $k\to\infty$.
Set $g_k=g_{\vep_k}$, $k\in\Nb$, where $g_{\vep_k}$ is given by \eq{e:geep}
(with $x<y$). We know from Case~(I) that, for every $k\in\Nb$, $g_k\in\BV(I;M)$,
$V(g_k,I)=2\!\cdot\!j!\,(|x-y|-2\vep_k)$, and $d_{\infty,I}(f_j,g_k)=\vep_k<\vep$.
Since $V(g_k,I)\to0$ as $k\to\infty$, we conclude from \eq{e:zer} that $V_\vep(f_j,I)=0$.
\emph{Case\/}~(III). We set $c=c(t)=\vep+\min\{x,y\}$ for all $t\in I$, i.e., under
our assumption $x<y$, $c=\vep+x$. Note that $c\in M$, because assumption (III) and
$x<y$ imply $\vep>\frac12(y-x)$, and so, $c=\vep+x>\frac12(y-x)+x=\frac12(x+y)$.
Furthermore, $d_{\infty,I}(f_j,c)\le\vep$; in fact, given $t\in I$, if $j!t$ is integer, then
$|f_j(t)-c(t)|=|x-c|=\vep$, and if $j!t$ is not integer, then
\begin{equation*}
|f_j(t)-c(t)|=|y-x-\vep|\stackrel{\mbox{\tiny(III)}}{=}y-x-\vep<
|x-y|-\textstyle\frac12|x-y|=\frac12|x-y|<\vep.
\end{equation*}
Since $c$ is a constant function from $M^I$, we get $V_\vep(f_j,I)=0$.
This completes the proof of \eq{e:jfac}.
\end{example}
\chapter{Pointwise selection principles} \label{s:sp}
\section{Functions with values in a metric space} \label{ss:metsp}
Our first main result, an extension of Theorem~3.8 from \cite{Fr}, is a
\emph{\pw\ selection principle\/} for metric space valued univariate functions in terms
of the approximate variation (see Theorem~\ref{t:SP}).
In order to formulate it, we slightly generalize the notion of a regulated function
(cf.~p.~\pageref{p:reg}). If $T\subset\Rb$ is an arbitrary set and $(M,d)$ is a metric
space, a function $f\in M^T$ is said to be \emph{regulated\/} on $T$ (in symbols,
$f\in\Reg(T;M)$) if it satisfies the Cauchy condition at every left limit point of $T$ and
every right limit point of $T$. More explicitly, given $\tau\in T$, which is a \emph{left
limit point\/} of $T$ (i.e., $T\cap(\tau-\delta,\tau)\ne\es$ for all $\delta>0$), we have
$d(f(s),f(t))\to0$ as $T\ni s,t\to\tau-0$; and given $\tau'\in T$, which is a \emph{right
limit point\/} of $T$ (i.e., $T\cap(\tau',\tau'+\delta)\ne\es$ for all $\delta>0$), we have
$d(f(s),f(t))\to0$ as $T\ni s,t\to\tau'+0$. The proof of Lemma~\ref{l:Regc} in ($\supset$)
shows that
\begin{equation} \label{e:supReg}
\Reg(T;M)\supset\{f\in M^T:\mbox{$V_\vep(f,T)<\infty$ for all $\vep>0$}\};
\end{equation}
it suffices to set $\vfi_\vep(t)=V_\vep(f,T\cap(-\infty,t])$, $t\in T$, and treat
$s,t$ from~$T$.
In contrast to the case when $T=I$ is an
interval (see p.~\pageref{p:reg}), a function $f\in\Reg(T;M)$ may not be bounded
in general: for instance, $f\in\Rb^T$ given on
$T=[0,1]\cup\{2-\frac1n\}_{n=2}^\infty$ by: $f(t)=t$ if $0\le t\le1$ and
$f(2-\frac1n)=n$ if $n\in\Nb$, is regulated in the above sense, but not bounded.
In what follows, we denote by $\Mon(T;\Rb^+)$ the set of all bounded nondecreasing
functions mapping $T$ into $\Rb^+=[0,\infty)$ ($\Rb^+$ may be replaced by~$\Rb$).
It is worthwhile to recall the classical \emph{Helly selection principle\/} for an arbitrary
set $T\subset\Rb$ (e.g., \cite[Proof of Theorem~1.3]{Sovae}): \emph{a uniformly
bounded sequence of functions from $\Mon(T;\Rb)$ contains a subsequence which
converges \pw\ on $T$ to a function from $\Mon(T;\Rb)$.} \label{p:Hellym}
\begin{theorem} \label{t:SP}
Let $\es\ne T\subset\Rb$ and $(M,d)$ be a metric space. If $\{f_j\}\subset M^T$ is a
\pw\ \rc\ sequence of functions on $T$ such that
\begin{equation} \label{e:sp}
\limsup_{j\to\infty}V_\vep(f_j,T)<\infty\quad\mbox{for all}\quad\vep>0,
\end{equation}
then there is a subsequence of $\{f_j\}$, which converges \pw\ on $T$ to a
{\sl bounded regulated} function $f\in M^T$. In addition, if $(M,d)$ is proper, then
$V_\vep(f,T)$ does not exceed the $\limsup$ in\/ \eq{e:sp} for all $\vep>0$.
\end{theorem}
\proof
We present a direct proof based only on the properties of the approximate variation
from Section~\ref{ss:pro} (an indirect proof, based on the notion of the \emph{joint
modulus of variation of two functions}, was given in \cite[Theorem~3]{Studia17}).
By Lemma~\ref{l:ele}(b), given $\vep>0$ and $j\in\Nb$, the $\vep$-variation function
defined by the rule $t\mapsto V_\vep(f_j,T\cap(-\infty,t])$ is nondecreasing on $T$.
Note also that, by assumption \eq{e:sp}, for each $\vep>0$ there are $j_0(\vep)\in\Nb$
and a number $C(\vep)>0$ such that $V_\vep(f_j,T)\le C(\vep)$ for all $j\ge j_0(\vep)$.
We divide the rest of the proof into five steps.
1. Let us show that for each decreasing sequence $\{\vep_k\}_{k=1}^\infty$ of
positive numbers $\vep_k\to0$ there are a subsequence of $\{f_j\}$, again denoted by
$\{f_j\}$, and a sequence of functions $\{\vfi_k\}_{k=1}^\infty\subset\Mon(T;\Rb^+)$
such that
\begin{equation} \label{e:SP1}
\lim_{j\to\infty}V_{\vep_k}(f_j,T\cap(-\infty,t])=\vfi_k(t)\quad
\mbox{for \,all \,$k\in\Nb$ \,and \,$t\in T$.}
\end{equation}
In order to prove \eq{e:SP1}, we make use of the Cantor diagonal procedure.
Lemma~\ref{l:ele}(b) and remarks above imply
\begin{equation*}
\mbox{ $V_{\vep_1}(f_j,T\cap(-\infty,t])\le V_{\vep_1}(f_j,T)\le C(\vep_1)$
for all $t\in T$ and $j\ge j_0(\vep_1)$,}
\end{equation*}
i.e., the sequence of functions $\{t\mapsto V_{\vep_1}(f_j,T\cap(-\infty,t])\}
_{j=j_0(\vep_1)}^\infty\subset\Mon(T;\Rb^+)$ is uniformly bounded on $T$ by
constant $C(\vep_1)$. By the classical Helly selection principle (for monotone functions),
there are a subsequence $\{J_1(j)\}_{j=1}^\infty$ of $\{j\}_{j=j_0(\vep_1)}^\infty$
and a function $\vfi_1\in\Mon(T;\Rb^+)$ such that
$V_{\vep_1}(f_{J_1(j)},T\cap(-\infty,t])$ converges to $\vfi_1(t)$ in $\Rb$ as
$j\to\infty$ for all $t\in T$. Now, choose the least number $j_1\in\Nb$ such that
$J_1(j_1)\ge j_0(\vep_2)$. Inductively, assume that $k\in\Nb$, $k\ge2$, and a
subsequence $\{J_{k-1}(j)\}_{j=1}^\infty$ of $\{j\}_{j=j_0(\vep_1)}^\infty$ and
the number $j_{k-1}\in\Nb$ with $J_{k-1}(j_{k-1})\ge j_0(\vep_k)$ are already
constructed. By Lemma~\ref{l:ele}(b), we get
\begin{equation*}
\mbox{ $V_{\vep_k}(f_{J_{k-1}(j)},T\cap(-\infty,t])\!\le\!
V_{\vep_k}(f_{J_{k-1}(j)},T)\!\le\! C(\vep_k)$
for all $t\!\in\! T$ and $j\!\ge\! j_{k-1}$,}
\end{equation*}
and so, by the Helly selection principle, there are a subsequence $\{J_k(j)\}_{j=1}^\infty$
of the sequence $\{J_{k-1}(j)\}_{j=j_{k-1}}^\infty$ and a function
$\vfi_k\in\Mon(T;\Rb^+)$ such that
\begin{equation*}
\lim_{j\to\infty}V_{\vep_k}(f_{J_k(j)},T\cap(-\infty,t])=\vfi_k(t)\quad
\mbox{for all}\quad t\in T.
\end{equation*}
Given $k\in\Nb$, $\{J_j(j)\}_{j=k}^\infty$ is a subsequence of $\{J_k(j)\}_{j=1}^\infty$,
and so, the diagonal sequence $\{f_{J_j(j)}\}_{j=1}^\infty$, again denoted by $\{f_j\}$,
satisfies condition \eq{e:SP1}.
2. Let $Q$ be an at most countable dense subset of $T$. Note that any point $t\in T$,
which is not a limit point for $T$ (i.e., $T\cap(t-\delta,t+\delta)=\{t\}$ for some
$\delta>0$), belongs to~$Q$. Since, for any $k\in\Nb$, $\vfi_k\in\Mon(T;\Rb^+)$,
the set $Q_k\subset T$ of points of discontinuity of $\vfi_k$ is at most countable.
Setting $S=Q\cup\bigcup_{k=1}^\infty Q_k$, we find that $S$ is an at most countable
dense subset of $T$; moreover, if $S\ne T$, then every point $t\in T\setminus S$ is
a limit point for $T$ and
\begin{equation} \label{e:SP2}
\mbox{$\vfi_k$ is continuous on $T\setminus S$ for all $k\in\Nb$.}
\end{equation}
Since $S\subset T$ is at most countable and $\{f_j(s):j\in\Nb\}$ is \rc\ in $M$ for all
$s\in S$, applying the Cantor diagonal procedure and passing to a subsequence of
$\{f_j(s)\}_{j=1}^\infty$ if necessary, with no loss of generality we may assume that,
for each $s\in S$, $f_j(s)$ converges in $M$ as $j\to\infty$ to a (unique) point
denoted by $f(s)\in M$ (so that $f:S\to M$).
If $S=T$, we turn to Step~4 below and complete the proof.
3. Now, assuming that $S\ne T$, we prove that $f_j(t)$ converges in $M$ as $j\to\infty$
for all $t\in T\setminus S$, as well. Let $t\in T\setminus S$ and $\eta>0$ be arbitrarily
fixed. Since $\vep_k\to0$ as $k\to\infty$ (cf.\ Step~1), we pick and fix
$k=k(\eta)\in\Nb$ such that $\vep_k\le\eta$. By \eq{e:SP2}, $\vfi_k$ is continuous
at $t$, and so, by the density of $S$ in $T$, there is $s=s(k,t)\in S$ such that
$|\vfi_k(t)-\vfi_k(s)|\le\eta$. From property \eq{e:SP1}, there is
$j^1=j^1(\eta,k,t,s)\in\Nb$ such that, for all $j\ge j^1$,
\begin{equation} \label{e:SP3}
|V_{\vep_k}(f_j,T\cap(-\infty,t])\!-\!\vfi_k(t)|\!\le\!\eta\,\,\mbox{and}\,\,
|V_{\vep_k}(f_j,T\cap(-\infty,s])\!-\!\vfi_k(s)|\!\le\!\eta.
\end{equation}
Assuming that $s<t$ (with no loss of generality) and applying Lemma~\ref{l:mor}
(where $T$ is replaced by $T\cap(-\infty,t]$, $T_1$---by $T\cap(-\infty,s]$,
and $T_2$---by $T\cap[s,t]$), we get
\begin{align*}
V_{\vep_k}(f_j,T\cap[s,t])&\le V_{\vep_k}(f_j,T\cap(-\infty,t])-
V_{\vep_k}(f_j,T\cap(-\infty,s])\\[3pt]
&\le|V_{\vep_k}(f_j,T\cap(-\infty,t])-\vfi_k(t)|+|\vfi_k(t)-\vfi_k(s)|\\[3pt]
&\qquad+|\vfi_k(s)-V_{\vep_k}(f_j,T\cap(-\infty,s])|\\[3pt]
&\le\eta+\eta+\eta=3\eta\quad\mbox{for all}\quad j\ge j^1.
\end{align*}
By the definition of $V_{\vep_k}(f_j,T\cap[s,t])$, for each $j\ge j^1$, there is
$g_j\in\BV(T\cap[s,t];M)$ (also depending on $\eta$, $k$, $t$, and $s$) such that
\begin{equation*}
d_{\infty,T\cap[s,t]}(f_j,g_j)\le\vep_k\quad\mbox{and}\quad
V(g_j,T\cap[s,t])\le V_{\vep_k}(f_j,T\cap[s,t])+\eta.
\end{equation*}
These inequalities, \eq{e:10} and property (V.1) on p.~\pageref{p:V} yield,
for all $j\ge j^1$,
\begin{align}
d(f_j(s),f_j(t))&\le d(g_j(s),g_j(t))+2d_{\infty,T\cap[s,t]}(f_j,g_j)\nonumber\\[3pt]
&\le V(g_j,T\cap[s,t])+2\vep_k\le(3\eta+\eta)+2\eta=6\eta.\label{e:SP4}
\end{align}
Being convergent, the sequence $\{f_j(s)\}_{j=1}^\infty$ is Cauchy in $M$, and so,
there is a natural number $j^2=j^2(\eta,s)$ such that $d(f_j(s),f_{j'}(s))\le\eta$ for all
$j,j'\ge j^2$. Since the number $j^3=\max\{j^1,j^2\}$ depends only on $\eta$
(and $t$) and
\begin{align*}
d(f_j(t),f_{j'}(t))&\le d(f_j(t),f_j(s))+d(f_j(s),f_{j'}(s))+d(f_{j'}(s),f_{j'}(t))\\[3pt]
&\le 6\eta+\eta+6\eta=13\eta\quad\mbox{for all}\quad j,j'\ge j^3,
\end{align*}
the sequence $\{f_j(t)\}_{j=1}^\infty$ is Cauchy in $M$. Taking into account that the set
$\{f_j(t):j\in\Nb\}$ is \rc\ in $M$, we conclude that $f_j(t)$ converges in $M$ as
$j\to\infty$ to a (unique) point denoted by $f(t)\in M$ (so, $f:T\setminus S\to M$).
4. At the end of Steps 2 and 3, we have shown that the function
$f$ mapping $T=S\cup(T\setminus S)$ into $M$ is the \pw\ limit on $T$ of a subsequence
$\{f_{j_p}\}_{p=1}^\infty$ of the original sequence $\{f_j\}_{j=1}^\infty$. By virtue
of Lemma~\ref{l:71}(b) and assumption \eq{e:sp}, given $\vep>0$, we get
\begin{align*}
|f(T)|&\le\liminf_{p\to\infty}|f_{j_p}(T)|
\le\liminf_{p\to\infty}V_\vep(f_{j_p},T)+2\vep\\[3pt]
&\le\limsup_{j\to\infty}V_\vep(f_j,T)+2\vep<\infty,
\end{align*}
and so, $f$ is a \emph{bounded\/} function on $T$, i.e., $f\in\Bd(T;M)$.
Now, we prove that $f$ is \emph{regulated\/} on $T$. Given $\tau\in T$, which is a
left limit point for $T$, let us show that $d(f(s),f(t))\to0$ as $T\ni s,t\to\tau-0$ (similar
arguments apply if $\tau'\in T$ is a right limit point for $T$). This is equivalent to showing
that for every $\eta>0$ there is $\delta=\delta(\eta)>0$ such that
$d(f(s),f(t))\le7\eta$ for all $s,t\in T\cap(\tau-\delta,\tau)$ with $s<t$.
Recall that the (finally) extracted subsequence of the original sequence $\{f_j\}$,
here again denoted by $\{f_j\}$, satisfies condition \eq{e:SP1} and $f_j\to f$ \pw\ on~$T$.
Let $\eta>0$ be arbitrarily fixed. Since $\vep_k\to0$, pick and fix $k=k(\eta)\in\Nb$
such that $\vep_k\le\eta$. Furthermore, since $\vfi_k\in\Mon(T;\Rb^+)$ and $\tau\in T$
is a left limit point of $T$, the left limit $\lim_{T\ni t\to\tau-0}\vfi_k(t)\in\Rb^+$ exists.
Hence, there is $\delta=\delta(\eta,k)>0$ such that $|\vfi_k(t)-\vfi_k(s)|\le\eta$ for
all $s,t\in T\cap(\tau-\delta,\tau)$. Now, let $s,t\in T\cap(\tau-\delta,\tau)$ be arbitrary.
By \eq{e:SP1}, there is $j^1=j^1(\eta,k,s,t)\in\Nb$ such that if $j\ge j^1$, the
inequalities \eq{e:SP3} hold. Arguing exactly the same way as between lines \eq{e:SP3}
and \eq{e:SP4}, we find that $d(f_j(s),f_j(t))\le6\eta$ for all $j\ge j^1$. Noting that
$f_j(s)\to f(s)$ and $f_j(t)\to f(t)$ in $M$ as $j\to\infty$, by the triangle inequality
for $d$, we have, as $j\to\infty$,
\begin{equation*}
|d(f_j(s),f_j(t))-d(f(s),f(t))|\le d(f_j(s),f(s))+d(f_j(t),f(t))\to0.
\end{equation*}
So, there is $j^2=j^2(\eta,s,t)\in\Nb$ such that $d(f(s),f(t))\le d(f_j(s),f_j(t))+\eta$
for all $j\ge j^2$. Thus, if $j\ge\max\{j^1,j^2\}$, we get
$d(f(s),f(t))\le6\eta+\eta=7\eta$.
5. Finally, assume that $(M,d)$ is a \emph{proper\/} metric space. Once again (as at the
beginning of Step~4) it is convenient to denote the \pw\ convergent subsequence of
$\{f_j\}$ by $\{f_{j_p}\}_{p=1}^\infty$. So, since $f_{j_p}\to f$ \pw\ on $T$ as
$p\to\infty$, we may apply Lemma~\ref{l:proper}(c) and assumption \eq{e:sp}
and get, for all $\vep>0$,
\begin{equation*}
V_\vep(f,T)\le\liminf_{p\to\infty}V_\vep(f_{j_p},T)\le
\limsup_{j\to\infty}V_\vep(f_j,T)<\infty.
\end{equation*}
This and \eq{e:supReg} (or Lemma~\ref{l:Regc} if $T=I$) also imply $f\in\Reg(T;M)$.
This completes the proof of Theorem~\ref{t:SP}.
\sq
A few remarks concerning Theorem~\ref{t:SP} are in order (see also Remarks
\ref{r:cHp} and \ref{r:neces}).
\begin{remark} \label{r:four2} \rm
If $(M,d)$ is a \emph{proper\/} metric space, then the assumption that
`$\{f_j\}\subset M^T$
is \pw\ \rc\ on $T$' in Theorem~\ref{t:SP} can be replaced by a (seemingly weaker,
but actually equivalent) condition `$\{f_j\}\subset M^T$ and $\{f_j(t_0)\}$ is
\emph{eventually bounded\/} in $M$ for some $t_0\in T$' in the sense that there are
$J_0\in\Nb$ and a constant $C_0>0$ such that $d(f_j(t_0),f_{j'}(t_0))\le C_0$ for all
$j,j'\ge J_0$. In fact, fixing $\vep>0$, e.g., $\vep=1$, by Lemma~\ref{l:71}(b), we get
$|f_j(T)|\le V_1(f_j,T)+2$ for all $j\in\Nb$, and so, applying assumption \eq{e:sp},
\begin{equation*}
\limsup_{j\to\infty}|f_j(T)|\le\limsup_{j\to\infty}V_1(f_j,T)+2<\infty.
\end{equation*}
Hence, there are $J_1\in\Nb$ and a constant $C_1>0$ such that
$|f_j(T)|\le C_1$ for all $j\ge J_1$. By the triangle inequality for $d$,
given $t\in T$, we find, for all $j,j'\ge\max\{J_0,J_1\}$,
\begin{align}
d(f_j(t),f_{j'}(t))&\le d(f_j(t),f_j(t_0))+d(f_j(t_0),f_{j'}(t_0))+d(f_{j'}(t_0),f_{j'}(t))
\nonumber\\[3pt]
&\le|f_j(T)|+C_0+|f_{j'}(T)|\le C_1+C_0+C_1, \label{e:Cio}
\end{align}
i.e., $\{f_j(t)\}$ is eventually bounded uniformly in $t\in T$. Thus, since $M$ is proper,
$\{f_j(t)\}$ is \rc\ in $M$ for all $t\in T$.
In the case under consideration, an alternative proof of Theorem~\ref{t:SP},
worth mentioning, can be given (see Theorem~\ref{t:SPprop} and its proof).
However, for a general metric space $(M,d)$, the relative compactness of $\{f_j(t)\}$
at all points $t\in T$ cannot be replaced by their (closedness and) boundedness even
at a single point of $T$. To see this, let $T=I=[a,b]$ and $M=\ell^1\subset\Rb^\Nb$
be the (infinite-dimensional) Banach space of all summable sequences
$u=\{u_n\}_{n=1}^\infty\in\ell^1$ equipped with the norm
$\|u\|=\sum_{n=1}^\infty|u_n|<\infty$. If $j\in\Nb$, denote by
$e_j=\{u_n\}_{n=1}^\infty$ the unit vector from $\ell^1$ given by
$u_n=0$ if $n\ne j$ and $u_j=1$. Now, define the sequence $\{f_j\}\subset M^T$
by $f_j(a)=e_j$ and $f_j(t)=0$ if $a<t\le b$, $j\in\Nb$. We have: the set
$\{f_j(a)\}_{j=1}^\infty=\{e_j:j\in\Nb\}$ is closed and bounded in $M$,
$\{f_j(t)\}_{j=1}^\infty=\{0\}$ is compact in $M$ if $a<t\le b$, and
(cf. Example~\ref{ex:thr} and \eq{e:tab}), given $j\in\Nb$, $V_\vep(f_j,T)=1-2\vep$
if $0<\vep<1/2$, and $V_\vep(f_j,T)=0$ if $\vep\ge1/2$. Clearly, condition \eq{e:sp}
is satisfied for $\{f_j\}$, but no subsequence of $\{f_j\}$ converges in $M$ at the
point $t=a$.
\end{remark}
\begin{theorem} \label{t:SPprop}
Suppose $T\subset\Rb$, $(M,d)$ is a {\sl proper} metric space, and a sequence of
functions $\{f_j\}\subset M^T$ is such that $\{f_j(t_0)\}$ is eventually bounded in $M$
for some $t_0\in T$ and condition\/ \eq{e:sp} holds. Then, a subsequence of $\{f_j\}$
converges \pw\ on $T$ to a {\sl bounded} function $f\in M^T$ such that
$V_\vep(f,T)\le\limsup_{j\to\infty}V_\vep(f_j,T)$ for all $\vep>0$.
\end{theorem}
\proof
1. Let $\{\vep_k\}_{k=1}^\infty\subset(0,\infty)$ be such that $\vep_k\to0$ as
$k\to\infty$. Given $k\in\Nb$, condition \eq{e:sp} implies the existence of
$j_0'(\vep_k)\in\Nb$ and a constant $C(\vep_k)>0$ such that
$V_{\vep_k}(f_j,T)<C(\vep_k)$ for all $j\ge j_0'(\vep_k)$. By definition \eq{e:av},
for each $j\ge j_0'(\vep_k)$, there is $g_j^{(k)}\in\BV(T;M)$ such that
\footnote{The conditions that $\{f_j\}\subset M^T$ is \rc\ on $T$ and that
$\{g_j\}\subset M^T$ satisfies $d_{\infty,T}(f_j,g_j)\le\vep$ for all $j\in\Nb$
\emph{do not\/} imply in general
that $\{g_j\}$ is also \rc\ on $T$: e.g.\ (cf.\ notation in Remark~\ref{r:four2}), $T=[0,1]$,
$M=\ell^1$ (which is not proper), $f_j(t)=0$ and $g_j(t)=\vep t e_j$ for all $j\in\Nb$
and $t\in T$.}
\begin{equation} \label{e:dnVC}
d_{\infty,T}(f_j,g_j^{(k)})\le\vep_k\quad\mbox{ and }\quad V(g_j^{(k)},T)\le C(\vep_k).
\end{equation}
Since $\{f_j(t_0)\}$ is eventually bounded and \eq{e:sp} holds, we get inequality
\eq{e:Cio} for all $j,j'\ge\max\{J_0,J_1\}$. It follows that if $t\in T$, $k\in\Nb$, and
$j,j'\ge j_0(\vep_k)\equiv\max\{j_0'(\vep_k),J_0,J_1\}$, we find, by the triangle
inequality for $d$, \eq{e:dnVC},~and~\eq{e:Cio},
\begin{align*}
d(g_j^{(k)}(t),g_{j'}^{(k)}(t))&\le d(g_j^{(k)}(t),f_j(t))+d(f_j(t),f_{j'}(t))
+d(f_{j'}(t),g_{j'}^{(k)}(t))\\[2pt]
&\le d_{\infty,T}(g_j^{(k)},f_j)+d(f_j(t),f_{j'}(t))+d_{\infty,T}(f_{j'},g_{j'}^{(k)})\\[2pt]
&\le\vep_k+(C_0+2C_1)+\vep_k.
\end{align*}
In this way, we have shown that
\begin{equation} \label{e:djjk}
\sup_{j,j'\ge j_0(\vep_k)}d(g_j^{(k)}(t),g_{j'}^{(k)}(t))\le2\vep_k+C_0+2C_1\,\,\,
\mbox{for all $k\in\Nb$ and $t\in T$,}
\end{equation}
and, by the second inequality in \eq{e:dnVC},
\begin{equation} \label{e:Vgj}
\sup_{j\ge j_0(\vep_k)}V(g_j^{(k)},T)\le C(\vep_k)\quad\mbox{for \,all}\quad k\in\Nb.
\end{equation}
2. Applying Cantor's diagonal procedure, let us show the following: given $k\in\Nb$, there
exist a subsequence of $\{g_j^{(k)}\}_{j=j_0(\vep_k)}^\infty$, denoted by
$\{g_j^{(k)}\}_{j=1}^\infty$, and $g^{(k)}\in\BV(T;M)$ such that
\begin{equation} \label{e:gkkt}
\lim_{j\to\infty}d(g_j^{(k)}(t),g^{(k)}(t))=0\quad\mbox{for \,all}\quad t\in T.
\end{equation}
Setting $k=1$ in \eq{e:djjk} and \eq{e:Vgj}, we find that the sequence
$\{g_j^{(1)}\}_{j=j_0(\vep_1)}^\infty$ has uniformly bounded (by $C(\vep_1)$)
Jordan variations and is uniformly bounded on $T$ (by $2\vep_1+C_0+2C_1$), and so,
since $M$ is a \emph{proper\/} metric space, the sequence is \pw\ \rc\ on~$T$.
By the Helly-type \pw\ selection principle in $\BV(T;M)$ (cf.\ property (V.4) on
p.~\pageref{p:V}), there are a subsequence $\{J_1(j)\}_{j=1}^\infty$ of
$\{j\}_{j=j_0(\vep_1)}^\infty$ and a function $g^{(1)}\in\BV(T;M)$ such that
$g_{J_1(j)}^{(1)}(t)\to g^{(1)}(t)$ in $M$ as $j\to\infty$ for all $t\in T$.
Pick the least number $j_1\in\Nb$ such that $J_1(j_1)\ge j_0(\vep_2)$. Inductively,
if $k\in\Nb$ with $k\ge2$, a subsequence $\{J_{k-1}(j)\}_{j=1}^\infty$ of
$\{j\}_{j=j_0(\vep_1)}^\infty$, and the number $j_{k-1}\in\Nb$ such that
$J_{k-1}(j_{k-1})\ge j_0(\vep_k)$ are already chosen, we get the sequence of functions
$\{g_{J_{k-1}(j)}^{(k)}\}_{j=j_{k-1}}^\infty\subset\BV(T;M)$, which, by virtue of
\eq{e:djjk} and \eq{e:Vgj}, satisfies conditions
\begin{equation*}
\sup_{j,j'\ge j_{k-1}}d\bigl(g_{J_{k-1}(j)}^{(k)}(t),g_{J_{k-1}(j')}^{(k)}(t)\bigr)\le
2\vep_k+C_0+2C_1\quad\mbox{for \,all}\quad t\in T
\end{equation*}
and
\begin{equation*}
\sup_{j\ge j_{k-1}}V\bigl(g_{J_{k-1}(j)}^{(k)},T\bigr)\le C(\vep_k).
\end{equation*}
By Helly's-type selection principle (V.4) in $\BV(T;M)$, there are a subsequence
$\{J_k(j)\}_{j=1}^\infty$ of $\{J_{k-1}(j)\}_{j=j_{k-1}}^\infty$ and a function
$g^{(k)}\in\BV(T;M)$ such that $g_{J_k(j)}^{(k)}(t)\to g^{(k)}(t)$ in $M$ as
$j\to\infty$ for all $t\in T$. Since, for each $k\in\Nb$, $\{J_j(j)\}_{j=k}^\infty$ is a
subsequence of $\{J_k(j)\}_{j=j_{k-1}}^\infty\subset\{J_k(j)\}_{j=1}^\infty$,
we conclude that the diagonal sequence $\{g_{J_j(j)}^{(k)}\}_{j=1}^\infty$,
(which was) denoted by $\{g_j^{(k)}\}_{j=1}^\infty$ (at the beginning of step~2),
satisfies condition \eq{e:gkkt}.
We denote the corresponding diagonal subsequence $\{f_{J_j(j)}\}_{j=1}^\infty$ of
$\{f_j\}$ again by $\{f_j\}$.
3. Since $\BV(T;M)\subset\Bd(T;M)$ (by (V.1) on p.~\pageref{p:V}),
$\{g^{(k)}\}_{k=1}^\infty\subset\Bd(T;M)$. We are going to show that
$\{g^{(k)}\}_{k=1}^\infty$ is a Cauchy sequence with respect to the
uniform metric $d_{\infty,T}$. For this, we employ an idea from \cite[p.~49]{Fr}.
Let $\eta>0$ be arbitrary. From $\vep_k\to0$, we find $k_0=k_0(\eta)\in\Nb$ such that
$\vep_k\le\eta$ for all $k\ge k_0$. Now, let $k,k'\in\Nb$ be arbitrary such that
$k,k'\ge k_0$. By virtue of \eq{e:gkkt}, for each $t\in T$, there is a number
$j^1=j^1(t,\eta,k,k')\in\Nb$ such that if $j\ge j^1$, we have
\begin{equation*}
d\bigl(g_j^{(k)}(t),g^{(k)}(t)\bigr)\le\eta\quad\mbox{ and }\quad
d\bigl(g_j^{(k')}(t),g^{(k')}(t)\bigr)\le\eta.
\end{equation*}
Now, it follows from the triangle inequality for $d$ and the first inequality in
\eq{e:dnVC} that if $j\ge j^1$,
\begin{align*}
d\bigl(g^{(k)}(t),g^{(k')}(t)\bigr)&\le d\bigl(g^{(k)}(t),g_j^{(k)}(t)\bigr)
+d\bigl(g_j^{(k)}(t),f_j(t)\bigr)\\[2pt]
&\qquad+d\bigl(f_j(t),g_j^{(k')}(t)\bigr)+d\bigl(g_j^{(k')}(t),g^{(k')}(t)\bigr)\\[2pt]
&\le\eta+\vep_k+\vep_{k'}+\eta\le4\eta.
\end{align*}
By the arbitrariness of $t\in T$, $d_{\infty,T}(g^{(k)},g^{(k')})\le4\eta$ for all
$k,k'\ge k_0$.
4. Being proper, $(M,d)$ is complete, and so, $\Bd(T;M)$ is complete with respect to
the uniform metric $d_{\infty,T}$. By step~3, there is $g\in\Bd(T;M)$ such that
$g^{(k)}\rightrightarrows g$ on $T$ (i.e., $d_{\infty,T}(g^{(k)},g)\to0$) as $k\to\infty$.
Let us prove that $f_j\to g$ pointwise on $T$ as $j\to\infty$ ($\{f_j\}$ being from
the end of step~2).
Let $t\in T$ and $\eta>0$ be arbitrary. Choose and fix a number $k=k(\eta)\in\Nb$
such that $\vep_k\le\eta$ and $d_{\infty,T}(g^{(k)},g)\le\eta$. By \eq{e:gkkt},
there is $j^2=j^2(t,\eta,k)\in\Nb$ such that $d(g_j^{(k)}(t),g^{(k)}(t))\le\eta$
for all $j\ge j^2$, and so, \eq{e:dnVC} implies
\begin{align*}
d(f_j(t),g(t))&\le d\bigl(f_j(t),g_j^{(k)}(t)\bigr)+d\bigl(g_j^{(k)}(t),g^{(k)}(t)\bigr)
+d\bigl(g^{(k)}(t),g(t)\bigr)\\[2pt]
&\le\vep_k+d\bigl(g_j^{(k)}(t),g^{(k)}(t)\bigr)+d_{\infty,T}(g^{(k)},g)\\[2pt]
&\le\eta+\eta+\eta=3\eta\quad\,\mbox{for \,all}\quad j\ge j^2,
\end{align*}
which proves our assertion.
Thus, we have shown that a suitable (diagonal) subsequence $\{f_{j_p}\}_{p=1}^\infty$
of the original sequence $\{f_j\}_{j=1}^\infty$ converges pointwise on $T$ to the
function $g$ from $\Bd(T;M)$. Setting $f=g$ and applying Lemma~\ref{l:proper}(c),
we conclude that
\begin{equation*}
V_\vep(f,T)=V_\vep(g,T)\le\liminf_{p\to\infty}V_\vep(f_{j_p},T)
\le\limsup_{j\to\infty}V_\vep(f_j,T)\quad\forall\,\vep>0.
\end{equation*}
This completes the proof of Theorem~\ref{t:SPprop}.
\sq
A simple consequence of Theorem~\ref{t:SPprop} is the following
\begin{corollary}
Assume that assumption \eq{e:sp} in Theorem~{\rm\ref{t:SPprop}} is replaced by
condition $\lim_{j\to\infty}|f_j(T)|=0$. Then, a subsequence of $\{f_j\}$ converges
pointwise on $T$ to a constant function on $T$.
\end{corollary}
\proof
In fact, given $\vep>0$, there is $j_0=j_0(\vep)\in\Nb$ such that $|f_j(T)|\le\vep$
for all $j\ge j_0$, and so, by \eq{e:zero}, $V_\vep(f_j,T)=0$ for all $j\ge j_0$.
This implies
\begin{equation*}
\limsup_{j\to\infty}V_\vep(f_j,T)\le\sup_{j\ge j_0}V_\vep(f_j,T)=0\quad
\mbox{for \,all}\quad\vep>0.
\end{equation*}
By Theorem~\ref{t:SPprop}, a subsequence of $\{f_j\}$ converges \pw\ on $T$ to
a function $f\in M^T$ such that $V_\vep(f,T)=0$ for all $\vep>0$. Lemma~\ref{l:71}(e)
yields $|f(T)|=0$, i.e., $f$ is a constant function on $T$.
\sq
\begin{remark} \label{r:cHp} \rm
The classical Helly selection principle for monotone functions (p.~\pageref{p:Hellym})
is a particular case of Theorem~\ref{t:SP}. In fact, suppose $\{\vfi_j\}\subset\Rb^T$
is a sequence of monotone functions, for which there is a constant $C>0$ such that
$|\vfi_j(t)|\le C$ for all $t\in T$ and $j\in\Nb$. Setting $(M,\|\cdot\|)=(\Rb,|\cdot|)$,
$x=1$ and $y=0$ in Example~\ref{ex:1}, for every $j\in\Nb$
we find, from \eq{e:mntn} and \eq{e:ov2},
that $V_\vep(\vfi_j,T)=|\vfi_j(T)|-2\vep$ if $0<\vep<\frac12|\vfi_j(T)|$ and
$V_\vep(\vfi_j,T)=0$ if $\vep\ge\frac12|\vfi_j(T)|$. Since $|\vfi_j(T)|\le2C$, we get
$V_\vep(\vfi_j,T)\le2C$ for all $j\in\Nb$ and $\vep>0$, and so, \eq{e:sp} is satisfied.
Similarly, Theorem~\ref{t:SP} implies Helly's selection principle for functions of bounded
variation (cf.\ property (V.4) on p.~\pageref{p:V}). In fact, if $\{f_j\}\subset M^T$
and $C=\sup_{j\in\Nb}V(f_j,T)$ is finite, then, by Lemma~\ref{l:71}(a),
$V_\vep(f_j,T)\le C$ for all $j\in\Nb$ and $\vep>0$, and so, \eq{e:sp} is fulfilled.
Now, if a subsequence of $\{f_j\}$ converges pointwise on $T$ to $f\in M^T$, then
property (V.3) (p.~\pageref{p:V}) implies $f\in\BV(T;M)$ with $V(f,T)\le C$.
\end{remark}
\begin{remark} \label{r:neces} \rm
(a) Condition \eq{e:sp} is \emph{necessary\/} for the \emph{uniform convergence\/}
in the following sense: if $\{f_j\}\subset M^T$, $f_j\rightrightarrows f$ on $T$ and
$V_\vep(f,T)<\infty$ for all $\vep>0$, then, by Lemma~\ref{l:uc}(a),
\begin{equation*}
\limsup_{j\to\infty}V_\vep(f_j,T)\le V_{\vep-0}(f,T)\le V_{\vep'}(f,T)<\infty\quad
\mbox{for all}\quad 0<\vep'<\vep.
\end{equation*}
(b) Contrary to this, \eq{e:sp} \emph{is not\/} necessary for the \pw\
convergence (see Examples~\ref{ex:notnec} and \ref{ex:poico}).
On the other hand, condition \eq{e:sp} is
`almost necessary' for the \pw\ convergence $f_j\to f$ on $T$ in the following sense.
Assume that $T\subset\Rb$ is a measurable set with \emph{finite\/} Lebesgue
measure $\mathcal{L}(T)$ and $\{f_j\}\subset M^T$ is a sequence of measurable
functions such that $f_j\to f$ on $T$ (or even $f_j$ converges almost everywhere
on $T$ to~$f$) and $V_\vep(f,T)<\infty$ for all $\vep>0$. By Egorov's Theorem
(e.g., \cite[Section~3.2.7]{Rao}), given $\eta>0$, there is a measurable set
$T_\eta\subset T$ such that $\mathcal{L}(T\setminus T_\eta)\le\eta$ and
$f_j\rightrightarrows f$ on $T_\eta$. By (a) above and Lemma~\ref{l:ele}(b), we have
\begin{equation*}
\limsup_{j\to\infty}V_\vep(f_j,T_\eta)\le V_{\vep'}(f,T_\eta)\le V_{\vep'}(f,T)<\infty
\quad\mbox{for all}\quad 0<\vep'<\vep.
\end{equation*}
\end{remark}
\section{Examples illustrating Theorem~\protect\ref{t:SP}} \label{ss:illSP}
\begin{example} \label{ex:sinejt}
The main assumption \eq{e:sp} in Theorem~\ref{t:SP} is essential. In fact, it is
well known that the sequence of functions $\{f_j\}\subset\Rb^T$ on the interval
$T=[0,2\pi]$ defined by $f_j(t)=\sin(jt)$, $0\le t\le2\pi$, has no subsequence convergent
at all points of $T$ (cf.\ \cite[Example~3]{JMAA05}; more explicitly, this is revisited in
Remark~\ref{r:sinjt} below). Let us show that $\{f_j\}$ does not satisfy condition~
\eq{e:sp}.
Let us fix $j\in\Nb$. First, note that, given $t,s\in[0,2\pi]$, we have $\sin(jt)=0$ if and
only if $t=t_k=k\pi/j$, $k=0,1,2,\dots,2j$, and $|\sin(js)|=1$ if and only if
$s=s_k=\frac12(t_{k-1}+t_k)=(k-\frac12)\pi/j$, $k=1,2,\dots,2j$. Setting
$I_k=[t_{k-1},s_k]$ and $I_k'=[s_k,t_k]$, we find
\begin{equation*}
T=[0,2\pi]=\bigcup_{k=1}^{2j}[t_{k-1},t_k]=\bigcup_{k=1}^{2j}(I_k\cup I_k')\quad
\mbox{(non-overlapping intervals),}
\end{equation*}
and $f_j$ is strictly monotone on each interval $I_k$ and $I_k'$, $k=1,2,\dots,2j$.
By virtue of Lemma~\ref{l:mor}, given $\vep>0$, we have
\begin{equation} \label{e:sut}
\sum_{k=1}^{2j}\!\bigl(V_\vep(f_j,I_k)+V_\vep(f_j,I_k')\bigr)\le V_\vep(f_j,T)
\le\sum_{k=1}^{2j}\!\bigl(V_\vep(f_j,I_k)+V_\vep(f_j,I_k')\bigr)+(4j-1)2\vep.
\end{equation}
It suffices to calculate $V_\vep(f_j,I_k)$ for $k=1$, where $I_1=[t_0,s_1]=[0,\pi/2j]$
(the other $\vep$-variations in \eq{e:sut} are calculated similarly and give the same value).
Since $f_j$ is strictly increasing on $I_1$, $f_j(I_1)=[0,1]$ and $|f_j(I_1)|=1$,
\eq{e:mntn} and \eq{e:ov2} imply $V_\vep(f_j,I_1)=1-2\vep$ if $0<\vep<\frac12$
(and $V_\vep(f_j,I_1)=0$ if $\vep\ge\frac12$). Hence, $V_\vep(f_j,I_k)=
V_\vep(f_j,I_k')=1-2\vep$ for all $0<\vep<\frac12$ and $k=1,2,\dots,2j$, and
it follows from \eq{e:sut} that
\begin{equation*}
4j(1-2\vep)\le V_\vep(f_j,T)\le4j(1-2\vep)+(4j-1)2\vep=4j-2\vep,
\quad 0<\vep<\textstyle\frac12.
\end{equation*}
Thus, condition \eq{e:sp} is not satisfied by $\{f_j\}$.
\end{example}
\begin{remark} \label{r:sinjt} \rm
Since the sequence of functions $\{f_j\}_{j=1}^\infty$ from Example~\ref{ex:sinejt},
i.e., $f_j(t)=\sin(jt)$ for $t\in[0,2\pi]$, plays a certain role in the sequel as well, for the
sake of completeness, we recall here the proof of the fact (e.g.,
\cite[Chapter~7, Example~7.20]{Rudin}) that no subsequence of
$\{\sin(jt)\}_{j=1}^\infty$
converges in $\Rb$ for all $t\in[0,2\pi]$; note that $\{f_j\}$ is a uniformly bounded
sequence of continuous functions on the compact set $[0,2\pi]$. On the contrary,
assume that there is an increasing sequence $\{j_p\}_{p=1}^\infty\subset\Nb$
such that $\sin(j_pt)$ converges as $p\to\infty$ for all $t\in[0,2\pi]$. Given $t\in[0,2\pi]$,
this implies $\sin(j_pt)-\sin(j_{p+1}t)\to0$, and so, $(\sin(j_pt)-\sin(j_{p+1}t))^2\to0$
as $p\to\infty$. By Lebesgue's dominated convergence theorem,
$I_p\equiv\int_0^{2\pi}(\sin(j_pt)-\sin(j_{p+1}t))^2dt\to0$ as $p\to\infty$.
However, a straightforward computation of the integral $I_p$ (note that $j_p\!<\!j_{p+1}$)
gives the value $I_p=2\pi$ for all $p\in\Nb$, which is a contradiction.
More precisely (cf.~\cite[Chapter~10, Exercise~16]{Rudin}), the set $E\subset[0,2\pi]$ of
all points $t\in[0,2\pi]$, for which $\sin(j_pt)$ converges as $p\to\infty$ (with
$\{j_p\}_{p=1}^\infty$ as above), is of Lebesgue measure zero, $\mathcal{L}(E)=0$.
To see this, it suffices to note that, for a measurable set $A\subset E$,
$\int_A\sin(j_pt)dt\to0$ and
\begin{equation*}
\int_A(\sin(j_pt))^2dt=\frac12\int_A(1-\cos(2j_pt))dt\to\frac12\mathcal{L}(A)\quad
\mbox{as}\quad p\to\infty.
\end{equation*}
To illustrate the assertion in the previous paragraph, let us show that, given $t\in\Rb$,
$\sin(jt)$ converges as $j\to\infty$ if and only if $\sin t=0$ (i.e., $t=\pi k$ for some
integer~$k$). Since the sufficiency is clear, we prove the necessity. Suppose $t\in\Rb$
and the limit $\phi(t)=\lim_{j\to\infty}\sin(jt)$ exists in~$\Rb$. To show that $\sin t=0$,
we suppose, by contradiction, that $\sin t\ne0$. Passing to the limit as $j\to\infty$ in $\sin(j+2)t+\sin(jt)=2\sin(j+1)t\cdot\cos t$, we get
$\phi(t)+\phi(t)=2\phi(t)\cos t$, which is equivalent to $\phi(t)=0$ or $\cos t=1$.
Since $\sin(j+1)t=\sin(jt)\cdot\cos t+\sin t\cdot\cos(jt)$, we find
\begin{equation*}
\cos(jt)=\frac{\sin(j+1)t-\sin(jt)\cdot\cos t}{\sin t},
\end{equation*}
and so, $\lim_{j\to\infty}\cos(jt)=\phi(t)(1-\cos t)/\sin t$. Hence
\begin{align*}
1&=\lim_{j\to\infty}\bigl(\sin^2(jt)+\cos^2(jt)\bigr)=
(\phi(t))^2+(\phi(t))^2\cdot\biggl(\frac{1\!-\!\cos t}{\sin t}\biggr)^{\!2}\\
&\qquad\qquad\qquad=(\phi(t))^2\cdot\frac{2(1\!-\!\cos t)}{\sin^2t},
\end{align*}
and so, $\phi(t)\ne0$ and $1\ne\cos t$, which is a contradiction. (In a similar manner,
one may show that, given $t\in\Rb$, $\cos(jt)$ converges as $j\to\infty$ if and only if
$\cos t=1$, i.e., $t=2\pi k$ for some integer~$k$; see \cite[p.~233]{Bridger}).
Returning to the convergence set $E\subset[0,2\pi]$ of the sequence
$\{\sin(j_pt)\}_{p=1}^\infty$, as a consequence of the previous assertion, we find that
if $j_p=p$, then $E=\{0,\pi,2\pi\}$. In general, the set $E$ may be
`quite large': for instance, if $j_p=p!$, then $E=\pi\cdot(\Qb\cap[0,2])$, which is
countable and dense in~$[0,2\pi]$.
\end{remark}
\begin{example} \label{ex:notnec} \rm
That condition \eq{e:sp} in Theorem~\ref{t:SP} is \emph{not necessary\/} for the
pointwise convergence $f_j\to f$ can be illustrated by the sequence $\{f_j\}$ from
Example~\ref{ex:ucbw}, where $I=[a,b]=[0,1]$. We assert that if
$0<\vep<\frac12d(x,y)$, then $\lim_{j\to\infty}V_\vep(f_j,I)=\infty$. To see this,
given $j\in\Nb$, we consider a partition of $I$ as defined in \eq{e:partj}. Supposing
that $g\in M^I$ is arbitrary such that $d_{\infty,I}(f_j,g)\le\vep$, we find, by virtue
of \eq{e:10},
\begin{equation*}
V(g,I)\ge\sum_{k=1}^{j!}d(g(t_k),g(s_k))\ge
\sum_{k=1}^{j!}\bigl(d(f_j(t_k),f_j(s_k))\!-\!2\vep\bigr)\!=\!j!\bigl(d(x,y)\!-\!2\vep\bigr).
\end{equation*}
By definition \eq{e:av}, $V_\vep(f_j,I)\ge j!(d(x,y)-2\vep)$, which proves our assertion.
\end{example}
\begin{example} \label{ex:equim} \rm
The choice of an appropriate (equivalent) metric on $M$ is essential in Theorem~\ref{t:SP}.
(Recall that two metrics $d$ and $d'$ on $M$ are \emph{equivalent\/} if, given
a sequence $\{x_j\}\subset M$ and $x\in M$, conditions $d(x_j,x)\to0$ and
$d'(x_j,x)\to0$ are equivalent.)
Let $d$ be an unbounded metric on $M$, i.e., $\sup_{x,y\in M}d(x,y)=\infty$ (for
instance, given $N\in\Nb$ and $q\ge1$,
$M=\Rb^N$ and $d(x,y)=\|x-y\|$, where $x=(x_1,\dots,x_N)$,
$y=(y_1,\dots,y_N)\in\Rb^N$ and $\|x\|=\bigl(\sum_{i=1}^N|x_i|^q\bigr)^{1/q}$).
The unboundedness of $d$ is equivalent to $\sup_{x\in M}d(x,y)=\infty$ for all
$y\in M$, so let us fix $y_0\in M$ and pick $\{x_j\}\subset M$ such that
$d(x_j,y_0)\to\infty$ as $j\to\infty$ (e.g., in the case $M=\Rb^N$ we may set
$x_j=(j,j,\dots,j)$ and $y_0=(0,0,\dots,0)$). Given a sequence $\{\tau_j\}\subset(a,b)
\subset I=[a,b]$ such that $\tau_j\to a$ as $j\to\infty$, define $\{f_j\}\subset M^I$ by
(cf.\ Example~\ref{ex:thr})
\begin{equation*}
\mbox{$f_j(\tau_j)=x_j$ \,\,and \,\,$f_j(t)=y_0$ \,if\,
$t\in I\setminus\{\tau_j\}$, $j\in\Nb$.}
\end{equation*}
Clearly, $f_j\to c(t)\equiv y_0$ pointwise on $I$, and so, $\{f_j\}$ is \pw\ \rc\ on $I$
(this can be seen directly by noting that the set $\{f_j(t):j\in\Nb\}$ is equal to
$\{x_k:\mbox{$k\in\Nb$ and $\tau_k=t$}\}\cup\{y_0\}$, which is finite for all $t\in I$).
Given $\vep>0$, there is $j_0=j_0(\vep)\in\Nb$ such that $|f_j(I)|=d(x_j,y_0)>2\vep$
for all $j\ge j_0$, and so, by Lemma~\ref{l:71}(f),
\begin{equation*}
V_\vep(f_j,I)\ge|f_j(I)|-2\vep=d(x_j,y_0)-2\vep\quad\mbox{for all}\quad j\ge j_0.
\end{equation*}
Since $\lim_{j\to\infty}d(x_j,y_0)=\infty$, this implies
$\lim_{j\to\infty}V_\vep(f_j,I)=\infty$, and so, Theorem~\ref{t:SP} is inapplicable
in this context.
On the other hand, the metric $d'$ on $M$, given by $d'(x,y)=\frac{d(x,y)}{1+d(x,y)}$,
$x,y\in M$, is equivalent to~$d$. Let us denote by $V'(f_j,I)$ and $V'_\vep(f_j,I)$ the
variation and the $\vep$-variation of $f_j$ on $I$ with respect to metric $d'$, respectively.
The variation $V'(f_j,I)$ is equal to (by virtue of the additivity of $V'$)
\begin{equation*}
V'(f_j,I)\!=\!V'(f_j,[a,\tau_j])+V'(f_j,[\tau_j,b])\!=\!d'(x_j,y_0)+d'(x_j,y_0)\!=\!
2\,\frac{d(x_j,y_0)}{1+d(x_j,y_0)}.
\end{equation*}
Now, if $\vep>0$, by Lemma~\ref{l:71}(a), $V_\vep'(f_j,I)\le V'(f_j,I)$ for all $j\in\Nb$,
and so,
\begin{equation*}
\limsup_{j\to\infty}V_\vep'(f_j,I)\le\lim_{j\to\infty}V'(f_j,I)=2.
\end{equation*}
Thus, the main assumption \eq{e:sp} in Theorem~\ref{t:SP} is satisfied, and this
Theorem is applicable to the sequence $\{f_j\}$.
Another interpretation of this example is that the main condition \eq{e:sp} is
\emph{not invariant\/} under equivalent metrics on $M$.
\end{example}
\begin{example} \label{ex:poico} \rm
Here we show that condition \eq{e:sp} in Theorem~\ref{t:SP} is \emph{not necessary\/}
for the pointwise convergence $f_j\to f$ on $T$, although we have $V_\vep(f_j,T)<\infty$
and $V_\vep(f,T)<\infty$ for all $\vep>0$. Furthermore, condition \eq{e:sp} may not
hold with respect to \emph{any\/} (equivalent) metric on $M$ such that $d(f_j(t),f(t))\to0$
as $j\to\infty$ for all $t\in T$. In fact, let $T=[0,2\pi]$ and $M=\Rb$, and define
$\{f_j\}\subset M^T$ by (cf.\ \cite[Example~4]{JMAA05}): $f_j(t)=\sin(j^2t)$ if
$0\le t\le2\pi/j$ and $f_j(t)=0$ if $2\pi/j<t\le2\pi$, $j\in\Nb$. Clearly, $\{f_j\}$ converges
\pw\ on $T$ to the function $f\equiv0$ with respect to \emph{any\/} metric $d$ on $M$,
which is equivalent to the usual metric $(x,y)\mapsto|x-y|$ on $\Rb$. Since $f_j$ is
continuous on $T=[0,2\pi]$ with respect to metric $|x-y|$, and so, with respect to $d$,
we find, by Lemma~\ref{l:Regc}, $V_\vep(f_j,T)<\infty$ and $V_\vep(f,T)=0$ with
respect to $d$ for all $j\in\Nb$ and $\vep>0$. Now, given $j,n\in\Nb$, we set
\begin{equation*}
s_{j,n}=\frac1{j^2}\Bigl(2\pi n-\frac{3\pi}2\Bigr)\quad\mbox{and}\quad
t_{j,n}=\frac1{j^2}\Bigl(2\pi n-\frac\pi2\Bigr),
\end{equation*}
so that $f_j(s_{j,n})=1$ and $f_j(t_{j,n})=-1$. Note also that
\begin{equation*}
0<s_{j,1}<t_{j,1}<s_{j,2}<t_{j,2}<\dots<s_{j,j}<t_{j,j}<2\pi/j\quad
\mbox{for all}\quad j\in\Nb.
\end{equation*}
Let $0<\vep<\frac12d(1,-1)$. Given $j\in\Nb$, suppose $g\!\in\! M^T$ is arbitrary
such that \mbox{$d_{\infty,T}(f_j,g)\!\le\!\vep$}.
The definition of $V(g,T)$ and \eq{e:10} give
\begin{align*}
V(g,T)&\ge\sum_{n=1}^jd(g(s_{j,n}),g(t_{j,n}))\ge
\sum_{n=1}^j\bigl(d(f_j(s_{j,n}),f_j(t_{j,n}))-2\vep\bigr)\\
&=\bigl(d(1,-1)-2\vep\bigr)j.
\end{align*}
By the arbitrariness of $g$ as above and \eq{e:av}, $V_\vep(f_j,T)\ge(d(1,-1)-2\vep)j$,
and so, condition \eq{e:sp} is not fulfilled for $0<\vep<\frac12d(1,-1)$.
\end{example}
\begin{example} \label{ex:irreg} \rm
(a) Theorem~\ref{t:SP} is inapplicable to the sequence $\{f_j\}$ from Example~
\ref{ex:rieq}, because (although $f_j\rightrightarrows f=c_{x,y}$ on $I$)
$\lim_{j\to\infty}V_\vep(f_j,I)=\infty$ for $\vep=\frac12\|x-y\|$. The reason is that
the limit function $c_{x,y}$ is not regulated (if $x\ne y$). However, see Remark~
\ref{r:neces}(a) if the limit function is regulated.
(b) Nevertheless, Theorem~\ref{t:SP} can be successfully applied to sequences of
\emph{nonregulated\/} functions. To see this, we again use the context of
Example~\ref{ex:rieq}, where we suppose $x=y\in M$, so that $f(t)=c_{x,x}(t)=x$,
$t\in I$. Recall also that we have $f_j=c_{x_j,y_j}$ with $x_j\ne y_j$, $j\in\Nb$,
$x_j\to x$ and $y_j\to y=x$ in $M$, and $f_j\rightrightarrows f\equiv x$ on $I$. Given $\vep>0$,
there is $j_0=j_0(\vep)\in\Nb$ such that $\|x_j-y_j\|\le2\vep$ for all $j\ge j_0$, which
implies, by virtue of \eq{e:steen}, $V_\vep(f_j,I)=0$ for all $j\ge j_0$. This yields
condition \eq{e:sp}:
\begin{equation*}
\limsup_{j\to\infty}V_\vep(f_j,I)\le\sup_{j\ge j_0}V_\vep(f_j,I)=0
\end{equation*}
(cf.\ also \cite[Example~3]{Studia17}).
On the other hand, for a fixed $k\in\Nb$ and $0<\vep<\frac12\|x_k-y_k\|$, we have,
from \eq{e:steen}, $V_\vep(f_k,I)=\infty$, and so, $\sup_{j\in\Nb}V_\vep(f_j,I)\ge
V_\vep(f_k,I)=\infty$. Thus, condition of uniform boundedness of $\vep$-variations
$\sup_{j\in\Nb}V_\vep(f_j,I)<\infty$, which was assumed in \cite[Theorem~3.8]{Fr},
is more restrictive than condition~\eq{e:sp}.
\end{example}
\section{Two extensions of Theorem~\protect\ref{t:SP}}
Applying Theorem~\ref{t:SP} and the diagonal procedure over expanding intervals,
we get the following \emph{local\/} version of Theorem~\ref{t:SP}.
\begin{theorem} \label{t:SPloc}
If $T\subset\Rb$, $(M,d)$ is a metric space and $\{f_j\}\subset M^T$ is a \pw\ \rc\
sequence of functions such that
\begin{equation*}
\mbox{$\displaystyle\limsup_{j\to\infty}V_\vep(f_j,T\cap[a,b])<\infty$ \,for all \,$a,b\in T$,
$a\le b$, and $\vep>0$,}
\end{equation*}
then a subsequence of $\{f_j\}$ converges pointwise on $T$ to a {\sl regulated} function
$f\!\in\!\Reg(T;M)\!$ such that $f$ is {\sl bounded} on $T\cap[a,b]$ for all $a,b\in T$,
$a\le b$.
\end{theorem}
\proof
With no loss of generality, we may assume that sequences $\{a_k\}$ and $\{b_k\}$
from $T$ are such that $a_{k+1}\!<\!a_k\!<\!b_k\!<\!b_{k+1}$ for all $k\!\in\!\Nb$,
$a_k\to\inf T\notin T$ and $b_k\to\sup T\notin T$ as $k\to\infty$.
By Theorem~\ref{t:SP}, applied to $\{f_j\}$ on $T\cap[a_1,b_1]$, there is a subsequence
$\{J_1(j)\}_{j=1}^\infty$ of $\{J_0(j)\}_{j=1}^\infty=\{j\}_{j=1}^\infty$ such that
$\{f_{J_1(j)}\}_{j=1}^\infty$ converges \pw\ on $T\cap[a_1,b_1]$ to a bounded
regulated function $f_1':T\cap[a_1,b_1]\to M$. Since
\begin{equation*}
\limsup_{j\to\infty}V_\vep(f_{J_1(j)},T\cap[a_2,b_2])\le
\limsup_{j\to\infty}V_\vep(f_j,T\cap[a_2,b_2])\!<\!\infty\,\,\,\mbox{for all}\,\,\,\vep>0,
\end{equation*}
applying Theorem~\ref{t:SP} to $\{f_{J_1(j)}\}_{j=1}^\infty$ on $T\cap[a_2,b_2]$,
we find a subsequence $\{J_2(j)\}_{j=1}^\infty$ of $\{J_1(j)\}_{j=1}^\infty$ such that
$\{f_{J_2(j)}\}_{j=1}^\infty$ converges \pw\ on the set $T\cap[a_2,b_2]$ to a bounded
regulated function $f_2':T\cap[a_2,b_2]\to M$. Since $[a_1,b_1]\subset[a_2,b_2]$, we get
$f_2'(t)=f_1'(t)$ for all $t\in T\cap[a_1,b_1]$. Proceeding this way, for each $k\in\Nb$ we
obtain a subsequence $\{J_k(j)\}_{j=1}^\infty$ of $\{J_{k-1}(j)\}_{j=1}^\infty$ and a
bounded regulated function $f_k':T\cap[a_k,b_k]\to M$ such that $f_{J_k(j)}\to f_k'$ on
$T\cap[a_k,b_k]$ as $j\to\infty$ and $f_k'(t)=f_{k-1}'(t)$ for all
$t\in T\cap[a_{k-1},b_{k-1}]$. Define $f:T\to M$ as follows: given $t\in T$, we have
$\inf T<t<\sup T$, so there is $k=k(t)\in\Nb$ such that $t\in T\cap[a_k,b_k]$, and so,
we set $f(t)=f_k'(t)$. The diagonal sequence $\{f_{J_j(j)}\}_{j=1}^\infty$ converges
\pw\ on $T$ to the function $f$, which satisfies the \emph{conclusions\/} of
Theorem~\ref{t:SPloc}.
\sq
Theorem~\ref{t:SP} implies immediately that if $(M,d)$ is a \emph{proper\/} metric space,
$\{f_j\}\subset M^T$ is a \pw\ \rc\ sequence and there is $E\subset T$ of measure zero,
$\mathcal{L}(E)=0$, such that $\limsup_{j\to\infty}V_\vep(f_j,T\setminus E)<\infty$ for
all $\vep>0$, then a subsequence of $\{f_j\}$ converges a.e.\ (=\,almost everywhere)
on $T$ to a function $f\in M^T$ such that $V_\vep(f,T\setminus E)<\infty$ for all
$\vep>0$. The following theorem is a \emph{selection principle for the a.e.\ convergence\/}
(it may be considered as subsequence-converse to Remark~\ref{r:neces}(b) concerning
the `almost necessity' of condition \eq{e:sp} for the \pw\ convergence).
\begin{theorem} \label{t:SPae}
Suppose $T\subset\Rb$, $(M,d)$ is a {\sl proper} metric space and $\{f_j\}\subset M^T$
is a \pw\ \rc\/ {\rm(}or a.e.\ \rc{\rm)}~on~$T$ sequence of functions satisfying the
condition\/{\rm:} for every $\eta>0$ there is a measurable set $E_\eta\subset T$ of
Lebesgue measure $\mathcal{L}(E_\eta)\le\eta$ such that
\begin{equation} \label{e:spae}
\limsup_{j\to\infty}V_\vep(f_j,T\setminus E_\eta)<\infty\quad\mbox{for all}\quad\vep>0.
\end{equation}
Then a subsequence of $\{f_j\}$ converges a.e.\ on $T$ to a function $f\in M^T$
having the property\/{\rm:} given $\eta>0$, there is a measurable set
$E'_\eta\subset T$ of Lebesgue measure $\mathcal{L}(E'_\eta)\le\eta$ such that
$V_\vep(f,T\setminus E'_\eta)<\infty$ for all $\vep>0$.
\end{theorem}
\proof
We follow the proof of Theorem~6 from \cite{JMAA05} with appropriate modifications.
Let $T_0\subset T$ be a set of Lebesgue measure zero such that the set
$\{f_j(t):j\in\Nb\}$ is \rc\ in $M$ for all $t\in T\setminus T_0$. We employ
Theorem~\ref{t:SP} several times as well as the diagonal procedure. By the assumption,
there is a measurable set $E_1\subset T$ of measure $\mathcal{L}(E_1)\le1$ such that
\eq{e:spae} holds with $\eta=1$. The sequence $\{f_j\}$ is \pw\ \rc\ on
$T\setminus(T_0\cup E_1)$ and, by Lemma~\ref{l:ele}(b), for all $\vep>0$,
\begin{equation*}
\limsup_{j\to\infty}V_\vep(f_j,T\setminus(T_0\cup E_1))\le
\limsup_{j\to\infty}V_\vep(f_j,T\setminus E_1)<\infty.
\end{equation*}
By Theorem~\ref{t:SP}, there are a subsequence $\{J_1(j)\}_{j=1}^\infty$ of
$\{j\}_{j=1}^\infty$ and a function $f^{(1)}:T\setminus(T_0\cup E_1)\to M$,
satisfying $V_\vep(f^{(1)},T\setminus(T_0\cup E_1))<\infty$ for all $\vep>0$,
such that $f_{J_1(j)}\to f^{(1)}$ \pw\ on $T\setminus(T_0\cup E_1)$ as $j\to\infty$.
Inductively, if $k\ge2$ and a subsequence $\{J_{k-1}(j)\}_{j=1}^\infty$ of
$\{j\}_{j=1}^\infty$ is already chosen, by the assumption \eq{e:spae}, there is a
measurable set $E_k\subset T$ with $\mathcal{L}(E_k)\le1/k$ such that
$\limsup_{j\to\infty}V_\vep(f_j,T\setminus E_k)<\infty$ for all $\vep>0$.
The sequence $\{f_{J_{k-1}(j)}\}_{j=1}^\infty$ is \pw\ \rc\ on
$T\setminus(T_0\cup E_k)$ and, again by Lemma~\ref{l:ele}(b), for all $\vep>0$,
\begin{align*}
\!\!\!\!\limsup_{j\to\infty}V_\vep(f_{J_{k-1}(j)},T\setminus(T_0\cup E_k))&\le
\limsup_{j\to\infty}V_\vep(f_{J_{k-1}(j)},T\setminus E_k)\\[3pt]
&\le\limsup_{j\to\infty}V_\vep(f_j,T\setminus E_k)<\infty.
\end{align*}
Theorem~\ref{t:SP} implies the existence of a subsequence $\{J_k(j)\}_{j=1}^\infty$
of $\{J_{k-1}(j)\}_{j=1}^\infty$ and a function $f^{(k)}:T\setminus(T_0\cup E_k)\to M$,
satisfying $V_\vep(f^{(k)},T\setminus(T_0\cup E_k))<\infty$ for all $\vep>0$, such that
$f_{J_k(j)}\to f^{(k)}$ \pw\ on $T\setminus(T_0\cup E_k)$ as $j\to\infty$.
The set $E=T_0\cup\bigcap_{k=1}^\infty E_k$ is of measure zero, and we have the
equality $T\setminus E=\bigcup_{k=1}^\infty(T\setminus(T_0\cup E_k))$. Define the
function $f:T\setminus E\to M$ as follows: given $t\in T\setminus E$, there is $k\in\Nb$
such that $t\in T\setminus(T_0\cup E_k)$, and so, we set $f(t)=f^{(k)}(t)$. The value
$f(t)$ is well-defined, i.e., it is independent of a particular $k$: in fact, if
$t\in T\setminus(T_0\cup E_{k_1})$ for some $k_1\in\Nb$ with, say, $k<k_1$ (with
no loss of generality), then, by the construction above, $\{J_{k_1}(j)\}_{j=1}^\infty$
is a subsequence of $\{J_k(j)\}_{j=1}^\infty$, which implies
\begin{equation*}
f^{(k_1)}(t)=\lim_{j\to\infty}f_{J_{k_1}(j)}(t)=\lim_{j\to\infty}f_{J_k(j)}(t)
=f^{(k)}(t)\quad\mbox{in}\quad M.
\end{equation*}
Let us show that the diagonal sequence $\{f_{J_j(j)}\}_{j=1}^\infty$ (which, of course,
is a subsequence of the original sequence $\{f_j\}$) converges \pw\ on $T\setminus E$
to the function~$f$. To see this, suppose $t\in T\setminus E$. Then
$t\in T\setminus(T_0\cup E_k)$ for some $k\in\Nb$, and so, $f(t)=f^{(k)}(t)$.
Since $\{f_{J_j(j)}\}_{j=k}^\infty$ is a subsequence of $\{f_{J_k(j)}\}_{j=1}^\infty$,
we find
\begin{equation*}
\lim_{j\to\infty}f_{J_j(j)}(t)=\lim_{j\to\infty}f_{J_k(j)}(t)=f^{(k)}(t)=f(t)\quad
\mbox{in}\quad M.
\end{equation*}
We extend $f$ from $T\setminus E$ to the whole $T$ arbitrarily and denote this extension
again by $f$. Given $\eta>0$, pick the minimal $k\in\Nb$ such that $1/k\le\eta$ and
set $E'_\eta=T_0\cup E_k$. It follows that $\mathcal{L}(E'_\eta)=\mathcal{L}(E_k)
\le1/k\le\eta$, $f=f^{(k)}$ on $T\setminus(T_0\cup E_k)=T\setminus E'_\eta$, and
\begin{equation*}
V_\vep(f,T\setminus E'_\eta)=V_\vep(f^{(k)},T\setminus(T_0\cup E_k))<\infty\quad
\mbox{for all}\quad\vep>0,
\end{equation*}
which was to be proved.
\sq
\section{Weak pointwise selection principles} \label{ss:weak}
In this section, we establish a variant of Theorem~\ref{t:SP} for functions with values
in a reflexive Banach space taking into account some specific features of this case
(such as the validity of the weak \pw\ convergence of sequences of functions).
Suppose $(M,\|\cdot\|)$ is a normed linear space over the field $\Kb=\Rb$ or $\Cb$
(equipped with the absolute value $|\cdot|$) and $M^*$ is its \emph{dual}, i.e.,
$M^*=L(M;\Kb)$ is the space of all continuous (=\,bounded) linear functionals on~$M$.
Recall that $M^*$ is a Banach space under the norm
\begin{equation} \label{e:nrm*}
\|x^*\|=\sup\bigl\{|x^*(x)|:\mbox{$x\in M$ and $\|x\|\le1$}\bigr\},\quad x^*\in M^*.
\end{equation}
The natural duality between $M$ and $M^*$ is determined by the bilinear functional
$\lan\cdot,\cdot\ran:M\times M^*\to\Kb$ defined by $\lan x,x^*\ran=x^*(x)$ for all
$x\in M$ and $x^*\in M^*$. Recall also that a sequence $\{x_j\}\subset M$ is said to
\emph{converge weakly\/} in $M$ to an element $x\in M$, written as $x_j\wto x$ in $M$,
if $\lan x_j,x^*\ran\to\lan x,x^*\ran$ in $\Kb$ as $j\to\infty$ for all $x^*\in M^*$.
It is well known that if $x_j\wto x$ in $M$, then $\|x\|\le\liminf_{j\to\infty}\|x_j\|$.
The notion of the approximate variation $\{V_\vep(f,T)\}_{\vep>0}$ for $f\in M^T$ is
introduced as in \eq{e:av} with respect to the induced metrics $d(x,y)=\|x-y\|$,
$x,y\in M$, and $d_{\infty,T}(f,g)=\|f-g\|_{\infty,T}=\sup_{t\in T}\|f(t)-g(t)\|$,
$f,g\in M^T$.
\begin{theorem} \label{t:SPweak}
Let $T\subset\Rb$ and $(M,\|\cdot\|)$ be a reflexive Banach space with separable dual
$(M^*,\|\cdot\|)$. Suppose the sequence $\{f_j\}\subset M^T$ is such that
\begin{itemize}
\setlength{\itemsep}{0.0pt plus 0.5pt minus 0.25pt}
\item[{\rm(i)}] $\sup_{j\in\Nb}\|f_j(t_0)\|\le C_0$ for some $t_0\in T$ and $C_0\ge0;$
\item[{\rm(ii)}] $v(\vep)\equiv\limsup_{j\to\infty}V_\vep(f_j,T)<\infty$ for all $\vep>0$.
\end{itemize}
Then, there is a subsequence of $\{f_j\}$, again denoted by $\{f_j\}$, and a function
$f\in M^T$, satisfying $V_\vep(f,T)\le v(\vep)$ for all $\vep>0$ {\rm(}and, a fortiori,
$f$ is bounded and regulated on $T${\rm)}, such that $f_j(t)\wto f(t)$ in $M$
for all $t\in T$.
\end{theorem}
\proof
1. First, we show that there is $j_0\in\Nb$ such that $C(t)\equiv\sup_{j\ge j_0}\|f_j(t)\|$
is finite for all $t\in T$. In fact, by Lemma~\ref{l:71}(b), $|f_j(T)|\le V_\vep(f_j,T)+2\vep$
with, say, $\vep=1$, for all $j\in\Nb$, which implies
\begin{equation*}
\limsup_{j\to\infty}|f_j(T)|\le\limsup_{j\to\infty}V_1(f_j,T)+2=v(1)+2<\infty\quad
\mbox{by (ii)},
\end{equation*}
and so, there is $j_0\in\Nb$ and a constant $C_1>0$ such that $|f_j(T)|\le C_1$ for
all $j\ge j_0$. Now, given $j\ge j_0$ and $t\in T$, we get, by (i),
\begin{equation*}
\|f_j(t)\|\le\|f_j(t_0)\|+\|f_j(t)-f_j(t_0)\|\le C_0+|f_j(T)|\le C_0+C_1,
\end{equation*}
i.e., $C(t)\le C_0+C_1$ for all $t\in T$.
2. Given $j\in\Nb$ and $x^*\in M^*$, we set $f_j^{x^*}(t)=\lan f_j(t),x^*\ran$ for
all $t\in T$. Let us verify that the sequence $\{f_j^{x^*}\}_{j=j_0}^\infty\subset\Kb^T$
satisfies the assumptions of Theorem~\ref{t:SP}. By \eq{e:nrm*} and Step~1, we have
\begin{equation} \label{e:bfu*}
\mbox{$|f_j^{x^*}(t)|\le\|f_j(t)\|\!\cdot\!\|x^*\|\le C(t)\|x^*\|$ \,\,\,for all \,\,$t\in T$
\,and \,$j\ge j_0$,}
\end{equation}
and so, $\{f_j^{x^*}\}_{j=j_0}^\infty$ is \pw\ \rc\ on $T$. If $x^*=0$, then
$f_j^{x^*}=0$ in $\Kb^T$, which implies $V_\vep(f_j^{x^*},T)=0$ for all $j\in\Nb$
and $\vep>0$. Now, we show that if $x^*\ne0$, then
\begin{equation} \label{e:Veofn}
\mbox{$V_\vep(f_j^{x^*},T)\le V_{\vep/\|x^*\|}(f_j,T)\|x^*\|$ \,\,\,for all
\,\,\,$j\in\Nb$ \,and \,$\vep>0$.}
\end{equation}
To prove \eq{e:Veofn}, we may assume that $V_{\vep/\|x^*\|}(f_j,T)<\infty$.
By definition \eq{e:av}, for every $\eta>0$ there is $g_j=g_{j,\eta}\in\BV(T;M)$
(also depending on $\vep$ and $x^*$) such that
\begin{equation*}
\|f_j-g_j\|_{\infty,T}\le\vep/\|x^*\|\quad\mbox{and}\quad
V(g_j,T)\le V_{\vep/\|x^*\|}(f_j,T)+\eta.
\end{equation*}
Setting $g_j^{x^*}(t)=\lan g_j(t),x^*\ran$ for all $t\in T$ (and so,
$g_j^{x^*}\in\Kb^T$), we find
\begin{align*}
\bigl|f_j^{x^*}-g_j^{x^*}\bigr|_{\infty,T}&=\sup_{t\in T}|\lan f_j(t)-g_j(t),x^*\ran|
\le\sup_{t\in T}\|f_j(t)-g_j(t)\|\!\cdot\!\|x^*\|\\[2pt]
&=\|f_j-g_j\|_{\infty,T}\|x^*\|\le(\vep/\|x^*\|)\|x^*\|=\vep.
\end{align*}
Furthermore, it is straightforward that $V(g_j^{x^*},T)\!\le\! V(g_j,T)\|x^*\|$. Once
again from definition \eq{e:av}, it follows that
\begin{equation*}
V_\vep(f_j^{x^*},T)\le V(g_j^{x^*},T)\le V(g_j,T)\|x^*\|\le
\bigl(V_{\vep/\|x^*\|}(f_j,T)+\eta\bigr)\|x^*\|.
\end{equation*}
Passing to the limit as $\eta\to+0$, we arrive at \eq{e:Veofn}.
Now, by \eq{e:Veofn} and (ii), for every $\vep>0$ and $x^*\in M^*$, $x^*\ne0$,
we have
\begin{equation} \label{e:vue*}
v_{x^*}(\vep)\equiv\limsup_{j\to\infty}V_\vep(f_j^{x^*},T)
\le v(\vep/\|x^*\|)\|x^*\|<\infty.
\end{equation}
Taking into account \eq{e:bfu*} and \eq{e:vue*}, given $x^*\in M^*$, we may
apply Theorem~\ref{t:SP} to the sequence $\{f_j^{x^*}\}_{j=j_0}^\infty\subset\Kb^T$
and extract a subsequence $\{f_{j,x^*}\}_{j=1}^\infty$ (depending on $x^*$
as well) of $\{f_j\}_{j=j_0}^\infty$ and find a function $f_{x^*}\in\Kb^T$, satisfying
$V_\vep(f_{x^*},T)\le v_{x^*}(\vep)$ for all $\vep>0$ (and so, $f_{x^*}$ is bounded
and regulated on $T$), such that $\lan f_{j,x^*}(t),x^*\ran\!\to\! f_{x^*}(t)$ in $\Kb$
as $j\!\to\!\infty$ for all \mbox{$t\in T$}.
3. Making use of the diagonal procedure, we are going to get rid of the dependence
of $\{f_{j,x^*}\}_{j=1}^\infty$ on the element $x^*\in M^*$. Since $M^*$ is separable,
$M^*$ contains a countable dense subset $\{x^*_k\}_{k=1}^\infty$. Setting
$x^*=x_1^*$ in \eq{e:bfu*} and \eq{e:vue*} and applying Theorem~\ref{t:SP} to the
sequence of functions $f_j^{x_1^*}=\lan f_j(\cdot),x_1^*\ran\in\Kb^T$, $j\ge j_0$,
we obtain a subsequence $\{J_1(j)\}_{j=1}^\infty$ of $\{j\}_{j=j_0}^\infty$ and a
function $f_{x_1^*}\in\Kb^T$ (both depending on $x_1^*$), satisfying
$V_\vep(f_{x_1^*},T)\le v_{x_1^*}(\vep)$ for all $\vep>0$, such that
$\lan f_{J_1(j)}(t),x_1^*\ran\to f_{x_1^*}(t)$ in $\Kb$ as $j\to\infty$ for all $t\in T$.
Inductively, assume that $k\ge2$ and the subsequence $\{J_{k-1}(j)\}_{j=1}^\infty$
of $\{j\}_{j=j_0}^\infty$ is already chosen. Putting $x^*=x_k^*$, replacing
$j$ by $J_{k-1}(j)$ in \eq{e:bfu*} and taking into account \eq{e:vue*}, we get
\begin{equation*}
\sup_{j\in\Nb}\bigl|\lan f_{J_{k-1}(j)}(t),x_k^*\ran\bigr|\le C(t)\|x_k^*\|\quad
\mbox{for all}\quad t\in T
\end{equation*}
and
\begin{equation*}
\limsup_{j\to\infty}V_\vep\bigl(\lan f_{J_{k-1}(j)}(\cdot),x_k^*\ran,T\bigr)
\le v_{x_k^*}(\vep)<\infty\quad\mbox{for all}\quad\vep>0.
\end{equation*}
By Theorem~\ref{t:SP}, applied to the sequence $\{f_{J_{k-1}(j)}^{x_k^*}\}_{j=1}^
\infty\subset\Kb^T$, there are a subsequence $\{J_k(j)\}_{j=1}^\infty$ of
$\{J_{k-1}(j)\}_{j=1}^\infty$ and a function $f_{x_k^*}\in\Kb^T$, satisfying
$V_\vep(f_{x_k^*},T)\le v_{x_k^*}(\vep)$ for all $\vep>0$, such that
$\lan f_{J_k(j)}(t),x_k^*\ran\to f_{x_k^*}(t)$ in $\Kb$ as $j\to\infty$ for all $t\in T$.
It follows that the diagonal subsequence $\{f_{J_j(j)}\}_{j=1}^\infty$ of
$\{f_j\}_{j=j_0}^\infty$, denoted by $\{f_j\}_{j=1}^\infty$, satisfies the condition:
\begin{equation} \label{e:fjfu}
\mbox{$\displaystyle\lim_{j\to\infty}\lan f_j(t),x_k^*\ran=f_{x_k^*}(t)$ \,\,\,for all\,\,\,
$t\in T$ \,and \,$k\in\Nb$.}
\end{equation}
4. Let us show that the sequence $f_j^{x^*}(t)=\lan f_j(t),x^*\ran$, $j\in\Nb$, is
Cauchy in $\Kb$ for every $x^*\in M^*$ and $t\in T$. Since the sequence
$\{x_k^*\}_{k=1}^\infty$ is dense in $M^*$, given $\eta>0$, there is
$k=k(\eta)\in\Nb$ such that $\|x^*-x_k^*\|\le\eta/(4C(t)+1)$, and, by \eq{e:fjfu},
there is $j^0=j^0(\eta)\in\Nb$ such that
$\bigl|\lan f_j(t),x_k^*\ran-\lan f_{j'}(t),x_k^*\ran\bigr|\le\eta/2$ for all $j,j'\ge j^0$.
Hence
\begin{align*}
|f_j^{x^*}(t)-f_{j'}^{x^*}(t)|&\le\|f_j(t)-f_{j'}(t)\|\!\cdot\!\|x^*-x_k^*\|
+|\lan f_j(t),x_k^*\ran-\lan f_{j'}(t),x_k^*\ran|\\[3pt]
&\le2C(t)\frac{\eta}{4C(t)+1}+\frac\eta2\le\eta\quad\mbox{for all}\quad j,j'\ge j^0.
\end{align*}
By the completeness of $(\Kb,|\cdot|)$, there is $f_{x^*}(t)\in\Kb$ such that
$f_j^{x^*}(t)\to f_{x^*}(t)$ in $\Kb$ as $j\to\infty$. Thus, we have shown that
for every $x^*\in M^*$ there is a function $f_{x^*}\in\Kb^T$ satisfying,
by virtue of Lemma~\ref{l:proper}(c), \eq{e:Veofn} and \eq{e:vue*},
\begin{equation*}
V_\vep(f_{x^*},T)\le\liminf_{j\to\infty}V_\vep(f_j^{x^*},T)\le v_{x^*}(\vep)
\quad\mbox{for all}\quad\vep>0
\end{equation*}
and such that
\begin{equation} \label{e:uu*}
\mbox{$\displaystyle\lim_{j\to\infty}\lan f_j(t),x^*\ran=f_{x^*}(t)$ \,in \,$\Kb$
\,\,\,for all \,\,\,$t\in T$.}
\end{equation}
5. Now, we show that, for every $t\in T$, the sequence $\{f_j(t)\}$ converges
weakly in $M$ to an element of $M$. The reflexivity of $M$ implies
\begin{equation*}
f_j(t)\in M=M^{**}=L(M^*;\Kb)\quad\mbox{for all}\quad j\in\Nb.
\end{equation*}
Define the functional $F_t:M^*\to\Kb$ by $F_t(x^*)=f_{x^*}(t)$ for all $x^*\in M^*$.
It follows from \eq{e:uu*} that
\begin{equation*}
\lim_{j\to\infty}\lan f_j(t),x^*\ran=f_{x^*}(t)=F_t(x^*)\quad\mbox{for all}
\quad x^*\in M^*,
\end{equation*}
i.e., the sequence of functionals $\{f_j(t)\}\subset L(M^*;\Kb)$ converges \pw\
on $M^*$ to the functional $F_t:M^*\to\Kb$. By the uniform boundedness principle,
$F_t\in L(M^*;\Kb)$ and $\|F_t\|\le\liminf_{j\to\infty}\|f_j(t)\|$. Setting $f(t)=F_t$
for all $t\in T$, we find $f\in M^T$ and, for all $x^*\in M^*$ and $t\in T$,
\begin{equation} \label{e:wto}
\lim_{j\to\infty}\lan f_j(t),x^*\ran=F_t(x^*)=\lan F_t,x^*\ran=\lan f(t),x^*\ran,
\end{equation}
which means that $f_j(t)\wto f(t)$ in $M$ for all $t\in T$. (Note that \eq{e:uu*} and
\eq{e:wto} imply $f_{x^*}(t)=\lan f(t),x^*\ran$ for all $x^*\in M^*$ and $t\in T$.)
6. It remains to prove that $V_\vep(f,T)\le v(\vep)$ for all $\vep>0$. Recall that the
sequence $\{f_j\}\subset M^T$, we deal with here, is the diagonal sequence
$\{f_{J_j(j)}\}_{j=1}^\infty$ from the end of Step~3, which satisfies conditions
\eq{e:wto} and, in place of (ii),
\begin{equation} \label{e:newve}
\limsup_{j\to\infty}V_\vep(f_j,T)\le v(\vep)\quad\mbox{for all}\quad\vep>0.
\end{equation}
Let us fix $\vep>0$. Since $v(\vep)<\infty$ by (ii), for every $\eta>v(\vep)$ condition
\eq{e:newve} implies the existence of $j_1=j_1(\eta,\vep)\in\Nb$ such that
$\eta>V_\vep(f_j,T)$ for all $j\ge j_1$. Hence, for every $j\ge j_1$, by the definition
of $V_\vep(f_j,T)$, there is $g_j\in\BV(T;M)$ such that
\begin{equation} \label{e:gjj}
\|f_j-g_j\|_{\infty,T}=\sup_{t\in T}\|f_j(t)-g_j(t)\|\le\vep\quad\mbox{and}\quad
V(g_j,T)\le\eta.
\end{equation}
These conditions and assumption (i) imply $\sup_{j\ge j_1}V(g_j,T)\le\eta$ and
\begin{equation*}
\|g_j(t_0)\|\le\|g_j(t_0)-f_j(t_0)\|+\|f_j(t_0)\|\le\|g_j-f_j\|_{\infty,T}+C_0\le\vep+C_0
\end{equation*}
for all $j\ge j_1$. Since $(M,\|\cdot\|)$ is a reflexive Banach space with separable dual
$M^*$, by the weak Helly-type pointwise selection principle (see Theorem~7 and
Remarks (1)--(4) in \cite{JMAA05}, or Theorem~3.5 in \cite[Chapter~1]{Barbu}),
there are a subsequence $\{g_{j_p}\}_{p=1}^\infty$ of $\{g_j\}_{j=j_1}^\infty$ and
a function $g\in\BV(T;M)$ such that $g_{j_p}(t)\wto g(t)$ in $M$ as $p\to\infty$ for all
$t\in T$. Noting that $f_{j_p}(t)\wto f(t)$ in $M$ as $p\to\infty$ for all $t\in T$ as well,
we get $f_{j_p}(t)-g_{j_p}(t)\wto f(t)-g(t)$ in $M$ as $p\to\infty$, and so, taking into
account the first condition in \eq{e:gjj}, we find
\begin{equation*}
\|f(t)-g(t)\|\le\liminf_{p\to\infty}\|f_{j_p}(t)-g_{j_p}(t)\|\le\vep\quad\mbox{for all}
\quad t\in T,
\end{equation*}
which implies $\|f-g\|_{\infty,T}\le\vep$. Had we already shown that $V(g,T)\le\eta$,
definition \eq{e:av} would yield $V_\vep(f,T)\le V(g,T)\le\eta$ for every $\eta>v(\vep)$,
which completes the proof of Theorem~\ref{t:SPweak}.
In order to prove that $V(g,T)\le\eta$, suppose $P=\{t_i\}_{i=0}^m\subset T$ is a
partition of $T$. Since $g_{j_p}(t)\wto g(t)$ in $M$ as $p\to\infty$ for all $t\in T$,
given $i\in\{1,2,\dots,m\}$, we have $g_{j_p}(t_i)-g_{j_p}(t_{i-1})\wto g(t_i)-g(t_{i-1})$
in $M$ as $p\to\infty$, and so,
\begin{equation*}
\|g(t_i)-g(t_{i-1})\|\le\liminf_{p\to\infty}\|g_{j_p}(t_i)-g_{j_p}(t_{i-1})\|.
\end{equation*}
Summing over $i=1,2,\dots,m$ and taking into account the properties of the limit inferior
and the second condition in \eq{e:gjj}, we get
\begin{align*}
\sum_{i=1}^m\|g(t_i)-g(t_{i-1})\|&\le\sum_{i=1}^m\liminf_{p\to\infty}
\|g_{j_p}(t_i)-g_{j_p}(t_{i-1})\|\\
&\le\liminf_{p\to\infty}\sum_{i=1}^m\|g_{j_p}(t_i)-g_{j_p}(t_{i-1})\|\\[3pt]
&\le\liminf_{p\to\infty}V(g_{j_p},T)\le\eta.
\end{align*}
Thus, by the arbitrariness of partition $P$ of $T$, we conclude that $V(g,T)\le\eta$,
which was to be proved.
\sq
Assumption (ii) in Theorem~\ref{t:SPweak} can be weakened as the following theorem
shows.
\begin{theorem} \label{t:SPw2}
Under the assumptions of Theorem\/~{\rm\ref{t:SPweak}} on $T$ and $(M,\|\cdot\|)$,
suppose the sequence $\{f_j\}\subset M^T$ is such that
\begin{itemize}
\setlength{\itemsep}{0.0pt plus 0.5pt minus 0.25pt}
\item[{\rm(i)}] $C(t)\equiv\sup_{j\in\Nb}\|f_j(t)\|<\infty$ for all $t\in T;$
\item[{\rm(ii)}] $v_{x^*}(\vep)\equiv\limsup_{j\to\infty}
V_\vep(\lan f_j(\cdot),x^*\ran,T)\!<\!\infty$ for all $\vep\!>\!0$ and $x^*\in M^*$.%
\footnote{As in Step~2 of the proof of Theorem~\ref{t:SPweak},
$\lan f_j(\cdot),x^*\ran(t)=\lan f_j(t),x^*\ran=f_j^{x^*}(t)$, $t\in T$.}
\end{itemize}
Then, there is a subsequence of $\{f_j\}$, again denoted by $\{f_j\}$, and a function
$f\in M^T$, satisfying $V_\vep(\lan f(\cdot),x^*\ran,T)\le v_{x^*}(\vep)$ for all
$\vep>0$ and $x^*\in M^*$, such that $f_j(t)\wto f(t)$ in $M$ as $j\to\infty$
for all $t\in T$.
\end{theorem}
\proof
It suffices to note that assumption (i) implies \eq{e:bfu*} with $j_0=1$, replace
\eq{e:vue*} by assumption (ii), and argue as in Steps~3--5 of the proof of
Theorem~\ref{t:SPweak}.
\sq
The next example illustrates the applicability of Theorems~\ref{t:SPweak} and \ref{t:SPw2}.
\begin{example} \label{ex:} \rm
In examples (a) and (b) below, we assume the following. Let $M=L^2[0,2\pi]$ be the
real Hilbert space of all square Lebesgue summable functions on the interval $[0,2\pi]$
equipped with the \emph{inner product}
\begin{equation*}
\lan x,y\ran=\int_0^{2\pi}\!\!x(s)y(s)\,ds\quad\!\!\mbox{and the \emph{norm}}
\quad\!\!\|x\|=\sqrt{\lan x,x\ran},\,\,\,x,y\in M.
\end{equation*}
It is well known that $M$ is separable, self-dual ($M=M^*$, by the Riesz
representation theorem), and so, reflexive ($M=M^{**}$). Given $j\in\Nb$, define
two functions $x_j,y_j\in M$ by
\begin{equation*}
x_j(s)=\sin(js)\quad\mbox{and}\quad y_j(s)=\cos(js)\quad\mbox{for all}\quad
s\in[0,2\pi].
\end{equation*}
Clearly, $\|x_j\|=\|y_j\|=\sqrt\pi$, and so, by the Lyapunov--Parseval equality,
\begin{equation*}
\frac{\lan x,1\ran^2}{2}+\sum_{j=1}^\infty\Bigl(\lan x,x_j\ran^2+
\lan x,y_j\ran^2\Bigr)=\pi\|x\|^2,\quad x\in M,
\end{equation*}
we find $\lan x,x_j\ran\to0$ and $\lan x,y_j\ran\to0$ as $j\to\infty$ for all $x\in M$,
and so, $x_j\wto0$ and $y_j\wto 0$ in $M$.
In examples (a) and (b) below, we set $T=I=[0,1]$.
(a) This example illustrates Theorem~\ref{t:SPweak}. Define the sequence
$\{f_j\}\subset M^T$ by $f_j(t)=tx_j$, $t\in T$. Clearly, $f_j(t)\wto0$ in $M$ for all
$t\in T$. Note, however, that the sequence $\{f_j(t)\}$ does \emph{not\/} converge
in (the norm of) $M$ at all points $0<t\le1$, because
$\|f_j(t)\!-\!f_k(t)\|^2=(\|x_j\|^2\!+\!\|x_k\|^2)t^2=2\pi t^2$,~\mbox{$j\ne k$}.
Since $f_j(0)=0$ in $M$ for all $j\in\Nb$, we verify only condition (ii) of Theorem~
\ref{t:SPweak}. Setting $\vfi(t)=t$ for $t\in T$, $x=x_j$ and $y=0$ in \eq{e:fxy}, we find
$|\vfi(T)|=1$ and $\|x\|=\|x_j\|=\sqrt\pi$, and so, by virtue of \eq{e:mntn}, we get
\begin{equation*}
V_\vep(f_j,T)=\left\{
\begin{tabular}{ccr}
$\!\!\sqrt\pi-2\vep$ & \mbox{if} & $0<\vep<\sqrt\pi/2$\\[3pt]
$\!\!0$ & \mbox{if} & $\vep\ge\sqrt\pi/2$
\end{tabular}\right.\quad\mbox{for all}\quad j\in\Nb,
\end{equation*}
which implies condition (ii) in Theorem~\ref{t:SPweak}. Note that (cf.\
Lemma~\ref{l:71}(a)) $V(f_j,T)=\lim_{\vep\to+0}V_\vep(f_j,T)=\sqrt\pi$ for all
$j\in\Nb$. Also, it is to be noted that Theorem~\ref{t:SP} is inapplicable to $\{f_j\}$,
because the set $\{f_j(t):j\in\Nb\}$ is not \rc\ in (the norm of) $M$ for all $0<t\le1$.
(b) Here we present an example when Theorem~\ref{t:SPw2} is applicable, while
Theorem~\ref{t:SPweak} is not. Taking into account definition \eq{e:Dir} of the
Dirichlet function, we let the sequence $\{f_j\}\subset M^T$ be given by
$f_j(t)=\displaystylec_{x_j,y_j}(t)$ for all $t\in T$ and $j\in\Nb$. More explicitly,
\begin{equation*}
f_j(t)(s)=\displaystylec_{x_j(s),y_j(s)}(t)=\left\{
\begin{tabular}{ccl}
$\!\!\sin(js)$ & \!\!\mbox{if}\!\! & $t\in I_1\equiv[0,1]\cap\Qb$,\\[3pt]
$\!\!\cos(js)$ & \!\!\mbox{if}\!\! & $t\in I_2\equiv[0,1]\setminus\Qb$,
\end{tabular}\right.\quad\!\! s\in[0,2\pi].
\end{equation*}
Note that
\begin{equation} \label{e:D01}
f_j(t)=\displaystylec_{x_j,0}(t)+\displaystylec_{0,y_j}(t)=\displaystylec_{1,0}(t)x_j+\displaystylec_{0,1}(t)y_j,\quad t\in T,
\end{equation}
where $\displaystylec_{1,0}$ and $\displaystylec_{0,1}$ are the corresponding real-valued Dirichlet
functions on $T=[0,1]$. By \eq{e:D01}, $f_j(t)\wto0$ in $M$ for all $t\in T$.
On the other hand, the sequence $\{f_j(t)\}$ \emph{diverges\/} in (the norm of) $M$
at all points $t\in T$: in fact,
\begin{equation*}
\|x_j-x_k\|^2=\lan x_j-x_k,x_j-x_k\ran=\|x_j\|^2+\|x_k\|^2=2\pi,\quad j\ne k,
\end{equation*}
and, similarly, $\|y_j-y_k\|^2=2\pi$, $j\ne k$, from which we get
\begin{equation*}
\|f_j(t)-f_k(t)\|=\left\{
\begin{tabular}{ccl}
$\!\!\|x_j-x_k\|$ & \!\!\mbox{if}\!\! & $t\in I_1$\\[3pt]
$\!\!\|y_j-y_k\|$ & \!\!\mbox{if}\!\! & $t\in I_2$
\end{tabular}\right.\!\!=\sqrt{2\pi},\quad j\ne k.
\end{equation*}
(It already follows that Theorem~\ref{t:SP} is inapplicable to $\{f_j\}$.)
Given $t\in T$ and $j\in\Nb$, we have
\begin{equation*}
\|f_j(t)\|=\|\displaystylec_{x_j,y_j}(t)\|=\left\{
\begin{tabular}{ccl}
$\!\!\|x_j\|$ & \!\!\mbox{if}\!\! & $t\in I_1$\\[3pt]
$\!\!\|y_j\|$ & \!\!\mbox{if}\!\! & $t\in I_2$
\end{tabular}\right.\!\!=\sqrt\pi,
\end{equation*}
and so, conditions (i) in Theorems~\ref{t:SPweak} and \ref{t:SPw2} are satisfied.
Let us show that condition (ii) in Theorem~\ref{t:SPweak} does not hold. In fact,
by \eq{e:Dirass} and \eq{e:refi},
\begin{equation*}
V_\vep(f_j,T)=\left\{
\begin{tabular}{ccr}
$\!\!\infty$ & \!\!\mbox{if}\!\! & $0<\vep<\textstyle\frac12\|x_j-y_j\|$,\\[3pt]
$\!\!0$ & \!\!\mbox{if}\!\! & $\vep\ge\textstyle\frac12\|x_j-y_j\|$,
\end{tabular}\right.
\end{equation*}
where $\|x_j\!-\!y_j\|^2\!=\!\lan x_j\!-\!y_j,x_j-y_j\ran\!=\!
\|x_j\|^2\!+\!\|y_j\|^2\!=\!2\pi$,
i.e., $\|x_j\!-\!y_j\|\!=\!\sqrt{2\pi}$.
Now, we show that condition (ii) in Theorem~\ref{t:SPw2} is satisfied (cf.\ Example~
\ref{ex:irreg}). By \eq{e:D01}, for every $x^*\in M^*=M$ and $t\in T$, we have
\begin{align*}
\lan f_j(t),x^*\ran&=\lan\displaystylec_{x_j,y_j}(t),x^*\ran=
\lan\displaystylec_{1,0}(t)x_j+\displaystylec_{0,1}(t)y_j,x^*\ran\\[3pt]
&=\displaystylec_{1,0}(t)\lan x_j,x^*\ran+\displaystylec_{0,1}(t)\lan y_j,x^*\ran=\displaystylec_{x_j',y_j'}(t),
\end{align*}
where $x_j'=\lan x_j,x^*\ran$ and $y_j'=\lan y_j,x^*\ran$. Again, by \eq{e:Dirass}
and \eq{e:refi},
\begin{equation} \label{e:xpyp}
V_\vep(\lan f_j(\cdot),x^*\ran,T)=V_\vep(\displaystylec_{x_j',y_j'},T)=\left\{
\begin{tabular}{ccr}
$\!\!\infty$ & \!\!\mbox{if}\!\! & $0<\vep<\textstyle\frac12|x_j'-y_j'|$,\\[3pt]
$\!\!0$ & \!\!\mbox{if}\!\! & $\vep\ge\textstyle\frac12|x_j'-y_j'|$,
\end{tabular}\right.
\end{equation}
where $|x_j'-y_j'|=|\lan x_j,x^*\ran-\lan y_j,x^*\ran|\to0$ as $j\to\infty$. Hence, given
$\vep>0$,
there is $j_0=j_0(\vep,x^*)\in\Nb$ such that $|x_j'-y_j'|\le2\vep$ for all
$j\ge j_0$, and so, \eq{e:xpyp} implies $V_\vep(\lan f_j(\cdot),x^*\ran,T)=0$ for all
$j\ge j_0$. Thus,
\begin{equation*}
\limsup_{j\to\infty}V_\vep(\lan f_j(\cdot),x^*\ran,T)\le\sup_{j\ge j_0}
V_\vep(\lan f_j(\cdot),x^*\ran,T)=0
\end{equation*}
(i.e., $V_\vep(\lan f_j(\cdot),x^*\ran,T)\!\to\!0$, $j\!\to\!\infty$), which yields
condition (ii) in \mbox{Theorem~\ref{t:SPw2}}.
\end{example}
\section{Irregular pointwise selection principles} \label{ss:irreg}
In what follows, we shall be dealing with double sequences of the form
$\al:\Nb\times\Nb\to[0,\infty]$ having the property that $\al(j,j)=0$ for all $j\in\Nb$
(e.g., \eq{e:spir}). The \emph{limit superior\/} of $\al(j,k)$ as $j,k\to\infty$ is defined by
\begin{equation*}
\limsup_{j,k\to\infty}\al(j,k)=\lim_{n\to\infty}\sup\bigl\{\al(j,k):
\mbox{$j\ge n$ and $k\ge n$}\bigr\}.
\end{equation*}
For a number $\al_0\ge0$, we say that $\al(j,k)$ \emph{converges\/} to $\al_0$ as
$j,k\to\infty$ and write $\lim_{j,k\to\infty}\al(j,k)=\al_0$ if for every $\eta>0$ there is
$J=J(\eta)\in\Nb$ such that $|\al(j,k)-\al_0|\le\eta$ for all $j\ge J$ and $k\ge J$ with
$j\ne k$.
The main result of this section is the following \emph{irregular \pw\ selection principle\/}
in terms of the approximate variation (see also Example~\ref{ex:nrg}).
\begin{theorem} \label{t:SPir}
Suppose $T\subset\Rb$, $(M,\|\cdot\|)$ is a normed linear space, and
$\{f_j\}\subset M^T$ is a \pw\ \rc\ sequence of functions such that
\begin{equation} \label{e:spir}
\limsup_{j,k\to\infty}V_\vep(f_j-f_k,T)<\infty\quad\mbox{for all}\quad\vep>0.
\end{equation}
Then $\{f_j\}$ contains a subsequence which converges pointwise on $T$.
\end{theorem}
In order to prove this theorem, we need a lemma.
\begin{lemma} \label{l:RT}
Suppose $\vep>0$, $C>0$, and a sequence $\{F_j\}_{j=1}^\infty\subset M^T$ of
{\sl distinct} functions are such that
\begin{equation} \label{e:Fjk}
V_\vep(F_j-F_k,T)\le C\quad\mbox{for all}\quad j,k\in\Nb.
\end{equation}
Then, there exist a subsequence $\{F_j^\vep\}_{j=1}^\infty$ of $\{F_j\}_{j=1}^\infty$
and a nondecreasing function $\vfi^\vep:T\to[0,C]$ such that
\begin{equation} \label{e:fek}
\lim_{j,k\to\infty}V_\vep(F_j^\vep-F_k^\vep,T\cap(-\infty,t])=\vfi^\vep(t)\quad
\mbox{for all}\quad t\in T.
\end{equation}
\end{lemma}
Since the proof of Lemma~\ref{l:RT} is rather lengthy and involves certain ideas from
formal logic (Ramsey's Theorem \ref{t:Ramsey}), for the time being we postpone
it until the end of the proof of Theorem~\ref{t:SPir}.
\begin{proof}[Proof of Theorem~\protect\ref{t:SPir}]
First, we may assume that $T$ is \emph{uncountable}. In fact, if $T$ is (at most)
countable, then, by the relative compactness of sets $\{f_j(t):j\in\Nb\}\subset M$
for all $t\in T$, we may apply the standard diagonal procedure to extract a subsequence
of $\{f_j\}$ which converges \pw\ on~$T$. Second, we may assume that all functions
in the sequence $\{f_j\}$ are \emph{distinct}. To see this, we argue as follows. If there
are only finitely many distinct functions in $\{f_j\}$, then we may choose a constant
subsequence of $\{f_j\}$ (which is, clearly, \pw\ convergent on $T$). Otherwise,
we may pick a subsequence of $\{f_j\}$ (if necessary) consisting of distinct functions.
Given $\vep>0$, we set (cf.\ \eq{e:spir})
\begin{equation*}
C(\vep)=1+\limsup_{j,k\to\infty}V_\vep(f_j-f_k,T)<\infty.
\end{equation*}
So, there is $j_0(\vep)\in\Nb$ such that
\begin{equation} \label{e:ejkC}
\mbox{$V_\vep(f_j-f_k,T)\le C(\vep)$ \,\,\,for all \,\,\,$j\ge j_0(\vep)$
\,and \,$k\ge j_0(\vep)$.}
\end{equation}
Let $\{\vep_n\}_{n=1}^\infty\!\subset\!(0,\infty)$ be a decreasing sequence such that
$\vep_n\!\to\!0$ as $n\!\to\!\infty$.
We divide the rest of the proof into two main steps for clarity.
\emph{Step~1.} There is a subsequence of $\{f_j\}$, again denoted by $\{f_j\}$, and
for each $n\in\Nb$ there is a nondecreasing function $\vfi_n:T\to[0,C(\vep_n)]$ such that
\begin{equation} \label{e:fint}
\lim_{j,k\to\infty}V_{\vep_n}(f_j-f_k,T\cap(-\infty,t])=\vfi_n(t)\quad\mbox{for all}
\quad t\in T.
\end{equation}
In order to prove \eq{e:fint}, we apply Lemma~\ref{l:RT}, induction and the diagonal
procedure. Setting $\vep=\vep_1$, $C=C(\vep_1)$ and $F_j=f_{J_0(j)}$ with
$J_0(j)=j_0(\vep_1)+j-1$, $j\in\Nb$, we find that condition \eq{e:ejkC} implies
\eq{e:Fjk}, and so, by Lemma~\ref{l:RT}, there are a subsequence
$\{J_1(j)\}_{j=1}^\infty$ of $\{J_0(j)\}_{j=1}^\infty=\{j\}_{j=j_0(\vep_1)}^\infty$
and a nondecreasing function $\vfi_1=\vfi^{\vep_1}:T\to[0,C(\vep_1)]$ such that
\begin{equation*}
\lim_{j,k\to\infty}V_{\vep_1}(f_{J_1(j)}-f_{J_1(k)},T\cap(-\infty,t])=\vfi_1(t)\quad
\mbox{for all}\quad t\in T.
\end{equation*}
Let $j_1\in\Nb$ be the least number such that $J_1(j_1)\ge j_0(\vep_2)$. Inductively,
suppose $n\in\Nb$, $n\ge2$, and a subsequence $\{J_{n-1}(j)\}_{j=1}^\infty$ of
$\{j\}_{j=j_0(\vep_1)}^\infty$ and the number $j_{n-1}\in\Nb$ with
$J_{n-1}(j_{n-1})\ge j_0(\vep_n)$ are already chosen. To apply Lemma~\ref{l:RT}
once again, we set $\vep=\vep_n$, $C=C(\vep_n)$ and $F_j=f_{J(j)}$ with
$J(j)=J_{n-1}(j_{n-1}+j-1)$, $j\in\Nb$. Since for every $j\in\Nb$ we have
$J(j)\ge J_{n-1}(j_{n-1})\ge j_0(\vep_n)$, we get, by \eq{e:ejkC},
\begin{equation*}
V_{\vep_n}(F_j-F_k,T)\le C(\vep_n)\quad\mbox{for all}\quad j,k\in\Nb.
\end{equation*}
By Lemma~\ref{l:RT}, there are a subsequence $\{J_n(j)\}_{j=1}^\infty$ of the
sequence $\{J(j)\}_{j=1}^\infty$ (the latter being, more explicitly,
$\{J_{n-1}(j)\}_{j=j_{n-1}}^\infty$), and a nondecreasing function
$\vfi_n=\vfi^{\vep_n}:T\to[0,C(\vep_n)]$ such that
\begin{equation} \label{e:JJ}
\lim_{j,k\to\infty}V_{\vep_n}(f_{J_n(j)}-f_{J_n(k)},T\cap(-\infty,t])=\vfi_n(t)\quad
\mbox{for all}\quad t\in T.
\end{equation}
We assert that the diagonal subsequence $\{f_{J_j(j)}\}_{j=1}^\infty$ of $\{f_j\}$,
again denoted by $\{f_j\}$, satisfies \eq{e:fint} for all $n\in\Nb$. In order to see this,
let us fix $n\in\Nb$ and $t\in T$. By \eq{e:JJ}, given $\eta>0$, there is a number
$J^0=J^0(\eta,n,t)\in\Nb$ such that if $j',k'\ge J^0$, $j'\ne k'$, we have
\begin{equation} \label{e:pp}
\bigl|V_{\vep_n}(f_{J_n(j')}-f_{J_n(k')},T\cap(-\infty,t])-\vfi_n(t)\bigr|\le\eta.
\end{equation}
Since $\{J_j(j)\}_{j=n}^\infty$ is a subsequence of $\{J_n(j)\}_{j=1}^\infty$, there is a
strictly increasing natural sequence $q:\Nb\to\Nb$ such that $J_j(j)=J_n(q(j))$ for all
$j\ge n$. Define $J^*=\max\{n,J^0\}$. Now, for arbitrary $j,k\ge J^*$, $j\ne k$, we
set $j'=q(j)$ and $k'=q(k)$. Since $j,k\ge J^*\ge n$, we find $J_j(j)=J_n(j')$ and
$J_k(k)=J_n(k')$, where $j'\ne k'$, $j'=q(j)\ge j\ge J^*\ge J^0$ and, similarly,
$k'\ge J^0$. It follows from \eq{e:pp} that
\begin{equation*}
\bigl|V_{\vep_n}(f_{J_j(j)}-f_{J_k(k)},T\cap(-\infty,t])-\vfi_n(t)\bigr|\le\eta,
\end{equation*}
which proves our assertion.
\emph{Step~2.} Let $Q$ denote an at most countable dense subset of $T$. Clearly,
$Q$ contains every point of $T$ which is not a limit point for $T$. Since, for any $n\in\Nb$,
the function $\vfi_n$ from \eq{e:fint} is nondecreasing on $T$, the set $Q_n\subset T$
of its points of discontinuity is at most countable. We set
$S=Q\cup\bigcup_{n=1}^\infty Q_n$. The set $S$ is an at most countable dense
subset of $T$ and has the property:
\begin{equation} \label{e:TmS}
\mbox{for each $n\in\Nb$, the function $\vfi_n$ is continuous on $T\setminus S$.}
\end{equation}
By the relative compactness of the set $\{f_j(t):j\in\Nb\}$ for all $t\in T$ and at most
countability of $S\subset T$, we may assume (applying the diagonal procedure and
passing to a subsequence of $\{f_j\}$ if necessary) that, for every $s\in S$, $f_j(s)$
converges in $M$ as $j\to\infty$ to a point of $M$ denoted by $f(s)$ (hence $f:S\to M$).
It remains to show that the sequence $\{f_j(t)\}_{j=1}^\infty$ is Cauchy in $M$ for
every $t\in T\setminus S$. In fact, this and the relative compactness of
$\{f_j(t):j\in\Nb\}$ imply the convergence of $f_j(t)$ as $j\to\infty$ to a point of $M$
denoted by $f(t)$. In other words, $f_j$ converges \pw\ on $T$ to the function
\mbox{$f:T=S\cup(T\setminus S)\to M$}.
Let $t\in T\setminus S$ and $\eta>0$ be arbitrary. Since $\vep_n\to0$ as $n\to\infty$,
choose and fix $n=n(\eta)\in\Nb$ such that $\vep_n\le\eta$. The definition of $S$ implies
that $t$ is a limit point for $T$ and a point of continuity of $\vfi_n$, and so, by the
density of $S$ in $T$, there is $s=s(n,t)\in S$ such that $|\vfi_n(t)-\vfi_n(s)|\le\eta$.
Property \eq{e:fint} yields the existence of $j^1=j^1(\eta,n,t,s)\in\Nb$ such that if
$j,k\ge j^1$, $j\ne k$,
\begin{equation*}
\bigl|V_{\vep_n}(f_j\!-\!f_k,T\cap(-\infty,\tau])\!-\!\vfi_n(\tau)\bigr|\le\eta
\quad\mbox{for}\quad\mbox{$\tau=t$ \,and \,$\tau=s$.}
\end{equation*}
Suppose $s<t$ (the case when $s>t$ is treated similarly). Applying Lemma~\ref{l:mor}
(with $T$ replaced by $T\cap(-\infty,t]$, $T_1$---by $T\cap(-\infty,s]$, and
$T_2$---by $T\cap[s,t]$), we get
\begin{align*}
V_{\vep_n}(f_j\!-\!f_k,T\cap[s,t])&\le V_{\vep_n}(f_j\!-\!f_k,T\cap(-\infty,t])
-V_{\vep_n}(f_j\!-\!f_k,T\cap(-\infty,s])\\[3pt]
&\le|V_{\vep_n}(f_j\!-\!f_k,T\cap(-\infty,t])\!-\!\vfi_n(t)|+|\vfi_n(t)\!-\!\vfi_n(s)|\\[3pt]
&\qquad+|\vfi_n(s)\!-\!V_{\vep_n}(f_j\!-\!f_k,T\cap(-\infty,s])|\\[3pt]
&\le\eta+\eta+\eta=3\eta\quad\mbox{for all}\quad j,k\ge j^1\,\,\mbox{with}\,\, j\ne k.
\end{align*}
Now, given $j,k\ge j^1$, $j\ne k$, by the definition of $V_{\vep_n}(f_j-f_k,T\cap[s,t])$,
there is $g_{j,k}\in\BV(T\cap[s,t];M)$, also depending on $\eta$, $n$, $t$ and $s$,
such that
\begin{equation*}
\|(f_j-f_k)-g_{j,k}\|_{\infty,\,T\cap[s,t]}\le\vep_n
\end{equation*}
and
\begin{equation*}
V(g_{j,k},T\cap[s,t])\le V_{\vep_n}(f_j-f_k,T\cap[s,t])+\eta.
\end{equation*}
\par\medbreak\noindent
These inequalities and \eq{e:10} imply, for all $j,k\ge j^1$ with $j\ne k$,
\begin{align*}
\|(f_j\!-\!f_k)(s)\!-\!(f_j\!-\!f_k)(t)\|&\le\|g_{j,k}(s)\!-\!g_{j,k}(t)\|
+2\|(f_j\!-\!f_k)\!-\!g_{j,k}\|_{\infty,\,T\cap[s,t]}\\[3pt]
&\le V(g_{j,k},T\cap[s,t])+2\vep_n\le(3\eta+\eta)+2\eta=6\eta.
\end{align*}
Since the sequence $\{f_j(s)\}_{j=1}^\infty$ is convergent in $M$, it is Cauchy, and so,
there is $j^2=j^2(\eta,s)\in\Nb$ such that $\|f_j(s)-f_k(s)\|\le\eta$ for all $j,k\ge j^2$.
It follows that $j^3=\max\{j^1,j^2\}$ depends only on $\eta$ (and $t$), and we have
\begin{align*}
\|f_j(t)-f_k(t)\|&\le\|(f_j-f_k)(t)-(f_j-f_k)(s)\|+\|(f_j-f_k)(s)\|\\[3pt]
&\le6\eta+\eta=7\eta\quad\mbox{for all}\quad j,k\ge j^3.
\end{align*}
Thus, $\{f_j(t)\}_{j=1}^\infty$ is a Cauchy sequence in $M$, which completes the proof.
\end{proof}
Various remarks and examples concerning Theorem~\ref{t:SPir} follow after the proof
of Lemma~\ref{l:RT}.
Now we turn to the proof of Lemma~\ref{l:RT}. We need Ramsey's Theorem from
formal logic \cite[Theorem~A]{Ramsey}, which we are going to recall now.
Let $\Gamma$ be a set, $n\in\Nb$, and $\gamma_1,\gamma_2,\dots,\gamma_n$ be
(pairwise) distinct elements of $\Gamma$. The (non-ordered) collection
$\{\gamma_1,\gamma_2,\dots,\gamma_n\}$ is said to be an \emph{$n$-combination\/}
of elements of $\Gamma$ (note that an $n$-combination may be generated by $n!$
different injective functions $\gamma:\{1,2,\dots,n\}\to\Gamma$ with
$\gamma_i=\gamma(i)$ for all $i=1,2,\dots,n$). We denote by $\Gamma[n]$
the family of all $n$-combinations of elements of~$\Gamma$.
\begin{theorem}[Ramsey \cite{Ramsey}] \label{t:Ramsey}
Suppose $\Gamma$ is an infinite set, $n,m\in\Nb$, and
$\Gamma[n]=\bigcup_{i=1}^mG_i$ is a {\sl disjoint} union of $m$ nonempty sets
$G_i\subset\Gamma[n]$. Then, under the Axiom of Choice, there are an infinite
set $\Delta\subset\Gamma$ and $i_0\in\{1,2,\dots,m\}$ such that
$\Delta[n]\subset G_{i_0}$.
\end{theorem}
This theorem will be applied several times in the proof of Lemma~\ref{l:RT} with $\Gamma$
a subset of $\{F_j:j\in\Nb\}$ and $n=m=2$.
The application of Ramsey's Theorem in the context of pointwise selection principles was
initiated by Schrader \cite{Schrader} and later on was extended by several authors
(Di Piazza and Maniscalco \cite{Piazza}, Maniscalco \cite{Manisc}, Chistyakov and
Maniscalco \cite{JMAA08}, Chistyakov, Maniscalco and Tretyachenko \cite{waterman80},
Chistyakov and Tretyachenko \cite{JMAA13}) for real- and metric space-valued functions
of one and several real variables.
\begin{proof}[Proof of Lemma~\protect\ref{l:RT}]
We divide the proof into three steps.
\emph{Step~1.} Let us show that for every $t\in T$ there is a subsequence
$\{F_j^{(t)}\}_{j=1}^\infty$ of $\{F_j\}_{j=1}^\infty$, depending on $t$ and $\vep$,
such that the double limit
\begin{equation} \label{e:loC}
\lim_{j,k\to\infty}V_\vep(F_j^{(t)}-F_k^{(t)},T\cap(-\infty,t])\quad\mbox{exists in}
\quad [0,C]
\end{equation}
(clearly, the sequence $\{F_j^{(t)}\}_{j=1}^\infty$ satisfies the uniform estimate
\eq{e:Fjk}).
Given $t\in T$, for the sake of brevity, we set $T_t^-=T\cap(-\infty,t]$.
By Lemma~\ref{l:ele}(b) and \eq{e:Fjk}, we have
\begin{equation*}
0\le V_\vep(F_j-F_k,T_t^-)\le V_\vep(F_j-F_k,T)\le C\quad\mbox{for all}\quad j,k\in\Nb.
\end{equation*}
In order to apply Theorem~\ref{t:Ramsey}, we set $\Gamma=\{F_j:j\in\Nb\}$,
$c_0=C/2$, and denote by $G_1$ the set of those pairs $\{F_j,F_k\}$ with $j,k\in\Nb$,
$j\ne k$, for which $V_\vep(F_j-F_k,T_t^-)\in[0,c_0)$, and by $G_2$---the set of all
pairs $\{F_j,F_k\}$ with $j,k\in\Nb$, $j\ne k$, such that
$V_\vep(F_j-F_k,T_t^-)\in[c_0,C]$. Clearly, $\Gamma[2]=G_1\cup G_2$ and
$G_1\cap G_2=\es$. If $G_1$ and $G_2$ are both nonempty, then, by
Theorem~\ref{t:Ramsey}, there is a subsequence $\{F_j^1\}_{j=1}^\infty$ of
$\{F_j\}_{j=1}^\infty$ (cf.\ Remark~\ref{r:Fj1}) \label{p:Fj1} such that either
\par(i${}_1$)\, $\{F_j^1,F_k^1\}\in G_1$ for all $j,k\in\Nb$, $j\ne k$, or
\par(ii${}_1$) $\{F_j^1,F_k^1\}\in G_2$ for all $j,k\in\Nb$, $j\ne k$.
In the case when $G_1\ne\es$ and (i${}_1$) holds, or $G_2=\es$, we set
$[a_1,b_1]=[0,c_0]$, while if $G_2\ne \es$ and (ii${}_1$) holds, or $G_1=\es$,
we set $[a_1,b_1]=[c_0,C]$.
Inductively, assume that $p\in\Nb$, $p\ge2$, and a subsequence
$\{F_j^{p-1}\}_{j=1}^\infty$ of $\{F_j\}_{j=1}^\infty$ and an interval
$[a_{p-1},b_{p-1}]\subset[0,C]$ such that
\begin{equation*}
V_\vep(F_j^{p-1}-F_k^{p-1},T_t^-)\in[a_{p-1},b_{p-1}]\quad\mbox{for all}
\quad j,k\in\Nb,\,\,j\ne k,
\end{equation*}
are already chosen. To apply Theorem~\ref{t:Ramsey}, we set
$\Gamma=\{F_j^{p-1}:j\in\Nb\}$, define $c_{p-1}=\frac12(a_{p-1}+b_{p-1})$, and
denote by $G_1$ the set of all pairs $\{F_j^{p-1},F_k^{p-1}\}$ with $j,k\in\Nb$, $j\ne k$,
such that $V_\vep(F_j^{p-1}-F_k^{p-1},T_t^-)\in[a_{p-1},c_{p-1})$, and by
$G_2$---the set of all pairs $\{F_j^{p-1},F_k^{p-1}\}$ with $j,k\in\Nb$, $j\ne k$,
for which $V_\vep(F_j^{p-1}-F_k^{p-1},T_t^-)\in[c_{p-1},b_{p-1}]$. We have the union
$\Gamma[2]=G_1\cup G_2$ of disjoint sets. If $G_1$ and $G_2$ are both nonempty,
then, by Ramsey's Theorem, there is a subsequence $\{F_j^p\}_{j=1}^\infty$ of
$\{F_j^{p-1}\}_{j=1}^\infty$ such that either
\par(i${}_p$)\, $\{F_j^p,F_k^p\}\in G_1$ for all $j,k\in\Nb$, $j\ne k$, or
\par(ii${}_p$) $\{F_j^p,F_k^p\}\in G_2$ for all $j,k\in\Nb$, $j\ne k$.
\par\noindent
If $G_1\ne\es$ and (i${}_p$) holds, or $G_2=\es$, we set
$[a_p,b_p]=[a_{p-1},c_{p-1}]$, while if $G_2\ne\es$ and (ii${}_p$) holds, or
$G_1=\es$, we set $[a_p,b_p]=[c_{p-1},b_{p-1}]$.
In this way for each $p\in\Nb$ we have nested intervals
$[a_p,b_p]\subset[a_{p-1},b_{p-1}]$ in $[a_0,b_0]=[0,C]$ with
$b_p-a_p=C/2^p$ and a subsequence $\{F_j^p\}_{j=1}^\infty$ of
$\{F_j^{p-1}\}_{j=1}^\infty$ (where $F_j^0=F_j$, $j\in\Nb$) such that
\begin{equation*}
V_\vep(F_j^p-F_k^p,T_t^-)\in[a_p,b_p]\quad\mbox{for all}\quad j,k\in\Nb,\,\,j\ne k.
\end{equation*}
Let $\ell\in[0,C]$ be the common limit of $a_p$ and $b_p$ as $p\to\infty$ (note that
$\ell$ depends on $t$ and $\vep$). Denoting the diagonal sequence
$\{F_j^j\}_{j=1}^\infty$ by $\{F_j^{(t)}\}_{j=1}^\infty$ we infer that the limit
in \eq{e:loC} is equal to~$\ell$. In fact, given $\eta>0$, there is $p(\eta)\in\Nb$
such that $a_{p(\eta)},b_{p(\eta)}\in[\ell-\eta,\ell+\eta]$ and, since
$\{F_j^{(t)}\}_{j=p(\eta)}^\infty$ is a subsequence of
$\{F_j^{p(\eta)}\}_{j=1}^\infty$, we find, for all $j,k\ge p(\eta)$ with $j\ne k$, that
\begin{equation*}
V_\vep(F_j^{(t)}-F_k^{(t)},T_t^-)\in[a_{p(\eta)},b_{p(\eta)}]\subset[\ell-\eta,\ell+\eta].
\end{equation*}
\emph{Step~2.} Given a set $A\subset\Rb$, we denote by $\ov A$ its closure in $\Rb$.
Let $Q$ be an at most countable dense subset of $T$ (hence $Q\subset T\subset\ov Q$).
The set $T_L=\{t\in T:\mbox{$T\cap(t-\delta,t)=\es$ for some $\delta>0$}\}$ of
points from $T$, which are isolated from the left for $T$, is at most countable, and
the same is true for the set $T_R=\{t\in T:\mbox{$T\cap(t,t+\delta)=\es$ for some
$\delta>0$}\}$ of points from $T$ isolated from the right for $T$. Clearly,
$T_L\cap T_R\subset Q$, and the set $Z=Q\cup T_L\cup T_R$ is an at most
countable dense subset of~$T$.
We assert that there are a subsequence $\{F_j^*\}_{j=1}^\infty$ of
$\{F_j\}_{j=1}^\infty$ and a nondecreasing function $\vfi:Z\to[0,C]$ (both depending
on $\vep$) such that
\begin{equation} \label{e:fiz}
\lim_{j,k\to\infty}V_\vep(F_j^*-F_k^*,T\cap(-\infty,s])=\vfi(s)\quad\mbox{for all}
\quad s\in Z.
\end{equation}
With no loss of generality, we may assume that $Z=\{s_p\}_{p=1}^\infty$. By Step~1,
there are a subsequence $\{F_j^{(s_1)}\}_{j=1}^\infty$ of $\{F_j\}_{j=1}^\infty$,
denoted by $\{F_j^{(1)}\}_{j=1}^\infty$, and a number from $[0,C]$, denoted by
$\vfi(s_1)$, such that
\begin{equation*}
\lim_{j,k\to\infty}V_\vep(F_j^{(1)}-F_k^{(1)},T\cap(-\infty,s_1])=\vfi(s_1).
\end{equation*}
Inductively, if $p\in\Nb$, $p\ge2$, and a subsequence $\{F_j^{(p-1)}\}_{j=1}^\infty$ of
$\{F_j\}_{j=1}^\infty$ is already chosen, we apply Step~1 once again to pick a
subsequence $\{F_j^{(p)}\}_{j=1}^\infty$ of $\{F_j^{(p-1)}\}_{j=1}^\infty$ and a
number $\vfi(s_p)\in[0,C]$ such that
\begin{equation*}
\lim_{j,k\to\infty}V_\vep(F_j^{(p)}-F_k^{(p)},T\cap(-\infty,s_p])=\vfi(s_p).
\end{equation*}
Denoting by $\{F_j^*\}_{j=1}^\infty$ the diagonal subsequence
$\{F_j^{(j)}\}_{j=1}^\infty$ of $\{F_j\}_{j=1}^\infty$, we establish \eq{e:fiz}. It remains to note that, by
Lemma~\ref{l:ele}(b), the function $\vfi:Z\!\to\![0,C]$, defined by the left-hand side of
\eq{e:fiz}, is nondecreasing~on~$Z$.
\emph{Step~3.} In this step, we finish the proof of \eq{e:fek}. Applying Saks' idea
\cite[Chapter~7, Section~4, Lemma~(4.1)]{Saks}, we extend the function $\vfi$,
defined by \eq{e:fiz}, from the set $Z$ to the whole $\Rb$ as follows: given $t\in\Rb$,
\begin{equation*}
\wt\vfi(t)=\sup\{\vfi(s):s\in Z\cap(-\infty,t]\}\quad\mbox{if}\quad Z\cap(-\infty,t]\ne\es
\end{equation*}
and
\begin{equation*}
\wt\vfi(t)=\inf\{\vfi(s):s\in Z\}\quad\mbox{otherwise.}
\end{equation*}
Clearly, $\wt\vfi:\Rb\to[0,\infty)$ is nondecreasing and $\wt\vfi(\Rb)\subset\ov{\vfi(Z)}
\subset[0,C]$. Therefore, the set $D\subset\Rb$ of points of discontinuity of $\wt\vfi$
is at most countable.
Let us show that if $\{F_j^*\}_{j=1}^\infty$ is the sequence from \eq{e:fiz}, then
\begin{equation} \label{e:tiw}
\lim_{j,k\to\infty}V_\vep(F_j^*-F_k^*,T\cap(-\infty,t])=\wt\vfi(t)\quad
\mbox{for all}\quad t\in T\setminus D.
\end{equation}
By virtue of \eq{e:fiz}, we may assume that $t\in T\setminus(D\cup Z)$. Let $\eta>0$ be
fixed. Since $t$ is a point of continuity of $\wt\vfi$, there is $\delta=
\delta(\eta)>0$ such that
\begin{equation} \label{e:stw}
\mbox{$\wt\vfi(s)\in[\wt\vfi(t)-\eta,\wt\vfi(t)+\eta]$ \,for all \,$s\in\Rb$ \,such that
\,$|s-t|\le\delta$.}
\end{equation}
Since $T\subset\ov Z$ and $t\notin T_L$, we find $\ov Z\cap(t-\delta,t)\supset
T\cap(t-\delta,t)\ne\es$, and so, there is $s'\in Z$ with $t-\delta<s'<t$.
By \eq{e:fiz}, there is $j^1=j^1(\eta)\in\Nb$ such that, for all $j,k\ge j^1$, $j\ne k$,
\begin{equation} \label{e:juke}
\mbox{$V_\vep(F_j^*-F_k^*,T\cap(-\infty,s'])\in[\vfi(s')-\eta,\vfi(s')+\eta]$.}
\end{equation}
Similarly, $t\notin T_R$ implies the existence of $s''\in Z$ with $t<s''<t+\delta$, and so,
by \eq{e:fiz}, for some $j^2=j^2(\eta)\in\Nb$, we have, for all $j,k\ge j^2$, $j\ne k$,
\begin{equation} \label{e:keju}
\mbox{$V_\vep(F_j^*-F_k^*,T\cap(-\infty,s''])\in[\vfi(s'')-\eta,\vfi(s'')+\eta]$.}
\end{equation}
Since $s'<t<s''$, $T\cap(-\infty,s']\subset T\cap(-\infty,t]\subset T\cap(-\infty,s'']$,
and so, by Lemma~\ref{l:ele}(b), we get, for all $j,k\in\Nb$,
\begin{equation*}
V_\vep(F_j^*-F_k^*,T\cap(-\infty,s'])\!\le\! V_\vep(F_j^*-F_k^*,T\cap(-\infty,t])
\!\le\! V_\vep(F_j^*-F_k^*,T\cap(-\infty,s'']).
\end{equation*}
Setting $j^3=\max\{j^1,j^2\}$ and noting that $\wt\vfi(s')=\vfi(s')$ and
$\wt\vfi(s'')=\vfi(s'')$, we find, from \eq{e:juke}, \eq{e:keju} and \eq{e:stw}, that
\begin{align*}
V_\vep(F_j^*\!-\!F_k^*,T\!\cap\!(-\infty,t])&\!\in\!
\bigl[V_\vep(F_j^*\!-\!F_k^*,T\!\cap\!(-\infty,s']),
V_\vep(F_j^*\!-\!F_k^*,T\!\cap\!(-\infty,s''])\bigr]\\[3pt]
&\!\subset\![\vfi(s')-\eta,\vfi(s'')+\eta]=[\wt\vfi(s')-\eta,\wt\vfi(s'')+\eta]\\[3pt]
&\!\subset\![\wt\vfi(t)-2\eta,\wt\vfi(t)+2\eta]\quad\mbox{for all}
\quad j,k\ge j^3,\,\,j\ne k,
\end{align*}
which proves \eq{e:tiw}.
Finally, we note that $T=(T\setminus D)\cup(T\cap D)$ where $T\cap D$ is at most
countable. Furthermore, being a subsequence of the original sequence
$\{F_j\}_{j=1}^\infty$, the sequence $\{F_j^*\}_{j=1}^\infty$ from \eq{e:fiz} and
\eq{e:tiw} satisfies the uniform estimate \eq{e:Fjk}. So, arguing as in Step~2 with $Z$
replaced by $T\cap D$, we obtain a subsequence of $\{F_j^*\}_{j=1}^\infty$, denoted
by $\{F_j^\vep\}_{j=1}^\infty$, and a nondecreasing function $\psi:T\cap D\to[0,C]$
such that
\begin{equation} \label{e:psizh}
\lim_{j,k\to\infty}V_\vep(F_j^\vep-F_k^\vep,T\cap(-\infty,t])=\psi(t)\quad
\mbox{for all}\quad t\in T\cap D.
\end{equation}
We define the desired function $\vfi^\vep:T\to[0,C]$ by $\vfi^\vep(t)=\wt\vfi(t)$ if
$t\in T\setminus D$ and $\vfi^\vep(t)=\psi(t)$ if $t\in T\cap D$. Now, it follows from
\eq{e:tiw} and \eq{e:psizh} that equality \eq{e:fek} holds, where, in view of
Lemma~\ref{l:ele}(b), the function $\vfi^\vep$ is nondecreasing on~$T$.
This completes the proof of Lemma~\ref{l:RT}.
\end{proof}
\begin{remark} \label{r:Fj1} \rm
Here we present more details on the existence of the subsequence
$\{F_j^1\}_{j=1}^\infty$ of $\{F_j\}_{j=1}^\infty$ after the first application of
Ramsey's Theorem (cf.~p.~\pageref{p:Fj1}). By Theorem~\ref{t:Ramsey},
there is an infinite set $\Delta\subset\Gamma=\{F_j:j\in\Nb\}$ such that either
$\Delta[2]\subset G_1$ or $\Delta[2]\subset G_2$. We infer that
\begin{equation} \label{e:Delt}
\mbox{$\Delta\!=\!\{F_{q(n)}\!:n\!\in\!\Nb\}$ for some strictly increasing
sequence $q:\Nb\!\to\!\Nb$,}
\end{equation}
and, setting $F_j^1\!=\!F_{q(j)}$ for $j\!\in\!\Nb$, we have
$\Delta[2]\!=\!\bigl\{\{F_j^1,F_k^1\}:j,k\!\in\!\Nb,\,j\!\ne\! k\bigr\}$.
Since the set $\Nb$ of natural numbers is well-ordered (i.e., every nonempty subset
of $\Nb$ has the minimal element), the sequence $q:\Nb\to\Nb$ can be defined as follows:
$q(1)=\min\{j\in\Nb:F_j\in\Delta\}$, and, inductively, if $n\in\Nb$, $n\ge2$, and
natural numbers $q(1)\!<\!q(2)\!<\!\dots\!<\!q(n-1)$ are already defined, we~set
\begin{equation} \label{e:qn}
q(n)=\min\bigl\{j\in\Nb\setminus\{q(1),q(2),\dots,q(n\!-\!1)\}:F_j\in\Delta\bigr\}.
\end{equation}
The sequence $q$ is strictly increasing: if $n\in\Nb$ and
$j\in\Nb\setminus\{q(1),q(2),\dots,q(n)\}$
is such that $F_j\in\Delta$, then $j\ne q(n)$, and since
$j\in\Nb\setminus\{q(1),q(2),\dots,q(n\!-\!1)\}$,
we have, by \eq{e:qn}, $j\ge q(n)$, i.e., $j>q(n)$; by the arbitrariness of $j$ as
above and \eq{e:qn} (for $n+1$ in place of $n$), we get $q(n+1)>q(n)$.
Clearly, $q(n)\ge n$.
Let us verify the equality in \eq{e:Delt}. The inclusion ($\supset$) is clear from
\eq{e:qn}. To see that inclusion ($\subset$) holds, let $F\in\Delta$, so that
$\Delta\subset\Gamma$ implies $F=F_{j_0}$ for some $j_0\in\Nb$. We have
$q(j_0)\ge j_0$, and since $F_{j_0}\in\Delta$, $j_0\ge q(1)$. Hence
$q(1)\le j_0\le q(j_0)$. We claim that there is $1\le n_0\le j_0$ such that $q(n_0)=j_0$
(this implies $F=F_{j_0}=F_{q(n_0)}\in\{F_{q(n)}:n\in\Nb\}$ and establishes~($\subset$)).
By contradiction, if $q(n)\ne j_0$ for all $n=1,2,\dots,j_0$, then $j_0$ belongs to the set
$\{j\in\Nb\setminus\{q(1),q(2),\dots,q(j_0)\}:F_j\in\Delta\}$, and so, by \eq{e:qn},
$q(j_0+1)\le j_0$, which contradicts $q(j_0+1)>q(j_0)\ge j_0$.
\end{remark}
\begin{remark}
If $(M,\|\cdot\|)$ is a \emph{finite-dimensional\/} normed linear space, the condition of
relative compactness of sets $\{f_j(t):j\in\Nb\}$ at all points $t\in T$ in Theorem~
\ref{t:SPir} can be lightened to the condition $\sup_{j\in\Nb}\|f_j(t_0)\|\equiv C_0
<\infty$ for some $t_0\in T$. In fact, by Lemma~\ref{l:71}(b) and \eq{e:ejkC} with
fixed $\vep_0>0$ and $j_0\equiv j_0(\vep_0)$, we get
\begin{equation*}
|(f_j-f_{j_0})(T)|\le V_{\vep_0}(f_j-f_{j_0},T)+2\vep_0\le C(\vep_0)+2\vep_0\,\,\,
\mbox{for all}\,\,\,j\ge j_0.
\end{equation*}
Hence, given $t\in T$, we find
\begin{align*}
\|f_j(t)\|&\le\|(f_j-f_{j_0})(t)-(f_j-f_{j_0})(t_0)\|+\|f_{j_0}(t)\|+\|f_j(t_0)\|
+\|f_{j_0}(t_0)\|\\[3pt]
&\le(C(\vep_0)+2\vep_0)+\|f_{j_0}(t)\|+2C_0\quad\mbox{for all}\quad j\ge j_0,
\end{align*}
and so, the set $\{f_j(t):j\in\Nb\}$ is \rc\ in $M$.
\end{remark}
\begin{remark}
Under the assumptions on $T$ and $M$ from Theorem~\ref{t:SPir}, if a sequence
$\{f_j\}\subset M^T$ converges \emph{uniformly\/} on $T$ to a function $f\in M^T$,
then
\begin{equation} \label{e:fjik}
\lim_{j,k\to\infty}V_\vep(f_j-f_k,T)=0\quad\mbox{for all}\quad\vep>0,
\end{equation}
i.e., condition \eq{e:spir} is \emph{necessary}. In fact, given $\vep>0$, there is
$j_0=j_0(\vep)\in\Nb$ such that $\|f_j-f_k\|_{\infty,T}\le\vep$ for all $j,k\ge j_0(\vep)$.
Since the zero function $0$ on $T$ is constant, we get $V_\vep(f_j-f_k,T)\le V(0,T)=0$
for all $j,k\ge j_0(\vep)$.
\end{remark}
\begin{remark}
In Example \ref{ex:ntns}, we show that condition \eq{e:spir} is \emph{not necessary\/}
for the pointwise convergence of $\{f_j\}$ to $f$. However, it is `almost necessary' in
the following sense (cf.\ Remark~\ref{r:neces}(b)). Let $T\subset\Rb$ be a measurable
set with \emph{finite\/} Lebesgue measure $\mathcal{L}(T)$ and $\{f_j\}\subset M^T$
be a sequence of measurable functions which converges \pw\ or almost everywhere
on $T$ to a function $f\in M^T$. Egorov's Theorem implies that for every $\eta>0$
there is a measurable set $T_\eta\subset T$ such that $\mathcal{L}(T\setminus T_\eta)
\le\eta$ and $f_j\rightrightarrows f$ on $T_\eta$. By \eq{e:fjik}, we get
\begin{equation*}
\lim_{j,k\to\infty}V_\vep(f_j-f_k,T_\eta)=0\quad\mbox{for all}\quad\vep>0.
\end{equation*}
\end{remark}
Applying Theorem~\ref{t:SPir} and the diagonal procedure we get the following
\begin{theorem} \label{t:SPirvar}
Under the assumptions of Theorem\/~{\rm\ref{t:SPir}}, if a sequence of functions
$\{f_j\}\subset M^T$ is such that, for all $\vep>0$,
\begin{equation*}
\mbox{$\displaystyle\limsup_{j,k\to\infty}V_\vep(f_j-f_k,T\setminus E)<\infty$ for an at most
countable $E\subset T$}
\end{equation*}
or
\begin{equation*}
\mbox{$\displaystyle\limsup_{j,k\to\infty}V_\vep(f_j-f_k,T\cap[a,b])<\infty$ \,for all \,$a,b\in T$,
$a\le b$,}
\end{equation*}
then $\{f_j\}$ contains a subsequence which converges pointwise on $T$.
\end{theorem}
\begin{example} \label{ex:ntns}
Condition \eq{e:spir} is \emph{not necessary\/} for the pointwise convergence even if
all functions in the sequence $\{f_j\}$ are regulated. To see this, let $\{f_j\}\subset M^T$
be the sequence from Example~\ref{ex:ucbw}, where $T=I=[0,1]$ and $(M,\|\cdot\|)$
is a normed linear space. First, note that
\begin{equation} \label{e:xixx}
\limsup_{j,k\to\infty}V_\vep(f_j-f_k,T)\ge\limsup_{j\to\infty}V_\vep(f_j-f_{j+1},T)
\quad\mbox{for all}\quad\vep>0.
\end{equation}
Let us fix $j\in\Nb$ and set $t_k=k/(j+1)!$, $k=0,1,\dots,(j+1)!$, so that
$f_{j+1}(t_k)=x$ for all such~$k$. We have $f_j(t_k)=x$ if and only if $j!t_k$ is an
integer, i.e., $k=n(j+1)$ with $n=0,1,\dots,j!$. It follows that $(f_j-f_{j+1})(t)=y-x$
if $t=t_k$ for those $k\in\{0,1,\dots,(j+1)!\}$, for which $k\ne n(j+1)$ for all
$n\in\{0,1,\dots,j!\}$ (and, in particular, for $k=1,2,\dots,j$); in the remaining cases
of $t\in T$ we have $(f_j-f_{j+1})(t)=0$. If $s_k=\frac12(t_{k-1}+t_k)=
(k-\frac12)/(j+1)!$, $k=1,2,\dots,(j+1)!$, we get a partition of the interval
$T=[0,1]$ of the form
\begin{equation*}
0=t_0<s_1<t_1<s_2<t_2<\dots<s_{(j+1)!}<t_{(j+1)!}=1,
\end{equation*}
and $f_j(s_k)=f_{j+1}(s_k)=y$ for all $k=1,2,\dots,j$. Now, let
$0<\vep<\frac12\|x-y\|$, and a function $g\in M^T$ be arbitrary such that
$\|(f_j-f_{j+1})-g\|_{\infty,T}\le\vep$. By \eq{e:10}, we find
\begin{align*}
V(g,T)&\ge\sum_{k=1}^{(j+1)!}\|g(t_k)\!-\!g(s_k)\|\ge\sum_{k=1}^j\bigl(
\|(f_j\!-\!f_{j+1})(t_k)\!-\!(f_j\!-\!f_{j+1})(s_k)\|\!-\!2\vep\bigr)\\
&=(\|y-x\|-2\vep)j,
\end{align*}
and so, by \eq{e:av}, $V_\vep(f_j-f_{j+1},T)\ge(\|y-x\|-2\vep)j$. Hence,
\eq{e:xixx} implies
\begin{equation*}
\limsup_{j,k\to\infty}V_\vep(f_j-f_k,T)=\infty\quad\mbox{for \,all}\quad
0<\vep<\textstyle\frac12\|x-y\|.
\end{equation*}
\end{example}
\begin{example} \label{ex:nrg}
Under the assumptions of Theorem~\ref{t:SPir} we cannot infer that the limit function
$f$ of an extracted subsequence of $\{f_j\}$ is a \emph{regulated\/} function
(this is the reason to term this theorem an \emph{irregular\/} selection principle).
Let $T=[a,b]$, $(M,\|\cdot\|)$ be a normed linear space, $x,y\in M$, $x\ne y$,~and
$\al_j=1+(1/j)$, $j\in\Nb$ (cf.\ Example~\ref{ex:rieq}). The sequence of Dirichlet
functions $f_j=\al_j\mathcal{D}_{x,y}=\mathcal{D}_{\al_jx,\al_jy}$, $j\in\Nb$, converges
\emph{uniformly\/} on $T$ to the Dirichlet function $f=\mathcal{D}_{x,y}$, which is
non-regulated. By virtue of \eq{e:fjik}, Theorem~\ref{t:SPir} can be applied to the
sequence $\{f_j\}$. On the other hand, Example~\ref{ex:rieq} shows that $\{f_j\}$ does
not satisfy condition \eq{e:sp}, and so, Theorem~\ref{t:SP} is inapplicable.
Sometimes it is more appropriate to apply Theorem~\ref{t:SPir} in the form of
Theorem~\ref{t:SPirvar}. Let $\{\beta_j\}_{j=1}^\infty\subset\Rb$ be a bounded
sequence (not necessarily convergent). Formally, Theorem~\ref{t:SPir} cannot be
applied to the sequence $f_j=\beta_j\mathcal{D}_{x,y}$, $j\in\Nb$, on $T=[a,b]$ (e.g., with
$\beta_j=(-1)^j$ or $\beta_j=(-1)^j+(1/j)$). However, note that for every $j\in\Nb$
the restriction of $f_j$ to the set $T\setminus\Qb$ is the constant function
$c(t)\equiv\beta_jy$ on $T\setminus\Qb$, whence $V_\vep(f_j-f_k,T\setminus\Qb)=0$
for all $\vep>0$. Hence Theorem~\ref{t:SPirvar} is applicable to $\{f_j\}$.
$\square$
\end{example}
More examples, which can be adapted to the situation under consideration, can be found
in \cite[Section~4]{JMAA08}.
The following theorem is a counterpart of Theorem~\ref{t:SPae}.
\begin{theorem}
Let $T\subset\Rb$, $(M,\|\cdot\|)$ be a normed linear space and $\{f_j\}\subset M^T$
be a \pw\ \rc\ (or a.e.\ \rc) on $T$ sequence of functions satisfying the condition\/{\rm:}
for every $p\in\Nb$ there is a measurable set $E_p\subset T$ with Lebesgue measure
$\mathcal{L}(E_p)\le1/p$ such that
\begin{equation*}
\limsup_{j,k\to\infty}V_\vep(f_j-f_k,T\setminus E_p)<\infty\quad\mbox{for \,all}
\quad\vep>0.
\end{equation*}
Then $\{f_j\}$ contains a subsequence which converges almost everywhere on~$T$.
\end{theorem}
Finally, we present an extension of Theorem~\ref{t:SPir} in the spirit of Theorems
\ref{t:SPweak} and \ref{t:SPw2}.
\begin{theorem}
Let $T\subset\Rb$ and $(M,\|\cdot\|)$ be a reflexive Banach space with separable dual
$(M^*,\|\cdot\|)$. Suppose the sequence of functions $\{f_j\}\subset M^T$ is such that
\begin{itemize}
\setlength{\itemsep}{0.0pt plus 0.5pt minus 0.25pt}
\item[{\rm(i)}] $\sup_{j\in\Nb}\|f_j(t_0)\|\le C_0$ for some $t_0\in T$ and $C_0\ge0;$
\item[{\rm(ii)}] $\limsup_{j,k\to\infty}V_\vep(\lan(f_j-f_k)(\cdot),x^*\ran,T)<\infty$
for all\/ $\vep>0$ and $x^*\in M^*$.
\end{itemize}
Then, there is a subsequence of $\{f_j\}$, again denoted by $\{f_j\}$, and a function
$f\in M^T$ such that $f_j(t)\wto f(t)$ in $M$ as $j\to\infty$ for all $t\in T$.
\end{theorem}
\end{document} |
\begin{document}
\begin{center}
{\LARGE{Improving Frenet's Frame Using Bishop's Frame}}\\[15pt]
Daniel Carroll$^1$, Emek K\"ose$^1$ \& Ivan Sterling$^1$
\end{center}
$^1$ Mathematics and Computer Science Department, St Mary's College of Maryland, St Mary's City, MD, 20686, USA \par
Correspondence: Ivan Sterling, Mathematics and Computer Science Department, St Mary's College of Maryland, St Mary's City, MD, 20686, USA. Tel: 1-240-431-8185. E-mail: isterling@smcm.edu
\\%
\textbf{Abstract}
The main drawback of the Frenet frame is that it is undefined at those points where the curvature is zero. Furthermore, in the case of planar curves, the Frenet frame does not agree with the standard framing of curves in the plane. The main drawback of the Bishop frame is that the principal normal vector $N$ is not in it. Our new frame, which we call the Beta frame, combines, on a large set of curves, the best aspects of the Bishop frames and the Frenet frames. It yields a globally defined normal, a globally defined signed curvature, and a globally defined torsion. For planar curves it agrees with the standard framing of curves in the plane.
\textbf{Keywords:} Frenet Frames, Bishop Frames.
\section{Introduction}
Let $\gamma: (a,b) \longrightarrow \mathbb{R}^3$ be a curve in $\mathbb{R}^3$. If $\gamma$ is $C^2$ with $\gamma'(t) \neq 0$, then it carries a Bishop frame $\{T,M_1, M_2\}$. If $\gamma$ is $C^3$ with $\gamma'(t)$ and $\gamma''(t)$ linearly independent, then it carries a Frenet frame $\{T,N,B\}$. The main drawback of the Frenet frame is that it is undefined when $\gamma''(t)=0$. This corresponds precisely to those points where the curvature $\kappa(t)$ is zero. Also the principal normal vector $N(t)$ of the Frenet frame may have a non-removable discontinuity at these points. In the case of planar curves, the Frenet frame does not agree with the standard framing of curves in the plane. Finally the torsion $\tau(t)$ is not defined when $\kappa(t)=0$. The main drawback of the Bishop frame is that the principal normal vector $N$ is not (except in rare cases) in the set $\{T,M_1, M_2\}$.
The history of the Frenet equations for a curve in $\mathbb{R}^3$ is interesting. Discovered in 1831 by Senff and Bartels, they should probably be called the Senff-Bartels equations. In 1847 they were rediscovered in the dissertation of Frenet, which was published in 1852. Independently they were also discovered (and published) by Serret in 1851. See (Reich, 1973) for details on this early history.
Bishop frames were introduced in 1975 in the Monthly article ``There is More Than One Way to Frame a Curve'' (Bishop, 1975). Bishop frames are now ubiquitous in the literature on curve theory and its applications.
Our new frame, which we call the Beta frame of $\gamma$, combines, on a large set of curves, the best aspects of the Bishop frames and the Frenet frames. It yields a globally defined normal $N^\beta$, a globally defined signed curvature $\kappa^\beta$, and a globally defined torsion $\tau^\beta$. If $\gamma$ is planar, it agrees with the standard framing of curves in the plane.
Our approach was motivated by attempts to improve the details of our work on discrete Frenet frames (Carroll, Hankins, K\"ose \& Sterling). The Beta frame introduced in this paper discretizes in a natural way consistent with our discrete frame defined in (Carroll, Hankins, K\"ose \& Sterling). These ideas are particularly useful in applications, such as DNA analysis and computer graphics. For example see Hanson's technical report (Hanson, 2007), which discusses several of the issues that we address here.
Notation: $C^0$ means continuous, $C^\infty$ means infinitely differentiable, and $C^\omega$ means real analytic. For $k \in \mathbb{N}$ we say a function is $C^k$ if its derivatives up to order $k$ are continuous. If $\gamma'(t) \neq 0$ for all $t$, then $\gamma$ can be reparametrized by arclength $s$. If the derivative with respect to $s$ is denoted by $\dot{\gamma}$, then $\Vert \dot{\gamma}(s) \Vert \equiv 1$. Whenever necessary to simplify notation we assume $0 \in (a,b)$. Finally, by an abuse of language, we use the term ``planar curve'' to mean a curve in $\mathbb{R}^2 \subset \mathbb{R}^3$.
The authors would like to thank the referees for helpful comments.
\section{Before Bishop}
\subsection{The Standard Theory for Curves in $\mathbb{R}^2$}
Let $\gamma: (a,b) \longrightarrow \mathbb{R}^2$ be a $C^2$ curve in two-space with $\Vert \dot{\gamma}(s) \Vert \equiv 1$. $T(s):=\dot{\gamma}(s)$ is called the unit tangent vector to $\gamma$ at $s$. Let the normal $N(s)$ be the unique unit vector orthogonal to $T(s)$ such that $\{T(s),N(s)\}$ is positively oriented. (We think of this $N$ as the good normal, as opposed to the bad normal below.) The signed curvature $\kappa^{signed}$ is defined by
\[ \dot{T}(s) =: \kappa^{signed}(s) N(s).\]
The ``unsigned curvature'', the curvature of the osculating circle, is
\[\kappa(s) :=|\kappa^{signed}(s)|.\]
Alternatively, not wisely, one could first define curvature $\kappa^{bad}$ by
\[\kappa^{bad}(s) := \Vert \dot{T}(s) \Vert\]
and then define, when $\kappa^{bad}(s) \neq 0$, the normal $N^{bad}(s)$ by
\[N^{bad}(s) := \frac{\dot{T}(s)}{\Vert \dot{T}(s) \Vert} = \frac{\dot{T}(s)}{\kappa^{bad}(s)}.\]
There seems to be little advantage to this ``bad'' alternative and at least two drawbacks. The first drawback is that $N^{bad}(s)$ is not defined when $\kappa^{bad}(s)=0$, hence $N^{bad}(s)$ has at best a removable discontinuity when $\kappa^{bad}(s)=0$. The second drawback is that if $\gamma(s)$ changes concavity, then $N^{bad}(s)$ has a jump discontinuity, as for example in Figure \ref{frenetbetax3}.
\begin{figure}
\caption{The Difference Between the Frenet Frame and the Beta Frame}
\label{frenetbetax3}
\end{figure}
\subsection{The Standard Theory for Curves in $\mathbb{R}^3$}
Even though the alternative is bad, it is this bad alternative which is used in the standard Frenet framing for $C^3$ curves $\gamma: (a,b) \longrightarrow \mathbb{R}^3$ with $\Vert \dot{\gamma}(s)\Vert \equiv 1$. We first define Frenet's curvature $\kappa^f$ by
\[\kappa^f(s) := \Vert \dot{T}(s) \Vert\]
and then define, when $\kappa^f(s) \neq 0$, the Frenet (principal) normal $N^f$ by
\[N^f(s) := \frac{\dot{T}(s)}{\Vert \dot{T}(s) \Vert} = \frac{\dot{T}(s)}{\kappa^f(s)}.\]
Note that for planar curves $\kappa^f = \kappa^{bad}$ and $N^f = N^{bad}$. As mentioned above, for planar curves the Frenet frame may not agree with the positively oriented standard frame for curves in $\mathbb{R}^2$. Roughly speaking the purpose of this paper is to do away with ``bad" (or non-existent) normals whenever possible.
The Frenet (principal) binormal $B^f$ is defined by $B^f(s) = T(s) \times N^f(s)$.
If $\gamma$ is $C^3$ with $\Vert \dot{\gamma}(s)\Vert \equiv 1$ and $\kappa^f(s) \neq 0$, then $\tau^f(s)$ is defined by
\[\tau^f = \frac{\langle \dot{\gamma} \times \ddot{\gamma}, \dddot{\gamma}\rangle}
{{\kappa^f}^2}.\]
The torsion $\tau^f(s)$ measures the rate of change of the osculating plane, the plane spanned by $T(s)$ and $N^f(s)$.
One has the Frenet equations:
\begin{alignat*}{4}
\dot{T} &=& \kappa^f N^f, \\
\dot{N^f} &=-\kappa^f T &&+\tau^f B^f, \\
\dot{B^f} &=&-\tau^f N^f.
\end{alignat*}
The set, $\{T,N^f,B^f\}$, is called the Frenet frame of $\gamma$.
\section{Discussing the Problem}
In Chapter 1 of (Spivak, 1990) Spivak discusses why we cannot obtain a signed curvature $\kappa^{signed}$ for curves in three space and why we cannot, in general, hope to define torsion $\tau(s_0)$ at points where $\kappa^f(s_0)=0$.
With respect to signed curvature, Spivak points out that there is no natural way to pick a vector orthogonal to a given $T(s_0)$ in $\mathbb{R}^3$. Furthermore the Frenet frame, in particular the principal normal $N^f(s)$, is only defined on intervals where $\kappa^f(s) \neq 0$. There may be no consistent way to choose the normal after passing through a point $s_0$ with $\kappa^f(s_0)=0$. If $\kappa^f(s_0) = 0$, $N^f(s)$ may have a non-removable discontinuity at $s_0$. However, we are able, for a large set of curves, to define a new frame, the Beta frame, $\{T,N^\beta, B^\beta\}$, which is at least $C^0$ and is defined even when $\kappa^f(s) =0$. Furthermore $N^\beta(s)= \pm N^f(s)$, whenever $N^f(s)$ is defined. Once we have a global definition of $N^\beta$, we define the signed curvature $\kappa^\beta$ ``the good way" by
\[\dot{T} =: \kappa^\beta N^\beta.\]
For all $s$ we will have $\kappa^\beta(s) = \pm \kappa^f(s)$. If $\gamma$ is planar, then $N^\beta = N$ (the good normal) and $\kappa^\beta = \kappa^{signed}$.
With respect to torsion, Spivak argues that one cannot define $\tau^f(s)$ when $\kappa^f(s)=0$, because there exist examples where no reasonable definition would make sense.
\begin{example}\label{spivak}(Spivak's Example. See Figure \ref{spivakcurv}.) If $\gamma$ is the $C^\infty$ curve defined by
\[ \gamma(t) :=
\left\lbrace \begin{array}{cl} (t,e^{-1/t^2},0) & \mbox{if}\; t>0, \\
(0,0,0) & \mbox{if}\; t=0,\\
(t,0,e^{-1/t^2}) & \mbox{if}\; t<0.
\end{array} \right. \]
then $\tau(t)=0$ everywhere except $t=0$. But at $t=0$ the osculating plane jumps by an angle $\frac{\pi}{2}$. Any attempt to define $\tau(0)$ would involve distributions and delta functions, which we will not pursue in this paper.
\end{example}
\begin{figure}
\caption{Spivak's Example}
\label{spivakcurv}
\end{figure}
We take a different approach than Spivak. Instead of focusing on the examples where no reasonable definition is possible, we give conditions, satisfied by many curves of interest, that allow $\tau^\beta(s_0)$ to be defined even at points $s_0$ where $\kappa^f(s_0)=0$. Furthermore $\tau^\beta = \tau^f$ whenever $\tau^f$ is defined. Finally we will still have the Frenet equations:
\begin{alignat*}{4}
\dot{T} &=& \kappa^\beta N^\beta, \\
\dot{N^\beta} &=-\kappa^\beta T &&+\tau^\beta B^\beta, \\
\dot{B^\beta} &=&-\tau^\beta N^\beta.
\end{alignat*}
\section{The Bishop Frame}
Let $\gamma: (a,b) \longrightarrow \mathbb{R}^3$ be a $C^2$ curve with $\Vert \dot{\gamma} \Vert \equiv 1$. The construction of a Bishop frame \cite{B} is based on the idea of relatively parallel fields. In particular, a normal vector field $M(s)$ along a curve is called relatively parallel if
\[\dot{M}(s) = g(s) T(s) \]
for some function $g(s)$. A single unit normal vector $M_0$ at $\gamma(s_0)$ generates a unique relatively parallel unit normal vector field $M(s)$ along $\gamma$ with $M(s_0)=M_0$. Moreover, any orthonormal basis $\{T, M_{1_0},M_{2_0}\}$ at $\gamma(s_0)$ generates a unique $C^1$ orthonormal frame $\{T,M_1,M_2\}$. Bishop's equations are similar to the Frenet equations:
\begin{alignat*}{4}
\dot{T} & = & \kappa_1 M_1 &+\kappa_2 M_2, \\
\dot{M_1} & = -\kappa_1 T, &&\\
\dot{M_2} & = -\kappa_2 T. &&
\end{alignat*}
We have $\kappa^f(s)= \sqrt{\kappa_1^2(s) + \kappa_2^2(s)}$. If $\kappa^f(s) \neq 0$, then
\[N^f(s) = \frac{\kappa_1(s)}{\kappa^f(s)} M_1(s) + \frac{\kappa_2(s)}{\kappa^f(s)} M_2(s).\]
On sub-intervals of $(a,b)$ where $\kappa^f(s) \neq 0$, there exists a $C^0$ function $\theta(s)$ such that $N^f(s)=\cos \theta(s) M_1(s) + \sin \theta(s) M_2(s)$. If moreover $\gamma$ is $C^3$, then $\tau^f(s) = \dot{\theta}(s)$. In most applications the normal portion of the Bishop frame, $span\{M_1,M_2\}$, is usually written using this polar coordinate approach in complex form:
\[\kappa_1 + i \kappa_2 = \kappa^f\, e^{i \int{\tau^f}}.\]
We will investigate these polar coordinates in some detail, but not using the complex form.
\begin{rem} The function $\int \kappa$ is called the turn of $\gamma$. We have $\theta = \int \tau$.
$\theta$ is related to the twist and the writhe of $\gamma$ which we won't discuss here.
\end{rem}
\section{Bishop's Normal Development Curve} \label{normdev}
If $\gamma$ is $C^2$ and $\Vert \dot{\gamma}(s) \Vert \equiv 1$, then
the curve $(\kappa_1(s),\kappa_2(s))=(r(s),\theta(s))$ in $\{M_1, M_2\}$ space is called Bishop's normal development of $\gamma$. The normal development of $\gamma$ is determined up to rotation by a constant angle in the $\{M_1,M_2\}$ plane and a curve $\gamma$ is determined up to congruence by its normal development. The parameter $s$ is an arclength parameter for $\gamma$, but in general is {\it not} an arclength parameter for the normal development of $\gamma$. The normal development of a line is the constant curve whose image is the origin and the normal development of a circle is the constant curve whose image is $(\kappa^f_{const} \neq 0,\theta_{const})$.
If $\gamma$ is planar, then it has vanishing torsion and the normal development is given by $(r(s),\theta_{const})$. For any $\gamma \in \mathbb{R}^3$, zeros of the normal development correspond to points of zero curvature on $\gamma$. If the normal development $(r(s),\theta(s))$ approaches the origin along a line and leaves the origin along a different line, the corresponding $\gamma$ is like Spivak's Example \ref{spivak} above: it jumps from lying in one plane to lying in a different plane. The normal development of a helix is a constant speed circle around the origin. A $C^2$ curve is spherical if and only if its normal development lies on a line not through the origin \cite{B}.
If $\gamma$ is $C^3$, we have seen that when $\kappa^f(s) \neq 0$, we have, for some function $\theta(s)$, $\tau^f(s) = \dot{\theta}(s)$. In particular, $\tau^f(s)$ will change signs at the local extrema of $\theta(s)$. Curves of constant torsion $\pm 1$ correspond to $\theta(s)=\pm s + \theta_0$ with $r(s)$ arbitrary. Curves of constant curvature $1$ correspond to curves with $r(s) \equiv 1$ and $\theta(s)$ arbitrary.
\section{Curves in Polar Coordinates Through (0,0)}{\label{lift}}
\subsection{Polar Lifts}
As we have seen points where $\kappa^f(s)=0$ on $\gamma$ correspond to the points on the normal development with $(r(s),\theta(s))=(0,0)$. If $\kappa^f(s) \equiv 0$ on an interval, then $\gamma$ is a line segment on that interval, and the normal development remains at $(0,0)$ on that interval. Dealing with the case of ``piecewise" defined curves including line segments is a delicate problem which we will address elsewhere. We will only consider curves that have isolated points of zero curvature. We assume that $(\kappa_1(s),\kappa_2(s))$ has an isolated zero at $s_0$.
Recall that $(\kappa_1(s),\kappa_2(s))$ is $C^0$. Nevertheless there may not exist any pair of $C^0$ functions $\tilde{r}(s)$, $\tilde{\theta}(s)$ such that $(\tilde{r}(s),\tilde{\theta}(s)) = (\kappa_1(s),\kappa_2(s))$. Even if $(\kappa_1(s),\kappa_2(s))$ is $C^\infty$, there may not exist such $C^0$ functions $\tilde{r}(s)$, $\tilde{\theta}(s)$. More precisely, let the Cartesian plane be defined by
\[\mathbb{R}^2_{(x,y)} = \{(x,y) | -\infty < x < \infty, -\infty < y < \infty\},\]
and let the (extended) Polar plane be defined by
\[\mathbb{R}^2_{(\tilde{r},\tilde{\theta})} = \{(\tilde{r},\tilde{\theta)} | -\infty < \tilde{r} < \infty, -\infty < \tilde{\theta} < \infty\}.\]
Note that usually the definition of Polar plane restricts to $r >0$ and $\theta \in [0,2 \pi)$, but we will use ``Polar plane" in the extended sense as defined in $\mathbb{R}^2_{(\tilde{r},\tilde{\theta})}$.
Let $\pi: \mathbb{R}^2_{(\tilde{r},\tilde{\theta})} \longrightarrow \mathbb{R}^2_{(x,y)}$ be given by $\pi(\tilde{r},\tilde{\theta}) = (\tilde{r} \cos \tilde{\theta}, \tilde{r} \sin \tilde{\theta})$. Note that $\pi$ is $C^\omega$. Curves which are $C^0$ in the Polar plane project down to curves which are $C^0$ in the Cartesian plane. However not all $C^0$ curves in the Cartesian plane are projections of $C^0$ curves in the Polar plane. Examples include those Spivak-like curves which enter the origin from one direction $\theta_1$ and leave from a ``non-parallel" direction $\theta_2$. Figure \ref{nolift} shows this and two other cases.
\begin{figure}
\caption{Some Examples Where No $C^0$ Lift $\tilde{\theta}$ Exists}
\label{fig:spivaklift}
\label{fig:noliftoscil}
\label{fig:noliftspiral}
\label{nolift}
\end{figure}
Specifically we ask, given $(\kappa_1,\kappa_2): (a,b) \longrightarrow \mathbb{R}^2$ is $C^0$, under what conditions does there exist a $C^0$ lift $(\tilde{r},\tilde{\theta}): (a,b) \longrightarrow \mathbb{R}^2$ such that $\pi \circ (\tilde{r}(s),\tilde{\theta}(s)) = (\kappa_1(s), \kappa_2(s))$? If $(\kappa_1,\kappa_2)$ is never $(0,0)$ then finding a lift $(\tilde{r}(s),\tilde{\theta}(s))$ is trivial. If $(\kappa_1(s_0),\kappa_2(s_0)) = (0,0)$ is an isolated zero, then there exists a neighborhood $D$ of $s_0$ in $(a,b)$ such that $(\kappa_1(s),\kappa_2(s)) \neq (0,0)$ except at $s_0$. At each such isolated zero we require the existence of a corresponding $C^0$ lift $(\tilde{r},\tilde{\theta}): D \longrightarrow \mathbb{R}_{(\tilde{r},\tilde{\theta})}^2$. The first thought that comes to the reader's mind may be that the lift exists if and only if the derivatives in $s_0^+$ and $s_0^-$ coincide. However, Figure \ref{yeslift}(b), is a simple example to show this is not true. In fact much more subtle examples can be constructed. Figures \ref{yeslift} and \ref{liftoscil} show some simple cases where $C^0$ lifts exist. A unique $C^0$ global lift $(\tilde{r},\tilde{\theta})$ is then constructed by patching together the pieces through $(0,0)$ and pieces which avoid $(0,0)$.
At this point we'd like to emphasize that we cannot ``just take the Bishop frame". The problem is that although the Bishop frame is indeed global and $C^0$, the relationship with the Frenet frame (and in particular the principal normal) is not. To recover a $C^0$ ``principal normal" a detailed analysis of these zeros is required. Precisely the lack of such a global normal in the literature was the primary motivation of this paper.
\begin{figure}
\caption{Simple Examples Where a $C^0$ Lift $\tilde{\theta}$ Exists}
\label{fig:simpleL}
\label{fig:simpleK}
\label{fig:simpleV}
\label{yeslift}
\end{figure}
\begin{figure}
\caption{$(\tilde{r},\tilde{\theta})$: An Oscillating Example With a $C^0$ Lift}
\label{liftoscil}
\end{figure}
\subsection{Main Lemma}
This subsection and the following Lemma are technical and unappealing, but straightforward.
We define $\hat{\theta}: \mathbb{R}^2\! - \!(0,0) \longrightarrow \left(-\frac{\pi}{2},\frac{\pi}{2} \right]$ by
\[ \hat{\theta}(x,y) :=
\left\lbrace \begin{array}{cl} \tan^{-1}\left( \frac{y}{x}\right) & \mbox{if}\; x \neq 0, \\
\frac{\pi}{2} & \mbox{if}\; x=0, y \neq 0, \\
\mbox{undefined} & \mbox{if}\; x=0, y=0.
\end{array} \right. \]
\noindent If $\phi = \mbox{Arg}(x+iy)$ is defined to be the unique argument of $x+iy$ in $[0,2\pi)$, then
\[ \hat{\theta} =
\left\lbrace \begin{array}{cl} \phi & \mbox{if}\; \phi \in \left[0,\frac{\pi}{2}\right], \\
\phi - \pi & \mbox{if}\; \phi \in \left(\frac{\pi}{2}, \frac{3 \pi}{2}\right], \\
\phi- 2\pi & \mbox{if}\; \phi \in \left(\frac{3 \pi}{2},2 \pi \right).
\end{array} \right. \]
A graph of $\hat{\theta}$ as a function of $\phi$ is shown in Figure \ref{canada2}.
\begin{figure}
\caption{$\hat{\theta}$ as a Function of $\phi$}
\label{canada2}
\end{figure}
Note
\[ \hat{\theta}\left(\pi (\tilde{r}(s),\tilde{\theta}(s))\right) =
\left\lbrace \begin{array}{cl} \tilde{\theta}(s) \!\!\!\!\!\mod \!\pi & \mbox{if}\; 0 \leq \tilde{\theta}(s) \!\!\!\!\!\mod \!\pi \leq \frac{\pi}{2}, \\
\left(\tilde{\theta}(s) \!\!\!\!\!\mod \!\pi\right)-\pi & \mbox{if}\; \frac{\pi}{2} < \tilde{\theta}(s) \!\!\!\!\!\mod \!\pi < \pi.
\end{array} \right. \]
\noindent $\tan^{-1}$ and hence $\hat{\theta}$ jumps at $\frac{\pi}{2}$. Thus special care must be taken to deal with the case of $(\kappa_1,\kappa_2)$ oscillating across the $y$-axis ($x \equiv 0$) infinitely often as it approaches the origin. This would cause $\hat{\theta}$ to oscillate wildly between near $\frac{\pi}{2}$ and near $-\frac{\pi}{2}$. To avoid this cosmetic problem we (in this case only) rotate $(\kappa_1,\kappa_2)$ by an angle $\frac{\pi}{4}$ (so the curve is now oscillating harmlessly about $-\frac{\pi}{4}$). Then we can check its behavior without the jumping. We will refer to this special case as Case 3; Case 1 being $-\frac{\pi}{2} < \hat{\theta}^+ < \frac{\pi}{2}$ and Case 2 being $\hat{\theta}^+ = \frac{\pi}{2}$.
Let
\[ \hat{\theta}^+ := \lim_{s \rightarrow s_0^+} \hat{\theta}(\kappa_1(s), \kappa_2(s)), \]
and
\[ \hat{\theta}_{\frac{\pi}{2}}^+ := \lim_{s \rightarrow s_0^+}
\hat{\theta}\left(\frac{\sqrt{2}}{2}\kappa_1(s) -\frac{\sqrt{2}}{2}\kappa_2(s),
\frac{\sqrt{2}}{2}\kappa_1(s) +\frac{\sqrt{2}}{2} \kappa_2(s)\right). \]
Note that these limits may not exist.
Finally we define
\[ \theta^+ :=
\left\lbrace \begin{array}{cl} \hat{\theta}^+ & \mbox{if}\; -\frac{\pi}{2} < \hat{\theta}^+ < \frac{\pi}{2}, \\
\frac{\pi}{2} & \mbox{if}\;\; \hat{\theta}_{\frac{\pi}{2}}^+ = -\frac{\pi}{4},\\
\mbox{undefined} & \mbox{otherwise}.
\end{array} \right. \]
Replacing $s \rightarrow s_0^+$ with $s \rightarrow s_0^-$ we similarly define $\theta^-$.
\begin{lem}\label{mainlem}
Let $(\kappa_1, \kappa_2)$ be $C^0$ with an isolated zero at $s_0$. Then there exists a $C^0$ lift $(\tilde{r},\tilde{\theta})$ near $s_0$ such that $\pi \circ (\tilde{r},\tilde{\theta}) = (\kappa_1,\kappa_2)$ if and only if both $\theta^+$ and $\theta^-$ exist and $\theta^+= \theta^-$.
\end{lem}
\begin{proof}
First assume there does exist a $C^0$ lift $(\tilde{r},\tilde{\theta})$ near $s_0$ such that $\pi \circ (\tilde{r},\tilde{\theta}) = (\kappa_1,\kappa_2)$. In other words for all $s$ near $s_0$ we have
\[\left(\tilde{r}(s) \cos \tilde{\theta}(s), \tilde{r}(s) \sin \tilde{\theta}(s)\right) = (\kappa_1(s),\kappa_2(s)).\]
Since the zero is isolated at $s_0$ we may assume $\tilde{r}(s) \neq 0$ near $s_0$.
We next prove $\theta^+$ exists. By definition this means that either $\hat{\theta}^+$ exists with $-\frac{\pi}{2} < \hat{\theta}^+ < \frac{\pi}{2}$ and/or $\hat{\theta}_{\frac{\pi}{2}}^+$ exists with $\hat{\theta}_{\frac{\pi}{2}}^+ = - \frac{\pi}{4}$. By continuity
\begin{equation}\label{main}
\lim_{s \rightarrow s_0^+} \tilde{r}(s) = 0 = \lim_{s \rightarrow s_0^-} \tilde{r}(s)\;\;\; \mbox{and}\;\; \lim_{s \rightarrow s_0^+} \tilde{\theta}(s) = \tilde{\theta}(s_0) = \lim_{s \rightarrow s_0^-} \tilde{\theta}(s).
\end{equation}
When $s \neq s_0$ we have
\begin{eqnarray*}
\hat{\theta}(\kappa_1(s),\kappa_2(s)) & = & \left\lbrace \begin{array}{cc}
\tan^{-1} \left( \frac{\tilde{r}(s) \sin \tilde{\theta}(s)}{\tilde{r}(s) \cos \tilde{\theta}(s)} \right) & \mbox{if}\; \cos \tilde{\theta}(s) \neq 0, \\
\frac{\pi}{2}& \mbox{if}\; \cos \tilde{\theta}(s) = 0.
\end{array} \right. \\
\Longrightarrow \hat{\theta}(s) & = & \left\lbrace \begin{array}{cc}
\tan^{-1} \left( \tan (\tilde{\theta}(s))\right) & \mbox{if}\; \tilde{\theta}(s) \bmod \pi \neq \frac{\pi}{2}, \\
\frac{\pi}{2}& \mbox{if}\; \tilde{\theta}(s) \bmod \pi = \frac{\pi}{2}.
\end{array} \right.
\end{eqnarray*}
By conditions (\ref{main}) eventually $\tilde{\theta}(s)$ is near $\tilde{\theta}(s_0)$. If $\tilde{\theta}(s_0) \bmod \pi \neq \frac{\pi}{2}$, then eventually $\tilde{\theta}(s) \bmod \pi \neq \frac{\pi}{2}$ and
\[\theta^+ = \hat{\theta}^+ = \tan^{-1} \left(\tan (\tilde{\theta}(s_0))\right).\]
If $\tilde{\theta}(s_0) \bmod \pi = \frac{\pi}{2}$, then we rotate by $\frac{\pi}{4}$ and by the same argument we have
\[\hat{\theta}_{\frac{\pi}{2}}^+ = \tan^{-1}\left(\tan(\tilde{\theta}(s_0)+\frac{\pi}{4})\right) = \tan^{-1}\left(\tan(\frac{3 \pi}{4})\right) = -\frac{\pi}{4}.\]
So $\theta^+ = \frac{\pi}{2}$. Thus in either case $\theta^+$ exists.
Similarly $\theta^-$ exists and by condition (\ref{main}) $\theta^+ = \theta^-$.
Conversely assume $\theta^+$ and $\theta^-$ exist and $\theta^+=\theta^-$. Let $(\tilde{r}^+(s),\tilde{\theta}^+(s))$ (resp. $(\tilde{r}^-(s),\tilde{\theta}^-(s))$) be any $C^0$ lift for $s > s_0$ (resp. $s < s_0$). Still assuming $(\kappa_1(s),\kappa_2(s)) \neq (0,0)$ for $s \neq s_0$, without loss of generality assume both $\tilde{r}^+(s) > 0$ and $\tilde{r}^-(s) > 0$ for $s \neq s_0$.
First we consider the Case 1 where $-\frac{\pi}{2} < \hat{\theta}^+ < \frac{\pi}{2}$ (and hence $-\frac{\pi}{2} < \hat{\theta}^- < \frac{\pi}{2}$). We want to show there is a $C^0$ lift $(\tilde{r},\tilde{\theta})$. We claim $\lim_{s \rightarrow s_0^+} \tilde{\theta}^+(s)$ and $\lim_{s \rightarrow s_0^-} \tilde{\theta}^-(s)$ exist. More precisely we have
\[ -\frac{\pi}{2} < \lim_{s \rightarrow s_0^+} \tan^{-1} \left(\frac{\kappa_2(s)}{\kappa_1(s)}\right) < \frac{\pi}{2}.\]
Or
\[ -\frac{\pi}{2} < \lim_{s \rightarrow s_0^+} \tan^{-1} \left(\frac{\tilde{r}^+(s) \sin \tilde{\theta}^+(s)}{\tilde{r}^+(s) \cos \tilde{\theta}^+(s)}\right) < \frac{\pi}{2}.\]
Or
\[ -\frac{\pi}{2} < \lim_{s \rightarrow s_0^+} \tan^{-1} \left(\tan \tilde{\theta}^+(s)\right) < \frac{\pi}{2}.\]
Eventually $\tilde{\theta}^+(s) \bmod \pi$ avoids $\frac{\pi}{2}$. Thus eventually
\begin{equation}\label{pie}\tan^{-1} \left(\tan \tilde{\theta}^+(s)\right) =
\left\lbrace
\begin{array}{c} \tilde{\theta}^+(s) \bmod \pi \\
\mbox{or} \;\; \left( \tilde{\theta}^+(s) \bmod \pi \right) - \pi. \end{array} \right.
\end{equation}
In either case $\tilde{\theta}^+(s_0) = \lim_{s \rightarrow s_0^+} \tilde{\theta}^+(s)$ exists. By the same argument $\tilde{\theta}^-(s_0) = \lim_{s \rightarrow s_0^-} \tilde{\theta}^-(s)$ exists. Since $\theta^+=\theta^-$ we know by Equation (\ref{pie}) that $\tilde{\theta}^+(s_0)-\tilde{\theta}^-(s_0)$ is a multiple of $\pi$. Since we have assumed both $\tilde{r}^+(s) >0$ and $\tilde{r}^-(s)>0$ we can define $(\tilde{r}(s),\tilde{\theta}(s))$ depending on whether $(\kappa_1,\kappa_2)$ approaches the origin from the same or opposite directions as $s$ approaches $s_0$ from the left and right. In the first case we have $j(s_0)=\tilde{\theta}^+(s_0)-\tilde{\theta}^-(s_0) = 0 \bmod 2\pi$ and
\[
(\tilde{r}(s),\tilde{\theta}(s)) :=
\left\lbrace
\begin{array}{cc}
(\tilde{r}^-(s),\tilde{\theta}^-(s) + j(s_0))& \mbox{if}\;\; s < s_0,\\
(0,\tilde{\theta}^+(s_0))& \mbox{if}\;\; s = s_0, \\
(\tilde{r}^+(s),\tilde{\theta}^+(s))& \mbox{if}\;\; s > s_0.
\end{array}
\right.
\]
If $j(s_0)=\tilde{\theta}^+(s_0)-\tilde{\theta}^-(s_0) = \pi \bmod 2\pi$ then
\[
(\tilde{r}(s),\tilde{\theta}(s)) :=
\left\lbrace
\begin{array}{cc}
(-\tilde{r}^-(s),\tilde{\theta}^-(s) + j(s_0))& \mbox{if}\;\; s < s_0,\\
(0,\tilde{\theta}^+(s_0))& \mbox{if}\;\; s = s_0, \\
(\tilde{r}^+(s),\tilde{\theta}^+(s))& \mbox{if}\;\; s > s_0.
\end{array}
\right.
\]
In Case 2 we assume $\hat{\theta}^+=\frac{\pi}{2}$. Since this implies $\hat{\theta}_{\frac{\pi}{2}}^+ = -\frac{\pi}{4}$ we see that Case 2 is included in Case 3.
Finally we consider Case 3:
\[\hat{\theta}_{\frac{\pi}{2}}^+ = -\frac{\pi}{4}.\]
As discussed above $(\kappa_1,\kappa_2)$ is oscillating across the $y$-axis, but otherwise converges nicely. We can $C^0$ lift the rotated $(\kappa_1,\kappa_2)$ and then shift that $(\tilde{r},\tilde{\theta})$ by $\frac{\pi}{4}$.
\end{proof}
As mentioned above, if the conditions of Lemma \ref{mainlem} are valid at all points of zero curvature, then we have a global $C^0$ lift $(\tilde{r},\tilde{\theta})$ of $(\kappa_1,\kappa_2)$. Without loss of generality we will assume $\tilde{\theta}(0)=0$.
\section{The Beta Frame}
\subsection{Initial Conditions}
Without loss of generality we assume $\kappa^f(0) \neq 0$ and define $M_1(s), M_2(s)$ by the following initial conditions:
\begin{enumerate}
\item $\mbox{If $\gamma$ is planar, then } M_1(0)=N(0), M_2(0)=T(0) \times N(0)$,
\item $\mbox{If $\gamma$ is not-planar, then } M_1(0)=N^f(0), M_2(0)=B^f(0)$.
\end{enumerate}
\subsection{The Beta Normal, Signed Curvature and Binormal}
Assume that the normal development of $\gamma$ has a continuous lift $(\tilde{r},\tilde{\theta})$ with $\tilde{\theta}(0)=0$ as in Section \ref{lift} so that
\[(\kappa_1,\kappa_2) = (\tilde{r} \cos \tilde{\theta},\tilde{r} \sin\tilde{\theta}). \]
Then we can globally define $N^\beta$ by
\[N^\beta(s) := \cos{\tilde{\theta}(s)}\;M_1(s) + \sin{\tilde{\theta}(s)} \; M_2(s)\]
and note that
\[N^\beta = \pm N^f\]
whenever $N^f$ is defined.
We define our signed curvature $\kappa^\beta$ by
\[\dot{T}(s) =: \kappa^\beta(s) N^\beta(s).\]
Note
\[\kappa^\beta = \tilde{r}\]
and
\[\kappa^\beta = \pm \kappa^f\]
whenever $\kappa^f$ is defined.
We define $B^\beta$ by
\[B^\beta(s) := T(s) \times N^\beta(s)\]
and note that
\[B^\beta = -\sin \tilde{\theta}\; M_1 + \cos \tilde{\theta} \;M_2.\]
The globally defined frame $\{T,N^\beta,B^\beta \}$ is called the Beta frame. The Beta frame (when defined) is unique and is invariant under regular, orientation preserving, base point fixing reparametrizations. If the base point changes, it may happen that $N^\beta$ and $B^\beta$ globally switch signs.
\section{Torsion}
Finally we assume $\gamma$ is $C^3$, $\Vert \dot{\gamma}\Vert \equiv 1$ and assume there exists a continuous lift $(\tilde{r},\tilde{\theta})$ of $(\kappa_1,\kappa_2)$ as in the last section. In this case we have that $\kappa_1$, $\kappa_2$ are $C^1$ and after a bit of checking $\tilde{r}$ is $C^1$. $\tilde{\theta}$ is once again more difficult. Even if both $\kappa_1$, $\kappa_2$ are $C^1$, there is no guarantee that $\tilde{\theta}$ is $C^1$ if $\kappa^\beta(s_0)=0$ (even if $\Vert (\dot{\kappa_1}(s_0),\dot{\kappa_2}(s_0))\Vert \neq 0$). For example $(\tilde{r},\tilde{\theta}) = (s, s^\frac{1}{3})$. See Figure \ref{c0notc1}.
\begin{figure}
\caption{$(\tilde{r},\tilde{\theta}) = (s, s^{\frac{1}{3}})$ is $C^0$ but not $C^1$}
\label{c0notc1}
\end{figure}
Assuming $\tilde{\theta}(s)$ is $C^1$ at all curvature zero points, then the lift $(\tilde{r},\tilde{\theta})$ is globally $C^1$ and we define $\tau^\beta$ by
\[ \tau^\beta(s) := \dot{\tilde{\theta}}(s)\]
and note that
\[\tau^\beta = \tau^f\]
whenever $\tau^f$ is defined.
As promised we will still have the Frenet equations:
\begin{alignat*}{4}
\dot{T} &=& \kappa^\beta N^\beta, \\
\dot{N^\beta} &=-\kappa^\beta T &&+\tau^\beta B^\beta, \\
\dot{B^\beta} &=&-\tau^\beta N^\beta.
\end{alignat*}
\end{document} |
\begin{document}
\begin{frontmatter}
\title{A step towards proving de Polignac's Conjecture}
\author{J. Sellers}
\begin{abstract}
Consider the set of all natural numbers that are co-prime to primes less than or equal to a given prime. Then given a consecutive pair of numbers in that set with an arbitrary even gap, we prove there exists an unbounded number of actual prime pairs with that same gap. This conditional proof of de Polignac's conjecture constitutes a proof for a range of known gaps, but the full conjecture requires additional proof that such number pairs exist for all even gaps.
\end{abstract}
\end{frontmatter}
\section{Introduction}
French mathematician Alphonse de Polignac conjectured in 1849 that: ``Every even number is the difference of two consecutive primes in infinitely many ways.''\cite{dP,LD} The subsumed twin prime conjecture is more well known and is considered older, but its origin is not otherwise documented. de Polignac's conjecture, a generalization for arbitrary even gaps, is taken as the earliest documented statement that is inclusive of the twin prime conjecture. Work on prime gaps has application to both de Polignac's conjecture and the twin prime conjecture, but the twin prime conjecture appears to have been the primary goal of most work.
Maynard in \cite{JM1} gives an excellent overview of approaches to the twin prime conjecture. The earliest result comes in the work of Hardy and Littlewood \cite{HL} where they proposed a prime pair counting function using a modified assumption about the Riemann Hypothesis to characterize the density of prime pairs.
Sieve theory has made the most significant recent progress. It was originally proposed by Brun \cite{VB} as a modified form of the sieve of Eratosthenes and applied to the Goldbach Conjecture. His significant result proved that the sum of the reciprocal of twin primes converges. Sieve theory was further developed by Selberg \cite{AS} and has made significant advances applying the work of Bombieri, Friedlander, and Iwaniec \cite{BFI1,BFI2,BFI3} on the distribution of primes in arithmetic progression and then applying the results of Goldston, Pintz, and Yildirim \cite{GPY} on primes in tuples. This culminated in the work of Zhang \cite{YZ} who combined these approaches and proved the existence of a finite, though very large limit on gaps, for which there are infinite prime pairs. His method was subsequently modified to significantly reduce the gap limit, to 246 \cite{JM2, poly1, poly2}.
Those latter approaches formulated sieves using a product of linear functions chosen to ensure finding at least two prime numbers in an infinite number of tuples of fixed finite size. Therefore, while it has produced significant progress, it does not demonstrate a result for prime pairs of a specific gap and is known to have inherent limitations for reducing the gap limit further.
The primary difference in this paper is that we work in the realm of relative primes rather than attempting to deal with primes directly, because relative primes are more easily predicted. The set of numbers prime to $P\le P_{k}$ includes the set of all prime numbers greater than $P_{k}$ and all composite numbers whose prime factors are all greater than $P_{k}$. All of these fall in the two arithmetic progressions $6n+5$ and $6n+7$. All such relative primes between the composite numbers are actual prime numbers. The difficulty in predicting prime numbers derives from the inability to order composite numbers beyond $P_{k+1}^2<P_{k+1}P_{k+2}$ without knowing their actual values. However, we do know that all numbers less than $P_{k+1}^2$ that are prime to $P<P_{k}$ are actual prime numbers. In that domain our results are applicable to actual prime numbers.
The various combinations of prime factors $P\le P_{k}$ repeat identically in successive sequences of $P_{k}\#$ numbers. Using this, we define prospective primes, numbers prime to $P\le P_{k}$ for some $P_{k}$, among which all prime numbers greater than $P_{k}$ must occur.
We then apply a formulaic approach for the specification of prospective primes in successively larger sets of $P_{k}\# \rightarrow P_{k+1}\#$ numbers. We see that gaps between consecutive prospective primes propagate predictably between successively larger sets, whereas gaps between actual primes do not. This allows us to assess their distribution directly and prove they exist in a range where they must also be actual prime pairs of a given gap.
This work represents an extension of \cite{JS} which addressed only twin primes, extending it to gaps of arbitrary even numbers. In this approach there are two parts to proving de Polignac's conjecture. Part one, shown in this work, proves that given any consecutive prospective prime pair of even gap $g$, there exists an unbounded number of actual prime pairs with gap $g$. The second part, partially addressed in this work, requires one to prove there exists a pair of consecutive prospective primes for any arbitrary even gap. We show that such gaps exist between consecutive prospective prime pairs for $g=P_{k}\pm 1$ and $g=P_{k+1}-P_{k}$ for all $P_{k}$, however to complete the proof of de Polignac's conjecture one must show that such gaps exist for all even numbers.
\section{Definitions and framework}
$P$ = generic prime number
$P_{k} = k^{th}$ prime number $(P_{1}=2)$
$P_{k}\#= \prod_{i=1}^{k}P_{i}$
$S_{k}:=\left\{N: 5\le N \le 4+ P_{k}\#\right\}; \; N\in \mathbb{N}$
$S_{k}^{(m)}:=\left\{N: 5+mP_{k-1}\#\le N \le 4+(m+1)P_{k-1}\#\right\}$; where:
\[
0\le m\le P_{k}-1; \quad S_{k}^{(m)}\subset S_{k};\quad S_{k}^{(0)}=S_{k-1}
\]
\[
\cup_{m=0}^{P_{k}-1}S_{k}^{(m)}=S_{k}
\quad \& \quad
S_{k}^{(m)}\cap S_{k}^{(m')}=
\begin{cases}
\emptyset &\textrm{if} \quad m\ne m'\\
S_{k}^{(m)} &\textrm{if}\quad m= m'
\end{cases}
\]
$\widetilde{P}_{\{k\}}=$ unspecified prospective prime number in $S_{k}$:
\[
\forall{P} \left[P|\widetilde{P}_{\{k\}}\longrightarrow P> P_{k} \right]
\]
$\qquad\widetilde{P} =$ generic prospective prime ; prime to all $P\le P_{l}$ for unspecified $P_{l}$
$\:\;\widetilde{\mathbb{P}}_{k}:=\left\{\widetilde{P}_{\{k\}}\in S_{k}\right\}$, the set of all prospective primes in $S_{k}$
$\:\;\widetilde{\mathbb{P}}_{k}^{(m)}:=\left\{\widetilde{P}_{\{k\}}\in S_{k}^{(m)}\right\}$, the set of all prospective primes in subset $S_{k}^{(m)}$
$(\widetilde{PgP})=$ generic prospective prime pair with gap $g$
$(\widetilde{PgP})_{k}=$ generic prospective prime pair with gap $g$ in $S_{k}$
\begin{definition}
Two prospective prime numbers, $\widetilde{P}_{\{k\}} < \widetilde{P}_{\{k\}}'$ are considered consecutive prospective prime numbers, when there is no prospective prime number between them, i.e.:
\[
\forall{N} \left[ \left(\widetilde{P}_{\{k\}}<N<\widetilde{P}_{\{k\}}'\right)\longrightarrow \left(P|N\rightarrow P\le P_{k}\right)\right]
\]
\end{definition}
When we refer to prospective prime pairs we always mean consecutive prospective prime pairs.
Prospective prime numbers, prime to all $P\le P_{k}$ have the form:
\begin{equation}\label{E:genproprime}
\widetilde{P}_{\{k\}}=\left(\begin{array}{c} 5 \\ 7 \end{array}\right)+\sum_{j=3}^{k}m_{j}P_{j-1}\#
\end{equation}
For $\widetilde{P}_{\{k\}}\in S_{k}$, $m_{k}$ is constrained by: $0\le m_{k} \le P_{k}-1$. In addition two values of $m_{j}$ for each $j$, corresponding separately to the 5 and 7 in (\ref{E:genproprime}) are disallowed to avoid a result divisible by $P_{j}$.\footnote{If we allow all values $m_{j} \ge 0$, then
(\ref{E:genproprime}) represents the progressions $6n+5$ and $6n+7$.} This is best handled iteratively as in the following:
Going from $S_{k}\rightarrow S_{k+1}$ we get:
\begin{equation}\label{E: nextproprime}
\widetilde{P}_{\{k+1\}}=\widetilde{P}_{\{k\}}+m_{k+1}P_{k}\# \qquad 0 \le m_{k+1} \le P_{k+1}-1
\end{equation}
$\widetilde{P}_{\{k+1\}}$ remains prime to $P\le P_{k}$ and will be prime to $P_{k+1}$ as long as we insist $P_{k+1} \nmid \widetilde{P}_{\{k+1\}}$, enforced by $m_{k+1}\ne \widehat{m}_{k+1}$, where:\footnote{(\ref{E: defmhat}) follows from (\ref{E: nextproprime}) letting $\widetilde{P}_{\{k+1\}}\bmod{P_{k+1}}=0$}.
\begin{equation}\label{E: defmhat}
\widehat{m}_{k+1}=\frac{\alpha P_{k+1}-\widetilde{P}_{\{k\}}\bmod{P_{k+1}}}{\left(P_{k}\#\right)\bmod{P_{k+1}}}
\end{equation}
and where $\alpha$ is the smallest integer such that $\widehat{m}_{k+1}$ is an integer $\le P_{k+1}-1$. Also,
\[
\widetilde{P}_{\{k\}}\bmod{P_{k+1}}=0 \longleftrightarrow \alpha =0.
\]
One can see from (\ref{E: defmhat}) that the values of $\widehat{m}_{k+1}$ are distinct for $\widetilde{P}_{\{k\}}$ belonging to distinct residue classes $\bmod{P_{k+1}}$ and all $\widetilde{P}_{\{k\}}$ in the same residue class $\bmod{P_{k+1}}$ have the same value for $\widehat{m}_{k+1}$.
Note that:
\begin{equation}\label{E: proprimeinsub}
\widetilde{P}_{\{k+1\}}=\widetilde{P}_{\{k\}}+m_{k+1}P_{k}\# \in S_{k+1}^{(m_{k+1})}
\end{equation}
Therefore each prospective prime number in $S_{k}$ generates one prospective prime number in all but one subset of $S_{k+1}$. The one disallowed subset being $S_{k+1}^{(\widehat{m}_{k+1})}$.
It follows from (\ref{E: proprimeinsub}) that for $m'>m$ and if $\widetilde{P}_{\{k\}}\in S_{k}^{(m)}$ and $\widetilde{P}_{\{k\}}'\in S_{k}^{(m')}$ then $\widetilde{P}_{\{k\}}<\widetilde{P}_{\{k\}}'$. Therefore, consecutive prospective primes can only occur within a subset or between the largest prospective prime in one subset and the least prospective prime in the next sequential subset.
It is also important to know that prospective primes using (\ref{E:genproprime}) are unique in accordance with the following lemma.
\begin{lemma}\label{L: uniqueness}
Given
\[
\widetilde{P}_{\{k\}}=\left(\begin{array}{c} 5\\7 \end{array}\right)+\sum_{j=3}^{k}m_{j}P_{j-1}\#
\]
and
\[
\widetilde{P}_{\{k\}}'=\left(\begin{array}{c} 5\\7 \end{array}\right)+\sum_{j=3}^{k}m_{j}'P_{j-1}\#
\]
where $0\le m_{j},m_{j}' \le P_{j}-1$.
Then,
\[
\widetilde{P}_{\{k\}}=\widetilde{P}_{\{k\}}' \longleftrightarrow m_{j}=m_{j}' \quad \textrm{for}\quad 3\le j \le k \quad \textrm{and both either start with 5 or both with 7}
\]
\end{lemma}
\begin{proof}
Taking: $\widetilde{P}_{\{k\}}'=\widetilde{P}_{\{k\}}$ gives:
\[
\sum_{j=3}^{k}(\pm\Delta m_{j})P_{j-1}\#=\left(\begin{array}{c} 0\\2 \end{array}\right)
\]
where the zero applies if $\widetilde{P}_{\{k\}}$ and $\widetilde{P}_{\{k\}}'$ both start with 5 or both start with 7, and 2 applies if one starts with 5 and the other starts with 7.
The smallest finite value for the left hand side of the equation is $6$. Therefore it cannot be solved by finite integral values of $\Delta m_{j}$ and the only solution is $\sum_{j=3}^{k}(\pm\Delta m_{j})P_{j-1}\#=0$, where $\Delta m_{j}=0$ for all $j$.
\end{proof}
\section{Prospective Prime pairs with gap $g$}
We call prospective prime numbers, prime to all $P\le P_{k}$, consecutive if there are no numbers prime to all $P\le P_{k}$ between them.\footnote{Consecutive prime numbers may be taken as consecutive prospective prime numbers, but only if there are no prospective prime numbers between them. }
Gaps between consecutive prospective prime pairs both propagate unchanged and are increased when generating prospective numbers via (\ref{E: nextproprime}). Increases occur due to the supplemental condition $m_{k+1}\ne \widehat{m}_{k+1}$. For example, let $\widetilde{P}_{\{k\}}<\widetilde{P}_{\{k\}}'< \widetilde{P}_{\{k\}}''$ be three consecutive prospective prime numbers in $S_{k}$, with gaps $g=\widetilde{P}_{\{k\}}'-\widetilde{P}_{\{k\}}$ and $g'=\widetilde{P}_{\{k\}}''-\widetilde{P}_{\{k\}}'$. Then Equation~(\ref{E: nextproprime}) gives the following numbers in $S_{k+1}$ which remain prime to $P\le P_{k}$:
\[
\widetilde{P}_{\{k+1\}}=\widetilde{P}_{\{k\}}+m_{k+1}P_{k}\#
\]
\[
\widetilde{P}_{\{k+1\}}'=\widetilde{P}_{\{k\}}'+m_{k+1}'P_{k}\#
\]
\[
\widetilde{P}_{\{k+1\}}''=\widetilde{P}_{\{k\}}''+m_{k+1}''P_{k}\#
\]
In cases where $m_{k+1}=m_{k+1}'=m_{k+1}''$ the gaps remain at $g$ and $g'$. However, we must consider the disallowed cases given by the supplemental condition (\ref{E: defmhat}), which is necessary so that the corresponding numbers in $S_{k+1}$ are prime to $P\le P_{k+1}$.
Note that $\widehat{m}_{k+1}$, $\widehat{m}_{k+1}'$, and $\widehat{m}_{k+1}''$ are distinct from each other unless $g\bmod{P_{k+1}}=0$, $g'\bmod{P_{k+1}}=0$, or $(g+g')\bmod{P_{k+1}}=0$. Then given that there are $P_{k+1}-1$ valid values for each, there are the following cases when $\widehat{m}_{k+1}$, $\widehat{m}_{k+1}'$, and $\widehat{m}_{k+1}''$ are distinct:\footnote{$g^{?}$ represents an unspecified gap, which is the gap from the disallowed prospective prime to the next larger or smaller prospective prime, respectively.}\\
\begin{enumerate}
\item \underline{$m_{k+1}=m_{k+1}'=m_{k+1}'' \ne \widehat{m}_{k+1},\widehat{m}_{k+1}', \widehat{m}_{k+1}''$:} Yields $P_{k+1}-3$ cases where both gaps are preserved, because all three of the corresponding prospective primes are allowed in those corresponding subsets:
\[
\widetilde{P}_{\{k\}}\leftarrow g \rightarrow \widetilde{P}_{\{k\}}'\leftarrow g' \rightarrow \widetilde{P}_{\{k\}}'' \quad\Longrightarrow\quad \widetilde{P}_{\{k+1\}}\leftarrow g \rightarrow \widetilde{P}_{\{k+1\}}'\leftarrow g' \rightarrow \widetilde{P}_{\{k+1\}}''
\]
\item \underline{$m_{k+1}=m_{k+1}'=m_{k+1}'' = \widehat{m}_{k+1}$:} Yields 1 case where only the second gap is preserved, because $\widetilde{P}_{\{k+1\}}$ is disallowed in $S_{k+1}^{(\widehat{m}_{k+1})}$:
\[
\widetilde{P}_{\{k\}}\leftarrow g \rightarrow \widetilde{P}_{\{k\}}'\leftarrow g' \rightarrow \widetilde{P}_{\{k\}}''\quad \Longrightarrow \quad \leftarrow g^{?} + g \rightarrow \widetilde{P}_{\{k+1\}}'\leftarrow g' \rightarrow \widetilde{P}_{\{k+1\}}''
\]
\item \underline{$m_{k+1}=m_{k+1}'=m_{k+1}'' = \widehat{m}_{k+1}'$:} Yields 1 case where the two gaps merge, because $\widetilde{P}_{\{k+1\}}'$ is disallowed in $S_{k+1}^{(\widehat{m}_{k+1}')}$:
\[
\widetilde{P}_{\{k\}}\leftarrow g \rightarrow \widetilde{P}_{\{k\}}'\leftarrow g' \rightarrow \widetilde{P}_{\{k\}}'' \quad\Longrightarrow\quad \widetilde{P}_{\{k+1\}}\leftarrow g + g' \rightarrow \widetilde{P}_{\{k+1\}}''
\]
\item \underline{$m_{k+1}=m_{k+1}'=m_{k+1}'' = \widehat{m}_{k+1}''$:} Yields 1 case where only the first gap is preserved, because $\widetilde{P}_{\{k+1\}}''$ is disallowed in $S_{k+1}^{(\widehat{m}_{k+1}'')}$:
\[
\widetilde{P}_{\{k\}}\leftarrow g \rightarrow \widetilde{P}_{\{k\}}'\leftarrow g' \rightarrow \widetilde{P}_{\{k\}}'' \quad\Longrightarrow\quad \widetilde{P}_{\{k+1\}}\leftarrow g \rightarrow \widetilde{P}_{\{k+1\}}'\leftarrow g' + g^{?} \rightarrow
\]
\end{enumerate}
One can see from this that if $\widehat{m}_{k+1}$, $\widehat{m}_{k+1}'$, $\widehat{m}_{k+1}''$ are not distinct, then case 1 would have $P_{k+1}-2$ cases if any two are equal and the third is distinct and would have $P_{k+1}-1$ cases if all three were equal.
Another important point from this example is why it is necessary to track prospective prime numbers rather than actual prime numbers. Consider the case in the above example where $\widetilde{P}_{\{j\}}=P_{\{j\}}$ and $\widetilde{P}_{\{j\}}''=P_{\{j\}}''$ are actual consecutive prime numbers. It is possible then that either one or both of $\widetilde{P}_{\{j+1\}}$ and $\widetilde{P}_{\{j+1\}}''$ may not be prime. If they are both prime it is possible that $\widetilde{P}_{\{j+1\}}'$ may also be prime. In these cases the gaps are not propagated unchanged and $\widetilde{P}_{\{j+1\}}$ and $\widetilde{P}_{\{j+1\}}''$ are not consecutive prime numbers. However, in the case of consecutive prospective prime numbers there are always predictable cases where the gaps are preserved and the prospective prime numbers remain consecutive. This is independent of whether the prospective prime numbers are prime or not. Consider, for example, the consecutive prime numbers in $S_{4}$, $113$ and $127$. While they are consecutive primes, they are not consecutive prospective primes because $121=11^2$ between them is a prospective prime in $S_{4}$, i.e., prime to $P\le 7$. Table~\ref{T: example1} shows how these three numbers propagate into $S_{5}$ along with their associated gaps.
\begin{table}[h]
\caption{
The table shows the propagation of consecutive prime numbers 113 and 127 from $S_{4}$ into $S_{5}$. The gap between prime numbers is only preserved in $S_{5}$ in cases where the intermediate prospective prime, 121, does not generate an actual prime and where the corresponding prospective primes generated by 113 and 127 are actual primes.
}\label{T: example1}
\begin{tabular}{|p{9mm}||p{6mm}|p{6mm}|p{6mm}|p{6mm}|p{6mm}|p{8mm}|p{8mm}|p{8mm}|p{8mm}|p{8mm}|p{8mm}|}
\multicolumn{12}{c}{$\widetilde{\mathbb{P}}_{5}^{(m)}=\widetilde{\mathbb{P}}_{4} +m\cdot 210 \qquad \textbf{bold} = \neg P \qquad \widehat{m}=\neg \widetilde{P}$}\\
\hline
m= & \; 0 & \; 1 & \; 2 & \; 3 &\; 4 & \: 5 & \: 6 & \: 7 & \: 8 & \: 9 & \; 10 \\
\hline
113 &113&\textbf{323} & 533 & 743 & 953 & 1163 & 1373 & 1583 & $\; \widehat{m}$ & 2003& 2213 \\
\hline
$\:\; g$ & & \;8 & \;8 & \;8 & \; 8 & \;8 & \;8 & \; 8 & & \; 8 & \: 8 \\
\hline
121 &$\; \widehat{m}$ & 331 & 541 & 751 & \textbf{961} & 1171 & 1381 & \textbf{1591}& 1801 & 2011 & 2221 \\
\hline
$\:\; g'$ & & \; 6 & \; 6 & \; 6 &\; 6 & & \; 6&\; 6 & \; 6 & \; 6 & \; 6 \\
\hline
127 &127 & 337 & 547 & 757 & 967 & $\; \widehat{m}$& \textbf{1387} & 1597& \textbf{1807} & 2017 & \textbf{2227} \\
\hline
$g+g'$ & 14 & & & & 14 & & & 14 & & & \\
\hline
\end{tabular}
\end{table}
The lesson here is that determining whether a prospective prime is an actual prime in a given subset is not as straightforward as predicting whether a prospective prime is present or disallowed in that subset as determined by $\widehat{m}$.
\subsection{Propagation of prospective prime pairs with gap $g$}
\begin{theorem}\label{T: noprotpp} Given set $S_{l}=\left\{N: 5 \le N \le 4+P_{l}\#\right\}$ containing a pair of consecutive prospective prime numbers, $(\widetilde{P}_{\{l\}},\widetilde{P}_{\{l\}}')$ with gap $\widetilde{P}_{\{l\}}'-\widetilde{P}_{\{l\}}=g$ and given any prime number, $P_{k} > P_{l}$, let $\mathring{n}_{k}^{g}$ be the number of prospective prime pairs $\left(\widetilde{P}_{\{k\}}, \widetilde{P}_{\{k\}}'\right)$ with gap $g$ in $S_{k}=\left\{N: 5 \le N \le 4+P_{k}\#\right\}$ that are derived from that prospective prime pair with gap $g$ in $S_{l}$, then
\[
\mathring{n}_{k}^{g} = \prod_{i=l+1}^{k}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k}\frac{(P_{i}-1)}{(P_{i}-2)}
\]
\end{theorem}
\begin{proof}
Given a consecutive prospective prime pair with gap $g$ in $S_{j}$, $\left(\widetilde{P}_{\{j\}},\widetilde{P}_{\{j\}}'\right)$, we can define prospective prime pairs with gap $g$ in $S_{j+1}$ by:
\begin{align}\label{E: defproprima-1}
\widetilde{P}_{\{j+1\}}=\widetilde{P}_{\left\{j\right\}}+m_{j+1}P_{j}\#\\
\widetilde{P}_{\{j+1\}}'=\widetilde{P}_{\left\{j\right\}}'+m_{j+1}P_{j}\#\notag
\end{align}
with the supplementary conditions: $0\le m_{j+1} \le P_{j+1}-1$, $m_{j+1}\ne \widehat{m}_{j+1}$ and $m_{j+1}\ne \widehat{m}_{j+1}'$ where:
\begin{align}\label{E: suppconda}\notag
\widehat{m}_{j+1}&=\frac{\alpha P_{j+1}-\widetilde{P}_{\left\{j\right\}}\bmod{P_{j+1}}}{\left(P_{j}\#\right)\bmod{P_{j+1}}}
\\
\\ \notag
\widehat{m}_{j+1}'&=\frac{\alpha' P_{j+1}-\widetilde{P}_{\left\{j\right\}}'\bmod{P_{j+1}}}{\left(P_{j}\#\right)\bmod{P_{j+1}}}
\end{align}
Given $m_{j+1}\le P_{j+1}-1$ one can see that both $\widetilde{P}_{\{j+1\}}$ and $\widetilde{P}_{\{j+1\}}'$ are prime to all $P\le P_{j}$. Then the other supplementary condition guarantees that $\widetilde{P}_{\{j+1\}}$ and $\widetilde{P}_{\{j+1\}}'$ are both prime to $P_{j+1}$ and therefore they are a prospective prime pair with gap $g$ in $S_{j+1}$.
Given $\widetilde{P}_{\{j\}}'=\widetilde{P}_{\{j\}}+g$, (\ref{E: suppconda}) gives:
\begin{align}\label{E: disalloweddiff}\notag
\widehat{m}_{j+1}'&=\frac{\alpha' P_{j+1}-(\widetilde{P}_{\left\{j\right\}}+g)\bmod{P_{j+1}}}{\left(P_{j}\#\right)\bmod{P_{j+1}}} \\
&=\widehat{m}_{j+1}
+ \frac{\Delta\alpha \cdot P_{j+1}-g\bmod{P_{j+1}}}{\left(P_{j}\#\right)\bmod{P_{j+1}}}
\end{align}
where $\Delta \alpha$ is modified from $\alpha'-\alpha$ to account for separating out $g$ in the mod function, and is chosen as the least integer to make the second term an integer.
Consider the case where $g\bmod{P_{j+1}}=0$, then:
\[
\widetilde{P}_{\{j\}}'\bmod{P_{j+1}}=(
\widetilde{P}_{\{j\}}+g)\bmod{P_{j+1}}=\widetilde{P}_{\{j\}}\bmod{P_{j+1}}
\]
In that case, there is only one disallowed subset in $S_{j+1}$, so $(\widetilde{P}_{\{j\}},\widetilde{P}_{\{j\}}')$ generates $P_{j+1}-1$ prospective prime pairs with gap $g$ in $S_{j+1}$. If $g\bmod{P_{j+1}}\ne 0$ then
$m_{j+1}$ has $P_{j+1}-2$ allowed values and the prime pair $(\widetilde{P}_{\{j\}},\widetilde{P}_{\{j\}}')$ generates $P_{j+1}-2$ distinct prospective prime pairs with gap $g$ in $S_{j+1}$.
By the same procedure, those prospective prime pairs in $S_{j+1}$ each generate prospective prime pairs with gap $g$ in $S_{j+2}$:
\begin{align}\label{E: defproprima-2}
\widetilde{P}_{\{j+2\}}=\widetilde{P}_{\{j+1\}}+m_{j+2}P_{j+1}\#\\
\widetilde{P}_{\{j+2\}}'=\widetilde{P}_{\{j+1\}}'+m_{j+2}P_{j+1}\#\notag
\end{align}
with the supplementary conditions: $0\le m_{j+2} \le P_{j+2}-1$, $m_{j+2}\ne \widehat{m}_{j+2}$ and $m_{j+2}\ne \widehat{m}_{j+2}'$ where:
\begin{align}\label{E: suppconda-2}\notag
\widehat{m}_{j+2}&=\frac{\alpha_{j+2} P_{j+2}-\widetilde{P}_{\{j+1\}}\bmod{P_{j+2}}}{\left(P_{j+1}\#\right)\bmod{P_{j+2}}}
\\
\\ \notag
\widehat{m}_{j+2}'&=\frac{\alpha_{j+2}' P_{j+2}-\widetilde{P}_{\{j+1\}}'\bmod{P_{j+2}}}{\left(P_{j+1}\#\right)\bmod{P_{j+2}}}
\end{align}
Again, $\widehat{m}_{j+2}$ and $\widehat{m}_{j+2}'$ are distinct unless $g\bmod{P_{j+2}}=0$ in which case the corresponding prospective prime pair in $S_{j+1}$ generates $P_{j+2}-1$ instead of $P_{j+2}-2$ prospective primes with gap $g$ in $S_{j+2}$.
Furthermore we know from Lemma~\ref{L: uniqueness} that the prospective primes generated in this process are distinct so that the prime pairs are also distinct pairs.
Then following the same process, successively generating prospective prime pairs of gap $g$, in larger sets, e.g. going from $S_{j}$ to $S_{j+1}$, each prospective prime pair with gap $g$ in $S_{j}$ generates $P_{j+1}-1$ distinct prospective prime pairs of gap $g$ in $S_{j+1}$ if $P_{j+1}$ is a factor in $g$ and otherwise generates $P_{j+1}-2$ distinct prospective prime pairs of gap $g$ in $S_{j+1}$.
Therefore in going from $S_{l}$ to $S_{k}$ the number of prospective prime pairs with gap $g$ in $S_{k}$ that are generated from a prospective prime pair with gap $g$ in $S_{l}$ is given by $ \prod_{i=l+1}^{k}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k}\frac{(P_{i}-1)}{(P_{i}-2)}$. Therefore we have:
\[
\mathring{n}_{k}^{g} =\prod_{i=l+1}^{k}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k}\frac{(P_{i}-1)}{(P_{i}-2)}
\]
\end{proof}
Assuming there exists set $S_{l}$ that contains at least one prospective prime pair with gap $g$, if that set contains $n_{l}^{g}$ such prospective prime pairs, then the actual number of prospective prime pairs with gap $g$ in $S_{k}$, $k>l$, derived from those $n_{l}^{g}$ prospective prime pairs is:
\begin{equation}\label{E: totalprimepairs}
n_{k}^{g}\ge n_{l}^{g}\cdot \mathring{n}_{k}^{g}
\end{equation}
The equal sign holds if $g=2$, because prospective twin primes can all be generated from the single twin prime $(5,7)\in S_{2}$ using (\ref{E: nextproprime}) and (\ref{E: defmhat}), giving: \cite{JS}
\[
n_{k}^{2}=\prod_{i=3}^{k}(P_{i}-2)
\]
The formulas in Theorem~\ref{T: noprotpp} and (\ref{E: totalprimepairs}) will generally represent a minimum when considering the total prospective prime pairs with gap $g>2$ in a set. This occurs because new larger gaps are always generated in going to a larger set because of the supplemental condition (\ref{E: defmhat}).
\subsection{Distribution of Prospective Prime pairs with gap $g$}\label{S: distribution}
We define $(\widetilde{PgP})_{j}=\left( \widetilde{P}_{\{j\}}, \widetilde{P}_{\{j\}}' \right)$ as a generic prospective prime pair with gap $g$ in $S_{j}$.
In the following Lemmas we assume there exists a set $S_{l}$ with at least one prospective prime pair with gap $g$. In the Lemmas, the indices $j$ and $k$ are assumed to have values $>l+2$.
\begin{lemma}\label{L: distrofSj+1}
The set of $(\widetilde{PgP})_{j+1}\in S_{j+1}$ generated from a single $(\widetilde{PgP})_{j}\in S_{j}$ has each $(\widetilde{PgP})_{j+1}$ distributed to a distinct subset of $S_{j+1}$. Furthermore, if $g\bmod{P_{j+1}}=0$ they are distributed one each to all but one subset of $S_{j+1}$ and if $g\bmod{P_{j+1}}\ne 0$ they are distributed one each to all but two subsets of $S_{j+1}$.
\end{lemma}
\begin{proof}
Let $(\widetilde{PgP})_{j+1}=\left( \widetilde{P}_{\{j+1\}}, \widetilde{P}_{\{j+1\}}' \right)$ be a prospective prime pair with gap $g$ in $S_{j+1}$ generated from $(\widetilde{PgP})_{j}$, where:
\begin{equation}\label{E: PgP 1}
(\widetilde{PgP})_{j+1}=(\widetilde{PgP})_{j}+m_{j+1}P_{j}\#
\end{equation}
This actually represents separate equations relating $\widetilde{P}_{\{j+1\}}$ to $\widetilde{P}_{\{j\}}$ and $\widetilde{P}_{\{j+1\}}'$ to $\widetilde{P}_{\{j\}}'$ both using the same value of $m_{j+1}$, where:
\[
0 \le m_{j+1} \le P_{j+1}-1
\]
and where additionally:
\begin{align}\label{E: PgPms}\notag
m_{j+1}\ne& \widehat{m}_{j+1} =\frac{\alpha_{j+1} P_{j+1}-\widetilde{P}_{\{j\}}\bmod{P_{j+1}}}{\left(P_{j}\#\right)\bmod{P_{j+1}}}\\
\textrm{and}&\\ \notag
m_{j+1}\ne& \widehat{m}_{j+1}'= \frac{\alpha_{j+1}' P_{j+1}-\widetilde{P}_{\{j\}}'\bmod{P_{j+1}}}{\left(P_{j}\#\right)\bmod{P_{j+1}}}
\end{align}
where $\alpha_{j+1}$ and $\alpha_{j+1}'$ represent the lowest integer values yielding integer solutions for $\widehat{m}_{j+1}$ and $\widehat{m}_{j+1}'$.
Given subsets of $S_{j+1}$:
\begin{equation}\label{E: subsetdef}
S_{j+1}^{(m)}=\left\{N: 5+mP_{j}\#\le N\le 4+(m+1)P_{j}\#\right\}
\end{equation}
one can see that:
\begin{equation}\label{E: PgP 2}
(\widetilde{PgP})_{j+1}=(\widetilde{PgP})_{j}+m_{j+1}P_{j}\#\in S_{j+1}^{(m_{j+1})}
\end{equation}
where $0\le m_{j+1}\le P_{j+1}-1$.
Therefore a fixed $(\widetilde{PgP})_{j}\in S_{j}$ generates one prospective prime pair with gap $g$ into each allowed subset of $S_{j+1}$. The disallowed subsets of $S_{j+1}$ are given by (\ref{E: PgPms}) and are $S_{j+1}^{(\widehat{m}_{j+1})}$ and $S_{j+1}^{(\widehat{m}_{j+1}')}$. These will be the same single disallowed subset if $g\bmod{P_{j+1}}=0$, because then
\[\widetilde{P}_{\{j\}}'\bmod{P_{j+1}}=\left(\widetilde{P}_{\{j\}}+g\right)\bmod{P_{j+1}}=\widetilde{P}_{\{j\}}\bmod{P_{j+1}}.
\]
Therefore, each $(\widetilde{PgP})_{j}\in S_{j}$ generates one corresponding $(\widetilde{PgP})_{j+1}$ into all but one or two of the $P_{j+1}$ subsets of $S_{j+1}$ respectively, depending on whether $g\bmod{P_{j+1}}=0$ or not.
\end{proof}
\begin{lemma}\label{L: distroinSj+2}
Given the set of $(\widetilde{PgP})_{j+2}\in S_{j+2}$ generated by a single $(\widetilde{PgP})_{j}\in S_{j}$, then the disallowed subsets $S_{j+2}^{(\widehat{m})}$ corresponding to the two components of each $(\widetilde{PgP})_{j+2}$ are separately distinct.
\end{lemma}
\begin{proof}
Consider the set of $(\widetilde{PgP})_{j+1}\in S_{j+1}$ generated from the same $(\widetilde{PgP})_{j}\in S_{j}$, which we represent as: $\left\{(\widetilde{PgP})_{j+1}\right\}_{(\widetilde{PgP})_{j}}$. The $(\widetilde{PgP})_{j+1} \in \left\{(\widetilde{PgP})_{j+1}\right\}_{(\widetilde{PgP})_{j}}$ are distributed in $S_{j+1}$ as given by Lemma~\ref{L: distrofSj+1}, one each to all but one or two subsets of $S_{j+1}$.
Now consider the set of $(\widetilde{PgP})_{j+2}$ generated by the set of $\left\{(\widetilde{PgP})_{j+1}\right\}_{(\widetilde{PgP})_{j}}$. We represent this set as:
\begin{equation}\label{E: PGPinJ+2}
\left\{(\widetilde{PgP})_{j+2}\right\}_{(\widetilde{PgP})_{j}}= \left\{(\widetilde{PgP})_{j+1}\right\}_{(\widetilde{PgP})_{j}}+ m_{j+2}P_{j+1}\#
\end{equation}
where we consider that the second term on the right is added to both components of each member of the set represented as the first term on the right. We have supplementary conditions:
\[
0\le m_{j+2} \le P_{j+2}-1 \quad \textrm{and}\quad m_{j+2}\ne \widehat{m}_{j+2}, \widehat{m}_{j+2}'
\]
where, given $(\widetilde{PgP})_{j+1}=( \widetilde{P}_{\{j+1\}}, \widetilde{P}_{\{j+1\}}')$:
\begin{align}\label{E: suppconda-2b}\notag
\widehat{m}_{j+2}&=\frac{\alpha_{j+2} P_{j+2}-\widetilde{P}_{\{j+1\}}\bmod{P_{j+2}}}{\left(P_{j+1}\#\right)\bmod{P_{j+2}}}
\\
\\ \notag
\widehat{m}_{j+2}'&=\frac{\alpha_{j+2}' P_{j+2}-\widetilde{P}_{\{j+1\}}'\bmod{P_{j+2}}}{\left(P_{j+1}\#\right)\bmod{P_{j+2}}}
\end{align}
These represent two distinct disallowed subsets in $S_{j+2}$ unless $g\bmod{P_{j+2}}=0$ in which case there is only one disallowed subset.
By definition, each $\widetilde{P}_{j+1}\in \left\{(\widetilde{PgP})_{j+1}\right\}_{(\widetilde{PgP})_{j}}$ is generated using the same $(\widetilde{PgP})_{j}$. Therefore, from Equations~(\ref{E: suppconda-2b}) we have:
\begin{align}\label{E: suppconda-2c}\notag
\widehat{m}_{j+2}&=\frac{\beta_{j+2} P_{j+2}-\widetilde{P}_{\{j\}}-m_{j+1}\left(P_{j}\#\right)\bmod{P_{j+2}}}{\left(P_{j+1}\#\right)\bmod{P_{j+2}}}
\\
\\ \notag
\widehat{m}_{j+2}'&=\frac{\beta_{j+2}' P_{j+2}-\widetilde{P}_{\{j\}}-g\bmod{P_{j+2}}-m_{j+1}\left(P_{j}\#\right)\bmod{P_{j+2}}}{\left(P_{j+1}\#\right)\bmod{P_{j+2}}}
\end{align}
Where we use $\beta$ instead of $\alpha$ to represent possible changes to the integer values given the breakout of the mod arguments. However they still are the lowest integer values making $\widehat{m}_{j+2}$ and $\widehat{m}_{j+2}'$ integers.
One can see that for a given $\widetilde{P}_{\{j\}}$ and fixed $P_{j+2}$ the only variable in each of the equations in (\ref{E: suppconda-2c}) is $m_{j+1}$. According to Lemma~\ref{L: distrofSj+1} each $(\widetilde{PgP})_{j+1}\in \left\{(\widetilde{PgP})_{j+1}\right\}_{(\widetilde{PgP})_{j}}$ has a unique corresponding value of $m_{j+1}$, and therefore the values of $\widehat{m}_{j+2}$ and $\widehat{m}_{j+2}'$ are separately distinct corresponding to the values of $m_{j+1}$.
Therefore the disallowed subsets for each component of
\[(\widetilde{PgP})_{j+2} \in \left\{(\widetilde{PgP})_{j+2}\right\}_{(\widetilde{PgP})_{j}}
\]
namely $S_{j+2}^{(\widehat{m}_{j+2})}$ and $S_{j+2}^{(\widehat{m}_{j+2}')}$ are separately distinct.
\end{proof}
\begin{lemma}\label{L: delamhat}
The separation of disallowed subsets corresponding to the two components of each $(\widetilde{PgP})_{k}\in S_{k}$ is a constant in $S_{k}$.
\end{lemma}
\begin{proof}
Using (\ref{E: suppconda-2b}) with $k=j+2$ and $\widetilde{P}_{\{k-1\}}'=\widetilde{P}_{\{k-1\}}+g$ gives:
\begin{equation}\label{E: fixeddelta}
\Delta \widehat{m}_{k}=\frac{\Delta\alpha P_{k}-g\bmod{P_{k}}}{\left(P_{k-1}\#\right)\bmod{P_{k}}}
\end{equation}
\end{proof}
All quantities on the right-hand side of (\ref{E: fixeddelta}) are fixed given $S_{k}$.
\begin{lemma}\label{L: distrofPgPinSj_2}
Given the set $\left\{(\widetilde{PgP})_{j+2}\right\}_{(\widetilde{PgP})_{j}}$ of $(\widetilde{PgP})_{j+2}\in S_{j+2}$ generated by a single $(\widetilde{PgP})_{j}\in S_{j}$, each subset $S_{j+2}^{(m)}$ contains a minimum of $P_{j+1}-4$ of the $(\widetilde{PgP})_{j+2}\in \left\{(\widetilde{PgP})_{j+2}\right\}_{(\widetilde{PgP})_{j}}$.
\end{lemma}
\begin{proof}
Restating (\ref{E: PGPinJ+2}):
\[
\left\{(\widetilde{PgP})_{j+2}\right\}_{(\widetilde{PgP})_{j}}= \left\{(\widetilde{PgP})_{j+1}\right\}_{(\widetilde{PgP})_{j}}+ m_{j+2}P_{j+1}\#
\]
Lemma~\ref{L: distrofSj+1} gives that the
$(\widetilde{PgP})_{j+1}\in
\left\{(\widetilde{PgP})_{j+1}\right\}_{(\widetilde{PgP})_{j}}
$
are distributed one to a subset across all but one or two subsets of $S_{j+1}$. That means there are at least $P_{j+1}-2$ distinct $(\widetilde{PgP})_{j+1}\in \left\{(\widetilde{PgP})_{j+1}\right\}_{(\widetilde{PgP})_{j}}$.
Applying Lemma~\ref{L: distrofSj+1} individually to each $(\widetilde{PgP})_{j+1}$ says that the corresponding $(\widetilde{PgP})_{j+2}$ are distributed one per subset across all but one or two subsets of $S_{j+2}$. This is true for each of the $P_{j+1}-2$ instances of $(\widetilde{PgP})_{j+1}$.
Lemma~\ref{L: distroinSj+2} says that the disallowed subsets of $S_{j+2}$ are separately distinct for the lesser and greater components of the resulting $(\widetilde{PgP})_{j+2}$. Therefore none of the $(\widetilde{PgP})_{j+2}$ have the same disallowed subset corresponding to their lesser components and the same for their greater components.
It is possible however for the disallowed subsets of $S_{j+2}$ to be the same for the opposite components of two $(\widetilde{PgP})_{j+2} \in \left\{(\widetilde{PgP})_{j+2}\right\}_{(\widetilde{PgP})_{j}}$. This can occur when:
\[
(\widetilde{PgP})_{j+1}=(\widetilde{PgP})_{j+1}' \pm (nP_{j+2} + g)
\]
This can only occur if $g\bmod{P_{j+2}}\ne 0$; i.e., where the corresponding $(\widetilde{PgP})_{j+1}$ has two disallowed subsets when generating prospective prime pairs in $S_{j+2}$.
This means that a subset of $S_{j+2}$ can have at most two exclusions of $(\widetilde{PgP})_{j+2}$ and therefore there are at least $P_{j+1}-4$ of the $(\widetilde{PgP})_{j+2}$
in each subset of $S_{j+2}$.
\end{proof}
With these results we have the following theorem.
\begin{theorem}\label{T: distroPgP}
Given the set $S_{l}=\left\{N: 5 \le N \le 4+P_{l}\# \right\}$ containing at least one prospective prime pair with gap $g$. Then for $k>l+2$, consider the set
$S_{k}=\left\{N: 5 \le N \le 4+P_{k}\#\right\}$ with its $P_{k}$ subsets:
\[S_{k}^{(m)}=\left\{N: 5+mP_{k-1}\# \le N \le 4+(m+1)P_{k-1}\#\right\}
\]
$0 \le m \le P_{k}-1$.
Then if $\mathring{n}_{S_{k}^{(m)}}^{g}$ is the number of prospective prime pairs with gap $g$ in each subset $S_{k}^{(m)}\in S_{k}$ generated from a prospective prime pair with gap $g$ in $S_{l}$, then:
\[
\mathring{n}_{S_{k}^{(m)}}^{g}\ge \mathring{n}_{k-2}^{g}(P_{k-1}-4) = (P_{k-1}-4)\prod_{i=l+1}^{k-2}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)}
\]
\end{theorem}
\begin{proof}
Given Lemma~\ref{L: distrofPgPinSj_2} we know that for each $(\widetilde{PgP})_{k-2} \in S_{k-2}$ we have a minimum of $P_{k-1}-4$ prospective prime pairs with gap $g$ in each of the subsets $S_{k}^{(m)}$. Then using Theorem~\ref{T: noprotpp} we know there are $\mathring{n}_{k-2}^{g}=\prod_{i=l+1}^{k-2}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)}$ prospective prime pairs in $S_{k-2}$.
Putting these two results together we get:
\begin{align}\label{E: primespairsinsub}\notag
\mathring{n}_{S_{k}^{(m)}}^{g} &\ge \mathring{n}_{k-2}^{g} \cdot (P_{k-1}-4) \\
&= (P_{k-1}-4) \prod_{i=l+1}^{k-2}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)}
\end{align}
\end{proof}
\begin{corollary}
For sufficiently large $P_{k}$:
\[
\mathring{n}_{S_{k}^{(m)}}^{g}\ge\mathring{n}_{k-1}^{g}-2\mathring{n}_{k-2}^{g}
\]
\end{corollary}
\begin{proof}
We can also write the inequality~(\ref{E: primespairsinsub}) as:
\begin{align}\label{E: thmversion} \notag
\mathring{n}_{S_{k}^{(m)}}^{g} &\ge (P_{k-1}-4) \prod_{i=l+1}^{k-2}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)} \\ \notag
&=[(P_{k-1}-2)-2] \prod_{i=l+1}^{k-2}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)} \\ \notag
&=\left[\prod_{i=l+1}^{k-1}(P_{i}-2)-2\prod_{i=l+1}^{k-2}(P_{i}-2)\right]\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)}\\
&=
\begin{cases}
\mathring{n}_{k-1}^{g}-2\mathring{n}_{k-2}^{g}&\textrm{if}\quad P_{k-1} \nmid g \\
\frac{(P_{k-1}-2)}{(P_{k-1}-1)}\mathring{n}_{k-1}^{g}-2\mathring{n}_{k-2}^{g} &\textrm{if}\quad P_{k-1}|g
\end{cases}
\end{align}
Note that by choosing $P_{k}$ sufficiently large, e.g., $P_{k}>P_{\boldsymbol{\pi}\left(P_{l}\#\right)}>g$, only the first case in (\ref{E: thmversion}) applies.
\end{proof}
\begin{corollary}\label{C: asymtoticlimit}
Given the minimum distribution of $(\widetilde{PgP})_{k}$ across the subsets of $S_{k}$ as in Theorem~\ref{T: distroPgP}, that minimum asymptotically approaches the average distribution of $(\widetilde{PgP})_{k}$ to subsets of $S_{k}$:
\[
\min{(\mathring{n}_{S_{k}^{(m)}}^{g})}\longrightarrow\frac{ \mathring{n}_{k}^{g}}{P_{k}}\quad \textrm{as} \quad k\longrightarrow \infty
\]
\end{corollary}
\begin{proof}
Given that $S_{k}$ has $P_{k}$ subsets, $S_{k}^{(m)}$, the stated minimum number of prospective prime pairs in each subset generated for each $(\widetilde{PgP})_{l}$ accounts for
\[
P_{k} \cdot(P_{k-1}-4)\prod_{i=l+1}^{k-2}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)}
\]
of the $\mathring{n}_{k}^{g}$ total prospective prime pairs in $S_{k}$ for each $(\widetilde{PgP})_{l}$. Therefore the fraction of the total represented by the minimum is:
\begin{align}\notag
\frac{P_{k} \cdot \min{(\mathring{n}_{S_{k}^{(m)}}^{g})}}{\mathring{n}_{k}^{g}}
=& \frac{P_{k}(P_{k-1}-4)\prod_{i=l+1}^{k-2}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)}}{\prod_{i=l+1}^{k}(P_{i}-2)\cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)}}\\ \notag
=&
\frac{P_{k}(P_{k-1}-4)}{(P_{k}-2)(P_{k-1}-2)}\quad\textrm{then letting}\quad \Delta=P_{k}-P_{k-1}\\ \notag
&=\frac{1-\frac{\Delta +2}{P_{k}-2}}{1-\frac{\Delta +2}{P_{k}}}<1
\end{align}
Therefore the ratio, which is less than $1$, approaches $1$ as $k$ gets large, proving the corollary.
\end{proof}
Corollary~\ref{C: asymtoticlimit} means that when we consider the distribution of prospective prime pairs with gap $g$ in $S_{k}$, there is no systematic allotment of more prospective prime pairs to one or a few subsets and overall the difference in allotments averages out. Therefore we can say that prospective twin primes are fairly evenly distributed between the subsets of $S_{k}$.
Additionally, while each individual $(\widetilde{PgP})_{k-1}$ in a given subset of $S_{k-1}$ does not contribute to all subsets of $S_{k}$, collectively they do. To prove this we need to determine the contribution: $S_{k-1}^{(m)}\longrightarrow S_{k}^{(m')}$.
\begin{lemma}\label{L: equaldistro}
Given the set $S_{l}=\left\{N: 5 \le N \le 4+P_{l}\# \right\}$ containing at least one prospective prime pair with gap $g$, then for $k>l+4$,
each subset $S_{k-1}^{(m)}\subset S_{k-1}$ generates a minimum of $(P_{k-2}-6)\cdot \mathring{n}_{k-3}^{g}$ prospective prime pairs $(\widetilde{PgP})_{k}$ into each subset $S_{k}^{(m')}\subset S_{k}$.
\end{lemma}
\begin{proof}
From Lemma~\ref{L: distrofPgPinSj_2} each subset $S_{k-1}^{(m)}$ contains a minimum of $P_{k-2}-4$ of $(\widetilde{PgP})_{k-1}\in \left\{(\widetilde{PgP})_{k-1}\right\}_{(\widetilde{PgP})_{k-3}}$. These can be expressed as:
\[
(\widetilde{PgP})_{k-1}=(\widetilde{PgP})_{k-3}+m_{k-2}P_{k-3}\#+mP_{k-2}\#
\]
where they are distinguished by $P_{k-2}-4$ distinct values of $m_{k-2}$.
Then the contribution of these to $S_{k}^{(m')}$ is:
\[
(\widetilde{PgP})_{k}=(\widetilde{PgP})_{k-3}+m_{k-2}P_{k-3}\#+mP_{k-2}\#+m'P_{k-1}\#\in S_{k}^{(m')}
\]
These are again distinguished by the $P_{k-2}-4$ distinct values of $m_{k-2}$ since we consider $m$ and $m'$ as constants, corresponding to two arbitrary subsets of $S_{k-1}$ and $S_{k}$ respectively.
Then we know from Lemma \ref{L: distroinSj+2} that for each value of $m_{k-2}$, each corresponding to a single $(\widetilde{PgP})_{k-2}\in S_{k-2}^{(m_{k-2})}$, that the disallowed subsets for each component of the resulting $(\widetilde{PgP})_{k}$ are separately distinct. But as discussed in the proof of Lemma \ref{L: distrofPgPinSj_2}, the disallowed subsets for the opposite components of two $(\widetilde{PgP})_{k}$ may be the same. Therefore at most two of the $(\widetilde{PgP})_{k}$ may be disallowed in subset $S_{k}^{(m')}$, leaving a minimum of $P_{k-2}-6$ prospective prime pairs with gap $g$ in $S_{k}^{(m')}$ that are generated by such prospective prime pairs in $S_{k-1}^{(m)}$.
Therefore given the existence of $S_{l}$ prescribed by the statement of the lemma, and given Theorem~\ref{T: noprotpp}, we have:
\[
(P_{k-2}-6)\cdot \mathring{n}_{k-3}^{g}
\]
as the minimum contribution of $S_{k-1}^{(m)}$ to $S_{k}^{(m')}$.
\end{proof}
\begin{lemma}\label{L: uniformdistro}
With respect to minimum distributions of prospective prime pairs with gap $g$, the contribution of $S_{k-1}^{(m)}$ to $S_{k}^{(m')}$ in the process of generating prospective prime pairs into $S_{k}$ from $S_{k-1}$ is asymptotically uniform across all subsets $m$ and $m'$.
\end{lemma}
\begin{proof}
Lemma~\ref{L: equaldistro} gives the minimum contributions of prospective prime pairs with gap $g$ from subset $S_{k-1}^{(m)}$ to subset $S_{k}^{(m')}$ as:
\[
(P_{k-2}-6)\cdot \mathring{n}_{k-3}^{g}
\]
Given that there are $P_{k-1}$ subsets in $S_{k-1}$ the total contribution from all subsets of $S_{k-1}$ is, at a minimum:
\[
P_{k-1}\cdot(P_{k-2}-6)\cdot \mathring{n}_{k-3}^{g}
\]
Then we know the minimum distribution of prospective prime pairs with gap $g$ from $S_{k-1}$ to each subset of $S_{k}$ is given by Theorem~\ref{T: distroPgP} as:
\[
\mathring{n}_{S_{k}^{(m)}}^{g}\ge \mathring{n}_{k-2}^{g}(P_{k-1}-4)
\]
Taking the ratio of the minimum subset to subset contribution to the minimum contribution from set to subset gives:
\begin{align}\notag
\frac{P_{k-1}\cdot(P_{k-2}-6)\cdot \mathring{n}_{k-3}^{g}}{\mathring{n}_{k-2}^{g}(P_{k-1}-4)}=&\frac{P_{k-1}\cdot(P_{k-2}-6)}{(P_{k-2}-2)(P_{k-1}-4)}\\ \notag
=&\frac{P_{k-2}-6}{P_{k-2}-6+4\left(1-\frac{P_{k-2}}{P_{k-1}}\right)+\frac{8}{P_{k-1}}}
\end{align}
The ratio is less than $1$ and clearly tends to $1$ for large $k$ proving the Lemma.
\end{proof}
\section{Prime pairs with gap $g$}
The foregoing results now allow the following theorem that proves the existence of actual prime pairs with gap $g$ given prospective prime pairs with gap $g$.
\begin{theorem}\label{T: numtwinprimes}
Given a set $S_{r}=\left\{N: 5 \le N \le 4+P_{r}\#\right\}$ containing at least one prospective prime pair with gap $g$: $(\widetilde{PgP})_{r}$.
Pick $l\ge r$ and define $P_{k}=P_{\boldsymbol{\pi}\left(\sqrt{P_{l}\#}\right)}$, then let $\mathring{n}_{P_{k}\rightarrow P_{k+1}^2}^{g}$ be the number of prime pairs with gap $g$ between $P_{k}$ and $P_{k+1}^2$ that are generated from $(\widetilde{PgP})_{r}$, then:
\[
\mathring{n}_{P_{k}\rightarrow P_{k+1}^{2}}^{g}\ge
\mathring{n}_{l}^{g}\cdot \prod_{j=l}^{k-1}\frac{(P_{j}-4) }{(P_{j}-2)} \cdot \prod_{\substack{i=l\\ P_{i}|g }}^{k-1} \frac{(P_{i}-2)}{(P_{i}-1)}
\]
where, $\mathring{n}_{l}^{g}$ as in Theorem~\ref{T: noprotpp} is:
\[
\mathring{n}_{l}^{g}=\prod_{i=r+1}^{l}\left(P_{i}-2\right)\cdot\prod_{\substack{i=r+1 \\ P_{i}|g }}^{l}\frac{P_{i}-1}{P_{i}-2}
\]
is the number of prospective prime pairs with gap $g$ in $S_{l}$ that are derived from each such prospective prime pair in $S_{r}$.
\end{theorem}
\begin{proof}
Given $l$ and $P_{k}=P_{\boldsymbol{\pi}\left(\sqrt{P_{l}\#}\right)}$ consider the set of sequential natural numbers $S_{k}=\left\{5 \longrightarrow 4+P_{k}\#\right\}$. We will show that $S_{k}$ always contains prospective prime pairs, $(\widetilde{PgP})_{k} \in S_{k}$ prime to all $P \le P_{k}$ where $P_{k}< (\widetilde{PgP})_{k} < P_{k+1}^2$ and consequently those $(\widetilde{PgP})_{k}=(PgP)_{k}$ are actual prime pairs with gap $g$ and the number of such prime pairs meets the stated minimum.
Note that while $P_{l}\#+1$ is the largest prospective prime number in $S_{l}=\left\{5 \longrightarrow 4+P_{l}\#\right\}$ in that it is prime to all $P \le P_{l}$, it cannot be the square of a prime number.\footnote{Any prime number $>3$ has the form $6n\pm1$ and its square is then $36n^2\pm 12n +1$. Then equating $P_{l}\#+1$ to that square gives $6n^2\pm2n=\frac{P_{l}\#}{6}$. This cannot hold because the left side is even and the right is odd.}
Therefore, with the definition of $P_{k}$ we have:
\[
P_{k}^2 < P_{l}\# \longrightarrow P_{k}^2 \in S_{l}
\]
and given
$P_{k+1}^2=\left(P_{\boldsymbol{\pi}\left(\sqrt{P_{l}\#}\right)+1}\right)^2$, we have:
\[
P_{l}\# < P_{k+1}^2 < P_{l+1}\#\longrightarrow P_{k+1}^2 \in S_{l+1}\quad \& \quad P_{k+1}^2 \notin S_{l}
\]
Note that $P_{k+1}$ is the smallest prime number whose square is greater than $4+P_{l}\#$ and $P_{k}$ is the largest prime number whose square is less than $P_{l}\#$. Therefore all prospective prime numbers and prospective prime pairs in $S_{l}$ are less than $P_{k+1}^2$. It remains to show that some $(\widetilde{PgP})_{l}$ are greater than $P_{k}$ and are prime to all $P \le P_{k}$ which means some $(\widetilde{PgP})_{l}=(\widetilde{PgP})_{k}$ and being less than $P_{k+1}^2$ are therefore actual prime pairs with gap $g$. In doing this we will show the inequality for $\mathring{n}_{P_{k}\rightarrow P_{k+1}^2}^{g}$ holds.
To prove the theorem we must show there are some $(\widetilde{PgP})_{l}=(\widetilde{PgP})_{k}$. Given that:
\[(\widetilde{PgP})_{l} \in S_{l}=S_{l+1}^{(0)}\subset S_{l+2}^{(0)}\subset \cdots \subset S_{k}^{(0)}
\]
This requires $m_{j}=0$ at each stage of: $(\widetilde{PgP})_{k}=(\widetilde{PgP})_{l}+\sum_{j=l+1}^{k}m_{j}P_{j-1}\#$.
We know, $S_{l+1}^{(0)}$ contains a minimum number of prospective prime pairs with gap $g$, represented as $\min(\mathring{n}_{S_{l+1}^{(0)}}^{g})$ and given by Theorem~\ref{T: distroPgP}, which are prime to $P\le P_{l+1}$ and since $m_{l+1}=0$, $(\widetilde{PgP})_{l+1}=(\widetilde{PgP})_{l}$.
Then given $S_{l+2}^{(0)}=S_{l+1}$ we know again from Theorem~\ref{T: distroPgP} that $S_{l+2}^{(0)}$ has a minimum number of prospective prime pairs with gap $g$ represented as $\min(\mathring{n}_{S_{l+2}^{(0)}}^{g})$ which are prime to $P\le P_{l+2}$. However all subsets of $S_{l+1}$ have contributed prospective prime pairs with gap $g$ to $S_{l+2}^{(0)}$ and we need to only consider those contributed by $S_{l+1}^{(0)}$.
Lemmas~\ref{L: equaldistro} and \ref{L: uniformdistro} showed that all subsets of $S_{l+1}$ contribute the same minimum number of prospective prime pairs to all subsets of $S_{l+2}$ and that the contributions remain uniform asymptotically for large $l$. Then the fraction of prospective prime pairs with gap $g$ in $S_{l+2}^{(0)}$ generated from $(\widetilde{PgP})_{l}=(\widetilde{PgP})_{l+1} \in S_{l+1}^{(0)}$ is therefore given by:
\[
\frac{\min\left(\mathring{n}_{S_{l+1}^{(0)}}^{g}\right)}{\mathring{n}_{l+1}^{g}}\min\left(\mathring{n}_{S_{l+2}^{(0)}}^{g}\right) = \textrm{minimum number of}\quad (\widetilde{PgP})_{l+2}=(\widetilde{PgP})_{l}
\]
Then we have $\min\left(\mathring{n}_{S_{l+3}^{(0)}}^{g}\right)$ prospective prime pairs, $(\widetilde{PgP})_{l+3}\in S_{l+3}^{(0)}$ derived from all $(\widetilde{PgP})_{l+2}\in S_{l+2}$. The fraction of those derived from the set of $(\widetilde{PgP})_{l+2}=(\widetilde{PgP})_{l}\in S_{l+2}^{(0)}$ is:
\begin{multline}\notag
\frac{\min\left(\mathring{n}_{S_{l+1}^{(0)}}^{g}\right)}{\mathring{n}_{l+1}^{g}}\cdot\frac{\min\left(\mathring{n}_{S_{l+2}^{(0)}}^{g}\right)}{\mathring{n}_{l+2}^{g}}\cdot \min\left(\mathring{n}_{S_{l+3}^{(0)}}^{g}\right)\\
= \textrm{minimum number of}\quad (\widetilde{PgP})_{l+3}=(\widetilde{PgP})_{l}
\end{multline}
Carrying this process forward up to the number of $(\widetilde{PgP})_{k}=(\widetilde{PgP})_{l}$, where then $P_{k}<(\widetilde{PgP})_{l}\le P_{k+1}^2$, gives:
\begin{equation}\label{E: numtleqg}
\mathring{n}_{P_{k}\rightarrow P_{k+1}^{2}}^{g} \ge \min\left(\mathring{n}_{S_{k}^{(0)}}^{g}\right) \prod_{j=l+1}^{k-1}\frac{\min\left(\mathring{n}_{S_{j}^{(0)}}^{g}\right)}{\mathring{n}_{j}^{g}}
\end{equation}
Expanding this using Theorem~\ref{T: distroPgP}
and Theorem~\ref{T: noprotpp} we get:
\begin{align}\label{E: abc}\notag
\mathring{n}_{p_{k}\rightarrow P_{k+1}^{2}}^{g}\ge& (P_{k-1}-4) \prod_{i=r+1}^{k-2}(P_{i}-2)\cdot \prod_{\substack{i=r+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)}\cdot \\ \notag
&\cdot
\prod_{j=l+1}^{k-1}\frac{(P_{j-1}-4) \prod_{i=r+1}^{j-2}(P_{i}-2)\cdot \prod_{\substack{i=r+1\\ P_{i}|g }}^{j-2}\frac{(P_{i}-1)}{(P_{i}-2)}}{\prod_{i=r+1}^{j}(P_{i}-2)\cdot \prod_{\substack{i=r+1\\ P_{i}|g }}^{j}\frac{(P_{i}-1)}{(P_{i}-2)}}\\ \notag
=&
(P_{k-1}-4) \prod_{i=r+1}^{k-2}(P_{i}-2)\cdot \prod_{j=l+1}^{k-1}\frac{(P_{j-1}-4) \prod_{i=r+1}^{j-2}(P_{i}-2) }{\prod_{i=r+1}^{j}(P_{i}-2)} \cdot\\ \notag
&\cdot \prod_{\substack{i=r+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)}\cdot\prod_{\substack{j=l+1\\ P_{i}|g }}^{k-1} \frac{\prod_{\substack{i=r+1\\ P_{i}|g }}^{j-2}\frac{(P_{i}-1)}{(P_{i}-2)}}{\prod_{\substack{i=r+1\\ P_{i}|g }}^{j}\frac{(P_{i}-1)}{(P_{i}-2)}} \\ \notag
=&
(P_{k-1}-4) \prod_{i=r+1}^{l}(P_{i}-2)\cdot \prod_{i=l+1}^{k-2}(P_{i}-2)\cdot \prod_{j=l+1}^{k-1}\frac{(P_{j-1}-4) }{{(P_{j}-2)(P_{j-1}-2)} }\\ \notag
&\cdot \prod_{\substack{i=r+1\\ P_{i}|g }}^{k-2}\frac{(P_{i}-1)}{(P_{i}-2)}\cdot \prod_{\substack{j=l+1\\ P_{j}|g }}^{k-1} \frac{(P_{j}-2)}{(P_{j}-1)}\frac{(P_{j-1}-2)}{(P_{j-1}-1)} \\ \notag
=&\prod_{i=r+1}^{l}(P_{i}-2)\cdot
\prod_{\substack{i=r+1\\ P_{i}|g }}^{l}\frac{(P_{i}-1)}{(P_{i}-2)}\cdot\prod_{j=l}^{k-1}\frac{(P_{j}-4)}{(P_{j}-2)}\cdot
\prod_{\substack{i=l\\ P_{i}|g }}^{k-1} \frac{(P_{i}-2)}{(P_{i}-1)}\\
&=\mathring{n}_{l}^{g}\cdot \prod_{j=l}^{k-1}\frac{(P_{j}-4) }{(P_{j}-2)} \cdot \prod_{\substack{i=l\\ P_{i}|g }}^{k-1} \frac{(P_{i}-2)}{(P_{i}-1)}
\end{align}
This is clearly a positive function and we want to show it is a monotonically increasing function with values greater than $1$. To do this we look at the case for $l\rightarrow l+1$ and $k\rightarrow k'=\boldsymbol{\pi}(\sqrt{P_{l+1}\#})$:
\begin{align}\notag
\mathring{n}_{p_{k'}\rightarrow P_{k'+1}^{2}}^{g}&\ge\mathring{n}_{l+1}^{g}\cdot \prod_{j=l+1}^{k'-1}\frac{(P_{j}-4) }{(P_{j}-2)} \cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k'-1} \frac{(P_{i}-2)}{(P_{i}-1)}\\ \notag
&=\prod_{i=r+1}^{l+1}(P_{i}-2)\prod_{\substack{i=r+1\\ P_{i}|g }}^{l+1} \frac{(P_{i}-1)}{(P_{i}-2)}\cdot \prod_{j=l+1}^{k'-1}\frac{(P_{j}-4) }{(P_{j}-2)} \cdot \prod_{\substack{i=l+1\\ P_{i}|g }}^{k'-1} \frac{(P_{i}-2)}{(P_{i}-1)}\\ \notag
&=\mathring{n}_{l}^{g}\cdot(P_{l+1}-2)\cdot\left(\frac{P_{l+1}-2}{P_{l+1}-1}\right)_{P_{l+1}|g}\cdot \frac{(P_{l}-2)}{(P_{l}-4)}\cdot\prod_{i=l}^{k-1}\frac{(P_{i}-4)}{(P_{i}-2)}\cdot\\ \notag
&\cdot\prod_{i=k}^{k'-1}\frac{(P_{i}-4)}{(P_{i}-2)}\cdot\left(\frac{P_{l}-1}{P_{l}-2}\right)_{P_{l}|g}\cdot\prod_{\substack{i=l\\ P_{i}|g }}^{k-1} \frac{(P_{i}-2)}{(P_{i}-1)}\cdot\prod_{\substack{i=k\\ P_{i}|g }}^{k'-1} \frac{(P_{i}-2)}{(P_{i}-1)}\\ \notag
&=\mathring{n}_{p_{k}\rightarrow P_{k+1}^{2}}^{g}\cdot
(P_{l+1}-2)\cdot\left(\frac{P_{l+1}-2}{P_{l+1}-1}\right)_{P_{l+1}|g}\cdot \frac{(P_{l}-2)}{(P_{l}-4)}\cdot\\ \notag
&\cdot\prod_{i=k}^{k'-1}\frac{(P_{i}-4)}{(P_{i}-2)}\cdot\left(\frac{P_{l}-1}{P_{l}-2}\right)_{P_{l}|g}\cdot\prod_{\substack{i=k\\ P_{i}|g }}^{k'-1} \frac{(P_{i}-2)}{(P_{i}-1)}\\ \notag
&=\mathring{n}_{p_{k}\rightarrow P_{k+1}^{2}}^{g}\cdot
(P_{l+1}-2)\cdot \frac{(P_{l}-2)}{(P_{l}-4)}\cdot\prod_{i=k}^{k'-1}\frac{(P_{i}-4)}{(P_{i}-2)}\cdot\\ \notag
&\cdot\left(\frac{P_{l+1}-2}{P_{l+1}-1}\right)_{P_{l+1}|g}\cdot\left(\frac{P_{l}-1}{P_{l}-2}\right)_{P_{l}|g}\cdot\prod_{\substack{i=k\\ P_{i}|g }}^{k'-1} \frac{(P_{i}-2)}{(P_{i}-1)}\\ \notag
\end{align}
If we choose $l$ sufficiently large so that $P\ge P_{l}\rightarrow P\nmid g$, we can ignore the second line of products, giving:
\begin{align}\label{E: successiveterms}
\mathring{n}_{p_{k'}\rightarrow P_{k'+1}^{2}}^{g}
&\ge
\mathring{n}_{p_{k}\rightarrow P_{k+1}^{2}}^{g}\cdot
(P_{l+1}-2)\cdot \frac{(P_{l}-2)}{(P_{l}-4)}\cdot\prod_{i=k}^{k'-1}\frac{(P_{i}-4)}{(P_{i}-2)}
\end{align}
Then the last product factor gives:
\begin{equation}\label{E: approxcase}
\prod_{i=k}^{k'-1}\frac{(P_{i}-4)}{(P_{i}-2)}=\prod_{i=k}^{k'-1}\left(1-\frac{2}{P_{i}-2}\right)\\
\ge 1-\frac{2(k'-k)}{P_{k}}
\end{equation}
Then, given $k'=\boldsymbol{\pi}\left(\sqrt{P_{l+1}\#}\right)$, we have:
\[
k'\approx\frac{\sqrt{P_{l+1}\#}}{\ln{\sqrt{P_{l+1}\#}}} =\frac{\sqrt{P_{l+1}}\sqrt{P_{l}\#}}{\ln{\sqrt{P_{l+1}}}+\ln{\sqrt{P_{l}\#}}}
\]
Ignoring $\ln{\sqrt{P_{l+1}}}$ relative to $\ln{\sqrt{P_{l}\#}}$ and noting that $k\approx \frac{\sqrt{P_{l}\#}}{\ln{\sqrt{P_{l}\#}}}$, gives:
\[
k'\approx \sqrt{P_{l+1}}\cdot k
\]
Using this in (\ref{E: approxcase}) gives:
\begin{align}\label{E: approxfinal}
\prod_{i=k}^{k'-1}\frac{(P_{i}-4)}{(P_{i}-2)}
&\ge 1-\frac{2\sqrt{P_{l+1}}}{\ln{P_{k}}}\ge 1-\frac{2\sqrt{P_{l+1}}}{\ln{\sqrt{P_{l+1}\#}}}
\end{align}
Therefore $\prod_{i=k}^{k'-1}\frac{(P_{i}-4)}{(P_{i}-2)}$, while remaining $<1$, is a monotonically increasing function asymptotically approaching $1$. The approximation (\ref{E: approxfinal}) is conservative,\footnote{The approximation used in (\ref{E: approxfinal}) allows negative values for small $l$, but is positive for $l\ge 8$, while the term being approximated clearly always has a positive value.} and using it for the last term in (\ref{E: successiveterms}) gives for example:
\[
l= 9:\qquad \mathring{n}_{p_{k'}\rightarrow P_{k'+1}^{2}}^{g}
\ge
1.4\cdot \mathring{n}_{p_{k}\rightarrow P_{k+1}^{2}}^{g}
\]
\[
l= 10:\qquad \mathring{n}_{p_{k'}\rightarrow P_{k'+1}^{2}}^{g}
\ge
4.5 \cdot\mathring{n}_{p_{k}\rightarrow P_{k+1}^{2}}^{g}
\]
\[
l= 15:\qquad \mathring{n}_{p_{k'}\rightarrow P_{k'+1}^{2}}^{g}
\ge
18.7\cdot\mathring{n}_{p_{k}\rightarrow P_{k+1}^{2}}^{g}
\]
\end{proof}
Given Theorem~\ref{T: numtwinprimes} we can prove the following theorem:
\begin{theorem}\label{T: main}
Given a set $S_{r}=\left\{N: 5 \le N \le 4+P_{r}\#\right\}$ containing at least one prospective prime pair with gap $g$, then given any number $M$ there is always a prime pair with gap $g$ greater than $M$.
\end{theorem}
\begin{proof}
Pick integer $l>r$ so that $P_{k}=P_{\boldsymbol{\pi}\left(\sqrt{P_{l}\#}\right)}>M$. Then we know from Theorem~\ref{T: numtwinprimes} that there is always a prime pair with gap $g$ greater than $P_{k}$.
\end{proof}
\section{Prime gaps for which de Polignac's conjecture holds}
Given Theorem~\ref{T: numtwinprimes} we need only show the existence of a set $S_{k}$ containing a pair of consecutive prospective prime numbers with a specific gap $g$ to prove de Polignac's conjecture holds for that gap.
\begin{lemma}\label{L: primegaps}
Given any prime number $P_{k}>3$, then $P_{k}$ and $P_{k+1}$ are consecutive prospective prime numbers in $S_{k-1}$.
\end{lemma}
\begin{proof}
Consider the set $S_{k-1}=\left\{N: 5 \le N \le 4+P_{k-1}\#\right\}$ and its subset of prospective prime numbers, $\widetilde{\mathbb{P}}_{k-1}$.
Then we know that all prospective prime numbers in $\widetilde{\mathbb{P}}_{k-1}$ that are less than $P_{k}^2$ are actual prime numbers.
For $P_{k}>3$, we have:
\[
P_{k+1}<P_{k-1}\#+4\quad \textrm{and consequently}\quad P_{k},P_{k+1} \in \widetilde{\mathbb{P}}_{k-1}
\]
and because $P_{k},P_{k+1}<P_{k}^2$, any prospective prime number between them must also be an actual prime number. But $P_{k}$ and $P_{k+1}$ are consecutive prime numbers, so there can be no prospective prime numbers between them and they are consecutive prospective prime numbers as well as consecutive actual prime numbers in $S_{k-1}$.
\end{proof}
The following theorem follows directly from Theorem~\ref{T: main} together with Lemma~\ref{L: primegaps}
\begin{theorem}\label{T: depolignacPkpm2}
For all $P_{k}>3$ there exist infinitely many consecutive prime pairs with gaps $g=P_{k+1}-P_{k}$.
\end{theorem}
Now consider the gaps between subsets, where we use the following definitions:
\begin{definition}
\[
\widetilde{P}_{\{k\}}^{(m)<}:=\min \left\{ \widetilde{P}\in S_{k}^{(m)} \right\}
\]
\[
\widetilde{P}_{\{k\}}^{(m)>}:=\max \left\{ \widetilde{P}\in S_{k}^{(m)} \right\}
\]
\end{definition}
Then the subset gap is defined as:
\begin{definition}
\[
g_{\Delta_{SS_{k}}}:=\widetilde{P}_{\{k\}}^{(m)<}-\widetilde{P}_{\{k\}}^{(m-1)>}
\]
\end{definition}
\begin{lemma}\label{L: subsetgaps}
Given set $S_{k}=\left\{N: 5 \le N \le 4+P_{k}\#\right\}$ and its $P_{k}$ subsets $S_{k}^{(m)}=\left\{N: 5+m\cdot P_{k-1}\# \le N \le 5+(m+1)\cdot P_{k-1}\#\right\}$ with $P_{k}-1$ associated gaps, $g_{\Delta_{SS_{k}}}$, then:
\begin{equation}\notag
g_{\Delta_{SS_{k}}}=
\begin{cases}
P_{k}-1 \quad \textrm{for $P_{k}-2$ gaps}\\
P_{k}+1 \quad \textrm{for one gap}
\end{cases}
\end{equation}
\end{lemma}
\begin{proof}
The smallest prospective prime in $S_{k-1}$ is $P_{k}$ and the largest two prospective primes in $S_{k-1}$ are $\widetilde{P}_{k-1}^{\pm}:=P_{k-1}\#\pm 1$.
For $S_{k-1}\rightarrow S_{k}$ we use (\ref{E: nextproprime}) subject to the supplementary condition (\ref{E: defmhat}) to generate prospective primes in $S_{k}$. Note that given $P_{k}\in \widetilde{\mathbb{P}}_{k-1}$, then for $\widetilde{P}_{k}=P_{k}+mP_{k-1}\#$ all values of $m$ except $m=0$ are allowed, making $P_{k+1}$ the least prospective prime in the zeroth subset of $\widetilde{\mathbb{P}}_{k}$, and making $P_{k}+mP_{k-1}\#$ the least prospective prime in all other subsets of $\widetilde{\mathbb{P}}_{k}$. Therefore we have:
\begin{equation}\label{E: firstpprime}
\widetilde{P}_{\{k\}}^{(m)<}=
\begin{cases}
P_{k+1}&\textrm{for}\quad m=0 \\
P_{k}+m P_{k-1}\#&\textrm{for}\quad 1\le m\le P_{k}-1
\end{cases}
\end{equation}
and
\begin{equation}\label{E: lastpprime}
\widetilde{P}_{\{k\}}^{(m)>}=
\begin{cases}
(m +1)P_{k-1}\# +1 &\textrm{if}\quad m \ne \widehat{m}^+\\
(m +1)P_{k-1}\# -1 &\textrm{if}\quad m= \widehat{m}^+
\end{cases}
\end{equation}
$\widehat{m}^+$ represents the disallowed subset for $\widetilde{P}_{\{k\}}=\widetilde{P}_{k-1}^{+}+mP_{k-1}\#$, which however is allowed for $\widetilde{P}_{k-1}^{-}=\widetilde{P}_{k-1}^{+}-2$, where:
\[
\widehat{m}^+=\frac{\alpha P_{k}-\widetilde{P}_{k-1}^{+}\bmod{P_{k}}}{P_{k-1}\#\bmod{P_{k}}}
\]
In regards to (\ref{E: lastpprime}) note that $\widehat{m}^+\ne P_{k}-1$ because using the maximum value is always allowed for $P_{k-1}^{\pm}$ where using it in (\ref{E: nextproprime}) gives:
\[
\widetilde{P}_{k-1}^{\pm}+(P_{k}-1)P_{k-1}\# =P_{k-1}\#\pm 1 +(P_{k}-1)P_{k-1}\# =P_{k}\#\pm 1 =P_{k}^{\pm}
\]
Therefore, $\widehat{m}^+$ associated with $\widetilde{P}_{k-1}^{+}$ can only have a value in the range $0$ to $P_{k}-2$ associated with the greatest prospective prime in each subset of $\widetilde{\mathbb{P}}_{k}$. This leaves one subset of $\widetilde{\mathbb{P}}_{k}$, namely $\widetilde{\mathbb{P}}_{k}^{(\widehat{m}^+)}$, $0\le \widehat{m}^+\le P_{k}-2$ where $\widehat{m}^+ P_{k-1}\#-1$ is the greatest prospective prime and where $mP_{k-1}\#+1$ is the greatest prospective prime in the remainder of the subsets. Therefore, there are $P_{k}-2$ cases where:
\[
g_{\Delta_{SS_{k}}}=\widetilde{P}_{\{k\}}^{(m)<}-\widetilde{P}_{\{k\}}^{(m-1)>}= (P_{k}+mP_{k-1}\#)-( mP_{k-1}\# +1)=P_{k}-1
\]
$1\le m\le P_{k}-1$, and $m-1 \ne \widehat{m}^+$;
and one case where:
\[
g_{\Delta_{SS_{k}}}=\widetilde{P}_{\{k\}}^{(\widehat{m}^+)<}-\widetilde{P}_{\{k\}}^{(\widehat{m}^+ -1)>}= (P_{k}+\widehat{m}^+P_{k-1}\#)-( \widehat{m}^+P_{k-1}\# -1)=P_{k}+1
\]
\end{proof}
\begin{corollary}\label{C: twogapvalues}
Every set $S_{k}$ has at least $P_{k}-2$ prospective prime pairs with gap $g=P_{k}-1$ and at least one prospective prime pair with gap $g=P_{k}+1$
\end{corollary}
\begin{proof}
This follows directly from Lemma~\ref{L: subsetgaps} recognizing that gaps between subsets are gaps between prospective prime pairs. The ``at least'' follows because internal to subsets there are prime pairs with gaps that may be the same or may differ from the subset gaps.
\end{proof}
The following theorem follows directly from Theorem~\ref{T: main} and Corollary~\ref{C: twogapvalues}
\begin{theorem}\label{T: depolignacPkpm1}
For all $P_{k}$ there exist infinitely many consecutive prime pairs with gaps $g=P_{k} \pm 1$.
\end{theorem}
\end{document} |
\begin{document}
\title{FedST: Secure Federated Shapelet Transformation for Time Series Classification
}
\author{Zhiyu Liang \and
Hongzhi Wang
}
\institute{Zhiyu Liang. \at
Harbin Institute of Technology, Harbin, China \\
\email{zyliang@hit.edu.cn}
\and
Hongzhi Wang. \at
Harbin Institute of Technology, Harbin, China \\
\email{wangzh@hit.edu.cn}
}
\date{Received: date / Accepted: date}
\maketitle
\begin{abstract}
This paper explores how to customize time series classification (TSC) methods with the help of external data in a privacy-preserving federated learning (FL) scenario. To the best of our knowledge, we are the first to study this essential topic. Achieving this goal requires us to seamlessly integrate the techniques from multiple fields including Data Mining, Machine Learning, and Security. In this paper, we systematically investigate existing TSC solutions for the centralized scenario and propose FedST, a novel FL-enabled TSC framework based on a shapelet transformation method. We recognize the federated shapelet search step as the kernel of FedST. Thus, we design a basic protocol for the FedST kernel that we prove to be secure and accurate. However, we identify that the basic protocol suffers from efficiency bottlenecks and the centralized acceleration techniques lose their efficacy due to the security issues. To speed up the federated protocol with security guarantee, we propose several optimizations tailored for the FL setting. Our theoretical analysis shows that the proposed methods are secure and more efficient. We conduct extensive experiments using both synthetic and real-world datasets. Empirical results show that our FedST solution is effective in terms of TSC accuracy, and the proposed optimizations can achieve three orders of magnitude of speedup.
\keywords{Time series classification \and Federated Learning \and Time series features \and Time series shapelets}
\end{abstract}
\section{Introduction}
\textit{Time series classification} (TSC) aims to predict the class label for given time series samples. It is one of the most important problems for data analytics, with applications in various scenarios~\cite{susto2018time,dheepadharshani2019multivariate,ramirez2019computational}.
Despite the impressive performance existing TSC algorithms have been achieving~\cite{bagnall16bakeoff,ismail2019deep,abanda2019review,ruiz2021great,middlehurst2021hive,tang2021omni,dempster2021minirocket,tan2022multirocket}, they usually make an ideal assumption that the user has free access to enough labeled data. However, it is quite difficult to collect and label the time series for real-world applications.
For instance, small manufacturing businesses monitor their production lines using sensors to analyze the working condition. Since the data sequences related to specific conditions, e.g., a potential failure of an instrument, are usually rare pieces located in unknown regions of the whole monitoring time series, the users have to manually identify the related pieces for labeling, which can be expensive due to the need of professional knowledge. As a consequence, it is costly for these businesses to benefit from the advanced TSC solutions, as they do not have enough labeled data to learn accurate models.
To deal with the problem, a natural idea is to enrich the local training data by gathering the labeled samples from external data sources, e.g., the other businesses that run the same instrument. However, it has been increasingly difficult for organizations to combine their data due to privacy concerns~\cite{yang2019federated,voigt2017eu}.
\subsection{Motivation}
\textit{Is it possible to develop accurate TSC methods with the help of external datasets in a privacy-preserving manner?} A recent concept, named \textit{Federated Learning} (FL)~\cite{mcmahan2017communication}, provides us an inspiration~\cite{fedtsc}. FL aims to enable multiple businesses to jointly train a model without revealing their private data to each other. An example of using FL to enrich the training time series data is shown in Figure~\ref{fig:fedtsc}. However, although many FL solutions have been studied in the literature~\cite{yang2019federated,li2020review,zhang2021survey,kairouz2021advances,mammen2021federated}, their main focus is the training of the general models, including tree models~\cite{wu13privacy,fu2021vf2boost,fang2021large,abspoel2020secure,cheng2021secureboost,li2020practical}, linear models~\cite{nikolaenko2013privacy,mohassel2017secureml,chaudhuri2008privacy,aono2016scalable,yang2019parallel}, and neural networks~\cite{mcmahan2017communication,shokri2015privacy,mcmahan2017learning,mcmahan2016federated,bonawitz2017practical,abadi2016deep,fu2022blindfl}, which have limitations for the TSC problem.
\begin{figure}
\caption{Example of enabling federated learning to enrich the training time series data. A business who owns some training time series samples (blue) collaborates with the partners who have additional training samples (green) to jointly build the TSC model. They follow some secure protocols to avoid disclosing their private training data.}
\label{fig:fedtsc}
\end{figure}
First, the tree-based and linear classifiers are shown weak in capturing the temporal patterns for classifying time series~\cite{bagnall16bakeoff}, while the accuracy of neural networks usually relies on the hyper-parameter tuning, which is still a challenging problem in the FL scenario. Second, many real-world TSC applications~\cite{ghalwash2013extraction,ye2011time,ramirez2019computational,perez2015fast} expect the classification decisions to be explainable, e.g., the users know why a working condition is determined as a fault. However, the time series sample usually has a large number of data points (e.g., 537 on average for the 117 fixed-length datasets of the UCR Archive \cite{DBLP:journals/corr/abs-1810-07758}), which are taken as independent variables by the general models. The explanation can be much more complex with so many input variables.
Facing the above limitations, we propose to specifically design FL solutions for the TSC problem. The basic idea is to extend the customized centralized TSC approaches to the federated setting. To achieve this goal, we have proposed FedTSC~\cite{fedtsc}, a brand new FL system tailored for TSC, and demonstrated its utility in VLDB. In this paper, we elaborate on the design ideas and essential techniques of a main internal of the system, i.e., the novel \underline{F}ederated \underline{S}hapelet \underline{T}ransformation (FedST) framework. We design FedST based on the centralized shapelet transformation method due to the following benefits.
First, the shapelet transformation method not only achieves competitive accuracy over existing centralized TSC approaches~\cite{bagnall16bakeoff}, but also serves as an essential component of the ensemble classifier named HIVE-COTE 2.0, which is currently state-of-the-art centralized TSC model~\cite{middlehurst2021hive}. Second, the method adopts the shapelet-based features rather than the raw time series as the input of the classification models. The features represent the similarity between the time series and a set of shapelets (i.e., the salient subsequences), which can be order-of-magnitude less in number compared to the raw data points and very intuitive to understand~\cite{hills2014classification}. Thus, building classifiers on top of the shapelet-based features can simplify the explanation. Third, the shapelets used to transform the raw time series can be extracted in an anytime manner to flexibly balance the classification accuracy and the efficiency (see Section~\ref{fedst_kernel}), which are beneficial for practical utility. Figure~\ref{fig:interpretability} is an illustration of the shapelet-based features.
\begin{figure}
\caption{Illustration of the shapelet-based features. A shapelet is a salient subsequence that represents the shape unique to certain classes. With a few shapelets of high distinguishing ability, each time series sample is transformed into a low-dimensional feature vector representing how similar (distant) the sample is to these shapelets. The classification is made and explained based on the few features rather than the abundant datapoints of the raw time series. In this example, the time series similar to shapelet 1 (orange) and distant to shapelet 2 (blue) are classified into class 1 and vice versa.}
\label{fig:interpretability}
\end{figure}
\subsection{Challenges and contributions}
Although it is practical to extend the centralized approach to the federated setting, it is unexplored how to achieve both security and efficiency during the federated shapelet search (FedSS) step, which is the kernel of the FedST framework (see Section~\ref{fedst_kernel} in detail).
The goal of the federated shapelet search is to jointly utilize the distributed labeled time series data to find the shapelets with the highest quality for distinguishing the classes. To ensure security of the federated computation, a natural idea is to extend the centralized shapelet search using secure multi-party computation (MPC)~\cite{yao1982protocols,damgaard2012multiparty,keller2020mp,keller2013architecture}. Following that, we first develop $\Pi_{FedSS-B}$, the basic protocol to achieve FedSS. Benefiting from MPC, we show that this protocol is secure and effective.
However, by our analysis, the basic protocol suffers from low efficiency due to the high overhead incurred by MPC during the \textit{shapelet distance computation} and the \textit{shapelet quality measurement} stages. Although there are acceleration techniques in the centralized scenario~\cite{mueen2011logical,keogh2006lb_keogh,ye2011time,rakthanmanon2012searching}, we prove that these methods are insecure in the FL setting and thus infeasible. Consequently, we propose acceleration methods tailored for the FL setting with security guarantee to tackle the efficiency bottlenecks of $\Pi_{FedSS-B}$.
For shapelet distance computation, we identify the Euclidean norm computation as the efficiency bottleneck, so we propose a speed-up method based on a novel secure dot-product protocol. For quality measurement, we first design an optimization to reduce the duplicated time-consuming interactive operations with secure sorting. Then, we propose to further boost the efficiency through an acceptable trade-off of classification accuracy. We show both \textit{theoretically} and \textit{empirically} the effectiveness of these techniques.
{\setlength{\parindent}{0cm}
\textbf{Contributions.} We summarize our contributions as follows.
\begin{enumerate}
\item We investigate the customized FL solution for time series classification. In particular, we propose FedST, the first shapelet-based FL method which extends the centralized shapelet transformation to the federated scenario to make use of its advantages in terms of accuracy, interpretability and flexibility.
\item We present $\Pi_{FedSS-B}$, a basic federated protocol for the FedST kernel, i.e., the federated shapelet search, which adopts MPC to achieve security. We analyze the protocol in terms of security, effectiveness, and efficiency. We identify the efficiency bottlenecks of $\Pi_{FedSS-B}$ and the invalidity of the centralized speed-up techniques due to the security issue. To boost the protocol efficiency, we propose acceleration methods tailored for the FL setting, which are theoretically secure and are more scalable and efficient than the basic protocol.
\item We conduct extensive experiments to evaluate our solutions, which has three major observations. (1) Our FedST offers superior accuracy comparable to the non-private approach. (2) Each of our proposed acceleration approaches is individually effective, and they together bring up to three orders of magnitude of speedup. (3) The proposed trade-off method provides up to 8.31x speedup over our well-optimized protocol while guaranteeing comparable accuracy. We further demonstrate the interpretability and flexibility of our framework.
\end{enumerate}
}
{\setlength{\parindent}{0cm}
\textbf{Organization.} We introduce the preliminaries in Section~\ref{sec:pre}. We propose the FedST framework and talk about the FedST kernel, i.e., federated shapelet search, in Section~\ref{solution_overview}. The basic protocol of the federated shapelet search is presented and analyzed in Section~\ref{fedss_bs}. We elaborate on the acceleration methods tailored for the two efficiency bottlenecks of the basic protocol in Section~\ref{Distance_Acceleration} and~\ref{measurement_acceleration}, respectively. We show experimental results in Section~\ref{exp}. We illustrate how to incorporate differential privacy to further enhance the security in Section~\ref{dp-protect} and conclude this paper in Section~\ref{sec:conclusion}.
}
\section{Related Work}
Our work is related to federated learning, feature-based time series classification, and privacy protection.
\subsection{Federated Learning}
Recently, there have been numerous works dedicated to the federated learning of general models, including the linear models~\cite{nikolaenko2013privacy,mohassel2017secureml,chaudhuri2008privacy,aono2016scalable,yang2019parallel}, the tree models~\cite{wu13privacy,fu2021vf2boost,fang2021large,abspoel2020secure,cheng2021secureboost,li2020practical}, and the neural networks~\cite{mcmahan2017communication,shokri2015privacy,mcmahan2017learning,mcmahan2016federated,bonawitz2017practical,abadi2016deep,zhang2020batchcrypt,fu2022blindfl}. However, none of them achieve the same goal as our solution, because these general models have limitations in tackling the TSC problem~\cite{bagnall16bakeoff} in terms of accuracy and interpretability. There are also FL solutions designed for specific tasks~\cite{mcmahan2017learning,liu2020secure,wang2021efficient,huang2021personalized,10.14778/3494124.3494125,li2021federated,tong2022hu,muhammad2020fedfast,li2021privacy,chen2022fedmsplit,smith2017federated}. These methods target scenarios that are completely different from ours. As a result, we propose to tailor an FL method for TSC. Specifically, we contribute by proposing the secure FedST framework to take advantage of the shapelet transformation in terms of accuracy, interpretability and flexibility, and by addressing the security and efficiency issues within the framework.
\subsection{Feature-based Time Series Classification}
Instead of directly building classifiers upon the raw time series, transforming the time series into low-dimensional or sparse feature vectors can not only achieve competitive classification accuracy, but also simplify the explanation.
In summary, there are three types of TSC methods based on different explainable features, i.e., the shapelet-based methods~\cite{ye2011time,mueen2011logical,hills2014classification,bostrom2017binary,grabocka2014learning,li2021shapenet,liang2021efficient} that determine the class labels based on the localized shapes, the interval-based methods~\cite{middlehurst2020canonical,cabello2020fast,middlehurst2021hive} that classify the time series based on the statistics at some specific time ranges, and the dictionary-based approaches~\cite{le2017time,large2019time,middlehurst2019scalable,middlehurst2020temporal} that utilize the pattern frequency as features. These types of methods can complement each other to contribute to state-of-the-art accuracy~\cite{lines2018time,bagnall2020tale,middlehurst2021hive}. This work focuses on developing a novel framework with a series of optimization techniques taking advantage of the shapelet-based approaches, while we plan to present our contributions~\cite{fedtsc} to enabling FL for interval-based and dictionary-based TSC in the future.
Shapelet-based TSC was first proposed by~\cite{ye2011time}. In the early works, the shapelets are discovered in company with decision tree training, where a shapelet is found at each tree node to determine the best split of the node~\cite{ye2011time,mueen2011logical,lines2012alternative}. To benefit from the other classifiers, a shapelet transformation framework~\cite{hills2014classification} is proposed that decouples the shapelet discovery from the decision tree training and produces a transformed dataset that can be used in conjunction with any classifier. Several works have been proposed to speed up the shapelet search~\cite{ye2011time,keogh2006lb_keogh,mueen2011logical,rakthanmanon2012searching} and to improve the shapelet quality~\cite{bostrom2017binary}.
Another line of works is dedicated to jointly learning the shapelets and the classifiers~\cite{grabocka2014learning,liang2021efficient,ma2019triple,li2021shapenet,ma2020adversarial,fang2018efficient,hou2016efficient}. However, the learning-based methods are much more complex because they introduce several additional hyperparameters that highly affect the accuracy. Besides, they are inflexible due to the coupling of the shapelet and the classifier, and cannot run in an anytime fashion to trade off the classification accuracy and the efficiency.
Based on the above discussions, we take advantage of the shapelet transformation method~\cite{hills2014classification,bostrom2017binary,bagnall2020tale} to develop our FL solution. Nevertheless, our work differs from existing studies because we carefully consider the security and efficiency issues in a brand new FL scenario.
\subsection{Privacy Protection} Data privacy is one of the most essential problems in FL~\cite{yang2019federated,li2020federated,kairouz2021advances}. Several techniques have been studied by existing works. Secure Multi-Party Computation~\cite{yao1982protocols} is a general framework that offers secure protocols for many arithmetic operations~\cite{damgaard2012multiparty,keller2020mp,keller2013architecture}. These operations are efficient for practical utility~\cite{aly2019benchmarking,chen2019secure,li2020practical,mohassel2017secureml,li2021privacy,wu13privacy} under the semi-honest model that most FL works consider, while they can also be extended to the malicious model through zero-knowledge proofs~\cite{goldreich1994definitions}.
Homomorphic Encryption (HE) is another popular technique in FL~\cite{cheng2021secureboost,fu2021vf2boost,10.14778/3494124.3494125,zhang2020batchcrypt,wu13privacy}, which allows a simple implementation of the secure addition. However, HE does not support some complex operations (e.g., division and comparison). The encryption and decryption are also computationally intensive~\cite{wu13privacy,fu2021vf2boost}.
Compared to the solutions based on MPC and HE which aim to protect the intermediate information during the federated computation, an orthogonal line of works adopts Differential Privacy (DP) to protect the privacy of the outputs, such as the parameters of the learned models. It works by adding noise to the private data~\cite{wang2021efficient,wei2020federated,liu2021projected,li2021federated,Pan2022FedWalkCE} to achieve a trade-off between the precision and the degree of privacy for a target function. Thus, DP can usually complement MPC and HE.
In this paper, we mainly adopt MPC to ensure no intermediate information is disclosed during the complex computations of FedST, because it provides the protocols for the required arithmetic operations. We also illustrate that the private data can be further protected with privacy guarantee by incorporating DP.
\section{Preliminaries}\label{sec:pre}
This section presents the preliminaries, including the target problem of the paper and the two building blocks of the proposed FedST, i.e., the shapelet transformation and the secure multi-party computation.
\subsection{Problem Statement}
Time series classification (TSC) is the problem of creating a function that maps from the space of input time series samples to the space of class labels~\cite{bagnall16bakeoff}. A time series (sample) is defined as a sequence of data points
$ T = (t_1,\ldots,t_p,\ldots,t_N )$
ordered by time, where $t_p$ is the observation at timestamp $p$, and $N$ is the length. The class label $y$ is a discrete variable with $C$ possible values, i.e., $y \in \{c\}_{c=1}^C$ where $C \ge 2$.
Typically, TSC is achieved by using a training data set $TD=\{(T_j, y_j)\}_{j=1}^M$ to build a model that can output either predicted class values or class distributions for previously unseen time series samples, where the instance $(T_j, y_j)$ represents the pair of the $j$-th time series sample and the corresponding label.
Specifically, in this paper we target the TSC problem in a federated setting, denoted as the FL-enabled TSC problem defined as follows.
\begin{definition}[FL-enabled TSC problem]
Given a party $P_0$ (named initiator) who owns a training data set $TD^0$ and $n - 1$ partners $P_1, \ldots, P_{n-1}$ (named participants) who hold the labeled series $TD^1, \ldots, TD^{n-1}$ collected from the same area (e.g., monitoring the same type of instruments), where $TD^i = \{(T^i_j, y^i_j)\}_{j=1}^{M_i}$, the goal of the problem is to coordinate the parties to build TSC models $\mathcal{M}$ for the initiator $P_0$ without revealing the local data $TD^0$, $\ldots$, $TD^{n-1}$ to each other.
\end{definition}
Note that every party in the group can act as the initiator to benefit from the federated learning. For ease of exposition, we denote $\sum_{i=0}^{n-1}M_i = M$ and $\bigcup_{i=0}^{n-1}TD^i=TD$. Ideally, the performance of $\mathcal{M}$ should be lossless compared to that of the model trained in a centralized scenario using the combined data $TD$.
Similar to previous FL works~\cite{wu13privacy,fu2021vf2boost,fu2022blindfl,li2021federated,tong2022hu}, we consider the semi-honest model where each party follows the protocols but may try to infer the private information from the received messages, while our method can be extended to the malicious model through zero-knowledge proofs~\cite{goldreich1994definitions}. Unlike existing FL works that usually conditionally allow disclosure of some private data~\cite{fu2021vf2boost,10.14778/3494124.3494125}, we adopt a stricter security definition~\cite{wu13privacy,mohassel2017secureml} to ensure \textit{no intermediate information is disclosed}.
\begin{definition}[Security]
Let $\mathcal{F}$ be an \textit{ideal} functionality such that the parties send their data to a trusted party for computation and receive the final results from the party. Let $\Pi$ be a \textit{real-world} protocol executed by the parties. We say that $\Pi$ securely realizes $\mathcal{F}$ if for each adversary $\mathcal{A}$ attacking the real interaction, there exists a simulator $\mathcal{S}$ attacking the ideal interaction, such that for all environments $\mathcal{Z}$, the quantity $\arrowvert \Pr[REAL(\mathcal{Z}, \mathcal{A}, \Pi, \lambda) = 1] - \Pr[IDEAL(\mathcal{Z}, \mathcal{S}, \mathcal{F}, \lambda) = 1] \arrowvert$ is negligible (in $\lambda$).
\label{definition:security}
\end{definition}
Intuitively, the simulator $\mathcal{S}$ must achieve the same effect in the ideal interaction as the adversary $\mathcal{A}$ achieves in the real interaction. In this paper, we identify the ideal functionality as the federated search of the high-quality shapelets, which is the kernel of the proposed FedST framework (see Section~\ref{fedst_kernel} in detail). Therefore, we contribute to design secure and efficient protocols to achieve the functionality in the real FL scenario.
The federated setting in this paper is similar to the horizontal and cross-silo FL~\cite{kairouz2021advances,mammen2021federated} because the data are horizontally partitioned across a few businesses and each of them has considerable but insufficient data. However, unlike the mainstream FL solutions that usually rely on a trusted server~\cite{huang2021personalized,zhang2020batchcrypt,marfoq2020throughput}, we remove this dependency considering that identifying such a party can cause additional costs~\cite{10.14778/3494124.3494125}. Besides, the security definition we adopt is stricter than many existing FL works as mentioned above. Therefore, our setting can be more practical but challenging.
\subsection{Shapelet Transformation}\label{TSC model}
\textit{Time series shapelets} are defined as representative subsequences that discriminate the classes. Denote $S = (s_1, \ldots, s_L)$ a shapelet generated from $TD=\{(T_j, y_j)\}_{j=1}^M$ and the length of $T_j$ is $N$, where $L \le N$. Let $T_j[s, l]$ denote the subseries of $T_j = (t_{j, 1}, \ldots, t_{j, N})$ that starts at the timestamp $s$ and has length $l$, i.e.,
\begin{equation}
T_j[s, l] = (t_{j, s}, \ldots, t_{j, s + l - 1}), 1 \leq s \leq N - l + 1,
\end{equation}
the distance between the shapelet and the $j$-th time series is defined as the minimum Euclidean norm (ignore the square root) between $S$ and the $L$-length subseries of $T_j$, i.e.,
\begin{equation}
d_{T_j, S} = \mathop{\min}_{p \in \{1, \ldots, N - L + 1\}} ||S - T_j[p, L]||^2.
\label{eq:shapelet_dis}
\end{equation}
By definition, $d_{T_j, S}$ reflects the similarity between a localized shape of $T_j$ and $S$, which is a class-specific feature. The quality of $S$ can be measured by computing the distances to all series in $TD$, i.e., $D_S = \{d_{T_j, S}\}_{j=1}^M$, and evaluating the differences in distribution of the distances between class values $\{y_j\}_{j=1}^M$. The state-of-the-art method of shapelet quality measurement is to use the \textit{Information Gain (IG) with a binary strategy}~\cite{bostrom2017binary}. Each distance $d_{T_j, S} \in D_S$ is considered as a splitting threshold, denoted as $\tau$. The threshold is used to partition the dataset $D_S$ into $D_S^{\tau,L}$ and $D_S^{\tau,R}$, such that $D_S^{\tau,L} = \{d_{T_j, S}|d_{T_j, S} \le \tau\}_{j=1}^M$ and $D_S^{\tau,R} = D_S \setminus D_S^{\tau, L}$. The quality of $S$ is the maximum information gain among the thresholds, i.e.,
\begin{equation}
\begin{split}
Q_{IG}(S) &= \mathop{\max}_{\forall \tau}\ H(D_S) - (H(D_S^{\tau,L}) + H(D_S^{\tau,R})),
\end{split}\label{eq:IG}
\end{equation}
where
\begin{equation}
H(D) = -(p\log_2p + (1-p)\log_2(1-p)),
\end{equation}
$p = \frac{|D_{y(S)}|}{|D|}$ is the fraction of samples in $D$ that belong to the class of the sample generating $S$,
$y(S) \in \{c\}_{c=1}^C$ and
$D_{y(S)} = \{d_{T_j, S}| y_j = y(S)\}_{j=1}^M$.
In shapelet transformation, a set of candidates are randomly sampled from the possible subsequences of $TD$. After measuring the quality of all candidates, the $K$ subsequences with the highest quality are chosen as the shapelets, which are denoted as $\{S_k\}_{k=1}^K$. The shapelets are used to transform the original dataset $TD$ into a new tabular dataset of $K$ features, where each attribute represents the distance between the shapelet and the original series, i.e., $D = \{(\mathbf{X}_j, y_j)\}_{j=1}^M$ where $\mathbf{X}_j = (d_{T_j, S_1}, \ldots, d_{T_j, S_K})$. The unseen series are transformed in the same way for prediction. $D$ can be used in conjunction with any classifier, such as the well-known intrinsically interpretable decision tree and logistic regression~\cite{molnar2022}.
\subsection{Secure Multiparty Computation}\label{mpc}
\textit{Secure multiparty computation (MPC)}~\cite{yao1982protocols} allows participants to compute a function over their inputs while keeping the inputs private. In this paper, we utilize the additive secret sharing scheme for MPC~\cite{damgaard2012multiparty} since it offers the protocols of the common arithmetic operations applicable to practical situations~\cite{chen2019secure,li2021privacy}. It performs in a field $\mathbb{Z}_q$ for a prime $q$. We denote a value $x \in \mathbb{Z}_q$ that is additively shared among parties as
\begin{equation}
\langle x \rangle = \{\langle x \rangle_0, \ldots, \langle x \rangle_{n-1}\},
\end{equation}
where $\langle x \rangle _i$ is a random \textit{share} of $x$ hold by party $P_i$.
Suppose $x$ is a private value of $P_i$. To secretly share $x$, $P_i$ randomly chooses $\langle x \rangle_j \in \mathbb{Z}_q$ and sends it to $P_j(j \neq i)$. Then, $P_i$ sets $\langle x \rangle_i = x - \sum_j \langle x \rangle_j \mod q$. To reconstruct $x$, all parties reveal their \textit{shares} to compute $x = \sum_{i=0}^{n-1} \langle x \rangle_i \mod q$. For ease of exposition, we omit the modular operation in the rest of the paper.
Under the additive secret sharing scheme, a function $z = \mathit{f} (x, y)$ is computed by using a MPC protocol that takes $\langle x \rangle$ and $\langle y \rangle$ as input and outputs the secret shared $\langle z \rangle$. In this paper, we mainly use the following MPC protocols as building blocks:
(a) \textit{Addition}: $\langle z \rangle = \langle x \rangle + \langle y \rangle$
(b) \textit{Multiplication}: $\langle z \rangle = \langle x \rangle \cdot \langle y \rangle$
(c) \textit{Division}: $\langle z \rangle = \langle x \rangle / \langle y \rangle$
(d) \textit{Comparison}: $\langle z \rangle = \langle x \rangle \overset{?}{<} \langle y \rangle:\langle 1 \rangle:\langle 0 \rangle$
(e) \textit{Logarithm}: $\langle z \rangle = \log_2(\langle x \rangle)$
We refer readers to \cite{beaver1991efficient,catrina2010secure,catrina2010improved,aly2019benchmarking} for the detailed implementation of the operations.
In addition, given the result $\langle b \rangle = \langle x \rangle \overset{?}{<} \langle y \rangle:\langle 1 \rangle:\langle 0 \rangle$, the smaller one of two values $ \langle x \rangle $, $\langle y \rangle$ can be securely assigned to $\langle z \rangle$, as:
(f) \textit{Assignment}: $\langle z\rangle =\langle b\rangle \cdot \langle x\rangle + (1-\langle b\rangle)\cdot \langle y\rangle$.
With the assignment protocol, it is trivial to perform the \textit{maximum}, \textit{minimum}, and \textit{top-K} computation for a list of secret shares by sequentially comparing and swapping the adjacent elements in the list using the secure comparison and assignment protocols.
\section{Solution Overview}\label{solution_overview}
This section overviews our FL-enabled TSC framework, which is a key component of our FedTSC system~\cite{fedtsc} and is built based on the centralized shapelet transformation~\cite{hills2014classification,bostrom2017binary,bagnall2020tale}. We provide the framework overview in Section~\ref{fedst_framework}. Then, we identify the FedST kernel in Section~\ref{fedst_kernel}.
\subsection{FedST Framework}\label{fedst_framework}
\begin{figure}
\caption{An illustration of the FedST framework.}
\label{fig:fedst_framework}
\end{figure}
Overall, FedST has two stages: (1) federated shapelet search; (2) federated data transformation and classifier training. The two stages are illustrated in Figure~\ref{fig:fedst_framework}.
In the first stage, all parties jointly search for the $K$ best shapelets $\{S_k\}_{k=1}^K$ from a candidate set $\mathcal{SC}$.
Note that $P_0$ requires the found shapelets to explain the shapelet-based features, so the shapelet candidates in $\mathcal{SC}$ are only generated by $P_0$ to ensure the local time series of the participants cannot be accessed by the initiator. This may raise a concern that the shapelets will be missed if they do not occur in $TD_0$. Fortunately, since the high-quality shapelets are usually highly redundant in the training data, it has been shown that checking some randomly sampled candidates rather than all possible subsequences is enough to find them~\cite{bagnall2020tale,gordon2012fast}. Hence, it is feasible to generate $\mathcal{SC}$ by $P_0$ in our cross-silo setting where each business has considerable (but insufficient) data. We also verify this issue in Section~\ref{exp:accuracy} and \ref{exp:flexibility}.
In stage two, the time series data $TD^i$ in each party is transformed into the $K$ dimensional secretly shared tabular data as:
\begin{equation}
\langle D^i \rangle = \{(\langle\mathbf{X}_j^i\rangle, \langle y_j^i \rangle)\}_{j=1}^{M_i}, \forall i \in \{0,\ldots,n-1\},
\end{equation}
where
\begin{equation}
\langle\mathbf{X}_j^i\rangle = (\langle d_{T_j^i, S_1}\rangle, \ldots, \langle d_{T_j^i, S_K}\rangle).
\end{equation}
Then, a standard classifier is built over the joint secretly shared data set $\langle D \rangle = \bigcup_{i=0}^{n-1}\langle D^i \rangle$.
Note that there is always a trade-off between security and accuracy/interpretability in FL. To achieve a good balance, FedST ensures only $P_0$ learns the shapelets and classifiers, while nothing else can be revealed to the parties. This degree of privacy has been shown practical by many FL systems~\cite{fu2021vf2boost,fu2022blindfl,wu13privacy}. Additionally, we illustrate in Section~\ref{dp-protect} that we can further enhance the security by incorporating differential privacy~\cite{dwork2014algorithmic}, guaranteeing that the revealed outputs leak limited information about the private training data.
\subsection{FedST Kernel: Federated Shapelet Search}\label{fedst_kernel}
The transformed data set $\langle D \rangle$ is a common tabular data set with continuous attributes, which can be used in conjunction with any standard classifier. Consequently, {any classifier training protocol} built for secretly shared data (e.g.,~\cite{mohassel2017secureml,abspoel2020secure,zheng2021cerebro,chen2019secure}) can be {seamlessly integrated into our framework.} Nevertheless, there exists no protocol that tackles the orthogonal problem of federated shapelet search and data transformation. Further, the data transformation is to compute the distances between each training series and shapelet, which is just a subroutine of the shapelet search. Thus, the key technical challenge within our FedST is to design secure and efficient protocols to achieve the \textit{federated shapelet search (Stage 1 in Figure~\ref{fig:fedst_framework})}, which becomes the kernel part of FedST.
Formally, we define the functionality of the federated shapelet search, $\mathcal{F}_{FedSS}$, as follows.
\begin{definition}[Federated shapelet search, $\mathcal{F}_{FedSS}$]
Given the time series datasets distributed over the parties, i.e., $TD^0$, \ldots, $TD^{n-1}$, and the shapelet candidates $\mathcal{SC}$ generated from $TD^0$, the goal of $\mathcal{F}_{FedSS}$ is to search for the $K$ best shapelets $\{S_k|S_k \in \mathcal{SC}\}_{k=1}^K$ for $P_0$ by leveraging the distributed data sets.\label{definition:FedSS}
\end{definition}
\noindent
To realize ${\mathcal{F}_{FedSS}}$ under the security defined in Definition~\ref{definition:security}, a straightforward thought is to design security protocols by extending the centralized method to the FL setting using MPC. Following this, we present \textbf{${\Pi_{FedSS-B}}$} (Section~\ref{protocol_description}), the protocol that achieves our basic idea. We show the protocol is \textit{secure} and \textit{effective} (Section~\ref{protocol_discussion}), but we identify that {it suffers from \textit{low efficiency} due to the high communication overhead incurred by MPC and the failure of the pruning techniques due to the security issue (Section~\ref{protocol_bottleneck}).}
To tackle the efficiency issue, we propose \textit{secure acceleration techniques tailored for the FL setting} that dramatically boost the protocol efficiency by optimizing the two bottlenecked processes of $\Pi_{FedSS-B}$, i.e., the \textit{distance computation} (Section~\ref{Distance_Acceleration}) and the \textit{quality measurement} (Section~\ref{measurement_acceleration}). Experiment results show that each of these techniques is \textit{individually effective} and they together contribute to \textbf{three orders of magnitude of speedup} (Section~\ref{exp:efficiency}).
Besides, since the evaluation of each shapelet candidate is in a randomized order and independent from the others, FedSS can perform in an anytime fashion~\cite{bagnall2020tale,gordon2012fast}. That is, the user announces a time contract, so that the evaluation stops once the running time exceeds the contract, and only the assessed candidates are considered in the following steps. Since this strategy relies only on the publicly available running time, it is feasible in the FL setting~\cite{fedtsc} to flexibly balance the accuracy and efficiency. We verify this issue in Section~\ref{exp:flexibility}.
\section{Basic protocol $\Pi_{FedSS-B}$}\label{fedss_bs}
We now introduce the basic protocol $\Pi_{FedSS-B}$, which is extended from the centralized shapelet search using MPC to protect the intermediate information (Section~\ref{protocol_description}). We discuss the protocol in terms of security, effectiveness, and efficiency in Section~\ref{protocol_discussion}, and analyze the bottlenecks of the protocol in Section~\ref{protocol_bottleneck}.
\subsection{Protocol Description}\label{protocol_description}
$\Pi_{FedSS-B}$ is outlined in Algorithm~\ref{alg:fedss_bs}. The parties jointly assess the quality of each candidate and then select the $K$ best as the shapelets. The algorithm performs in three steps. First, the parties compute the distance between the samples and each candidate (Lines 2-8). Second, the parties evaluate the quality of the candidate over the secretly shared distances and labels (Line 9). Finally, the parties jointly retrieve the $K$ candidates with the highest quality and reveal the shares of the indices to $P_0$ to recover the selected shapelets (Lines 10-11). These three steps are described as follows.
{\setlength{\parindent}{0cm}
\textbf{Distance Computation.} Since the candidates are locally generated by $P_0$, the distance between the samples of $P_0$ and the candidates can be locally computed. After that, $P_0$ secretly shares the results to enable the subsequent steps (Lines 3-5).
}
To compute the distances between the samples of each participant $P_i$ and the candidates (Lines 6-8), the MPC operations have to be adopted. For example, to compute $d_{T_j^i, S}$, $P_i$ and $P_0$ secretly share $T_j^i$ and $S$ respectively. Next, the parties jointly compute each Euclidean norm $\langle ||S - T^i_j[p, L]||^2 \rangle$ using MPC. At last, the parties jointly determine the shapelet distance $\langle d_{T_j^i, S} \rangle$ by Eq.~\ref{eq:shapelet_dis} using the secure minimum operation (see Section~\ref{mpc}).
\begin{algorithm}[t]
\normalem
\caption{Basic Protocol $\Pi_{FedSS-B}$}
\label{alg:fedss_bs}
\SetKwData{Or}{\textbf{or}}
\DontPrintSemicolon
\KwIn {$TD^i=\{( T_j^i , y_j^i )\}_{j=1}^{M_i}$, $i = 0,\ldots,n-1$: local datasets\\ \ \ \ \ \ \ \
\ $\mathcal{SC}$: A set of shapelet candidates locally generated by $P_0$
\\ \ \ \ \ \ \ \ \ \ \ $K$: the number of shapelets}
\KwOut {$\{S_k\}_{k=1}^{K}$: shapelets revealed to $P_0$}
\For{$S \in \mathcal{SC}$}{
\For{$i \in \{0,\ldots,n-1\}$}{
\If{$i == 0$}{
\For{$j \in \{1, \ldots, M_0\}$}{$P_0$ locally computes $d_{T^0_j, S}$ and secretly shares the result among all parties \;}
}
\Else{
\For{$j \in \{1, \ldots, M_i\}$}{All parties jointly compute $\langle d_{T^i_j, S} \rangle$\;}
}
}
All parties jointly compute the quality $\langle Q_{IG}(S) \rangle$ over the secretly shared distances and labels \;
All parties jointly find the $K$ candidates with the highest quality and reveal the indices $\{\langle I_k \rangle \}_{k=1}^K$ to $P_0$ \;
}
\Return{$\{S_k = \mathcal{SC}_{I_k}\}_{k=1}^K$}
\end{algorithm}
{\setlength{\parindent}{0cm}
\textbf{Quality Measurement.} Based on Eq~\ref{eq:IG}, to compute the IG quality of $S \in \mathcal{SC}$ (Line 9), we need to {securely partition the dataset $D_S$ using each threshold $\tau$ and compute the number of samples belonging to each class $c\ (c \in \{1, \ldots,C\})$ for $D_S$, $D_S^{\tau,L}$, and $D_S^{\tau, R}$.} We achieve it over the secretly shared distances and labels by leveraging the \textit{indicating vector} defined as follows.
}
\begin{definition}[Indicating Vector]
Given a dataset $D = \{x_j\}_{j=1}^M$ and a subset $A \subseteq D$, we define the indicating vector of $A$, denoted as $\bm{\gamma}_{A \subseteq D}$, as a vector of size $M$ whose $j$-th ($j\in\{1,\ldots,M\}$) entry represents whether $x_j$ is in $A$, i.e., $\boldsymbol{\gamma}_{A \subseteq D}[j] = 1$ if $x_j \in A$, and $0$ otherwise.
\end{definition}
For example, for $D = \{x_1, x_2, x_3\}$ and $A = \{x_1,x_3\}$, the indicating vector of $A$ is $\boldsymbol{\gamma}_{A \subseteq D} = (1, 0, 1)$. Suppose that $\boldsymbol{\gamma}_{A_1 \subseteq D}$ and $\boldsymbol{\gamma}_{A_2 \subseteq D}$ are the indicating vectors of $A_1$ and $A_2$, respectively, we have
\begin{equation}
\boldsymbol{\gamma}_{A_1 \subseteq D} \cdot \boldsymbol{\gamma}_{A_2 \subseteq D} = |A_1 \cap A_2|,
\end{equation}
where $|A_1 \cap A_2|$ is the cardinality of $A_1 \cap A_2$. Specifically, we have $\boldsymbol{\gamma}_{A_1 \subseteq D} \cdot \boldsymbol{1} = |A_1|$.
With the indicating vector, we securely compute $\langle Q_{IG}(S) \rangle$ as follows.
At the beginning, $P_0$ generates a vector of size $C$ to indicate the class of $S$, i.e.,
\begin{equation}
\boldsymbol{\gamma}_{y(S)} = \boldsymbol{\gamma}_{\{y(S)\} \subseteq \{c\}_{c=1}^C},
\end{equation}
and secretly shares the vector among all parties.
Next, for each splitting threshold
\begin{equation}
\langle \tau \rangle \in \bigcup_{i=0}^{n-1} \{\langle d_{T_j^i,S} \rangle\}_{j=1}^{M_i},
\end{equation}
the parties jointly compute the secretly shared vector
\begin{equation}
\begin{split}
\langle \boldsymbol{\gamma}_L \rangle &= \langle \boldsymbol{\gamma}_{D_S^{\tau,L} \subseteq D_S} \rangle,\\
\langle \boldsymbol{\gamma}_R \rangle &= \langle \boldsymbol{\gamma}_{D_S^{\tau,R} \subseteq D_S} \rangle = \boldsymbol{1} - \langle \boldsymbol{\gamma}_L \rangle,
\end{split}
\end{equation}
where
\begin{equation}
\langle \boldsymbol{\gamma}_{D_S^{\tau,L} \subseteq D_S}[j] \rangle = \langle d_{T_j^i, S}\rangle \overset{?}{<} \langle \tau \rangle,\ j \in \{1, \ldots, M\}.
\end{equation}
Meanwhile, each party $P_i$ secretly shares the vector $\boldsymbol{\gamma}^{i}_{TD^{i}_{c} \subseteq TD^{i}}$ to indicate its samples that belong to each class $c$. Denote the indicating vectors of all parties as
\begin{equation}
\langle \boldsymbol{\gamma}_{c} \rangle = (\langle \boldsymbol{\gamma}^0_{TD^0_{c} \subseteq TD^0}\rangle, \ldots, \langle \boldsymbol{\gamma}^{n-1}_{TD^{n-1}_{c} \subseteq TD^{n-1}}\rangle),
\end{equation}
which indicates the samples in $D_S$ that belong to class $c$, i.e.,
\begin{equation}
\langle \boldsymbol{\gamma}_{c} \rangle = \langle \boldsymbol{\gamma}_{TD_{c} \subseteq TD} \rangle = \langle \boldsymbol{\gamma}_{D_{S,c} \subseteq D_S} \rangle.
\end{equation}
As such, the parties compute the following statistics using MPC:
\begin{equation}
\begin{split}
\langle |D_S^{\tau, L}| \rangle &= \langle \boldsymbol{\gamma}_L \rangle \cdot \boldsymbol{1},\\
\langle |D_S^{\tau, R}| \rangle &= |D_S| - \langle |D_S^{\tau, L}| \rangle,\\
\langle |D_{S,y(S)}| \rangle &= \langle \boldsymbol{\gamma}_{y(S)} \rangle \cdot (\langle \boldsymbol{\gamma}_{1} \rangle \cdot \boldsymbol{1}, \ldots,\langle \boldsymbol{\gamma}_{C} \rangle \cdot \boldsymbol{1}),\\
\langle |D_{S, y(S)}^{\tau, L}| \rangle &= \langle \boldsymbol{\gamma}_{y(S)} \rangle \cdot (\langle \boldsymbol{\gamma}_{{1}} \rangle \cdot \langle \boldsymbol{\gamma}_L \rangle, \ldots, \langle \boldsymbol{\gamma}_{{C}} \rangle \cdot \langle \boldsymbol{\gamma}_L \rangle),\\
\langle |D_{S, y(S)}^{\tau, R}| \rangle &= \langle \boldsymbol{\gamma}_{y(S)} \rangle \cdot (\langle \boldsymbol{\gamma}_{{1}} \rangle \cdot \langle \boldsymbol{\gamma}_R \rangle, \ldots, \langle \boldsymbol{\gamma}_{{C}} \rangle \cdot \langle \boldsymbol{\gamma}_R \rangle).
\end{split}\label{eq:IG_stats}
\end{equation}
Given the statistics in Equation~\ref{eq:IG_stats} and the public value $|D_S|=M$, the parties can jointly compute $\langle Q_{IG}(S) \rangle$ by Eq.~\ref{eq:IG}.
{\setlength{\parindent}{0cm}
\textbf{Shapelet Retrieval.} Given the quality of the candidates in secret shares, the parties jointly retrieve the indices of the $K$ best shapelets (Line 10) by securely comparing the adjacent quality values and then swapping the values and the corresponding indices based on the comparison results (see Section~\ref{mpc}). The indices are output to $P_0$ to recover the jointly selected shapelets (Line 11).
}
\subsection{Protocol Discussion}\label{protocol_discussion}
This section analyzes $\Pi_{FedSS-B}$ in terms of security, effectiveness, and efficiency.
{\setlength{\parindent}{0cm}
\textbf{Security.} The security of $\Pi_{FedSS-B}$ is guaranteed by the following Theorem:
}
\begin{theorem}
$\Pi_{FedSS-B}$ is secure under the security defined in Definition~\ref{definition:security}.
\label{theorem:fedst_basic_security}
\end{theorem}
\begin{proof}[Proof Sketch]
In $\Pi_{FedSS-B}$, all joint computations are executed using MPC. With the indicating vector, the secure computations are data-oblivious. An adversary learns no additional information. The security follows.
\end{proof}
{\setlength{\parindent}{0cm}
\textbf{Effectiveness.} We discuss the protocol effectiveness in terms of classification accuracy.
$\Pi_{FedSS-B}$ is directly extended from the centralized approach by using the secret-sharing-based MPC operations, which have considerable computation precision~\cite{catrina2010secure,catrina2010improved,aly2019benchmarking}. Therefore, it is expected that the accuracy of FedST has no difference from the centralized approach. The experiment results in Section~\ref{exp:accuracy} validate this issue.
}
\begin{figure}
\caption{Throughputs (\#operations per second) of different MPC operations executed by three parties. Secure addition is much more efficient than the others because it is executed without communication~\cite{catrina2010secure}.}
\label{fig:mpc_throughputs}
\end{figure}
{\setlength{\parindent}{0cm}
\textbf{Efficiency.} As shown in Figure~\ref{fig:mpc_throughputs}, the secret-sharing-based MPC is usually bottlenecked by communication rather than computation. Therefore, \textit{it is more indicative to analyze the efficiency by considering the complexity of only the interactive operations}, including the secure multiplication, division, comparison, and logarithm operations. We follow this metric for efficiency analysis in the paper.
}
$\Pi_{FedSS-B}$ in Algorithm~\ref{alg:fedss_bs} takes $O(|\mathcal{SC}| \cdot MN^2)$ for distance computation. The quality measurement has a complexity of $O(|\mathcal{SC}| \cdot M^2)$. Securely finding the top-$K$ candidates has a complexity of $O(|\mathcal{SC}| \cdot K)$. Since $K$ is usually a small constant, the total complexity of $\Pi_{FedSS-B}$ can be simplified as $O(|\mathcal{SC}| \cdot(MN^2 + M^2))$.
\begin{figure}
\caption{Illustration of the Euclidean norm pruning and its information disclosure.}
\label{fig:abandon}
\end{figure}
\subsection{Bottleneck Analysis}\label{protocol_bottleneck}
As discussed in Section~\ref{protocol_discussion}, $\Pi_{FedSS-B}$ is secure and effective to enable federated shapelet search. However, the basic protocol has expensive time cost in the FL setting for both \textit{distance computation} and \textit{quality measurement} steps, which bottleneck the efficiency of the protocol. Two major reasons are as below.
{\setlength{\parindent}{0cm}
\textbf{\textit{Reason I. Heavy Communication Overhead.}} As discussed in Section~\ref{protocol_discussion}, $\Pi_{FedSS-B}$ takes $O(|\mathcal{SC}|\cdot MN^2)$ and $O(|\mathcal{SC}|\cdot M^2)$ expensive interactive operations to compute the distance and measure the quality for all candidates, which dominate in the complexity.
Therefore, {the efficiency of $\Pi_{FedSS-B}$ is bottlenecked by the first two steps, i.e., distance computation and quality measurement.}
}
{\setlength{\parindent}{0cm}
\textbf{\textit{Reason II. Failure of Acceleration Techniques.}} Even using only local computation, repeatedly computing the distance and quality for all candidates is time-consuming~\cite{ye2011time}. To tackle this, existing studies propose pruning strategies for acceleration~\cite{mueen2011logical,keogh2006lb_keogh,ye2011time,rakthanmanon2012searching}. Unfortunately, {the pruning techniques are inevitably \textit{data-dependent}, which violates the security of Definition~\ref{definition:security} that requires the federated computation to be oblivious.} Thus, we have to abandon these acceleration strategies in the FL setting. We show the security issue in Theorem~\ref{theorem:distance_pruning_security} and Theorem~\ref{theorem:IG_pruning_security}.
}
\begin{theorem}
Protocol $\Pi_{FedSS-B}$ is insecure under the security defined in Definition~\ref{definition:security} if using the Euclidean norm pruning strategies proposed in~\cite{keogh2006lb_keogh} and \cite{rakthanmanon2012searching}.
\label{theorem:distance_pruning_security}
\end{theorem}
\begin{proof}[Proof Sketch]
Figure~\ref{fig:abandon} illustrates the Euclidean norm pruning. The basic idea is to maintain a best-so-far Euclidean norm at each timestamp $p \in \{1,\ldots,N-L_S+1\}$, and incrementally compute the sum of the squared differences between each pair of data points when computing $||S-T^i_j[p,L_S]||^2$ (left). Once the sum exceeds the best-so-far value, the current norm computation can be pruned (right). In the FL setting, although we can incrementally compute the sum and compare it with the best-so-far value using MPC, the comparison result must be disclosed when determining the pruning, which cannot be achieved by the simulator $\mathcal{S}$ that attacks the ideal interaction $\mathcal{F}_{FedSS}$ in Definition~\ref{definition:FedSS}. The security is violated.
\end{proof}
Similarly, we have the following theorem.
\begin{theorem}
Protocol $\Pi_{FedSS-B}$ is insecure under the security defined in Definition~\ref{definition:security} if using the IG quality pruning strategies proposed in~\cite{ye2011time} and \cite{mueen2011logical}.
\label{theorem:IG_pruning_security}
\end{theorem}
{\setlength{\parindent}{0cm}
We omit the proof because it is similar to the proof of Theorem~\ref{theorem:distance_pruning_security}.
\textbf{Optimizations.} To remedy the efficiency issue of the protocol $\Pi_{FedSS-B}$, we propose \textit{acceleration methods tailored for the FL setting} to improve the efficiency of the {distance computation} and the {quality measurement steps.} }
For distance computation, we propose to speed up the bottlenecked Euclidean norm computation based on \textit{a novel secure dot-product protocol} (Section~\ref{Distance_Acceleration}).
For quality measurement, we first propose a secure \textit{sorting-based acceleration} to reduce the duplicated interactive operations for computing IG (Section~\ref{sorting_based_acceleration}). Then, we propose to \textit{tap an alternative F-stat measure} to further improve the efficiency with comparable accuracy (Section~\ref{trade_off}).
Experiment results show that each of these three techniques is individually effective and together they bring up to \textit{three orders of magnitude of speedup} to $\Pi_{FedSS-B}$. Further, compared to our well-optimized IG, the F-stat-based method in Section~\ref{trade_off} gives 1.04-8.31x of speedup while guaranteeing \textit{no statistical difference} in TSC accuracy (Section~\ref{exp:efficiency}).
\section{Shapelet Distance Acceleration}\label{Distance_Acceleration}
In $\Pi_{FedSS-B}$, the distance between a candidate $S$ and the $M-M_0$ samples $T^i_j(\forall j, i \neq 0)$ is straightforwardly computed using MPC. Based on Eq~\ref{eq:shapelet_dis}, the interactive operations used include:
\begin{enumerate}
\item $L_S(N - L_S + 1)(M-M_0)$ pairwise multiplications for the Euclidean norm;
\item $(N - L_S + 1)(M - M_0)$ times of both comparisons and assignments for the minimum.
\end{enumerate}
Because the shapelet length $L_S$ is up to $N$ where $N \gg 1$, the efficiency is dominated by the Euclidean norm. Thus, {it is necessary to accelerate the distance computation by improving the efficiency of the bottlenecked \textit{Euclidean norm}}.
The work of~\cite{ioannidis2002secure} proposes a two-party dot-product protocol (as Algorithm~\ref{alg:original_dp}) that we find is both computation and communication efficient for the calculation between one vector and many others. It motivates us that we can compute the Euclidean norm between a candidate $S$ and the total $(N - L_S + 1)(M-M_0)$ subseries of the participants using the dot-product protocol. Unfortunately, the protocol in Algorithm~\ref{alg:original_dp} (denoted as the raw protocol) has weak security that violates Definition~\ref{definition:security}.
To overcome the limitation, we propose ${\Pi_{DP}}$, \textit{a secure dot-product protocol} that enhances the raw protocol using MPC. We prove that this novel protocol not only follows the security of Definition~\ref{definition:security}, but also effectively accelerates the Euclidean norm. We describe the acceleration method in Section~\ref{DP-based-ED}. Then, we analyze the security deficiency of the raw protocol and propose our $\Pi_{DP}$ in Section~\ref{security_analysis_and_enhancement}.
\begin{algorithm}[t]
\caption{The Two-Party Dot-Product Protocol of~\cite{ioannidis2002secure} }
\label{alg:original_dp}
\SetKwData{Or}{\textbf{or}}
\DontPrintSemicolon
\KwIn {$\boldsymbol{x} \in \mathbb{R}^L$ from $P_0$; $\boldsymbol{y} \in \mathbb{R}^L$ from $P_i$ ($i \in \{1,\ldots,n-1\}$)}
\KwOut {$\beta$ to $P_0$ and $\alpha$ to $P_i$, satisfying $\beta - \alpha = \boldsymbol{x}^T \cdot \boldsymbol{y}$}
\textbf{Party $\boldsymbol{P_0}$} randomly chooses $\boldsymbol{Q}$, $r$, $\boldsymbol{f}$, $R_1$, $R_2$, $R_3$, $\mathbf{x}_i$ ($i \in \{1,\ldots, d\}, i \neq r$) and creates $\mathbf{X}$. Then, it computes $b $, $\boldsymbol{U}$, $\boldsymbol{c}$, $\boldsymbol{g}$, and sends $\boldsymbol{U}$, $\boldsymbol{c}$, $\boldsymbol{g}$ to $P_i$ \;
\textbf{Party $\boldsymbol{P_i}$} randomly chooses $\alpha$, creates $\boldsymbol{y}^\prime$, computes and sends to $P_0$ the value $a$, $h$ \;
\textbf{Party $\boldsymbol{P_0}$} computes $\beta$ \;
\end{algorithm}
\subsection{Dot-Product-based Euclidean Norm}\label{DP-based-ED}
Given two vectors $\boldsymbol{x} \in \mathbb{R}^L$ from $P_0$ and $\boldsymbol{y} \in \mathbb{R}^L$ from $P_i$, Algorithm~\ref{alg:original_dp} computes the dot-product $\boldsymbol{x}\cdot\boldsymbol{y}$ as follows.
\noindent
\textbf{(i)} $P_0$ chooses a random matrix $\boldsymbol{Q} \in \mathbb{R}^{d \times d} (d \ge 2)$, a random value $r \in \{1, \ldots, d\}$, a random vector $\boldsymbol{f} \in \mathbb{R}^{L+1}$ and three random values $R_1, R_2, R_3$, and selects $d - 1$ random vectors
\begin{equation}
\mathbf{x}_i \in \mathbb{R}^{L+1},\ i \in \{1,\ldots, r-1, r+1, \ldots, d\}.
\end{equation}
Next, it creates a matrix
\begin{equation}
\mathbf{X} \in \mathbb{R}^{d \times (L + 1)},
\end{equation}
whose $i$-th row ($i \neq r $) is $\mathbf{x}_i$ and $r$-th row is
\begin{equation}
\boldsymbol{x}^{\prime T} = (x_1, \ldots, x_L, 1).
\end{equation}
Then, $P_0$ locally computes
\begin{equation}
b = \sum_{j=1}^d \boldsymbol{Q}_{j,r},
\end{equation}
\begin{equation}
\boldsymbol{U} = \boldsymbol{Q} \cdot \boldsymbol{X},
\end{equation}
\begin{equation}
\boldsymbol{c} = \sum_{i \in \{1,\ldots,d\}, i \neq r}(\boldsymbol{x}_i^T \cdot \sum_{j=1}^{d}\boldsymbol{Q}_{j,i}) + R_1R_2\boldsymbol{f}^T,
\end{equation}
\begin{equation}
\boldsymbol{g} = R_1R_3\boldsymbol{f}.
\end{equation}
Finally, it sends $\boldsymbol{U}$, $\boldsymbol{c}$, $\boldsymbol{g}$ to $P_i$ (Line 1);
\noindent
\textbf{(ii)} $P_i$ chooses a random value $\alpha$ to generate
\begin{equation}
\boldsymbol{y}^\prime = (y_1, \ldots, y_L, \alpha)^T.
\end{equation}
Next, it computes and sends to $P_0$ two scalars $a$ and $h$ (Line 2), as:
\begin{equation}
a = \sum_{j=1}^{d}\boldsymbol{U}_{j}\cdot\boldsymbol{y}^\prime - \boldsymbol{c}\cdot\boldsymbol{y}^\prime,
\end{equation}
\begin{equation}
h = \boldsymbol{g}^T\cdot\boldsymbol{y}^\prime.
\end{equation}
\noindent
\textbf{(iii)} $P_0$ locally computes $\beta$ (Line 3) as:
\begin{equation}
\beta = \frac{a}{b} + \frac{hR_2}{bR_3}.
\end{equation}
Given $\beta$ and $\alpha$, the result satisfies $\boldsymbol{x}^T\cdot\boldsymbol{y} = \beta - \alpha$.
The \textit{Euclidean norm computation} in our federated shapelet search can benefit from the above protocol, since each $||S - T^i_j[p, L_S]||^2$ can be represented as
\begin{equation}
\begin{split}
||S - T^i_j[p, L_S]||^2 =& \sum_{p^\prime=1}^{L_S} (s_{p^\prime})^2 + \sum_{p^\prime=1}^{L_S} (t_{p^\prime+p-1})^2 \\&- 2\sum_{p^\prime=1}^{L_S} s_{p^\prime}t_{p^\prime+p-1},
\end{split}
\end{equation}
where the term
\begin{equation}
z = \sum_{p^\prime=1}^{L_S} s_{p^\prime}t_{p^\prime+p-1} = \boldsymbol{S}^T\cdot\boldsymbol{T^i_j[p,L_S]}
\end{equation}
can be computed by $P_0$ and $P_i$ jointly executing the protocol to get $\beta$ and $\alpha$, respectively. The terms $\sum_{p^\prime=1}^{L_S} (s_{p^\prime})^2$ and $\sum_{p^\prime=1}^{L_S} (t_{p^\prime+p-1})^2$ can be locally computed by the two parties. To this end, all parties aggregate the three terms in secret shares using non-interactive secure addition.
Using the above dot-product protocol, the
total communication cost for the $(N-L_S+1)(M-M_0)$ Euclidean norm between $S$ and the subseries of the participants is reduced from $O(L_S(N-L_S+1)(M-M_0))$ to $O(L_S) + O((N - L_S + 1)(M-M_0))$.
\begin{algorithm}[t]
\caption{Secure Dot-Product Protocol $\Pi_{DP}$ (Ours)}
\label{alg:secure_dp}
\SetKwData{Or}{\textbf{or}}
\DontPrintSemicolon
\KwIn {$\boldsymbol{x} \in \mathbb{R}^L$ from $P_0$; $\boldsymbol{y} \in \mathbb{R}^L$ from $P_i$ ($i \in \{1,\ldots,n-1\}$)}
\KwOut {$\langle z \rangle$ secretly shared by all parties, satisfying $z=\boldsymbol{x}^T \cdot \boldsymbol{y}$}
\textbf{Party $\boldsymbol{P_0}$} and \textbf{Party $\boldsymbol{P_i}$} represent each element of their input vectors as fixed-point number encoded in $\mathbb{Z}_q$ as used in MPC \\
\textbf{Party $\boldsymbol{P_0}$} independently and randomly chooses each value of $\boldsymbol{Q}$, $\boldsymbol{f}$, $R_1$, $R_2$, $R_3$, $\mathbf{x}_i$ ($i \in \{1,\ldots, d\}, i \neq r$) from $\mathbb{Z}_q$, $r \in {\{1,\ldots, d\}}$, creates $\mathbf{X}$, computes $b$, $\boldsymbol{U}$, $\boldsymbol{c}$, $\boldsymbol{g}$, and sends $\boldsymbol{U}$, $\boldsymbol{c}$, $\boldsymbol{g}$ to $P_i$. \\
\textbf{Party $\boldsymbol{P_i}$} randomly chooses $\alpha \in \mathbb{Z}_q$, creates $\boldsymbol{y}^\prime$, and computes the value $a$, $h$. Then, \textit{$P_i$ sends only $h$ to $P_0$} \\
\textbf{All Parties} \textit{jointly compute $\langle z \rangle = \langle \beta \rangle - \langle \alpha \rangle = \langle \frac{1}{b} \rangle\cdot\langle a \rangle + \langle \frac{hR_2}{bR_3} \rangle- \langle \alpha \rangle$ using MPC} \\
\end{algorithm}
\subsection{Security Analysis and Enhancement}\label{security_analysis_and_enhancement}
Although the protocol in Algorithm~\ref{alg:original_dp} benefits the efficiency of the distance computation, {it is unavailable due to the security issue.}
\begin{theorem}
The protocol of Algorithm~\ref{alg:original_dp} is insecure under the security defined in Definition~\ref{definition:security}.
\end{theorem}
\begin{proof}[Proof Sketch]
Consider an adversary $\mathcal{A}$ that attacks $P_0$. By executing the raw protocol in Algorithm~\ref{alg:original_dp}, $\mathcal{A}$ receives the messages $a$ and $h$. For ease of exposition, we represent the matrix $\boldsymbol{U}$ as a row of the column vectors, i.e.,
\begin{equation}
\boldsymbol{U} = (\boldsymbol{u}_1, \ldots, \boldsymbol{u}_{L+1}),
\end{equation}
where
\begin{equation}
\boldsymbol{u}_{i} = (\boldsymbol{U}_{1,i},\ldots,\boldsymbol{U}_{d,i})^T, i \in \{1,\ldots,L+1\}.
\end{equation}
Denote
\begin{equation}
\boldsymbol{c} = (c_1, \ldots, c_{L+1})
\end{equation}
and
\begin{equation}
\boldsymbol{g}^T = (g_1, \ldots, g_{L+1}).
\end{equation}
Recall that $\boldsymbol{y}^\prime = (\boldsymbol{y}^T, \alpha)^T$; for brevity, we write $\boldsymbol{v} = \boldsymbol{y}$ and $\boldsymbol{v}^\prime = \boldsymbol{y}^\prime$ in what follows. Thus, we have
\begin{equation}
\begin{split}
a = \sum_{j=1}^{d}\boldsymbol{U}_j \cdot \boldsymbol{v}^\prime - \boldsymbol{c} \cdot \boldsymbol{v}^\prime = \boldsymbol{e}_1^T \cdot \boldsymbol{v} + w\alpha,
\end{split}\label{eq:a_alpha}
\end{equation}
\begin{equation}
h = \boldsymbol{g}^T \cdot \boldsymbol{v}^\prime = \boldsymbol{e}_2^T \cdot \boldsymbol{v} + g_{L+1}\alpha,
\label{eq:h_alpha}
\end{equation}
where
\begin{equation}
\boldsymbol{e}_1 = (\sum\boldsymbol{u}_{1} + c_{1}, \ldots, \sum\boldsymbol{u}_{L} + c_{L})^T,
\end{equation}
\begin{equation}
w = (\sum\boldsymbol{u}_{L+1} + c_{L+1}),
\end{equation}
\begin{equation}
\boldsymbol{e}_2 = (g_1, \ldots, g_L)^T.
\end{equation}
Based on Eq~\ref{eq:a_alpha}-\ref{eq:h_alpha}, $\mathcal{A}$ knows that
\begin{equation}
g_{L+1}a - wh = (g_{L+1}\boldsymbol{e}_1^T - w\boldsymbol{e}_2^T) \cdot \boldsymbol{v},
\end{equation}
where $g_{L+1}\boldsymbol{e}_1^T - w\boldsymbol{e}_2^T$ is created locally in $P_0$. Obviously, the probability distribution of $g_{L+1}a - wh$ is dependent on the private data $\boldsymbol{v}$, which cannot be simulated by any $\mathcal{S}$.
\end{proof}
{\setlength{\parindent}{0cm}
\textbf{Our novel protocol $\boldsymbol{\Pi_{DP}}$.} To securely achieve the acceleration, we propose $\Pi_{DP}$, \textit{a novel dot-product protocol that follows the security} in Definition~\ref{definition:security}. The basic idea is to {enhance the security of Algorithm~\ref{alg:original_dp} using MPC and the finite field arithmetic}. This solution is simple but rather effective in terms of both security and efficiency.
}
$\Pi_{DP}$ is presented in Algorithm~\ref{alg:secure_dp}. It has three differences to the raw protocol:
\begin{enumerate}
\item $P_0$ and $P_i$ represent each element of their input vectors as fixed-point number encoded in $\mathbb{Z}_q$ as used in MPC~\cite{catrina2010secure,catrina2010improved,aly2019benchmarking} (Line 1), generates each random masking value from the same field $\mathbb{Z}_q$, and compute $b$, $\boldsymbol{U}$, $\boldsymbol{c}$, $\boldsymbol{g}$, and $a$, $h$ in $\mathbb{Z}_q$~\cite{catrina2010secure} (Lines 2-3);
\item $P_i$ only sends $h$ to $P_0$ but keeps $a$ private (Line 3);
\item the value $\beta - \alpha$ is jointly computed by all parties using MPC (Line 4).
\end{enumerate}
Note that the protocol incurs only one additional interactive operation when computing $\langle z \rangle = \langle \frac{1}{b} \rangle \langle a \rangle$. Thus, computing the Euclidean norm between $S$ and the $M-M_0$ series still requires $O(L_S) + O((N - L_S + 1)(M-M_0))$, which is \textit{much smaller} compared to directly using the MPC operations in $\Pi_{FedSS-B}$.
More importantly, we verify the security guarantee of $\Pi_{DP}$.
\begin{theorem}
$\Pi_{DP}$ is secure under the security definition defined in Definition~\ref{definition:security}.
\end{theorem}
\begin{proof}[Proof Sketch]
Since the secret-sharing-based MPC operations are secure, we focus on the messages beyond them.
We describe two simulators $\mathcal{S}_0$ and $\mathcal{S}_i$ that simulate the messages of the adversaries for party $P_0$ and $P_i$, respectively.
We first present $\mathcal{S}_0$. Similar to Eq~\ref{eq:h_alpha}, when receiving the message $h$, the adversary knows
\begin{equation}
h = (\boldsymbol{e}_2^T \cdot \boldsymbol{v} + g_{L+1}\alpha) \bmod q.
Since the masking values $\boldsymbol{e}_2^T$, $g_{L+1}$, and $\alpha$ are independently and uniformly sampled from $\mathbb{Z}_q$, the distribution of $h$ is identical to that of $h^\prime = g_{L+1}\alpha \bmod q$. In the ideal interaction, $\mathcal{S}_0$ independently and randomly chooses $\alpha$ and $g_{L+1}$ from $\mathbb{Z}_q$ to compute and send $h^\prime$ to the adversary. Hence, the views of the environment in both ideal and real interactions are indistinguishable.
Next, we discuss $\mathcal{S}_i$. By executing $\Pi_{DP}$ in the real interaction, the adversary of $P_i$ receives $\boldsymbol{U}$, $\boldsymbol{c}$, $\boldsymbol{g}$. Both $\boldsymbol{c}$ and $\boldsymbol{g}$ are derived from independent and randomly chosen values. Thus, $\mathcal{S}_i$ can follow the same procedure to compute them. Without loss of generality, we assume $r = 1$ and $d=2$. Then, $\boldsymbol{U} = \boldsymbol{Q} \cdot \boldsymbol{X}$ follows
\begin{equation}
\begin{split}
\left(\begin{array}{cc}
\boldsymbol{Q}_{1,1}x_1 + \boldsymbol{Q}_{1,2}\boldsymbol{x}_{2,1}\ \ & \boldsymbol{Q}_{2,1}x_1 + \boldsymbol{Q}_{2,2}\boldsymbol{x}_{2,1}\\
\ldots\ \ & \ldots\\
\boldsymbol{Q}_{1,1}x_L + \boldsymbol{Q}_{1,2}\boldsymbol{x}_{2,L}\ \ & \boldsymbol{Q}_{2,1}x_L + \boldsymbol{Q}_{2,2}\boldsymbol{x}_{2,L} \\
\boldsymbol{Q}_{1,1} + \boldsymbol{Q}_{1,2}\boldsymbol{x}_{2,L+1}\ \ &
\boldsymbol{Q}_{2,1} + \boldsymbol{Q}_{2,2}\boldsymbol{x}_{2,L+1}
\end{array}\right)^T
\end{split}
\end{equation}
Note that we omit the modular operations at each entry for ease of exposition. The value of each entry is masked by a unique triplet, e.g., $(\boldsymbol{Q}_{11}, \boldsymbol{Q}_{12}, \boldsymbol{x}_{21})$ at the entry (1,1). Because the values of these triplets are independently and randomly chosen from $\mathbb{Z}_q$, the elements of $\boldsymbol{U}$ are independent and identically distributed. Similar to $\mathcal{S}_0$, $\mathcal{S}_i$ can simulate $\boldsymbol{U}$ by computing $\boldsymbol{U}^\prime$, where
\begin{equation}
\boldsymbol{U}^\prime_{i,j} = \boldsymbol{Q}_{i,k}\boldsymbol{x}_{i,j} \bmod q,\ k \in \{1,\ldots,d\},
\end{equation}
and sends it along with $\boldsymbol{c}$, $\boldsymbol{g}$ to the adversary. The views of the environment in both ideal and real interaction are identically distributed.
In summary, the simulators achieve the same effect that the adversaries achieve. The security follows.
\end{proof}
With the security guarantee, we can integrate $\Pi_{DP}$ into $\Pi_{FedSS-B}$ to accelerate the distance computation. The protocol $\Pi_{DP}$ can also serve as a building block for other applications.
\section{Quality Measurement Acceleration}\label{measurement_acceleration}
Empirically, evaluating the shapelet quality using IG with the binary strategy (Section~\ref{TSC model}) is the {state-of-the-art method in terms of TSC accuracy}. However, computing IG in the FL setting suffers from a {severe efficiency issue}. The reasons are summarized as follows.
\begin{enumerate}
\item A large number ($M$) of thresholds will be evaluated for each candidate;
\item Evaluating different thresholds incurs duplicated interactive operations;
\item Evaluating one threshold is already inefficient mainly because the required secure division and logarithm operations are expensive (as illustrated in Figure~\ref{fig:mpc_throughputs});
\item The IG pruning strategies lose their efficacy due to the security issue (Section~\ref{protocol_bottleneck}).
\end{enumerate}
To consider both accuracy and efficiency, we propose to speed up the quality measurement in $\Pi_{FedSS-B}$ in two aspects:
\noindent
\textbf{O1: Accelerating IG computation.} To benefit from IG in terms of TSC accuracy, we propose a speed-up method to reduce as many interactive operations as possible in computing IG based on \textit{secure sorting} (Section~\ref{sorting_based_acceleration}), which tackles the problem in reason 1.
\noindent
\textbf{O2: Tapping alternative measures.} As the problems of 1, 3 and 4 are the \textit{inherent deficiencies} of IG which is difficult to avoid, we propose a trade-off method tailored for the FL setting by tapping other measures that are much more secure-computation-efficient than IG, at the cost of acceptable loss of TSC accuracy (Section~\ref{trade_off}).
\subsection{Sorting-based IG Acceleration}\label{sorting_based_acceleration}
The straightforward IG computation in Section~\ref{protocol_description} is inefficient since it incurs $O(M^2)$ \textit{secure comparisons} for $\langle \boldsymbol{\gamma}_L \rangle$, and $O(M^2)$ \textit{secure multiplications} for $\langle |DS_{\tau, L, y(S)}| \rangle$ and $\langle |DS_{\tau, R, y(S)}| \rangle$. Inspired by~\cite{mueen2011logical} and~\cite{abspoel2020secure}, we propose to {securely reduce the duplicated interactive operations by pre-sorting the secretly shared distances and labels} before computing each $Q_{IG}(S)$.
Assuming $\langle D_S \rangle = \bigcup_{i=0}^{n-1}\{\langle d_{T_j^i, S} \rangle\}_{j=1}^{M_i}$ are arranged in an \textit{ordered sequence}, i.e.,
\begin{equation}
\langle D_S^\prime \rangle = \{\langle d_j \rangle\}_{j=1}^M,\label{eq:sorting-a1}
\end{equation}
where
\begin{equation}
d_{j_1} < d_{j_2}, \forall\ 1 \le j_1 < j_2 \le M.\label{eq:sorting-a2}
\end{equation}
As such, for each threshold $\langle \tau \rangle = \langle d_{j} \rangle$, we can get $\boldsymbol{\gamma}_L^\prime$ without using secure comparison, as:
\begin{equation}
\boldsymbol{\gamma}_L^\prime = \boldsymbol{\gamma}_{DS^\prime_{\tau, L} \subseteq DS^\prime}
\end{equation}
where
\begin{equation}
\boldsymbol{\gamma}_L^\prime[j^\prime] = \begin{cases}
1, \quad & j^\prime < j\\
0, \quad & otherwise
\end{cases}
\end{equation}
Meanwhile, if $\langle \boldsymbol{\gamma}_{c} \rangle (c \in \{1,\ldots,C\})$ is permuted into $\langle \boldsymbol{\gamma}_{c}^\prime \rangle$ such that for each entry $j^\prime$, $\langle \boldsymbol{\gamma}_{c}^\prime \rangle$ and $\langle D_S^\prime \rangle$ indicates the same sample, i.e.,
\begin{equation}
\langle \boldsymbol{\gamma}_{c}^\prime \rangle[j^\prime] = \langle \boldsymbol{\gamma}^{i}_{TD^{i}_{c} \subseteq TD^{i}}\rangle[j] \iff \langle d_{j^\prime} \rangle = \langle d_{T_j^{i}, S} \rangle, \label{eq:sorting-a3}
\end{equation}
we can compute the statistics in Eq.~\ref{eq:IG_stats} by replacing $\langle \boldsymbol{\gamma}_L \rangle$, $\langle \boldsymbol{\gamma}_R \rangle$, $\langle \boldsymbol{\gamma}_{c} \rangle$ with $\boldsymbol{\gamma}_L^\prime$, $\boldsymbol{\gamma}_R^\prime = \boldsymbol{1} - \boldsymbol{\gamma}_L^\prime$, $\langle \boldsymbol{\gamma}_{c}^\prime \rangle$, respectively.
Note that the newly produced $\boldsymbol{\gamma}_L^\prime$ is in plaintext thanks to the order of $\langle DS^\prime \rangle$. Thus, it requires only $O(C)$ secure multiplications to compute the statistics in Eq~\ref{eq:IG_stats} for each threshold, where $C$ is a small constant representing the number of classes.
\begin{figure}
\caption{Running examples of sorting the sequence ($\langle 3 \rangle$, $\langle 5 \rangle$, $\langle 4 \rangle$, $\langle 1 \rangle$) using Quicksort and the sorting network.}
\label{fig:example_quicksort}
\label{fig:sorting_network}
\end{figure}
Based on the above observation, the key to the acceleration is to \textit{securely sort} the secretly shared \textit{distances} and the \textit{indicating vectors} of the class labels. Note that to satisfy the security in Definition~\ref{definition:security}, no intermediate information can be disclosed during the federated sorting, including not only the secretly shared values but also their order.
Although we can protect each of the values using MPC, the common sorting algorithms, e.g., Quicksort or Merge sort, rely on the \textit{order information} of the sort keys to reduce complexity. As a result, the order information will be disclosed during their running, which violates the security in Definition~\ref{definition:security}. We take the Quicksort as an example to illustrate the leakage of the order information, as is shown in Figure~\ref{fig:example_quicksort}.
To address the security problem while achieving a complexity smaller than $O(M^2)$, we adopt the sorting network~\cite{batcher1968sorting} to securely sort the distances. Given an input size, the sorting network has a \textit{fixed order of comparison operations}, regardless of the order of the input sequence~\cite{batcher1968sorting}. Therefore, we can protect both the value and the order information by \textit{performing the comparison and swapping operations using the secure comparison and assignment protocols} (see Section~\ref{mpc}) respectively. Figure~\ref{fig:sorting_network} is a running example of combining the sorting network and the MPC protocols.
The distances $\langle {D_S} \rangle$ are taken as the sorting key to permute both $\langle D_S \rangle$ and $\langle \boldsymbol{\gamma}_{c} \rangle$ ($c \in \{1,\ldots,C\}$) consistently. The output corresponds to the assumption in Equation~\ref{eq:sorting-a1}-\ref{eq:sorting-a2} and~\ref{eq:sorting-a3}. The sorting network takes $O(M\log^2M)$ interactive operations for the input of size $M$~\cite{batcher1968sorting}. Thus, the complexity of computing each $\langle Q_{IG}(S) \rangle$ becomes $O(M\log^2M)$, which is much smaller than the $O(M^2)$ in $\Pi_{FedSS-B}$.
\begin{theorem}
The sorting-based acceleration is secure under the security definition defined in Definition~\ref{definition:security}.
\end{theorem}
\begin{proof}[Proof Sketch]
The main difference between the acceleration method and the basic protocol for the IG computation is the usage of the sorting network, which is proved to be data-oblivious~\cite{bogdanov2014practical}. Thus, the security of the sorting-based acceleration follows.
\end{proof}
\subsection{Alternative-Measures-based Trade-off}\label{trade_off}
As discussed at the beginning
of Section~\ref{measurement_acceleration}, {although IG-based method is superior in TSC accuracy, it is naturally difficult to efficiently compute this metric.} To further accelerate the quality measure step, we propose to tap \textit{alternative measures} that can be securely and more efficiently achieved in the FL setting, while guaranteeing comparable TSC accuracy.
The shapelet quality can be evaluated by other measures, such as Kruskal-Wallis (KW)~\cite{lines2012alternative}, Mood's Median (MM)~\cite{lines2012alternative}, and ANOVA F (F-stat) test~\cite{hills2014classification}. However, these quality measures are less considered in recent works~\cite{bostrom2017binary,bagnall2020tale,middlehurst2021hive}, since they have no significant advantage over IG in terms of both accuracy and efficiency, especially when the binary strategy~\cite{bostrom2017binary} and the IG pruning technique~\cite{mueen2011logical} are integrated. { In the brand new FL scenario, the \textit{expensive communication cost} incurred by interactive operations and the \textit{failure of the pruning} for computing IG remind us to reexamine these alternatives.}
{\setlength{\parindent}{0cm}
\textbf{F-stat-based quality measurement.} As shown in~\cite{hills2014classification}, using F-stat for TSC slightly outperforms the methods with KW and MM in terms of accuracy. More essentially, F-stat performs with $O(M)$ secure multiplications and $C+1$ secure divisions in the FL setting, while both KW and MM require $O(M\log^2M)$ secure comparison and assignment operations because they rely on secure sorting, and they also need $C$ times of divisions. Thus, we choose F-stat as the alternative measure to achieve the trade-off.
}
Given $D_S = \{d_{T_j, S}\}_{j=1}^M$ and $\{y_{j}\}_{j=1}^M$ where $y_j \in \{c\}_{c=1}^C$, the F-stat is defined as:
\begin{equation}
Q_F(S) = \frac{\sum_{c=1}^C (\overline{D}_{S,c} - \overline{D}_S)^2 / (C - 1)}{\sum_{c=1}^C \sum_{y_j = c} (d_{T_j, S} - \overline{D}_{S, c})^2 / (M - C)},
\label{eq:F}
\end{equation}
where $\overline{D}_{S,c} = \frac{\sum_{d \in D_{S,c}} d}{|D_{S, c}|}$ is the mean distance w.r.t. class $c$ with $D_{S, c} = \{d_{T_j, S}|y_j=c\}_{j=1}^M$, and $\overline{D}_S$ is the mean of all distances.
Similar to the IG computing in Section~\ref{protocol_description}, we leverage the \textit{indicating vector} to indicate whether each sample belongs to each of the $C$ classes. Given
\begin{equation}
\langle D_S \rangle = \bigcup_{i=0}^{n-1} \{\langle d_{T_j^i,S} \rangle\}_{j=1}^{M_i},
\end{equation}
and the indicating vector $\langle \boldsymbol{\gamma}_{c} \rangle$ ($ c \in \{1,\ldots,C\}$) as:
\begin{equation}
\langle \boldsymbol{\gamma}_{c} \rangle = (\langle \boldsymbol{\gamma}^0_{TD^0_{c} \subseteq TD^0}\rangle, \ldots,\langle \boldsymbol{\gamma}^{n-1}_{TD^{n-1}_{c} \subseteq TD^{n-1}}\rangle),
\end{equation}
the parties jointly compute the terms:
\begin{equation}
\begin{split}
\langle \overline{D}_{S, c} \rangle &= \frac{\langle \boldsymbol{D_S} \rangle \cdot \langle \boldsymbol{\gamma}_{c} \rangle}{\langle \boldsymbol{\gamma}_{c} \rangle \cdot \boldsymbol{1} }, c \in \{1,\ldots,C\}\\
\langle \overline{D}_S \rangle &= \frac{\langle \boldsymbol{D_S} \rangle \cdot \boldsymbol{1}}{M}.
\end{split}
\end{equation}
Next, they jointly compute:
\begin{equation}
\langle \sum_{y_j = c} (d_{T_j, S} - \overline{D}_{S, c})^2 \rangle = \langle \boldsymbol{d_{c}} \rangle \cdot \langle \boldsymbol{d_{c}} \rangle, c \in \{1,\ldots,C\},
\end{equation}
where
\begin{equation}
\langle \boldsymbol{d_{c}} \rangle[j] = \langle \boldsymbol{\gamma}_{c} \rangle[j] \cdot (\langle d_{T_j, S} \rangle - \langle \overline{D}_{S, c} \rangle), j \in \{1,\ldots,M\}.
\end{equation}
Then, the parties can jointly compute $\langle Q_F(S) \rangle$ by Equation~\ref{eq:F}.
The protocol for $\langle Q_F(S) \rangle$ has a complexity of $O(M)$, while the computation of $\langle Q_{IG}(S) \rangle$ using our optimization in Section~\ref{sorting_based_acceleration} still takes $O(M\log^2M)$ interactive operations. Moreover, the empirical evaluation in Section~\ref{exp:efficiency} shows that the F-stat-based FedST achieves comparable accuracy to the prior IG-based solution.
\begin{theorem}
The F-stat-based shapelet quality measurement is secure under the security definition given in Definition~\ref{definition:security}.
\end{theorem}
\begin{proof}[Proof Sketch]
Similar to the IG-based method in Section~\ref{protocol_description} and Section~\ref{sorting_based_acceleration}, the input and output of the F-stat are both secret shares. The MPC operations and indicating vectors are used to make the computation data-oblivious. The security follows.
\end{proof}
\section{Experiments}\label{exp}
In this section, we empirically evaluate the effectiveness of the \texttt{FedST} method and the acceleration techniques.
\subsection{Experimental Setup}
Our experimental setups are as follows:
{
\setlength{\parindent}{0cm}
\textbf{Implementation.} FedST is implemented in Python. We utilize the SPDZ library~\cite{keller2020mp} for semi-honest additive-secret-sharing-based MPC. The security parameter is $\kappa = 40$, which ensures that the probability of information leakage, i.e., the quantity in Definition~\ref{definition:security}, is less than $2^{-\lambda}$ ($\lambda=\kappa$)~\cite{catrina2010secure,catrina2010improved}.
}
{
\setlength{\parindent}{0cm}
\textbf{Environment.} We build a cross-silo federated learning environment by running parties in isolated 16G RAM and 8 core Platinum 8260 CPUs docker containers installed with Ubuntu 20.04 LTS. The parties communicate with each other through the docker bridge network with 4Gbps bandwidth.
}
{
\setlength{\parindent}{0cm}
\textbf{Datasets.} We use both the \textit{real-world datasets} and the \textit{synthetic datasets} for evaluation at the following two different scales.
}
To evaluate the effectiveness of \texttt{FedST} framework, we use the popular 117 fixed-length TSC datasets of the UCR Archive \cite{DBLP:journals/corr/abs-1810-07758} that are collected from different types of applications, such as ECG or motion recognition. In the cross-silo and horizontal setting, each business has considerable (but still insufficient) training samples for every class. Thus, we randomly partition the training samples into 3 equal-size subsets to ensure each party has at least two samples for each class. Since there are 20 small datasets that cannot be partitioned as above, we omit them and test on the remaining 97 datasets.
To investigate the effectiveness of the acceleration techniques, we first assess the efficiency improvement of these techniques using the synthetic datasets. Since the secure computation is data-independent, we randomly generate the synthetic datasets of varying parameters. Next, we compare the F-stat to the prior IG measure in terms of both accuracy and efficiency on the 97 UCR datasets to validate the effectiveness of the trade-off.
{
\setlength{\parindent}{0cm}
\textbf{Metrics.} We use the \textit{accuracy} to evaluate the classification, which is measured as the number of samples that are correctly predicted over the testing dataset. For efficiency, we measure the \textit{running time} of the protocols in each step.
}
\begin{figure}
\caption{Pairwise comparison between \texttt{FedST} and the baselines.}
\label{subfig:vs local}
\label{subfig:vs global}
\label{subfig:vs locals}
\label{subfig:vs localt}
\label{fig:1v1}
\end{figure}
\subsection{Effectiveness of the FedST Framework}\label{exp:accuracy}
\textbf{Baselines.} Since the advantage of the shapelet transformation against other TSC methods has been widely shown~\cite{bagnall16bakeoff,bagnall2020tale,middlehurst2021hive}, we focus on investigating the \textit{effectiveness of enabling FL for TSC} in terms of classification accuracy. To achieve this goal, we compare our \texttt{FedST} with the four baselines:
\begin{itemize}
\item \texttt{LocalST}: the currently available solution that $P_0$ performs the centralized shapelet transformation with only its own data;
\item \texttt{GlobalST}: the ideal solution that $P_0$ uses the data of all parties for centralized shapelet transformation without privacy protection;
\item \texttt{LocalS+FedT}: a variant of \texttt{FedST} that $P_0$ executes the shapelet search step locally and collaborates with the participants for federated data transformation and classifier training;
\item \texttt{FedS+LocalT}: a variant of \texttt{FedST} that $P_0$ locally performs data transformation and classifier training using the shapelets found through federated shapelet search.
\end{itemize}
Following the centralized setting~\cite{bagnall2020tale,hills2014classification}, we adopt random forest as the classifier over the transformed data for all the methods. The candidates are sampled with the length ranging from $\min(3, \frac{N}{4})$ to $N$. The candidate set size is $\frac{MN}{2}$ and the number of shapelets $K$ is set to $\min\{\frac{N}{2},200\}$ and is reduced to 5 by clustering the found shapelets. The prior IG is used for assessing the shapelet quality.
{
\setlength{\parindent}{0cm}
\textbf{Pairwise comparison.} Figure~\ref{fig:1v1} reports the pairwise accuracy comparison of our \texttt{FedST} against the competitors.
}
Figure~\ref{subfig:vs local} shows that \texttt{FedST} is more accurate than the \texttt{LocalST} on most of the datasets. It indicates the effectiveness of our basic idea of enabling FL to improve the TSC accuracy. Figure~\ref{subfig:vs global} shows that \texttt{FedST} achieves accuracy close to the non-private \texttt{GlobalST}, which coincides with our analysis in Section~\ref{protocol_discussion}. The slight difference can be caused by two reasons. First, the global method samples the shapelets from all data, while in \texttt{FedST} the candidates are only generated by $P_0$ for the interpretability constraints. Second, in secret sharing, the float values are encoded in fixed-point representation for efficiency, which results in the truncation. Fortunately, we show later in Figure~\ref{fig:cd_ablation} that there is \textit{no statistically significant difference} in accuracy between \texttt{FedST} and \texttt{GlobalST}. From Figure~\ref{subfig:vs locals} and Figure~\ref{subfig:vs localt}, we can see that both the variants are much worse than \texttt{FedST}. It means that both the stages of \texttt{FedST} are indispensable.
{
\setlength{\parindent}{0cm}
\textbf{Multiple comparisons.} We present the critical difference diagram~\cite{demvsar2006statistical} of the methods in Figure~\ref{fig:cd_ablation}. It reports the \textit{mean ranking of accuracy} among the 97 UCR datasets. The competitors falling in one clique (the bold horizontal line) have no statistical difference, while the opposite for the methods from different cliques. Figure~\ref{fig:cd_ablation} shows that \texttt{FedST} is \textit{not statistically different} from \texttt{GlobalST} and they both statistically significantly outperform \texttt{LocalST}.
It is notable that the variant conducting only local shapelet search (\texttt{LocalS+FedT}), even using all parties' data for transformation, is slightly inferior to \texttt{LocalST}. The reason could be that the locally selected shapelets have poor quality due to the lack of training data, which may cause the transformed data to be more misleading to degrade the accuracy. In comparison, \texttt{FedS+LocalT} performs better than \texttt{LocalST}, because the shapelet quality is improved by FL with more training data used for shapelet search. Both variants are much inferior to \texttt{FedST}, which indicates the positive effect of FL for both stages.
}
\begin{figure}
\caption{Critical difference diagram for our \texttt{FedST} and the baselines.}
\label{fig:cd_ablation}
\end{figure}
\begin{figure*}
\caption{Performance of varying dataset size $M$ (default 512), series length $N$ (default 100), number of parties $n$ (default 3), and candidate set size $|\mathcal{SC}|$.}
\label{subfig:dis_M}
\label{subfig:dis_N}
\label{subfig:dis_n}
\label{subfig:measure_M}
\label{subfig:measure_n}
\label{subfig:search_M}
\label{subfig:search_N}
\label{subfig:search_n}
\label{subfig:search_SC}
\label{fig:efficiency_overall}
\end{figure*}
\subsection{Effectiveness of the Acceleration Techniques}\label{exp:efficiency}
\textbf{Efficiency improvement.} To assess the effectiveness of the proposed acceleration approaches, we first investigate their \textit{efficiency improvement} using the synthetic datasets of varying dataset size ($M$), time series length ($N$), number of parties ($n$) and candidate set size $|\mathcal{SC}|$. The average length of the shapelet candidates and the number of shapelets ($K$) are fixed to the moderate values of $0.6N$ and 200, respectively. Overall, the results presented in Figure~\ref{fig:efficiency_overall} coincide with our complexity analysis.
{
\setlength{\parindent}{0.2cm}
\textbf{1) Distance computation.} Figure~\ref{subfig:dis_M}-\ref{subfig:dis_n} report the time of computing the shapelet distance between a candidate $S$ and all training samples $T^i_j$ w.r.t. $M$, $N$, and $n$. The time for both $\Pi_{FedSS-B}$ that directly uses MPC (\texttt{$d$-MPC}) and the optimization leveraging the proposed secure dot-product protocol (\texttt{$d$-MPC}+$\Pi_{\mathtt{DP}}$) scales linearly with $M$ and $n$. However, \texttt{$d$-MPC}+$\Pi_{\mathtt{DP}}$ can achieve up to 30x of speedup over \texttt{$d$-MPC} for the default $N=100$. The time of \texttt{$d$-MPC} increases more quickly than \texttt{$d$-MPC}+$\Pi_{\mathtt{DP}}$ as $N$ increases, because the complexity of \texttt{$d$-MPC} is quadratic w.r.t. $N$ while our proposed \texttt{$d$-MPC}+$\Pi_{\mathtt{DP}}$ has a linear complexity of interactive operations.
}
We also show the time of finding the minimum Euclidean norm (\texttt{Find-Min}), which is a subroutine of the shapelet distance computation. The results show that \texttt{Find-Min} is much faster than \texttt{$d$-MPC}, which is consistent with our analysis in Section~\ref{Distance_Acceleration} that the time of \texttt{$d$-MPC} is dominated by the \textit{Euclidean norm computation}. In comparison, the time of \texttt{$d$-MPC}+$\Pi_{\mathtt{DP}}$ is very close to the time of \texttt{Find-Min} because the Euclidean norm computation time is substantially reduced (more than 58x speedup) with our $\Pi_{\mathtt{DP}}$.
{ \setlength{\parindent}{0.2cm}
\textbf{2) Quality measurement.} We show the time of quality measurement for each candidate $S$ with varying $M$ and $n$ in Figure~\ref{subfig:measure_M}-\ref{subfig:measure_n}. Compared to the IG computation in the basic protocol ($\mathtt{Q_{IG}}$), our proposed secure-sorting-based computation (\texttt{$\mathtt{Q_{IG}}$}+\texttt{Sorting}) achieves a similar performance when $M$ is small, but the time of $\mathtt{Q_{IG}}$ increases much faster than \texttt{$\mathtt{Q_{IG}}$+Sorting} as $M$ increases because $\mathtt{Q_{IG}}$ has a quadratic complexity to $M$. In comparison, the time of \texttt{$\mathtt{Q_{IG}}$+Sorting} is dominated by the secure sorting protocol (\texttt{Sorting}), which has a complexity of $O(M\log^2M)$. The optimized \texttt{$\mathtt{Q_{IG}}$+Sorting} is also more scalable to $n$ than $\mathtt{Q_{IG}}$.
Using F-stat in the quality measurement step ($\mathtt{Q_{F}}$) can achieve more than 65x of speedup over the optimized \texttt{$\mathtt{Q_{IG}}$+Sorting}. It is also noteworthy that $\mathtt{Q_{F}}$ is much faster than \texttt{Sorting} which bottlenecks the time of securely computing the KW and MM, as mentioned in Section~\ref{trade_off}. That is why we consider the F-stat for the acceleration.
}
{ \setlength{\parindent}{0.2cm}
\textbf{3) Federated shapelet search.} Finally, we assess the \textit{total running time of the federated shapelet search protocol} with each proposed acceleration technique. The results are reported in Figure~\ref{subfig:search_M}-\ref{subfig:search_SC}.
}
Overall, an individual $\Pi_{DP}$-based acceleration ($+\Pi_{\mathtt{DP}}$) brings 1.01-73.59x of improvement over $\Pi_{FedSS-B}$. The sorting-based (\texttt{+Sorting}) technique gives 1.01-96.17x of speedup alone and the F-stat-based method (+$\mathtt{Q_{F}}$) individually achieves 1.01-107.76x of speedup. The combination of these techniques is always more effective than each individual. $\Pi_{DP}$-based and Sorting-based methods together (+$\Pi_{\mathtt{DP}}$+\texttt{Sorting}) contribute 15.12-630.97x of improvement, while the combination of the $\Pi_{DP}$-based and F-stat-based techniques (+$\Pi_{\mathtt{DP}}$+$\mathtt{Q_{F}}$) boosts the protocol efficiency by 32.22-2141.64x.
We notice from Figure~\ref{subfig:search_M} that the time of $\Pi_{FedSS-B}$ is dominated by the distance computation when $M$ is small. In this case, $+\Pi_{\mathtt{DP}}$ is more effective. With the increase of $M$, the quality measurement step gradually dominates the efficiency. As a result, the \texttt{+Sorting} and +$\mathtt{Q_{F}}$ play a more important role in acceleration. Similarly, Figure~\ref{subfig:search_N} shows that the efficiency is dominated by the quality measurement when $N$ is small and is gradually dominated by the distance computation as $N$ increases. The acceleration techniques for these two steps are always complementary to each other.
It is also worth noting that the time of all competitors is nearly \textit{in direct proportion to} $|\mathcal{SC}|$, as shown in Figure~\ref{subfig:search_SC}. The result is consistent with our analysis in Section~\ref{protocol_discussion} that the time for securely \textit{finding the top-$K$ candidates} (Algorithm~\ref{alg:fedss_bs} Line 10), which has a complexity of $O(K\cdot |\mathcal{SC}|)$, is \textit{negligible} compared to the time of distance computation and quality measurement. That is why we mainly dedicate our efforts to accelerating these two steps.
\begin{figure}
\caption{Accuracy and federated shapelet search time of \texttt{FedST}.}
\label{fig:acc_vs_time}
\end{figure}
{ \setlength{\parindent}{0cm}
\textbf{Effectiveness of the trade-off strategy.} We investigate the effectiveness of the F-stat-based protocol in \textit{trading off TSC accuracy and the protocol efficiency.} Specifically, we evaluate both the accuracy and the federated shapelet search time for the two versions of \texttt{FedST} that adopt either the prior $Q_{IG}$ (\texttt{FedST-$\mathtt{Q_{IG}}$}) or the more efficient $Q_{F}$ (\texttt{FedST-$\mathtt{Q_{F}}$}). The experiments are conducted using 97 UCR datasets with the same setting as Section~\ref{exp:accuracy}. Both the $\Pi_{DP}$-based and the sorting-based speedup methods are adopted.
}
As shown in Figure~\ref{fig:acc_vs_time} top, \texttt{FedST-$\mathtt{Q_{F}}$} is faster than \texttt{FedST-$\mathtt{Q_{IG}}$} on all 97 datasets. The efficiency improvement is 1.04-8.31x while the average speedup on the 97 datasets is 1.79x. Meanwhile, \texttt{FedST-$\mathtt{Q_{F}}$} is better than \texttt{FedST-$\mathtt{Q_{IG}}$} on 41 of the 97 datasets in terms of accuracy (Figure~\ref{fig:acc_vs_time} bottom). The average accuracy of \texttt{FedST-$\mathtt{Q_{F}}$} is just $0.5\%$ lower than that of \texttt{FedST-$\mathtt{Q_{IG}}$}. Figure~\ref{fig:cd_main_accuracy} shows the critical difference diagram for these two methods and the two FL baselines (\texttt{LocalST} and \texttt{GlobalST}). The result indicates that \texttt{FedST-$\mathtt{Q_{F}}$} achieves the same level of accuracy as \texttt{FedST-$\mathtt{Q_{IG}}$} and \texttt{GlobalST}, and is significantly better than \texttt{LocalST}. In summary, our proposed F-stat-based trade-off strategy can effectively improve the efficiency of the federated shapelet search while guaranteeing comparable accuracy to the superior IG-based method.
\begin{figure}
\caption{Critical difference diagram for \texttt{FedST} and the baselines.}
\label{fig:cd_main_accuracy}
\end{figure}
\subsection{Study of Interpretability}\label{exp:interpretability}
Figure~\ref{fig:interpretation} demonstrates the interpretability of \texttt{FedST} using a real-world motion classification problem named GunPoint~\cite{DBLP:journals/corr/abs-1810-07758}. The data track the centroid of the actors' right hand for two types of motions. For ``Gun'' class, they draw a replica gun from a hip-mounted holster, point it at a target, then return the gun to the holster and their hands to their sides. For ``No gun (Point)'', the actors have their gun by their sides, point with their index fingers to a target, and then return their hands. The best shapelets of the two classes are shown in Figure~\ref{subfig:GunPoint_shapelets}, which are derived from the data of the initiator and represent the class-specific features, i.e., the hand tracks of drawing the gun ($S_1$) and putting down the hand ($S_2$). We transform all time series samples into the distances to these shapelets and visualize the results in Figure~\ref{subfig:GunPoint_ST}. As can be seen, instead of considering the original time series space which has 150 data points per sample, by using the two shapelets, the classification can be explained with the concise rule that the samples more similar to $S_1$ and distant from $S_2$ belong to class ``Gun'' (red), and the opposite for the ``No gun'' data (blue). The study indicates that the shapelet-based features are easier to interpret than the raw time series.
\begin{figure}
\caption{Interpretability study using GunPoint~\cite{DBLP:journals/corr/abs-1810-07758}.}
\label{subfig:GunPoint_shapelets}
\label{subfig:GunPoint_ST}
\label{fig:interpretation}
\end{figure}
\subsection{Study of Flexibility}\label{exp:flexibility}
We further investigate the flexibility of \texttt{FedST} as discussed in Section~\ref{fedst_kernel}. We evaluate the accuracy and the protocol running time on each of the 97 UCR datasets with the time contract varying from $10\%$ to $90\%$ of the maximum running time (the time evaluated in Section~\ref{exp:efficiency} using IG). Figure~\ref{fig:flexibility} reports the results. Overall, the accuracy increases as more time is allowed, while the real running time is always close to the contract. It validates the effectiveness of balancing the accuracy and efficiency using the user-defined time contract, which is beneficial for the practical utility.
Note that with only 10\% running time (approximately 10\% of the candidates assessed at random), FedST can achieve at least 77\% of the maximum accuracy among the 97 datasets, which implies that the high-quality shapelets are highly redundant. The results also confirm the feasibility of generating candidates from $P_0$ in the cross-silo setting where each party has considerable (but insufficient) data.
\begin{figure}
\caption{The accuracy (top) and real running time (bottom) w.r.t. the user-defined time contract.}
\label{fig:flexibility}
\end{figure}
\section{Further Enhancement by Incorporating Differential Privacy}\label{dp-protect}
As discussed in Section~\ref{fedst_framework}, FedST allows only the found shapelets and the learned models to be revealed to the initiator $P_0$. In this section, we illustrate that we can incorporate differential privacy (DP)~\cite{dwork2014algorithmic} for additional privacy protection, guaranteeing that the released shapelets and models disclose limited information about the private training data of the parties. The differential privacy is defined as follows.
\begin{definition}[Differential Privacy]
Formally, a function $f$ satisfies $(\epsilon,\delta)$-DP, if for any two data sets $D$ and $D^\prime$ differing in a single record and any output $O$ of $f$, we have
\begin{equation}
\Pr[f(D)\in O] \le e^\epsilon \cdot \Pr[f(D^\prime)\in O] + \delta,
\end{equation}
where $\epsilon$ is the privacy budget controlling the tradeoff between the accuracy of $f$ and the degree of privacy protection that $f$ offers.
\end{definition}
Intuitively, the function $f$ is differentially private since the probability of producing a given output (e.g., shapelets or models) is not highly dependent on whether a particular data record exists in $D$. As a result, the information about each private record cannot be inferred from the output with a high probability.
In FedST, we have two main stages: the federated shapelet search that produces the $K$ best shapelets, and the data transformation and classifier training step which builds the classification model and outputs the model parameters. Many existing works have studied DP algorithms to protect the parameters of the commonly used models~\cite{wu13privacy,abadi2016deep,chaudhuri2008privacy}, which can be seamlessly integrated into FedST. Therefore, we elaborate below on how to incorporate DP to the federated shapelet search.
As defined in Definition~\ref{definition:FedSS}, the federated shapelet search takes the parties' training time series data and the shapelet candidates as the input, and the output is the $K$ candidates with the highest quality ($Q_{IG}$ or $Q_F$ depending on the used measure). Note that the quality of each candidate is evaluated using all training time series samples. Therefore, we can keep the private training samples from being disclosed by \textit{protecting the quality of each individual candidate}. To achieve this goal, we make the quality of the candidates \textit{noisy} before retrieving the $K$ best ones.
Concretely, the parties jointly add secretly shared noises to the quality of all candidates using the secure random number generator~\cite{keller2020mp}. The noise for each candidate should be identically and independently distributed and follows a Laplace distribution whose parameter is public and related to $\epsilon$, which is referred to as the \textit{Laplace mechanism}~\cite{dwork2014algorithmic}. Then, the parties retrieve the candidate with the \textit{maximum} quality by executing the secure comparison and assignment protocols (see Section~\ref{mpc}) and reveal the index to $P_0$. The two steps are repeated $K$ times to find the noisy $K$ best shapelets.
The above algorithm for finding the maximum is referred to as the \textit{Report Noisy Max} algorithm~\cite{dwork2014algorithmic}, which is $(\epsilon, 0)$-differentially private. Thus, according to Theorem 3 in~\cite{NEURIPS2019_b139e104}, the algorithm of retrieving the $K$ best shapelet candidates by calling the Report Noisy Max algorithm is $(\epsilon^\prime, \delta^\prime)$-DP for any $\delta^\prime \ge 0$ where
\begin{equation}
\epsilon^\prime = \min \left\{ \epsilon K, \epsilon K(\frac{e^\epsilon-1}{e^\epsilon+1}) + \sqrt{2\epsilon^2 K\ln(\frac{1}{\delta^\prime})} \right\}.
\end{equation}
In conclusion, the integration of DP provides an additional layer to protect the privacy of the revealed shapelets and models, which can further enhance the security of FedST.
\section{Conclusion and future works}\label{sec:conclusion}
This work studies how to customize a federated learning solution for the time series classification problem. We systematically investigate existing TSC methods for the centralized scenario and propose FedST, a novel FL-enabled TSC framework based on the centralized shapelet transformation. We design the security protocol $\Pi_{FedSS-B}$ for the FedST kernel, analyze its effectiveness, and identify its efficiency bottlenecks. To accelerate the protocol, we propose specific optimizations tailored for the FL setting. Both theoretical analysis and experimental results show the effectiveness of our proposed FedST framework and the acceleration techniques.
In the future, we would like to consider other types of interpretable features to complement FedST. Further, we wish to develop a high-performance system to support industrial-scale applications.
\end{document} |
\begin{document}
\title{On the mixed even-spin Sherrington-Kirkpatrick model with ferromagnetic interaction}
\author{Wei-Kuo Chen\footnote{Department of Mathematics, University of California at Irvine, 340 Rowland Hall, Irvine, CA 92697-3875, USA, email: weikuoc@uci.edu}}
\maketitle
\begin{abstract}
We study a spin system with both mixed even-spin Sherrington-Kirkpatrick (SK) couplings and Curie-Weiss (CW) interaction. Our main results are: (i) The thermodynamic limit of the free energy is given by a variational formula involving the free energy of the SK model with a change in the external field. (ii) In the presence of a centered Gaussian external field, the positivity of the overlap and the extended Ghirlanda-Guerra identities hold on a dense subset of the temperature parameters. (iii) We establish a general inequality between the magnetization and overlap. (iv) We construct a temperature region in which the magnetization can be quantitatively controlled and deduce different senses of convergence for the magnetization depending on whether the external field is present or not. Our approach is based on techniques from the study of the CW and SK models and results in convex analysis.
\end{abstract}
{MSC: 60K35, 82B44}
\smallskip
{ Keywords: Ferromagnetic interaction; Ghirlanda-Guerra identities; Parisi formula; Sherrington-Kirkpatrick model; Ultrametricity}
\section{Introduction}
The Sherrington-Kirkpatrick (SK) model formulated by Sherrington and Kirkpatrick \cite{SK75} is one of the most important mean field spin glasses with the aim of understanding strange magnetic properties of certain alloys. In the recent decades, many essential conjectures proposed by physicists have been intensively studied in the mathematical community, including the validity of the Parisi formula and the ultrametricity of the overlap. In this paper, we are interested in the SK model coupled with the familiar Curie-Weiss (CW) ferromagnetic interaction. There have been a few studies of this model so far \cite{ACCR10,CGL99,Tou95} (one may also refer to \cite{CCT05} for a much more difficult coupling, the SK model with Ising interaction). However, rigorous results are very limited and mainly restricted to the high temperature regime. The main reason is that in this case, the effect of the (non random) ferromagnetic interaction can be linearly approximated. The model then becomes essentially the SK model with a slight perturbation on the external field. Therefore as one might expect, in the high temperature region, many properties of the SK model are also valid in our model. Indeed, following the same arguments as \cite{Talag101} or \cite{Talag102}, one can prove (see \cite{ACCR10}) that for this model, the thermodynamic limit of the free energy exists, the magnetization and overlap in the limit concentrate on a singleton, the central limit theorem for the free energy holds, and the Thouless-Anderson-Palmer system of equations is valid.
\smallskip
We will be concerned with the more general mean field model with both the mixed even-spin SK couplings and ferromagnetic interaction (SKFI) and address the following questions: (i) How can one compute the thermodynamic limit of the free energy of the SKFI model? (ii) Which properties does the SKFI model inherit from the CW and SK models? (iii) Is there any general relation between the magnetization and overlap? (iv) Can one give a quantitative control on the magnetization or the overlap in general? Our answers to these problems will be stated in Section \ref{MR} and will also cover the situation in the low temperature regime.
\smallskip
Let us now give the description of the SKFI model, which depends on two quantities: the (inverse) temperature parameter $(\beta,\boldsymbol{\beta})\in\mathcal{B}$ and external field $h,$ where $\mathcal{B}=\{(\beta,(\beta_p)_{p\geq 1}):\beta\geq 0,\,\,\sum_{p\geq 1}2^p\beta_p^2<\infty\}$
and $h$ is a Gaussian random variable (possibly degenerate). One may think of $\beta$ as the temperature for the CW interaction and $\boldsymbol{\beta}$ as the temperature for the SK couplings. Let us emphasize that $\beta_p$ may take negative values, while since we are concerned with the ferromagnetic interaction, the CW temperature $\beta$ only takes nonnegative values. For each positive integer $N,$ set the configuration space $\Sigma_N=\left\{-1,+1\right\}^N.$ Let $(h_i)_{i\leq N}$ be i.i.d. copies of $h.$ For a given temperature $(\beta,\boldsymbol{\beta})\in\mathcal{B}$ and external field $h,$ the SKFI model has Hamiltonian
\begin{align}\label{model:eq1}
H_N(\boldsymbol{\sigma})=\frac{\beta N}{2}m(\boldsymbol{\sigma})^2+H_N^{SK}(\boldsymbol{\sigma})+\sum_{i\leq N}h_i\sigma_i,
\end{align}
where the quantity $m=m(\boldsymbol{\sigma}):=N^{-1}\sum_{i\leq N}\sigma_i$ is called the magnetization per site. Here, $H_{N}^{SK}$ denotes the mixed even $p$-spin interactions for the SK model, that is,
\begin{align}\label{model:eq2}
H_{N}^{SK}(\boldsymbol{\sigma})=\sum_{p\geq 1}\frac{\beta_p}{N^{p-1/2}}\sum_{1\leq i_1,\ldots,i_{2p}\leq N}g_{i_1,\ldots,i_{2p}}\sigma_{i_1}\cdots
\sigma_{i_{2p}},
\end{align}
where $\boldsymbol{g}=(g_{i_1,\ldots,i_{2p}}:1\leq i_1,\ldots,i_{2p}\leq N, p\geq 1)$ are i.i.d. standard Gaussian
r.v.s independent of $(h_i)_{i\leq N}$. One may easily see that the covariance of $H_{N}^{SK}$ is a function of the overlap $R_{1,2}=R(\boldsymbol{\sigma}^1,\boldsymbol{\sigma}^2):=N^{-1}\sum_{i\leq N}\sigma_i^1\sigma_i^2$ through $\mathbb{E}H_{N}^{SK}(\boldsymbol{\sigma}^1)H_{N}^{SK}(\boldsymbol{\sigma}^2)=N\xi(R_{1,2}),$ where $\xi(x):=\sum_{p\geq 1}\beta_p^2x^{2p}.$
\smallskip
We define the partition function, Gibbs measure, and free energy for the SKFI model, respectively, by $Z_N=Z_N(\beta,\boldsymbol{\beta},h)=\sum_{\boldsymbol{\sigma}\in\Sigma_N}\exp H_N(\boldsymbol{\sigma})$, $G_N(\boldsymbol{\sigma})=\exp H_N(\boldsymbol{\sigma})/Z_N$, and $F_N=F_N(\beta,\boldsymbol{\beta},h)=N^{-1}\mathbb{E}\ln Z_N.$ We will use $\boldsymbol{\sigma}^1,\boldsymbol{\sigma}^2,$ etc. to denote the \emph{replicas} sampled independently from $G_N.$ For any real-valued function $f$ on $\Sigma_N^n$, we define its Gibbs average corresponding to the Gibbs measure $G_N$ as
$$
\left<f\right>=\sum_{\boldsymbol{\sigma}^1,\ldots,\boldsymbol{\sigma}^n\in\Sigma_N}f(\boldsymbol{\sigma}^1,\ldots,\boldsymbol{\sigma}^n)G_N(\boldsymbol{\sigma}^1)
\cdots G_N(\boldsymbol{\sigma}^n).
$$
In the case of $\beta=0,$ our model is known as the mixed even $p$-spin SK model (see \cite{Talag102}) and
we will use $Z_N^{SK}$, $G_N^{SK}$, $F_N^{SK}$, and $\left<\cdot\right>^{SK}$ to denote its partition function, Gibbs measure, free energy, and
Gibbs average, respectively. On the other hand, if $\boldsymbol{\beta}=\mathbf{0}$, our model reduces to the Curie-Weiss (CW) model and
$Z_N^{CW}$, $G_N^{CW},$ $F_N^{CW},$ and $\left<\cdot\right>^{CW}$ are also defined in the same manner.
\section{Main results}\label{MR}
Our main results will be stated in this section. Proofs are deferred to Section \ref{proof}. Throughout the paper, we will use $I(E)$ to denote the indicator function for the event $E.$
\subsection{The thermodynamic limit of the free energy}\label{main:sec1}
Let us begin by illustrating the different natures of the SKFI, CW, and SK models. The SK model has been widely studied, see, for example, \cite{SK75} and \cite{Talag03} for details. By an application of the Gaussian interpolation technique, Guerra and Toninelli proved \cite{GuTo02} that
$\{\mathbb{E}\ln Z_N^{SK}\}_{N\geq 1}$ is superadditive and as a consequence
\begin{align}\label{main:eq0}
F^{SK}(\boldsymbol{\beta},h):=\lim_{N\rightarrow\infty}F_N^{SK}(\boldsymbol{\beta},h)
\end{align}
exists. Using Jensen's inequality, it is easy to see that $\{\mathbb{E}\ln Z_N^{CW}\}_{N\geq 1}$ is subadditive, which ensures the existence of the thermodynamic limit of the free energy for the CW model. However, if $\beta\neq 0$ and $\boldsymbol{\beta}\neq\mathbf{0}$, neither superadditivity nor subadditivity obviously holds for $\{\mathbb{E}\ln Z_N\}_{N\geq 1}$ in the SKFI model. The existence of the thermodynamic limit of the free energy for the SKFI model was firstly shown in \cite{GuTo03}. Our first main result regarding the formula of the thermodynamic limit of the free energy for the SKFI model is stated as follows:
\begin{theorem}\label{main:thm1}
For any $(\beta,\boldsymbol{\beta})\in\mathcal{B}$, we have
\begin{align}\label{main:thm1:eq1}
F(\beta,\boldsymbol{\beta},h):=\lim_{N\rightarrow\infty}F_N(\beta,\boldsymbol{\beta},h)=\max_{\mu\in\left[-1,1\right]}
\left\{F^{SK}(\boldsymbol{\beta},\beta\mu+h)-\frac{\beta\mu^2}{2}\right\}.
\end{align}
\end{theorem}
\smallskip
For any given $(\beta,\boldsymbol{\beta})$ and $h,$ we set
\begin{align}\label{main:eq1}
\Omega=\Omega(\beta,\boldsymbol{\beta},h)={\rm Argmax}_{\mu\in\left[-1,1\right]}\left\{F^{SK}(\boldsymbol{\beta},\beta\mu+h)-\frac{\beta\mu^2}{2}\right\}.
\end{align}
The following proposition says that the magnetization is essentially supported on $\Omega(\beta,\boldsymbol{\beta},h).$
\begin{proposition}\label{main:prop1}
For any open subset $U$ of $\left[-1,1\right]$ with $$
\inf\left\{|x-y|:x\in U,y\in \Omega\right\}>0,$$ we have for every
$N,$
\begin{align}\label{main:prop1:eq2}
\mathbb{E}\left<I(m\in U)\right>\leq
K\exp\left(-\frac{N}{K}\right),
\end{align}
where $K$ is a constant independent of $N.$ In particular,
\begin{align}\label{main:prop1:eq1}
\lim_{N\rightarrow\infty}\left<I(m\in U)\right>=0\quad a.s.
\end{align}
\end{proposition}
\subsection{Positivity of the overlap}\label{smpo}
In the SK model with external field, Talagrand \cite{Talag102} proved that the overlap is essentially greater than a positive constant
with high probability, and deduced from this fact that the extended Ghirlanda-Guerra identities hold.
In this section, we prove that these results are ``typically'' valid in the SKFI model.
\smallskip
Before we state our main results, let us recall the formulation of the Parisi formula and some known results regarding the differentiability of the Parisi measure. Let $\mathcal{M}_0$ be the collection of probability measures on $\left[0,1\right]$ that
consist of a finite number of point masses.
For each $\nu\in\mathcal{M}_0$, we consider a function $\Phi_\nu(x,q)$ defined on $\mathbb{R}\times\left[0,1\right]$ with
$\Phi_\nu(x,1)=\ln\cosh(x)$ and satisfying the PDE
\begin{align}\label{pos:eq0}
\frac{\partial\Phi_\nu}{\partial q}=-\frac{1}{2}\xi''(q)\left(\frac{\partial^2\Phi_\nu}{\partial x^2}+\nu(\left[0,q\right])\left(
\frac{\partial\Phi_\nu}{\partial x}\right)^2\right).
\end{align}
The Parisi formula states that
the thermodynamic limit of the free energy of the SK model with temperature $\boldsymbol{\beta}$ and external field $h$ can be represented as
\begin{align}\label{pos:eq1}
F^{SK}(\boldsymbol{\beta},h)=\inf_{\nu\in\mathcal{M}_0}\mathcal{P}(\boldsymbol{\beta},h,\nu),
\end{align}
where
$$
\mathcal{P}(\boldsymbol{\beta},h,\nu):=\ln 2+\mathbb{E}\Phi_\nu(h,0)-\frac{1}{2}\theta(1)+\frac{1}{2}\int_0^1\theta(q)\nu(dq),\quad \nu\in\mathcal{M}_0
$$
and $\theta(q):=q\xi'(q)-\xi(q).$ The validity of this formula was firstly verified in the work of Talagrand \cite{Tal06} and was later extended to the general mixed $p$-spin SK model \cite{Pan112} and the spherical SK model \cite{C12,Talag062}. Let $\mathcal{M}$ be the space of all probability measures on $\left[0,1\right]$ endowed with the weak topology. Since $\mathcal{P}(\boldsymbol{\beta},h,\cdot)$ is Lipschitz with respect to the metric (see \cite{Gu03} and \cite{Talag06}):
\begin{align}\label{metric}
d(\nu_1,\nu_2):=\int_0^1|\nu_1(\left[0,q\right])-\nu_2(\left[0,q\right])|dq,\quad\nu_1,\nu_2\in\mathcal{M}_0,
\end{align}
$\mathcal{P}$ can be extended continuously to $\mathcal{M}.$ From
the compactness of $\mathcal{M}$, the infimum $(\ref{pos:eq1})$ is
achieved and any $\nu\in\mathcal{M}$ that achieves the infimum is
called a Parisi measure. Arguments of \cite{Pan08} and \cite{Talag06} imply the differentiability of the Parisi formula
\begin{align}\label{pos:eq2}
\frac{\partial}{\partial\beta_p}F^{SK}(\boldsymbol{\beta},h)=\beta_p\left(1-\int_0^1 q^{2p}\nu_{\boldsymbol{\beta},h}(dq)\right)
\end{align}
and give the moment computation for $|R_{1,2}|$ via
\begin{align}\label{pos:eq3}
\lim_{N\rightarrow\infty}\mathbb{E}\left<R_{1,2}^{2p}\right>^{SK}=\int_0^1q^{2p}\nu_{\boldsymbol{\beta},h}(dq)
\end{align}
provided $\beta_p\neq 0$, where $\nu_{\boldsymbol{\beta},h}$ is a Parisi measure. In the case of $\beta_p\neq 0$ for all $p\geq 1,$ $(\ref{pos:eq3})$ implies that $\nu_{\boldsymbol{\beta},h}$ is the limiting distribution of $|R_{1,2}|$; if, in addition, $h$ is nondegenerate, it is well-known that the Parisi measure takes nonnegative values and, again, from $(\ref{pos:eq3}),$ the Parisi measure is the limiting distribution of the overlap $R_{1,2}$ (see Chapter 14 of \cite{Talag102} for detailed discussions).
\smallskip
Recall the set $\Omega$ from $(\ref{main:eq1}).$ Let us denote by $\mathcal{B}_d$ the collection of all $(\beta,\boldsymbol{\beta})\in\mathcal{B}$ that satisfy $\beta>0$ and
\begin{align}\label{pos:eq5}
\mbox{either $|\Omega(\beta,\boldsymbol{\beta},h)|=1$ or $\Omega(\beta,\boldsymbol{\beta},h)=\left\{\mu,-\mu\right\}$ for some $0<\mu<1.$}
\end{align}
The following proposition gives the connection between the set $\mathcal{B}_d$ and the differentiability of $F(\beta,\boldsymbol{\beta},h)$ with respect to $\beta$.
\begin{proposition}\label{pos:prop1}
Suppose that $(\beta,\boldsymbol{\beta})\in \mathcal{B}$ with $\beta>0$. Then $\frac{\partial F}{\partial\beta} (\beta,\boldsymbol{\beta},h)$ exists if and only if $(\beta,\boldsymbol{\beta})\in \mathcal{B}_d.$
\end{proposition}
Note that $(\beta,\boldsymbol{\beta})\mapsto F(\beta,\boldsymbol{\beta},h)$ is a continuous convex function on the space of all $(\beta,\boldsymbol{\beta})\in\mathcal{B}$ with $\beta>0$. Such space is obviously open in the separable Banach space $\{(\beta,\boldsymbol{\beta}):\beta^2+\sum_{p\geq 1}2^p\beta_p^2<\infty\}$ endowed with the norm $|(\beta,\boldsymbol{\beta})|=(\beta^2+\sum_{p\geq 1}2^p\beta_p^2)^{1/2}.$ It follows, by Mazur's theorem (Theorem 1.20 \cite{Phe89}), that the set where $F(\cdot,\cdot,h)$ is G\^{a}teaux-differentiable (in the sense that the directional derivative exists in all directions) is a dense $G_\delta$ set in $\mathcal{B}$ contained in $\mathcal{B}_d.$ This means that typically the magnetization concentrates either on a singleton or two distinct values, which are symmetric with respect to the origin. This property coincides with the behavior of the CW model.
\smallskip
We prove that analogues of $(\ref{pos:eq2})$ and $(\ref{pos:eq3})$ also hold for the SKFI model in certain temperature region. For technical purposes, we assume that $h$ is centered. Let us denote by $\mathcal{B}'$ the collection of all $(\beta,\boldsymbol{\beta})\in\mathcal{B}$ with $\beta>0$ and $\beta_p\neq 0$ for all $p\geq 1$. Set $\mathcal{B}_d'=\mathcal{B}_d\cap \mathcal{B}'.$ Notice that $\mathcal{B}_d'$ is a $G_\delta$ subset in $\mathcal{B}'$ and that, concluding from the convexity of $F(\cdot,\boldsymbol{\beta},h)$ for every fixed $(\boldsymbol{\beta},h)$, $F(\cdot,\boldsymbol{\beta},h)$ is differentiable for all but countably many $\beta.$ Thus, using Proposition \ref{pos:prop1}, $\mathcal{B}_d'$ forms a dense $G_\delta$ subset in $\mathcal{B}'.$
\begin{theorem}\label{pos:thm1} If $(\beta,\boldsymbol{\beta})\in \mathcal{B}_d'$, then for every $p\geq 1,$ we have
\begin{align}\label{pos:thm1:eq1}
\frac{\partial F}{\partial \beta_p}(\beta,\boldsymbol{\beta},h)=\beta_p\left(1-\int_0^1q^{2p}\nu_{\boldsymbol{\beta},\beta\mu+h}(dq)\right)
\end{align}
and
\begin{align}\label{pos:thm1:eq2}
\lim_{N\rightarrow\infty}\mathbb{E}\left<R_{1,2}^{2p}\right>=\int_0^1q^{2p}\nu_{\boldsymbol{\beta},\beta\mu+h}(dq),
\end{align}
where $\nu_{\boldsymbol{\beta},\beta\mu+h}$ is the unique Parisi measure for the SK model with temperature $\boldsymbol{\beta}$, external field $\beta\mu+h$,
and $\mu\in\Omega(\beta,\boldsymbol{\beta},h).$
\end{theorem}
From $(\ref{pos:thm1:eq1})$, it means that the limiting distribution of $|R_{1,2}|$ is determined by the Parisi measure $\nu_{\boldsymbol{\beta},\beta\mu+h}.$ If $\mathbb{E}h^2\neq 0,$ we will prove that $\nu_{\boldsymbol{\beta},\beta\mu+h}$ gives the limiting distribution of the overlap $R_{1,2}$ relying on Talagrand's positivity of the overlap in the SK model. The precise statement of the latter is described as follows: Let $\mathbb{E} h^2\neq 0$. Consider a Parisi measure $\nu$ and the smallest point $c$ in the support of $\nu.$ Then $c>0$ and for any $c'<c,$ there exists some constant $K$ independent of $N$ such that
\begin{align}\label{com:eq0}
\mathbb{E}\left<I\left(R_{1,2}\leq c'\right)\right>^{SK}\leq K\exp\left(-\frac{N}{K}\right).
\end{align}
As for the SKFI model, we have a weaker version of Talagrand's positivity.
\begin{theorem}\label{pos:thm2}
Suppose that $\mathbb{E}h^2\neq 0$. Let $(\beta,\boldsymbol{\beta})\in \mathcal{B}_d'$ and $\nu_{\boldsymbol{\beta},\beta\mu+h}$ be the Parisi measure of the SK model stated in
Theorem $\ref{pos:thm1}.$ Suppose that $c$ is the smallest value in the support of $\nu_{\boldsymbol{\beta},\beta\mu+h}$. Then $c>0$ and for every $0<c'<c,$ we have
\begin{align}\label{pos:thm2:eq1}
\lim_{N\rightarrow\infty}\mathbb{E}\left<I\left(R_{1,2}\leq c'\right)\right>=0
\end{align}
and for every continuous function $f$ on $\left[-1,1\right],$
\begin{align}\label{pos:thm2:eq2}
\lim_{N\rightarrow\infty}\mathbb{E}\left<f(R_{1,2})\right>=\int_0^1f(q)\nu_{\boldsymbol{\beta},\beta\mu+h}(dq).
\end{align}
\end{theorem}
The equation $(\ref{pos:thm2:eq1})$ implies that in our model the overlap is greater than or equal to a positive constant $c$ with high probability, mirroring the same phenomenon in the SK model with Gaussian external field. More importantly, $(\ref{pos:thm2:eq2})$ means that the limiting law of the overlap of the SKFI model is the same as that of the SK model with a shifted external field $\beta\mu+h.$
\begin{proposition}\label{pos:prop2:GGI}
Let $(\beta,\boldsymbol{\beta})\in \mathcal{B}_d'.$ If $\mathbb{E}h^2\neq 0,$ then the sequence $(G_N)$ of Gibbs measures of the SKFI model satisfies
the extended Ghirlanda-Guerra (EGG) identities, that is, for each $n$ and each continuous function $\psi$
on $\mathbb{R}$, we have
\begin{align}\label{pos:prop2:GGI:eq1}
\lim_{N\rightarrow\infty}\sup_f\left|n\mathbb{E}\left<\psi(R_{1,n+1})f\right>-\mathbb{E}\left<\psi(R_{1,2})\right>\mathbb{E}\left<f\right>-\sum_{2\leq l\leq n}\mathbb{E}\left<\psi(R_{1,l})f\right>
\right|=0,
\end{align}
where the supremum is taken over all (non random) functions $f$ on $\Sigma_N^n$ with $|f|\leq 1.$
\end{proposition}
These identities were firstly discovered by Ghirlanda and Guerra in the context of the SK model with $\psi(x)=x.$ Later, they were generalized to the mixed $p$-spin SK models and also mixed $p$-spin spherical SK models, see Chapter 12 \cite{Talag102} for details. The importance of the EGG identities is due to the conjecture that they yield the ultrametric property of the overlaps, that is, under the Gibbs measure,
the event
\begin{align}\label{add:eq1}
R_{1,2}\geq \min(R_{1,3},R_{2,3})
\end{align}
has probability nearly one. This conjecture was recently confirmed by Panchenko \cite{Pan11}. Thus, from the Baffionni-Rosati theorem, the limiting behavior of the Gibbs measure can be characterized by the Poisson-Dirichlet cascades, which is closely related to the replica symmetry breaking scheme in the computation of the Parisi formula, see Chapter 15 \cite{Talag102}. For the applications of the EGG identities, the readers are referred to \cite{CP12} and \cite{Pan110}.
\smallskip
In the same fashion, Proposition \ref{pos:prop2:GGI} implies the ultrametric structure $(\ref{add:eq1})$ of the overlap in the SKFI model. The proof of Proposition \ref{pos:prop2:GGI} is based on the concentration of the Hamiltonian and the positivity of the overlap. As the argument has been explained in great detail in Chapter 12 in \cite{Talag102}, the proof will be omitted in this paper. We will present an immediate application of the EGG identities in Theorem \ref{add:thm} below that yields a general inequality between the magnetization and overlap.
\subsection{An inequality between the magnetization and overlap}\label{main:sec3}
In this section, we present an inequality between the magnetization and overlap. Again, for technical purposes, we assume that the external field $h$ is centered throughout this section. Let us first motivate our idea by considering the original SK model ($\beta_p=0$ for every $p\geq 2$) with ferromagnetic interaction. It is well-known that in this case the magnetization and overlap of the SKFI model in the high temperature regime ($\beta_1$ and $\beta$ are very small) are concentrated essentially at single values in the sense that
\begin{align*}
\mathbb{E}\left<(m-\mu)^{2k}\right>&\leq \frac{K}{N^{k}}\\
\mathbb{E}\left<(R_{1,2}-q)^{2k}\right>&\leq \frac{K}{N^k}
\end{align*}
for every $k\geq 1,$ where $K$ is a constant independent of $N$ and $(\mu,q)$ is the unique solution to
\begin{align*}
\mu&=\mathbb{E}\tanh(\beta_1z\sqrt{2q}+\beta\mu+h)\\
q&=\mathbb{E}\tanh^2(\beta_1z\sqrt{2q}+\beta\mu+h)
\end{align*}
for some standard Gaussian r.v. $z$ independent of $h.$ For the proof, one may follow the same argument as \cite{ACCR10}. As one can see immediately from the Cauchy-Schwarz inequality, $\mu^2\leq q,$ that is, the overlap is essentially bounded from below by the square of the magnetization. It is natural to ask whether in general a similar relation between the magnetization and overlap holds or not. Using the fundamental property $(\ref{pos:eq5})$ of the magnetization and the EGG identities for the overlaps, we will prove that the answer is in the affirmative. Recall that $\mathcal{B}_d'$ is a dense $G_\delta$ set in $\mathcal{B}'$ and from $(\ref{main:prop1:eq2})$ and $(\ref{pos:eq5})$, if $(\beta,\boldsymbol{\beta})\in\mathcal{B}_d'$, there exists some $0\leq\mu<1$ such that
\begin{align}
\label{add:eq0}
\lim_{N\rightarrow\infty}\mathbb{E}\left<I(||m|-\mu|\leq \varepsilon)\right>=1
\end{align}
for all $\varepsilon>0.$ Our main result is stated as follows.
\begin{theorem}\label{add:thm}
Let $(\beta,\boldsymbol{\beta})\in \mathcal{B}_d'.$ We have that
\begin{enumerate}
\item if $\mathbb{E}h^2=0,$ $\lim_{N\rightarrow\infty}\mathbb{E}\left<I(\mu^2-\varepsilon\leq |R_{1,2}|)\right>=1$ for every $\varepsilon>0;$
\item if $\mathbb{E}h^2\neq 0,$ $\lim_{N\rightarrow\infty}\mathbb{E}\left<I(\mu^2-\varepsilon\leq R_{1,2})\right>=1$ for every $\varepsilon>0.$
\end{enumerate}
\end{theorem}
\smallskip
In other words, $\mu^2$ provides a lower bound for the support of the Parisi measure $\nu_{\boldsymbol{\beta},\beta\mu+h}.$ From $(\ref{add:eq0})$, Theorem \ref{add:thm} also means that for $(\boldsymbol{\sigma},\boldsymbol{\sigma}^1,\boldsymbol{\sigma}^2)$ sampled from $\mathbb{E}G_N^{\otimes 3}$, essentially $m(\boldsymbol{\sigma})^2\leq |R_{1,2}(\boldsymbol{\sigma}^1,\boldsymbol{\sigma}^2)|$ if $\mathbb{E}h^2=0$ and $m(\boldsymbol{\sigma})^2\leq R_{1,2}(\boldsymbol{\sigma}^1,\boldsymbol{\sigma}^2)$ if $\mathbb{E}h^2\neq 0.$
\smallskip
\subsection{A quantitative control on the magnetization}\label{Sec4}
We will construct a temperature region where the effect of the ferromagnetic interaction is much stronger than the effect of the mixed even $p$-spin interactions. In this region, we can control the magnetization quantitatively away from the origin and deduce different senses of convergence of the magnetization depending on whether the external field is present or not. Suppose, throughout this section, that the external field $h$ is centered satisfying
\begin{align}\label{Sec4:eq1}
\mathbb{E}e^{2|h|}<\frac{1}{\max_{\beta\geq 0}\frac{\beta}{\cosh^2\beta}}.
\end{align}
Notice that $\max_{\beta\geq 0}\beta/\cosh^2\beta<1.$ This ensures the existence of $h.$ The assumption $(\ref{Sec4:eq1})$ is just for technical purposes that might possibly be omitted (see the remark right after Lemma \ref{com:lem1} below). The description of the temperature region involves the function $f$ in the variational formula for the thermodynamic limit of the free energy of the CW model,
\begin{align}\label{com:eq1}
f(\mu,\beta):=F^{SK}(\mathbf{0},\beta\mu+h)-\frac{\beta\mu^2}{2}=\ln 2+\mathbb{E}\ln \cosh(\beta\mu+h)-\frac{\beta\mu^2}{2}
\end{align}
for $\mu\in\left[-1,1\right]$ and $\beta\in \left(\alpha,\infty\right)$, where $\alpha$ satisfies $\alpha \mathbb{E}1/\cosh^2 h=1.$ Some basic properties of $f$ can be summarized in the following technical proposition.
\begin{proposition}\label{com:prop1}
For each fixed $\beta\in\left(\alpha,\infty\right),$ the global maximum of $f(\cdot,\beta)$ over $\left[0,1\right]$ is uniquely achieved
at some $\mu(\beta)\in\left(0,1\right).$
As functions of $\beta,$ $\mu(\beta)$ and $f(\mu(\beta),\beta)$
are strictly increasing, continuous, and differentiable such that
\begin{align}\label{com:prop1:eq2}
\lim_{\beta\rightarrow\alpha+}\mu(\beta)=0,\,\,\lim_{\beta\rightarrow\infty}\mu(\beta)=1,\,\,
\mbox{and}\,\,
\lim_{\beta\rightarrow\infty}f(\mu(\beta),\beta)=\infty.
\end{align}
\end{proposition}
Suppose that $u$ is any number satisfying $0<u<1.$ From Proposition \ref{com:prop1}, there exists a unique $\beta_u\in\left(\alpha,\infty\right)$
such that $\mu(\beta_u)=u.$ Define $\delta_u:\left[\beta_u,\infty\right)\rightarrow \left[0,\infty\right)$ by
\begin{align}\label{com:thm1:eq0}
\delta_u(\beta)=f(\mu(\beta),\beta)-f(u,\beta).
\end{align}
\begin{proposition}\label{com:prop2}
$\delta_u$ is strictly increasing and $\lim_{\beta\rightarrow\infty}\delta_u(\beta)=\infty.$
\end{proposition}
Recall the definition of $\mathcal{B}_d'$ from $(\ref{pos:eq5})$ and also $\xi(x)=\sum_{p\geq 1}\beta_p^2x^{2p}.$ Suppose that $u$ is any number satisfying $0<u<1.$ We define a temperature region,
\begin{align}\label{add:eq7}
\mathcal{R}_u=\left\{(\beta,\boldsymbol{\beta})\in \mathcal{B}_d':\beta>\beta_u\,\,\mbox{and}\,\,\xi(1)<2\delta_u(\beta)\right\}.
\end{align}
Recall $\mu$ from $(\ref{pos:eq5}).$ Notice that $\nu_{\boldsymbol{\beta},\beta\mu+h}$ is the limiting distribution of the overlap in the SKFI model with temperature $(\beta,\boldsymbol{\beta})$ and external field $h$ and also in the SK model with temperature $\boldsymbol{\beta}$ and external field $\beta\mu+h.$ In the case of the original SK model ($\beta_p=0$ for all $p\geq 2$) with external field $\beta\mu+h$, if $(\beta,\boldsymbol{\beta})$ satisfies $\beta>\beta_u$ and $\xi(1)<2\delta_u(\beta)$, one sees, from Proposition \ref{com:prop2}, the definition $(\ref{pos:eq5})$ of $\mu$, and our main results in Theorem \ref{com:thm0} below, that $\beta>\beta_u$ can be arbitrary large and $\beta_1$ lies very likely inside the conjectured high temperature region (below the Almeida-Thouless line) of the original SK model, that is,
$$
\mathbb{E}\frac{2\beta_1^2}{\cosh^4(\beta_1z\sqrt{2q}+\beta\mu+h)}<1,
$$
which means that $\nu_{\boldsymbol{\beta},\beta\mu+h}$ is expected to present essentially high temperature behavior, that is, $\nu_{\boldsymbol{\beta},\beta\mu+h}$ consists of a single point mass, where $z$ is a standard Gaussian r.v. independent of $h$ and $q$ is the unique solution to $q=\mathbb{E}\tanh^2(\beta_1z\sqrt{2q}+\beta\mu+h)$. Therefore, heuristically in the region $\mathcal{R}_u$, the SKFI model has low CW and high SK temperatures. The idea of the region $\mathcal{R}_u$ comes from the observation that since $\xi(1)$ is very small comparing to $\beta$, the magnetization in the SKFI model behaves very much the same as in the CW model. Thus, if the magnetization in the CW model is away from the origin, it will also be the case in the SKFI model. Now our main result is stated as follows. Recall $\Omega$ from $(\ref{main:eq1}).$
\begin{theorem}
\label{com:thm0}
For $0<u<1$, we have $\Omega(\beta,\boldsymbol{\beta},h)\subset \left[-1,-u\right)\cup\left(u,1\right]$ for all $(\beta,\boldsymbol{\beta})\in \mathcal{R}_u$.
\end{theorem}
In other words, from Proposition \ref{main:prop1}, the magnetization is basically bounded away from the set $[-u,u]$. As an immediate consequence of the symmetry of the magnetization and the positivity of the overlap, we have the following proposition.
\begin{proposition}\label{com:thm1} The following statements hold:
\begin{enumerate}
\item Let $0<u<1$ and $\mathbb{E}h^2=0.$ For $(\beta,\boldsymbol{\beta})\in\mathcal{R}_u,$ there exists some $\mu\in (u,1)$ such that $\left<I\left(\left|m-\mu\right|\leq \varepsilon\right)\right>$ and $\left<I\left(\left|m+\mu\right|\leq \varepsilon\right)\right>$ converge to $1/2$ a.s. for all $0<\varepsilon<\mu.$
\item Let $0<u<1/2$ and $\mathbb{E}h^2\neq 0$. For $(\beta,\boldsymbol{\beta})\in\mathcal{R}_u,$ there exists some $\mu\in (u,1)$ such that $\left<I\left(\left|m-\mu\right|\leq \varepsilon\right)\right>$ and $\left<I\left(\left|m+\mu\right|\leq \varepsilon\right)\right>$ converge to Bernoulli$(1/2)$ r.v.s for all $0<\varepsilon<\mu.$
\end{enumerate}
\end{proposition}
\smallskip
The first statement is well-known in the CW model without external field. The proof follows immediately from the symmetry of the magnetization under the Gibbs measure. When $\mathbb{E}h^2\neq 0$ this symmetry does not hold, which leads to a different sense of convergence. One may also refer to \cite{APZ91} for the conditional self-averaging property of the magnetization that naturally leads to a similar result as the second statement of Proposition \ref{com:thm1} in the case of the CW model with random external field. However, since the SKFI model contains SK couplings, it seems not applicable to deduce the second statement of Proposition \ref{com:thm1} in the same approach as \cite{APZ91}. As will be seen in the proof, we control the magnetization using the overlap and conclude the announced result via the positivity of the overlap.
\section{Proofs}\label{proof}
In Section $\ref{proof:sec1},$ we prove the main results in Section \ref{main:sec1} via the usual approach in the CW model. We proceed to study the differentiability of the thermodynamic limit of the free energy of the SKFI model in Section $\ref{proof:sec3}$ and conclude the results in Section \ref{smpo}. Section \ref{EGG} is devoted to proving Theorem \ref{add:thm} using the EGG identities. Finally, in Section \ref{proof:sec4}, we demonstrate how to control the magnetization quantitatively on the temperature region $\mathcal{R}_u$ and deduce Theorem \ref{com:thm0} and Proposition \ref{com:thm1}. For convenience, throughout the paper, for any given $a,b\in\mathbb{R},$ we define $\delta_{a,b}=1$ if $a=b$ and $\delta_{a,b}=0$ if $a\neq b;$ for any given set $P,$ $|P|$ denotes the cardinality of $P$.
\subsection{Approaches from the Curie-Weiss model}\label{proof:sec1}
We will prove Theorem \ref{main:thm1} and Proposition \ref{main:prop1} by the usual approaches in the CW model.
Lemma $\ref{lem1}$ is a consequence of a classical result in convex analysis,
while Lemma $\ref{lem2}$ is a standard application of Gaussian concentration of measure, see \cite{Talag03}.
These will play essential roles in our proofs.
\begin{lemma}\label{lem1}
For fixed $(\beta,\boldsymbol{\beta})\in\mathcal{B},$ $\{F_N^{SK}(\boldsymbol{\beta},\beta\cdot+h)\}_{N\geq 1}$ is a sequence of convex functions converging to
$F^{SK}(\boldsymbol{\beta},\beta\cdot+h)$ uniformly on $\left[-1,1\right]$ and $F^{SK}(\boldsymbol{\beta},\beta\cdot+h)$
is continuous and convex.
\end{lemma}
\begin{proof}
Define $p(\mu)=F^{SK}(\boldsymbol{\beta},\beta\mu+h)$ and $p_N(\mu)=F_N^{SK}(\boldsymbol{\beta},\beta\mu+h)$ on $\mathbb{R}$ for each $N\geq 1.$
Since $$
p_N''=N\beta^2\left(\mathbb{E}\left<m^2\right>^{SK}-\left(\mathbb{E}\left<m\right>^{SK}\right)^2\right)\geq 0,$$
$\left\{p_N\right\}$ is a sequence
of convex functions on $\mathbb{R}$ and converges pointwise from $(\ref{main:eq0})$. Note that here $\left<\cdot\right>^{SK}$ is the Gibbs average
of the SK model with temperature $\boldsymbol{\beta}$ and external field $\beta\mu+h.$
A classical result in convex analysis, which can be found in \cite{RoVa73}, finishes our proof:
Let $\left\{p_N\right\}_{N\geq 1}$ be a sequence of convex functions on $\mathbb{R}$ converging to $p$
pointwise. Then $p$ is a continuous and convex function and the convergence of $\left\{p_N\right\}_{N\geq 1}$ to $p$
is uniform on any bounded interval.
\end{proof}
The proof of Lemma $\ref{lem2}$ is left to the reader.
\begin{lemma}\label{lem2}
For each $N$, we set $\Theta_N=\left\{-1,-1+\frac{2}{N},\ldots,1-\frac{2}{N},1\right\}$
and $$\triangle_\mu=\frac{1}{N}\ln Z_N^{SK}(\boldsymbol{\beta},\beta\mu+h)-\frac{1}{N}\mathbb{E}\ln Z_N^{SK}(\boldsymbol{\beta},\beta\mu+h)$$
for $\mu\in\left[-1,1\right].$ Then for every $N\geq 1,$
\begin{align}\label{lem2:eq1}
P\left(\max_{\mu\in\Theta_N}|\triangle_\mu|\geq t\right)\leq K\exp\left(-\frac{t^2N}{K}\right),\quad t\geq 0
\end{align}
and
\begin{align}\label{lem2:eq2}
\mathbb{E}\max_{\mu\in\Theta_N}|\triangle_\mu|\leq \frac{K}{N^{1/4}},
\end{align}
where $K$ is a constant independent of $N.$
\end{lemma}
\begin{Proof of theorem} $\bf\ref{main:thm1}:$ Let $\mu$ be any real number. Since $m^2\geq 2\mu m-\mu^2,$ it is easy to see
\begin{align}\label{main:thm1:proof:eq1}
Z_N(\beta,\boldsymbol{\beta},h)&\geq Z_N^{SK}(\boldsymbol{\beta},\beta\mu+h)\exp\left(-\frac{N\mu^2\beta}{2}\right)
\end{align}
and this implies
\begin{align*}
\liminf_{N\rightarrow\infty}F_N(\beta,\boldsymbol{\beta},h)&\geq
\max_{\mu\in\left[-1,1\right]}\left\{F^{SK}(\boldsymbol{\beta},\beta\mu+h)-\frac{\mu^2\beta}{2}\right\}.
\end{align*}
On the other hand, let us observe that $m\in \Theta_N$ can take only $N+1$ distinct values.
Write $1=\sum_{\mu\in\Theta_N}\delta_{\mu,m}.$
If $m=\mu,$ then $m^2=2\mu m-\mu^2.$ So by exchanging the order of summations,
\begin{align*}
Z_N(\beta,\boldsymbol{\beta},h)
&=\sum_{\mu\in \Theta_N}\exp\left(-\frac{N\beta\mu^2}{2}\right)
\sum_{\boldsymbol{\sigma}}\delta_{\mu,m}\exp\left(H_{N}^{SK}(\boldsymbol{\sigma})+\sum_{i\leq N}(\beta\mu+h_i)\sigma_i\right)\\
&\leq \sum_{\mu\in \Theta_N}\exp\left(-\frac{N\beta\mu^2}{2}\right)
\sum_{\boldsymbol{\sigma}}\exp\left(H_{N}^{SK}(\boldsymbol{\sigma})+\sum_{i\leq N}(\beta\mu+h_i)\sigma_i\right)\\
&=\sum_{\mu\in \Theta_N}\exp\left(-\frac{N\beta\mu^2}{2}\right)Z_N^{SK}(\boldsymbol{\beta},\beta\mu+h).
\end{align*}
Therefore,
\begin{align*}
F_N(\beta,\boldsymbol{\beta},h)
&\leq \frac{\ln(N+1)}{N}+\mathbb{E}\left[
\max_{\mu\in \Theta_N}\left\{-\frac{\beta\mu^2}{2}+\frac{1}{N}\ln Z_N^{SK}(\boldsymbol{\beta},\beta\mu+h)\right\}\right]\\
&\leq \frac{\ln(N+1)}{N}+\max_{\mu\in \Theta_N}\left\{-\frac{\beta\mu^2}{2}+F_N^{SK}(\boldsymbol{\beta},\beta\mu+h)\right\}+\mathbb{E}\max_{\mu\in \Theta_N}|\triangle_\mu|.
\end{align*}
From $(\ref{lem2:eq2})$, we obtain
\begin{align*}
\limsup_{N\rightarrow\infty}F_N(\beta,\boldsymbol{\beta},h)
&\leq\max_{\mu\in\left[-1,1\right]}\left\{F^{SK}(\boldsymbol{\beta},\beta\mu+h)-\frac{\beta\mu^2}{2}\right\}+
\limsup_{N\rightarrow\infty}\mathbb{E}\max_{\mu\in \Theta_N}|\triangle_\mu|
\end{align*}
and by using Lemma $\ref{lem2}$, we are done.
\end{Proof of theorem}
\begin{Proof of proposition} $\bf\ref{main:prop1}:$
It is easy to see that if $(\ref{main:prop1:eq2})$ holds, then using the exponential bound of $(\ref{main:prop1:eq2})$,
$(\ref{main:prop1:eq1})$ follows
immediately. So we only prove $(\ref{main:prop1:eq2}).$
As in Theorem $\ref{main:thm1},$ by exchanging the order of summations, we obtain
\begin{align*}
&\left<I(m\in U)\right>Z_N(\beta,\boldsymbol{\beta},h)\\
&=\sum_{\mu\in \Theta_N}I(\mu\in U)\exp\left(-\frac{N\beta\mu^2}{2}\right)\sum_{\boldsymbol{\sigma}\in\Sigma_N}
\delta_{\mu,m}\exp\left(H_{N}^{SK}(\boldsymbol{\sigma})+\sum_{i\leq N}(\beta\mu+h_i)\sigma_i\right)\\
&\leq \sum_{\mu\in \Theta_N}I(\mu\in U)\exp\left(-\frac{N\beta\mu^2}{2}\right)Z_N^{SK}(\boldsymbol{\beta},\beta\mu+h)\\
&\leq (N+1)\exp\left( N\sup_{\mu\in \Theta_N\cap U}\left\{\frac{1}{N}\ln Z_N^{SK}(\boldsymbol{\beta},\beta\mu+h)-
\frac{\beta\mu^2}{2}\right\}\right).
\end{align*}
From $(\ref{main:thm1:proof:eq1}),$
\begin{align}\label{FEFE:eq1}
\left<I(m\in U)\right>&\leq (N+1)\exp N\left(\max_{\mu\in \Theta_N\cap U}W_\mu-\max_{\mu\in \Theta_N}W_\mu\right),
\end{align}
where $$
W_\mu:=\frac{1}{N}\ln Z_N^{SK}(\boldsymbol{\beta},\beta\mu+h)-\frac{\beta\mu^2}{2}.
$$
We claim that $\lim_{N\rightarrow\infty}\sup_{\mu\in\Theta_N\cap O}W_\mu=\sup_{\mu\in O}\left\{
F^{SK}(\boldsymbol{\beta},\beta\mu+h)-\beta\mu^2/2\right\}$ for every open subset $O$ of $\left[-1,1\right].$
For convenience, we set
\begin{align*}
\Gamma_\mu&=F_N^{SK}(\boldsymbol{\beta},\beta\mu+h)-\frac{\beta\mu^2}{2},\\
F_\mu&=F^{SK}(\boldsymbol{\beta},\beta\mu+h)-\frac{\beta\mu^2}{2}.
\end{align*}
For any $\mu,$ observe that $-|\triangle_\mu|+\Gamma_\mu\leq W_\mu=\triangle_\mu+\Gamma_\mu\leq |\triangle_\mu|+\Gamma_\mu$
and thus,
\begin{align}\label{FEFE:eq2}
-\max_{\mu\in\Theta_N\cap O}|\triangle_\mu|+\max_{\mu\in\Theta_N\cap O}\Gamma_\mu\leq
\max_{\mu\in \Theta_N\cap O}W_\mu\leq \max_{\mu\in\Theta_N\cap O}|\triangle_\mu|+
\max_{\mu\in \Theta_N\cap O}\Gamma_\mu.
\end{align}
Using Lemmas $\ref{lem1}$ and $\ref{lem2}$, this completes the proof of our claim since
\begin{align*}
\sup_{\mu\in O}F_\mu=\liminf_{N\rightarrow\infty}\max_{\mu\in\Theta_N\cap O}\Gamma_\mu
\leq\liminf_{N\rightarrow\infty}\max_{\mu\in \Theta_N\cap O}W_\mu
\end{align*}
and
\begin{align*}
\sup_{\mu\in O}F_\mu=\limsup_{N\rightarrow\infty}\max_{\mu\in \Theta_N\cap O}\Gamma_\mu
\geq\limsup_{N\rightarrow\infty}\max_{\mu\in \Theta_N\cap O}W_\mu.
\end{align*}
To obtain $(\ref{main:prop1:eq2}),$ we write from $(\ref{FEFE:eq1}),$
$$
\mathbb{E}\left<I(m\in U)\right>\leq (N+1)\mathbb{P}(A_N)+(N+1)\exp\left(-N\varepsilon\right)\mathbb{P}(A_N^c),
$$
where
\begin{align*}
\varepsilon&=2\left(\max_{\left[-1,1\right]}F_\mu-\sup_{U}F_\mu\right)>0,\\
A_N&=\left\{\max_{\Theta_N\cap U}W_\mu-\max_{\Theta_N}W_\mu\leq -\varepsilon\right\}.
\end{align*}
Since
$$
\max_{\Theta_N}\Gamma_\mu-\max_{\Theta_N\cap U}\Gamma_\mu\rightarrow\max_{\left[-1,1\right]}F_\mu-\sup_{U}F_\mu=\frac{\varepsilon}{2},
$$
it follows that for sufficiently large $N,$
$$
\max_{\Theta_N}\Gamma_\mu-\max_{\Theta_N\cap U}\Gamma_\mu\leq \frac{3\varepsilon}{4}.
$$
Now use $(\ref{FEFE:eq2})$ to obtain
\begin{align*}
\mathbb{P}(A_N)&\leq
\mathbb{P}\left(\max_{\Theta_N\cap U}\Gamma_\mu-\max_{\Theta_N}\Gamma_\mu\leq -\varepsilon+\max_{\Theta_N\cap U}|\triangle_\mu|+\max_{\Theta_N}|\triangle_\mu|\right)\\
&\leq \mathbb{P}\left(\max_{\Theta_N}|\triangle_\mu|\geq \frac{1}{2}\left(\varepsilon+\max_{\Theta_N\cap U}\Gamma_\mu-\max_{\Theta_N}\Gamma_\mu\right)\right)\\
&\leq \mathbb{P}\left(\max_{\Theta_N}|\triangle_\mu|\geq \frac{\varepsilon}{8}\right).
\end{align*}
So for large enough $N$ and from equation $(\ref{lem2:eq1}),$ we get
$$
\mathbb{E}\left<I(m\in U)\right>\leq (N+1)\left(K\exp\left(-\frac{\varepsilon^2 N}{64K}\right)+\exp(-N\varepsilon)\right)
$$
and this establishes $(\ref{main:prop1:eq2}).$
\end{Proof of proposition}
\subsection{The differentiability of $F(\beta,\boldsymbol{\beta},h)$ in $(\beta,\boldsymbol{\beta})$}\label{proof:sec3}
We will study the differentiability of $F(\beta,\boldsymbol{\beta},h)$ with respect to $\beta$ and $\beta_p$ for every $p\geq 1$ in this section using the standard results in convex analysis. From this, we deduce the main results in Section \ref{smpo}. First let us recall that the thermodynamic limit $F^{SK}(\boldsymbol{\beta},h)$ of the free energy in the mixed even $p$-spin SK model can be characterized by the Parisi formula for any $\boldsymbol{\beta}$ with $\sum_{p\geq 1}2^p\beta_p^2<\infty$ and Gaussian r.v. $h$ (possibly degenerate). Using this variational formula and the usual trick concerning the differentiability of the convex functions, it is well-known \cite{Pan08,Talag06} that $F^{SK}(\boldsymbol{\beta},h)$ is differentiable with respect to $\beta_p$ for every $p\geq 1.$ For each $x\in\mathbb{R}$, we consider the mixed even $p$-spin SK model with temperature $\boldsymbol{\beta}$ and external field $x+h.$ One may see that following a similar argument as \cite{Pan08,Talag06}, the function $F^{SK}(\boldsymbol{\beta},x+h)$ is differentiable with respect to $x.$ More precisely, the following statement holds.
\begin{proposition}\label{pos:prop2}
Let $\{W_t\}_{t\geq 0}$ be a standard Brownian motion. For every fixed $\boldsymbol{\beta}$ and $h,$ $F^{SK}(\boldsymbol{\beta},x+h)$ is differentiable in $x$
and
\begin{align}\label{pos:prop2:eq1}
\frac{\partial F^{SK}}{\partial x}(\boldsymbol{\beta},x+h)=\mathbb{E}\left[\tanh(x+h+W_{\xi'(1)})\exp S(x)\right],
\end{align}
where $S(x)$ is some r.v. depending only on the Parisi measure ${\nu_{\boldsymbol{\beta},x+h}}$ and $\xi$ such that $\mathbb{E}\exp S(x)=1$ for every $x\in\mathbb{R}.$
\end{proposition}
Now let us turn to the study of the differentiability of $F(\beta,\boldsymbol{\beta},h)$ in $(\beta,\boldsymbol{\beta})$. Recall from $(\ref{main:thm1:eq1})$ that the thermodynamic limit of the free energy of the SKFI model, $F(\beta,\boldsymbol{\beta},h),$ is obtained by maximizing
\begin{align}\label{pos:proof:eq0}
f(\mu,\beta,\boldsymbol{\beta}):= F^{SK}(\boldsymbol{\beta},\beta\mu+h)-\frac{\beta\mu^2}{2}
\end{align}
over all $\mu\in \left[-1,1\right].$ Let us observe that for fixed $\mu,$ $f$ is convex in $\beta$ and $\beta_p$ for each $p\geq 1.$
Such an optimization problem is of great importance in the analysis of convex optimization. The differentiability of $F(\beta,\boldsymbol{\beta},h)$ in $\beta$ and $\beta_p$ for each $p\geq 1$ relies on the following classical theorem in convex analysis.
\begin{theorem}[Danskin \cite{BD99}]\label{pos:proof:thm1}
Let $I_1$ be an open interval and $I_2$ be a compact interval.
Suppose that $g$ is a continuous function defined from $I_1\times I_2$ to
$\mathbb{R}$ such that for every fixed $y,$
$g(\cdot,y)$ is convex and $\frac{\partial g}{\partial x}(x,y)$ exists for every
$(x,y)\in I_1\times I_2.$
Define $G:I_1\rightarrow\mathbb{R}$ by $G(x)=\max_{y\in I_2}g(x,y)$.
Then
$$
\frac{dG}{dx+}(x)=\max_{y\in \Omega_g(x)}\frac{\partial g}{\partial x}(x,y)
$$
and
$$
\frac{dG}{dx-}(x)=\min_{y\in \Omega_g(x)}\frac{\partial g}{\partial x}(x,y),
$$
where $\frac{dG}{d x+}$ and $\frac{dG}{d x-}$ are the right and left derivatives of $G$, respectively, and $\Omega_g(x)$ is the argmax of $g(x,\cdot)$ on $I_2$ for each $x\in I_1.$ In particular, if $\Omega_g(x)$ consists of a single element, then $G$ is differentiable at $x.$
\end{theorem}
Before turning to the proof of our main results, we need two technical lemmas.
\begin{lemma}[Griffith]
Suppose that $\left\{g_n\right\}$ is a sequence of differentiable convex functions defined on an open interval $I$.
If $\left\{g_n\right\}$ converges pointwise to $g$ and $g$ is differentiable at $x,$ then $\lim_{n\rightarrow \infty}g_n'(x)=g'(x).$
\end{lemma}
\begin{lemma}\label{pos:lem2}
Let $(\beta,\boldsymbol{\beta})\in\mathcal{B}$. If $\beta>0,$ then $\Omega(\beta,\boldsymbol{\beta},h)\subset(-1,1).$
\end{lemma}
\begin{proof}
Notice that for fixed $(\beta,\boldsymbol{\beta})\in\mathcal{B},$ $f(\cdot,\beta,\boldsymbol{\beta})$ is a well-defined function on $\mathbb{R}.$
Since $|m(\boldsymbol{\sigma})|\leq 1$ for every $\boldsymbol{\sigma}\in\Sigma_N$, this implies
\begin{align*}
F^{SK}(\boldsymbol{\beta},\beta\mu+h)
&=\lim_{N\rightarrow\infty}\frac{1}{N}\mathbb{E}\left[\ln\sum_{\boldsymbol{\sigma}\in\Sigma_N}
\exp\left(-H_{N}^{SK}(\boldsymbol{\sigma})+\sum_{i\leq N}h_i\sigma_i+\beta\mu\sum_{i\leq N}\sigma_i\right)\right]\\
&\leq F^{SK}(\boldsymbol{\beta},h)+\beta|\mu|
\end{align*}
and $f(\mu,\beta,\boldsymbol{\beta})\rightarrow -\infty$ as $|\mu|\rightarrow\infty.$ So the global maximum of $f(\cdot,\beta,\boldsymbol{\beta})$ is
achieved. Suppose that $\mu$ is any maximizer.
Then $\frac{\partial f}{\partial\mu}(\mu,\beta,\boldsymbol{\beta})=0$ and $(\ref{pos:prop2:eq1})$ together yield $$\mathbb{E}\left[\tanh(\beta\mu+h+W_{\xi'(1)})
\exp S(\beta\mu)\right]=\mu,$$ where $S$ is defined in Proposition \ref{pos:prop2}.
Since $|\tanh|<1$ and $\mathbb{E}\left[\exp S(\beta\mu)\right]=1,$ it means $\mu\in\left(-1,1\right).$ So $$
\Omega(\beta,\boldsymbol{\beta},h)=\mbox{Argmax}_{\mu\in\left[-1,1\right]}f(\mu,\beta,\boldsymbol{\beta})=\mbox{Argmax}_{\mu\in\mathbb{R}}f(\mu,\beta,\boldsymbol{\beta})\subset\left(-1,1\right).$$
\end{proof}
\begin{Proof of proposition} ${\bf\ref{pos:prop1}:}$
For fixed $\mu$ and $\boldsymbol{\beta},$ since $F^{SK}(\boldsymbol{\beta},\beta\mu+h)-\beta\mu^2/2$
is convex and differentiable in $\beta,$ it follows by Danskin's theorem that
\begin{align}\label{pos:prop1:proof:eq1}
\frac{\partial F}{\partial \beta+}(\beta,\boldsymbol{\beta},h)=\max_{\mu\in\Omega(\beta,\boldsymbol{\beta},h)}
\left(\mu\left.\frac{\partial F^{SK}}{\partial y}(\boldsymbol{\beta},y+h)\right|_{y=\beta\mu}-\frac{\mu^2}{2}\right)
\end{align}
and
\begin{align}\label{pos:prop1:proof:eq2}
\frac{\partial F}{\partial \beta-}(\beta,\boldsymbol{\beta},h)=
\min_{\mu\in\Omega(\beta,\boldsymbol{\beta},h)}
\left(\mu\left.\frac{\partial F^{SK}}{\partial y}(\boldsymbol{\beta},y+h)\right|_{y=\beta\mu}-\frac{\mu^2}{2}\right).
\end{align}
Suppose that $F(\beta,\boldsymbol{\beta},h)$ is differentiable at $\beta$. If $|\Omega(\beta,\boldsymbol{\beta},h)|\geq 3,$
then from Proposition \ref{pos:prop2} and Lemma \ref{pos:lem2}, there exist some $\mu_1,\mu_2\in\Omega(\beta,\boldsymbol{\beta},h)\subset(-1,1)$ with $|\mu_1|<|\mu_2|$
such that
$$
\mu_1=\left.\frac{\partial F^{SK}}{\partial y}(\boldsymbol{\beta},y+h)\right|_{y=\beta\mu_1}\quad\mbox{and}\quad
\mu_2=\left.\frac{\partial F^{SK}}{\partial y}(\boldsymbol{\beta},y+h)\right|_{y=\beta\mu_2}.
$$
From these two equations, $(\ref{pos:prop1:proof:eq1})$, and $(\ref{pos:prop1:proof:eq2})$, we obtain
\begin{align}\label{pos:prop1:proof:eq3}
\frac{\partial F}{\partial \beta+}(\beta,\boldsymbol{\beta},h)\geq \frac{1}{2}\mu_2^2>\frac{1}{2}\mu_1^2
\geq \frac{\partial F}{\partial \beta-}(\beta,\boldsymbol{\beta},h),
\end{align}
which contradicts our assumption that $F$ is differentiable. Hence, $|\Omega(\beta,\boldsymbol{\beta},h)|\leq 2$ and if $\mu_1,\mu_2\in\Omega(\beta,\boldsymbol{\beta},h)$ are distinct, then $\mu_1=-\mu_2.$ So $(\beta,\boldsymbol{\beta})\in\mathcal{B}_d.$ Conversely, suppose that $(\beta,\boldsymbol{\beta})\in \mathcal{B}_d.$ If $|\Omega(\beta,\boldsymbol{\beta},h)|=1,$ then we are done. If $|\Omega(\beta,\boldsymbol{\beta},h)|=2$ and $\mu_1,\mu_2\in\Omega(\beta,\boldsymbol{\beta},h)$ with $\mu_1=-\mu_2,$
then from Lemma \ref{pos:lem2}, $(\ref{pos:prop1:proof:eq1})$, $(\ref{pos:prop1:proof:eq2}),$ and $(\ref{pos:prop1:proof:eq3}),$
we have $\frac{\partial F}{\partial \beta+}(\beta,\boldsymbol{\beta},h)=\frac{\partial F}{\partial \beta-}(\beta,\boldsymbol{\beta},h).$
So $F(\beta,\boldsymbol{\beta},h)$ is differentiable at $\beta$ and this completes the proof.
\end{Proof of proposition}
\begin{Proof of theorem} ${\bf\ref{pos:thm1}:}$
For any $(\beta,\boldsymbol{\beta})\in\mathcal{B}_d',$ since $h$ is centered, $f(\mu,\beta,\boldsymbol{\beta})$ is symmetric in $\mu$ and we may represent $F(\beta,\boldsymbol{\beta},h)$ as
\begin{align*}
F(\beta,\boldsymbol{\beta},h)=\max_{\mu\in\left[0,1\right]}f(\mu,\beta,\boldsymbol{\beta}).
\end{align*}
Let $(\beta,\boldsymbol{\beta})\in \mathcal{B}_d'.$ Then either $\Omega(\beta,\boldsymbol{\beta},h)=\left\{0\right\}$ or $\Omega(\beta,\boldsymbol{\beta},h)=\left\{\mu,-\mu\right\}$ for some $\mu\neq 0.$
This means that $\Omega(\beta,\boldsymbol{\beta},h)\cap\left[0,1\right]$ consists of a single element, say $\mu.$ By Danskin's theorem we obtain
$$
\frac{\partial F}{\partial \beta_p+}(\beta,\boldsymbol{\beta},h)=
\frac{\partial F^{SK}}{\partial \beta_p}(\boldsymbol{\beta},\beta\mu+h)=
\frac{\partial F}{\partial \beta_p-}(\beta,\boldsymbol{\beta},h).
$$
This proves that $F(\beta,\boldsymbol{\beta},h)$ is differentiable with respect to
every $\beta_p$ and from $(\ref{pos:eq2})$ the equation $(\ref{pos:thm1:eq1})$ follows. Using
Gaussian integration by parts, we have
$$
\frac{\partial }{\partial\beta_p}\frac{1}{N}\mathbb{E}\ln Z_N(\beta,\boldsymbol{\beta},h)=\beta_p(1-\mathbb{E}\left<R_{1,2}^{2p}\right>).
$$
By Griffith's lemma, this implies that
$$
\frac{\partial F}{\partial\beta_p}(\beta,\boldsymbol{\beta},h)=\beta_p\left(1-\lim_{N\rightarrow\infty}\mathbb{E}\left<R_{1,2}^{2p}\right>\right)
$$
and from $(\ref{pos:thm1:eq1})$, we get $(\ref{pos:thm1:eq2})$.
\end{Proof of theorem}
\begin{Proof of theorem} ${\bf\ref{pos:thm2}:}$
Note that by Talagrand's positivity, $c>0$.
From $(\ref{pos:thm1:eq2})$
and a continuity argument, for every continuous function $f$ on $\left[0,1\right],$
\begin{align}\label{pos:eq6}
\lim_{N\rightarrow\infty}\mathbb{E}\left<f(|R_{1,2}|)\right>=\int_0^1f(q)\nu_{\boldsymbol{\beta},\beta\mu+h}(dq).
\end{align}
In particular, let $f_0:\left[0,1\right]\rightarrow\mathbb{R}$ be the continuous function satisfying $f_0(x)=1$ if $0\leq x\leq c'$,
$f_0(x)=(c-c')^{-1}(c-x)$ if $c'<x<c$, and $f_0(x)=0$
if $c\leq x\leq 1.$ Then from $(\ref{pos:eq6})$,
\begin{align}\label{pos:thm2:proof:eq1}
\lim_{N\rightarrow\infty}\mathbb{E}\left<I(|R_{1,2}|\leq c')\right>\leq \lim_{N\rightarrow\infty}\mathbb{E}\left<f_0(|R_{1,2}|)\right>\leq
\nu_{\boldsymbol{\beta},\beta\mu+h}(\left[0,c\right))=0.
\end{align}
Define $F(t)= F(\beta,\boldsymbol{\beta},th)$ and $F^{SK}(t)=F^{SK}(\boldsymbol{\beta},th)$ for $t\in\mathbb{R}$. Recall that since $h$ is centered Gaussian, $F^{SK}(t)$ is differentiable in $t$ by \cite{Pan08}. Thus, the same argument as Theorem $\ref{pos:thm1}$ implies that
$F(t)$ is differentiable at $t=1$ and so
\begin{align*}
\lim_{N\rightarrow\infty}\mathbb{E}\left<R_{1,2}\right>=\int_0^1q\nu_{\boldsymbol{\beta},\beta\mu+h}(dq).
\end{align*}
On the other hand, letting $f(x)=x$ and using $(\ref{pos:eq6}),$
\begin{align}
\begin{split}\label{pos:thm2:proof:eq3}
\lim_{N\rightarrow\infty}\mathbb{E}\left<R_{1,2}^-\right>&=\frac{1}{2}\left(\lim_{N\rightarrow\infty}\mathbb{E}\left<|R_{1,2}|\right>-
\lim_{N\rightarrow\infty}\mathbb{E}\left<R_{1,2}\right>\right)\\
&=\frac{1}{2}\left(\int_0^1qd\nu_{\boldsymbol{\beta},\beta\mu+h}(q)-\int_0^1qd\nu_{\boldsymbol{\beta},\beta\mu+h}(q)\right)\\
&=0.
\end{split}
\end{align}
Thus, from $(\ref{pos:thm2:proof:eq1})$ and $(\ref{pos:thm2:proof:eq3})$ and applying the Markov inequality, we obtain $(\ref{pos:thm2:eq1})$ since
\begin{align*}
\lim_{N\rightarrow\infty}\mathbb{E}\left<I(R_{1,2}\leq c')\right>&\leq \limsup_{N\rightarrow\infty}\mathbb{E}\left<I(|R_{1,2}|\leq c')\right>
+\limsup_{N\rightarrow\infty}\mathbb{E}\left<I(R_{1,2}^->c')\right>\\
&\leq \limsup_{N\rightarrow\infty}\frac{1}{c'}\mathbb{E}\left<R_{1,2}^-\right>\\
&=0.
\end{align*}
From this and $(\ref{pos:eq6})$, we conclude $(\ref{pos:thm2:eq2})$ since for any continuous function $f$ on $\left[0,1\right],$
\begin{align*}
\lim_{N\rightarrow \infty}\mathbb{E}\left<f(|R_{1,2}|)\right>=&\lim_{N\rightarrow\infty}
\mathbb{E}\left<f(-R_{1,2})I(R_{1,2}<0)\right>+\mathbb{E}\left<f(R_{1,2})I(R_{1,2}\geq 0)\right>\\
=&\lim_{N\rightarrow\infty}\mathbb{E}\left<f(R_{1,2})I(R_{1,2}\geq 0)\right>\\
=&\lim_{N\rightarrow\infty}
\mathbb{E}\left<f(R_{1,2})I(R_{1,2}<0)\right>+\mathbb{E}\left<f(R_{1,2})I(R_{1,2}\geq 0)\right>\\
=&\lim_{N\rightarrow \infty}\mathbb{E}\left<f(R_{1,2})\right>
\end{align*}
for every continuous function $f$ on $\left[-1,1\right].$
\end{Proof of theorem}
\subsection{An application of the extended Ghirlanda-Guerra identities}\label{EGG}
This section is devoted to proving Theorem $\ref{add:thm}$ using the EGG identities. Let $(\beta,\boldsymbol{\beta})\in\mathcal{B}_d'$. Recall that from Proposition \ref{pos:prop2:GGI}, the EGG identities $(\ref{pos:prop2:GGI:eq1})$ hold under the assumption $\mathbb{E}h^2\neq 0.$ In the case of $\mathbb{E}h^2=0,$ we have the following weaker identities that can be derived in the same way as Proposition $\ref{pos:prop2:GGI}$: for each $n$ and each continuous function $\psi$ on $\mathbb{R},$
\begin{align}
\label{EGG:proof:eq1}
\lim_{N\rightarrow\infty}\sup_f\left|n\mathbb{E}\left<\psi(|R_{1,n+1}|)f\right>-\mathbb{E}\left<\psi(|R_{1,2}|)\right>\mathbb{E}\left<f\right>-\sum_{2\leq \ell\leq n}\mathbb{E}\left<\psi(|R_{1,\ell}|)f\right>\right|=0,
\end{align}
where the supremum is taken over all (non random) functions $f$ on $\Sigma_N^n$ with $|f|\leq 1.$ Let us remark that $(\ref{pos:prop2:GGI:eq1})$ obviously implies $(\ref{EGG:proof:eq1}).$
\smallskip
Recall from Theorems \ref{pos:thm1} and \ref{pos:thm2} that the Parisi measure $\nu_{\boldsymbol{\beta},\beta\mu+h}$ is a probability measure defined on $[0,1]$ that describes the limiting distribution of $|R_{1,2}|$ for both cases $\mathbb{E}h^2=0$ and $\mathbb{E}h^2\neq 0.$ Let $\nu_N$ be the distribution of the array of all overlaps $(|R_{\ell,\ell'}|)_{\ell,\ell'\geq 1}$ under the Gibbs average $\mathbb{E}\left<\cdot\right>.$ By compactness, the sequence $(\nu_N)$ converges weakly over subsequences but, for simplicity of notation, we will assume that $\nu_N$ converges weakly to the limit $\nu.$ We will still use the notations $(|R_{\ell,\ell'}|)_{\ell,\ell'\geq 1}$ to denote the elements of the overlap array in the limit and, again, for simplicity of notations we will denote by $\mathbb{E}$ the expectation with respect to the measure $\nu.$ Using these notations, $(\ref{EGG:proof:eq1})$ implies
\begin{align}\label{add:proof:eq0}
\mathbb{E} \psi(|R_{1,n+1}|)f=\frac{1}{n}\mathbb{E}\psi(|R_{1,2}|)\mathbb{E}f+\frac{1}{n}\sum_{\ell=2}^n \mathbb{E}\psi(|R_{1,\ell}|)f
\end{align}
for all bounded measurable functions $f$ of the overlaps on $n$ replicas and bounded measurable function $\psi$ on $\mathbb{R}.$ We will need the following essential lemma.
\begin{lemma}
\label{add:lem1} Let $(\beta,\boldsymbol{\beta})\in\mathcal{B}_d'.$ Suppose that $A$ is any measurable subset of $\left[0,1\right]$.
Set $A_n=\{|R_{\ell,\ell'}|\in A,\,\,\forall \ell\neq \ell'\leq n\}.$ Then $\nu(A_n)\geq \nu_{\boldsymbol{\beta},\beta\mu+h}(A)^n.$
\end{lemma}
\begin{proof}
For any $n\geq 1$, observe that
\begin{align}
\label{add:proof:eq2}
I_{A_{n+1}}\geq I_{A_{n}}-\sum_{\ell\leq n}I(|R_{\ell,n+1}|\notin A)I_{A_n}.
\end{align}
For all $1\leq \ell\leq n$, applying $(\ref{add:proof:eq0})$ and using symmetry of the overlaps,
\begin{align*}
\mathbb{E}I(|R_{\ell,n+1}|\notin A)I_{A_n}&= \frac{1}{n}\nu_{\boldsymbol{\beta},\beta\mu+h}(A^c)\nu(A_n)+\frac{1}{n}\sum_{\ell'\neq \ell}^n \mathbb{E}I(|R_{\ell,\ell'}|\notin A)I_{A_n}\\
&= \frac{1}{n}\nu_{\boldsymbol{\beta},\beta\mu+h}(A^c)\nu(A_n)
\end{align*}
and, therefore, from $(\ref{add:proof:eq2})$, $\nu(A_{n+1})\geq \nu_{\boldsymbol{\beta},\beta\mu+h}(A)\nu(A_n).$
Thus, an induction argument yields the result.
\end{proof}
\begin{Proof of theorem} ${\bf \ref{add:thm}:}$ If $\mathbb{E}h^2\neq 0,$ then from the positivity of the overlap and the first statement, the second statement follows immediately. So we only need to prove the first statement. If $\varepsilon\geq\mu^2$, we are obviously done. Suppose that $\varepsilon<\mu^2$ and the announced result fails. Then $\liminf_{N\rightarrow\infty}\mathbb{E}\left<I(|R_{1,2}|\geq \mu^2-\varepsilon)\right><1$ for some $\varepsilon>0$ or equivalently, $0<\limsup_{N\rightarrow\infty}\mathbb{E}\left<I(|R_{1,2}|<\mu^2-\varepsilon)\right>.$ Without loss of generality, we may assume that $\nu_{\boldsymbol{\beta},\beta\mu+h}$ is continuous at $\mu^2-\varepsilon$. Then $\nu_{\boldsymbol{\beta},\beta\mu+h}([0,\mu^2-\varepsilon))>0$ and from Lemma \ref{add:lem1}, $\nu(A_n)>0$ for every $n,$ where $A_n$ is defined in the statement of Lemma $\ref{add:lem1}$ using $A=[0,\mu^2-\varepsilon).$ Let $\boldsymbol{\sigma}^1,\ldots,\boldsymbol{\sigma}^n$ be $n$ replicas and $a_1,\ldots,a_n\in\left\{-1,1\right\}$ such that $a_\ell m(\boldsymbol{\sigma}^\ell)=|m(\boldsymbol{\sigma}^\ell)|$ for $1\leq \ell\leq n.$ From the Cauchy-Schwarz inequality,
\begin{align}\label{add:proof:eq5}
N\sum_{\ell\leq n}|m(\boldsymbol{\sigma}^\ell)|&= N\sum_{\ell\leq n}m(a_\ell\boldsymbol{\sigma}^\ell)=\mathbf{1}\cdot \sum_{\ell\leq n}a_\ell\boldsymbol{\sigma}^\ell\leq\sqrt{N}\left\|\sum_{\ell\leq n}a_\ell\boldsymbol{\sigma}^\ell\right\|,
\end{align}
where $\|\cdot\|$ is the Euclidean distance in $\mathbb{R}^N.$ Notice that
\begin{align}\label{add:proof:eq4}
\left\|\sum_{\ell\leq n}a_\ell\boldsymbol{\sigma}^\ell\right\|^2&= nN+\sum_{\ell\neq \ell'\leq n}a_\ell a_{\ell'}\boldsymbol{\sigma}^{\ell}\cdot\boldsymbol{\sigma}^{\ell'}\leq Nn+N\sum_{\ell\neq \ell'\leq n}|R_{\ell,\ell'}|.
\end{align}
Combining $(\ref{add:proof:eq5})$ and $(\ref{add:proof:eq4})$,
\begin{align}
\label{add:proof:eq1}
\sum_{\ell\leq n}|m(\boldsymbol{\sigma}^\ell)|\leq \left(n+\sum_{\ell\neq \ell'\leq n}|R_{\ell,\ell'}|\right)^{1/2}.
\end{align}
From this inequality, applying $\nu(A_n)>0$ together with the openness of $A_n$, we obtain
\begin{align}
\label{add:proof:eq3}
\liminf_{N\rightarrow\infty}\mathbb{E}\left<I\left(|m(\boldsymbol{\sigma}^1)|+\cdots+|m(\boldsymbol{\sigma}^n)|<(n+(\mu^2-\varepsilon)n(n-1))^{1/2}\right)\right>>0.
\end{align}
On the other hand, let us pick $0<\varepsilon'<\varepsilon$ and notice that for each $1\leq \ell\leq n,$ $$\lim_{N\rightarrow\infty}\mathbb{E}\left<I\left(\left||m(\boldsymbol{\sigma}^\ell)|-\mu\right|<\mu-(\mu^2-\varepsilon')^{1/2}\right)\right>=1.$$
We conclude, from this, $(\ref{add:proof:eq3}),$ and the triangle inequality, that for each $n\geq 1,$ with nonzero probability,
\begin{align*}
n(\mu^2-\varepsilon')^{1/2}=n(\mu-\mu+(\mu^2-\varepsilon')^{1/2})\leq \sum_{\ell\leq n}|m(\boldsymbol{\sigma}^\ell)|< (n+(\mu^2-\varepsilon)n(n-1))^{1/2},
\end{align*}
and this means $\varepsilon<\varepsilon',$ a contradiction.
\end{Proof of theorem}
\subsection{Controlling the magnetization using the CW free energy}\label{proof:sec4}
In this section, we will demonstrate how to control the magnetization quantitatively using the thermodynamic limit of the free energy of the CW model.
From this, we conclude the main results in Section \ref{Sec4}. Recall that the external field $h$ in Section \ref{Sec4} is a centered Gaussian r.v. satisfying $(\ref{Sec4:eq1}).$ First, let us establish a technical lemma that will be used in Proposition \ref{com:prop1}.
\begin{lemma}\label{com:lem1}
Suppose that $h$ is centered Gaussian satisfying $(\ref{Sec4:eq1}).$ Then
$\beta \mathbb{E}1/\cosh^2(\beta+h)<1$ for every $\beta\geq 0.$
\end{lemma}
Let us remark that the technical condition $(\ref{Sec4:eq1})$ is only used here throughout the paper, while the inequality will play a crucial role that ensures the validity of our main results. According to the simulation data, the inequality in Lemma \ref{com:lem1} should also be valid even without the assumption $(\ref{Sec4:eq1})$. However, the proof for this general case seems much more involved and too distracting. For clarity, we will only focus on the $h$ satisfying $(\ref{Sec4:eq1}).$
\smallskip
\begin{Proof of lemma} ${\bf \ref{com:lem1}:}$
Let $\beta \geq 0.$ We claim that $\cosh^2\beta/\cosh^2(\beta+x)<\exp(2|x|)$ for all $x\neq 0.$ To see this, define $g(x)=2(\ln\cosh\beta-\ln\cosh(\beta+x))$. Then $g(0)=0$ and $g'(x)=-2\tanh(\beta+x).$ For each $x,$ using mean value theorem, we obtain
\begin{align*}
g(x)=g(0)+g'(x')x=-2x\tanh(\beta+x')\leq 2|x|
\end{align*}
for some $x'\in (0,x)$ if $x>0$ or $x'\in (x,0)$ if $x<0.$ This completes the proof of our claim and consequently, Lemma $\ref{com:lem1}$ follows from the assumption on $h,$
\begin{align*}
\beta \mathbb{E}\frac{1}{\cosh^2(\beta+h)}&=\frac{\beta}{\cosh^2\beta}\mathbb{E}\frac{\cosh^2\beta}{\cosh^2(\beta+h)}
\leq\frac{\beta}{\cosh^2\beta}\mathbb{E}\exp(2|h|)<1.
\end{align*}
\end{Proof of lemma}
\begin{Proof of proposition} ${\bf \ref{com:prop1}:}$ Recall from $(\ref{com:eq1})$ that $f(\mu,\beta)$ is defined for $\mu\in\left[-1,1\right]$ and $\beta\in (\alpha,\infty)$ for some $\alpha$ satisfying $\alpha\mathbb{E}1/\cosh^2h=1.$
A simple computation yields the first three partial derivatives of $f(\mu,\beta)$ with respect to $\mu:$
\begin{align}
\begin{split}
\frac{\partial f}{\partial \mu}(\mu,\beta)&=\beta\left(\mathbb{E}\tanh(\beta\mu+h)-\mu\right),\\
\frac{\partial^2 f}{\partial\mu^2}(\mu,\beta)&=\beta\left(\beta \mathbb{E}\frac{1}{\cosh^2(\beta\mu+h)}-1\right),
\end{split}\notag\\
\begin{split}\label{com:prop1:eq1}
\frac{\partial^3f}{\partial \mu^3}(\mu,\beta)&=-2\beta^3\mathbb{E}\frac{\tanh(\beta\mu+h)}{\cosh^2(\beta\mu+h)}.
\end{split}
\end{align}
Let us recall a useful lemma from the proof of
Proposition A.14.1 in \cite{Talag102}: Let $\phi$ be an increasing bounded function on $\mathbb{R}$ satisfying $\phi(-y)=-\phi(y)$ and $\phi''(y)<0$ for $y>0.$
Then for every $\mu\geq 0$ and centered Gaussian random variable $z,$
$$
\mathbb{E}\phi(z+\mu)\phi'(z+\mu)\geq 0.
$$
Applying this lemma
to $\phi(y)=\tanh(y)$, we have $\frac{\partial^3 f}{\partial\mu^3}<0$ for every $\mu>0$ from $(\ref{com:prop1:eq1})$.
It implies that
$\frac{\partial^2 f}{\partial \mu^2}(\cdot,\beta)$ is strictly decreasing on $\left[0,1\right]$.
By the definition of $\alpha$ and Lemma $\ref{com:lem1}$, we also know that
$\frac{\partial^2f}{\partial\mu^2}(0,\beta)>0$ and $\frac{\partial^2f}{\partial\mu^2}(1,\beta)<0.$
So $\frac{\partial^2f}{\partial\mu^2}(\cdot,\beta)$ has a unique zero
in $\left(0,1\right)$ and so does $\frac{\partial f}{\partial\mu}(\cdot,\beta)$ since $\frac{\partial f}{\partial\mu}(0,\beta)=0$ and
$\frac{\partial f}{\partial\mu}(1,\beta)<0.$
Let $\mu(\beta)\in(0,1)$ be the zero of
$\frac{\partial f}{\partial\mu}(\cdot,\beta)$. Hence,
$\frac{\partial f}{\partial\mu}(\cdot,\beta)>0$ on $\left(0,\mu(\beta)\right)$ and $\frac{\partial f}{\partial\mu}(\cdot,\beta)<0$ on
$\left(\mu(\beta),1\right),$ which implies that in $\left[0,1\right],$ $f(\cdot,\beta)$
attains its unique global maximum at $\mu(\beta).$
\smallskip
The continuity and differentiability of $\mu(\cdot)$ follow from the implicit function theorem.
It is then clear that $f(\mu(\cdot),\cdot)$ is continuous and differentiable.
Since
\begin{align}\label{com:prop1:proof:extra1}
\mathbb{E}\tanh(\beta\mu(\beta)+h)=\mu(\beta),
\end{align}
by taking derivative on both sides, we obtain
\begin{align*}
\left(\mu(\beta)+\beta\mu'(\beta)\right)\mathbb{E}\frac{1}{\cosh^2(\beta\mu(\beta)+h)}=\mu'(\beta)
\end{align*}
and so
$$
\mu'(\beta)=-\frac{\beta\mu(\beta)}{\frac{\partial^2 f}{\partial\mu^2}(\mu(\beta),\beta)}\mathbb{E}\frac{1}{\cosh^2(\beta\mu(\beta)+h)}.
$$
Since $\mu(\beta)$ is greater than the unique zero of
$\frac{\partial^2 f}{\partial\mu^2}(\cdot,\beta)$ in $(0,1)$, $\frac{\partial^2 f}{\partial\mu^2}(\mu(\beta),\beta)<0$ and
this means that $\mu(\cdot)$ is a strictly increasing function. We also show the monotonicity of $f(\mu(\cdot),\cdot)$ by using $(\ref{com:prop1:proof:extra1}),$
\begin{align*}
\frac{df}{d\beta}(\mu(\beta),\beta)&=(\mu(\beta)+\beta\mu'(\beta))\mathbb{E}\tanh(\beta\mu(\beta)+h)-\frac{\mu(\beta)^2}{2}-\beta\mu'(\beta)\mu(\beta)\\
&=(\mu(\beta)+\beta\mu'(\beta))\mu(\beta)-\frac{\mu(\beta)^2}{2}-\beta\mu'(\beta)\mu(\beta)\\
&=\frac{1}{2}\mu(\beta)^2.
\end{align*}
\smallskip
Finally, we check $(\ref{com:prop1:eq2})$. First notice that the solution of $\mathbb{E}\tanh(\alpha x+h)=x$ for $x\in\left[0,1\right]$ is unique and equals $0.$
This can be verified by the same argument as in the first part of our proof. Thus, from $(\ref{com:prop1:proof:extra1}),$
$$
\mathbb{E}\tanh\left(\alpha \lim_{\beta\rightarrow\alpha+}\mu(\beta)+h\right)=\lim_{\beta\rightarrow\alpha+}\mu(\beta)
$$
implies $\lim_{\beta\rightarrow\alpha+}\mu(\beta)=0.$
Since $\beta\mu(\beta)\rightarrow\infty$ as $\beta\rightarrow\infty,$ we obtain, by the dominated convergence theorem,
$$
\lim_{\beta\rightarrow\infty}\mu(\beta)=\lim_{\beta\rightarrow\infty}
\mathbb{E}\tanh(\beta\mu(\beta)+h)=1.
$$
Since by the monotonicity of $\mu(\cdot)$ and the mean value theorem $$
f(\mu(\beta),\beta)-f(\mu(\beta'),\beta')\geq \frac{\mu(\beta')^2}{2}(\beta-\beta')$$
for $\beta>\beta'>\alpha,$ this implies that $\lim_{\beta\rightarrow\infty}f(\mu(\beta),\beta)=\infty$ and completes our proof.
\end{Proof of proposition}
\begin{Proof of proposition} ${\bf \ref{com:prop2}:}$
Notice that
$\mathbb{E}\tanh(\beta\mu+h)$ is a strictly increasing function in $\mu$ since $\frac{d}{d\mu}\mathbb{E}\tanh(\beta\mu+h)
=\beta \mathbb{E}1/\cosh^2(\beta\mu+h)>0$ and that $\mu(\beta)=\mathbb{E}\tanh(\beta\mu(\beta)+h)$ since $\mu(\beta)\in(0,1)$ is the maximizer of $f(\cdot,\beta)$
on $\left[0,1\right].$
Thus, for $\beta>\beta_u,$
\begin{align*}
\frac{d}{d\beta}\left(f(\mu(\beta),\beta)-f(u,\beta)\right)&=\frac{1}{2}(\mu(\beta)^2+u^2)-u\mathbb{E}\tanh(\beta u+h)\\
&>\frac{1}{2}\left(\mu(\beta)^2+u^2\right)-u\mathbb{E}\tanh(\beta\mu(\beta)+h)\\
&=\frac{1}{2}\left(\mu(\beta)^2+u^2\right)-u\mu(\beta)\\
&=\frac{1}{2}(\mu(\beta)-u)^2\\
&>0
\end{align*}
and this implies that $\delta_u$ is strictly increasing. Since $\mu(\cdot)$ is strictly increasing, from this inequality, we
can further conclude that $\lim_{\beta\rightarrow\infty}\delta_u(\beta)=\infty.$
\end{Proof of proposition}
\begin{Proof of theorem} ${\bf \ref{com:thm0}:}$
Recall the definitions for $f(\mu,\beta)$ and $f(\mu,\beta,\boldsymbol{\beta})$ from $(\ref{com:eq1})$ and $(\ref{pos:proof:eq0})$.
Then $f(\mu,\beta,\mathbf{0})=f(\mu,\beta).$
We claim that for every $(\beta,\boldsymbol{\beta})$ and $\mu\in\left[-1,1\right],$ we have
$$
f(\mu,\beta)\leq f(\mu,\beta,\boldsymbol{\beta})\leq f(\mu,\beta)+\frac{1}{2}\xi(1).
$$
To prove this, let $\mathbb{E}_{\boldsymbol{g}}$ be the expectation on the randomness of the disorder $\boldsymbol{g}$ and $\mathbb{E}_{h}$ be the expectation on the randomness of $(h_i)_{i\leq N}.$ Then we can rewrite
\begin{align*}
\frac{1}{N}\mathbb{E}\ln Z_N^{SK}(\beta,\boldsymbol{\beta},h)-\frac{1}{N}\mathbb{E}\ln Z_N^{SK}(\beta,\mathbf{0},h)=\frac{1}{N}
\mathbb{E}_h\mathbb{E}_{\boldsymbol{g}}\ln \left<\exp H_N^{SK}(\boldsymbol{\sigma})\right>^{CW},
\end{align*}
where $\left<\cdot\right>^{CW}$ is the Gibbs average for the CW model. From Jensen's inequality and using $\mathbb{E}_{\boldsymbol{g}}\exp H_N^{SK}(\boldsymbol{\sigma})=\exp\left(N\xi(1)/2\right)$ and $\mathbb{E}_{\boldsymbol{g}}H_N^{SK}(\boldsymbol{\sigma})=0$ for every $\boldsymbol{\sigma}\in\Sigma_N,$ the proof for our claim is completed since
\begin{align*}
\mathbb{E}_h\mathbb{E}_{\boldsymbol{g}}\ln \left<\exp H_N^{SK}(\boldsymbol{\sigma})\right>^{CW}
&\leq
\mathbb{E}_h\ln \left<\mathbb{E}_{\boldsymbol{g}}\exp H_N^{SK}(\boldsymbol{\sigma})\right>^{CW}
=\frac{1}{2}N\xi(1)
\end{align*}
and
\begin{align*}
\mathbb{E}_h\mathbb{E}_{\boldsymbol{g}}\ln \left<\exp H_N^{SK}(\boldsymbol{\sigma})\right>^{CW}&\geq
\mathbb{E}_h\mathbb{E}_{\boldsymbol{g}}\left<H_N^{SK}(\boldsymbol{\sigma})\right>^{CW}=0.
\end{align*}
Now, suppose $(\beta,\boldsymbol{\beta})\in \mathcal{R}_u.$ Recall from the definition of $\mathcal{R}_u$, $\beta>\beta_u$ and $\xi(1)\leq 2\delta_u(\beta).$ From Proposition $\ref{com:prop1}$, since $\mu(\cdot)$ is strictly increasing, we have $\mu(\beta)>\mu(\beta_u)=u$ for every $\beta>\beta_u$. On the other hand, since $f(\cdot,\beta)$ is strictly increasing on $\left[0,\mu(\beta)\right]$, it follows from the definition of $\mathcal{R}_u$ and our claim that
\begin{align*}
f(\mu,\beta,\boldsymbol{\beta})&\leq f(\mu,\beta)+\frac{1}{2}\xi(1)\\
&< f(u,\beta)+\frac{1}{2}\xi(1)\\
&=f(\mu(\beta),\beta)-\delta_u(\beta)+\frac{1}{2}\xi(1)\\
&\leq f(\mu(\beta),\beta,\boldsymbol{\beta})-\delta_u(\beta)+\frac{1}{2}\xi(1)\\
&<f(\mu(\beta),\beta,\boldsymbol{\beta})
\end{align*}
for every $\mu\in\left[0,u\right].$ Since $h$ is centered, $f(\cdot,\beta)$ and $f(\cdot,\beta,\boldsymbol{\beta})$ are even functions on $\left[-1,1\right]$. Thus, we may also conclude $f(-\mu,\beta,\boldsymbol{\beta})<f(-\mu(\beta),\beta,\boldsymbol{\beta})$ for $\mu\in\left[0,u\right],$
which means
$$
\Omega(\beta,\boldsymbol{\beta},h)=\mbox{Argmax}_{\mu\in\left[-1,1\right]}f(\mu,\beta,\boldsymbol{\beta})\subset \left[-1,-u\right)\cup\left(u,1\right]
$$
and we are done.
\end{Proof of theorem}
The following fundamental lemma will be used in the proof of Proposition \ref{com:thm1}.
\begin{lemma}\label{lem3}
Suppose that $(X_N)$ is a sequence of random variables with $0\leq X_N\leq 1$ for each $N.$
If $\lim_{N\rightarrow\infty}\mathbb{E}X_N=1/2$ and $\lim_{N\rightarrow\infty}\mathbb{E}X_N(1-X_N)=0,$ then $\left\{X_N\right\}$
converges to a Bernoulli$\left(1/2\right)$ r.v. weakly.
\end{lemma}
\begin{proof}
First we claim that $\mathbb{E}X_N^n\rightarrow 1/2$ for each $n\geq 1$ by induction. From the given condition,
this holds for $n=1.$ Suppose that this is true for some $n\geq 1.$ Then using the fact that $0\leq X_N\leq 1,$ we obtain
$$
\left|\mathbb{E}X_N^{n+1}-\mathbb{E}X_N^n\right|=\mathbb{E}X_N^n(1-X_N)\leq \mathbb{E}X_N(1-X_N)\rightarrow 0.
$$
Therefore, $\lim_{N\rightarrow \infty}\mathbb{E}X_N^{n+1}=\lim_{N\rightarrow\infty}\mathbb{E}X_N^n=1/2$ and this completes the proof of our claim.
Now, by using the dominated convergence theorem and our claim, the announced statement follows since
\begin{align*}
\lim_{N\rightarrow\infty}\mathbb{E}\exp(itX_N)&=\lim_{N\rightarrow\infty}\sum_{n=0}^\infty \frac{(it)^n}{n!}\mathbb{E}X_N^n
=\sum_{n=0}^\infty\frac{(it)^n}{n!}\lim_{N\rightarrow\infty }\mathbb{E}X_N^n\\
&=\frac{1}{2}+\frac{e^{it}}{2}=\mathbb{E}\exp(itX),
\end{align*}
where $X$ is Bernoulli$\left(1/2\right).$
\end{proof}
\begin{Proof of proposition} ${\bf \ref{com:thm1}:}$
From the definition of $\mathcal{R}_u$, Lemma $\ref{pos:lem2}$, and Theorem $\ref{com:thm0}$, there exists some $\mu\in (u,1)$ such that $\Omega(\beta,\boldsymbol{\beta},0)=\left\{\mu,-\mu\right\}.$ Since $\left(-u,u\right)$ has a positive distance to $\Omega(\beta,\boldsymbol{\beta},h),$ Proposition $\ref{main:prop1}$ implies
\begin{align}
\lim_{N\rightarrow\infty}\left<I(|m|\geq u)\right>=1.
\label{com:thm1:proof:eq1}
\end{align}
If $\mathbb{E}h^2=0,$ then $\left<I(m\in A)\right>=\left<I(m\in -A)\right>$ for every $A\subset\left[-1,1\right]$, where $-A:=\{-x:x\in A\}.$ Thus, the first statement follows from $(\ref{com:thm1:proof:eq1})$. Next, let $\mathbb{E}h^2\neq 0$ and $1/2<u<1.$ Recall that $\boldsymbol{\sigma}^1$ and $\boldsymbol{\sigma}^2$ are two configurations sampled independently
from the Gibbs measure $G_N$ with respect to the same realization $\boldsymbol{g}.$
Set $$m_1=m_1(\boldsymbol{\sigma}^1)=\frac{1}{N}\sum_{i\leq N}\sigma_i^1\quad\mbox{and}\quad m_2=m_2(\boldsymbol{\sigma}^2)=\frac{1}{N}\sum_{i\leq N}\sigma_i^2.$$
We claim that
\begin{align}\label{com:thm1:proof:eq2}
\left\{m_1\in\left[u,1\right],m_2\in\left[-1,-u\right]\right\}
\subset\left\{R_{1,2}\leq 1-2u\right\}.
\end{align}
Set
\begin{align*}
&P_1^+=\left\{1\leq i\leq N:\sigma_i^1=1\right\},\,\, P_1^-=\left\{1\leq i\leq N:\sigma_i^1=-1\right\},\\
&P_2^+=\left\{1\leq i\leq N:\sigma_i^2=1\right\},\,\, P_2^-=\left\{1\leq i\leq N:\sigma_i^2=-1\right\}.
\end{align*}
Suppose $m_1\in\left[u,1\right]$ and $m_2\in\left[-1,-u\right].$
Let $k$ be the smallest integer such that $u\leq {k}/{N}.$
Since $2|P_1^+|-N=|P_1^+|-|P_1^-|\geq k$ and $2|P_2^-|-N=|P_2^-|-|P_2^+|\geq k,$ it implies $|P_1^+|\geq (k+N)/2$ and $|P_2^-|\geq (k+N)/2.$ Consequently,
\begin{align*}
|P_1^+\cap P_2^-|&= |P_1^+|-|P_1^+\cap P_2^+|\geq |P_1^+|-|P_2^+|\\
&\geq \frac{k+N}{2}-\left(N-\frac{k+N}{2}\right)\geq k
\end{align*}
and our claim $(\ref{com:thm1:proof:eq2})$ follows from
\begin{align*}
NR_{1,2}&= \sum_{i\leq N}\sigma_i^1\sigma_i^2\\
&= \sum_{i\in P_1^+\cap P_2^+}\sigma_i^1\sigma_i^2+\sum_{i\in P_1^-\cap P_2^-}\sigma_i^1\sigma_i^2+\sum_{i\in P_1^+\cap P_2^-}\sigma_i^1\sigma_i^2+\sum_{i\in P_1^-\cap P_2^+}
\sigma_i^1\sigma_i^2\\
&= |P_1^+\cap P_2^+|+|P_1^-\cap P_2^-|-\left(|P_1^+\cap P_2^-|+|P_1^-\cap P_2^+|\right)\\
&\leq |P_2^+|+|P_1^-|-|P_1^+\cap P_2^-|\\
&\leq \left(N-\frac{k+N}{2}\right)+\left(N-\frac{k+N}{2}\right)-k\\
&= N-2k\\
&\leq N(1-2u).
\end{align*}
Now, set $X_N=\left<I(m\geq u)\right>$. From the independence of $m_1$ and $m_2,$ $u>1/2$, $(\ref{com:thm1:proof:eq2})$, the positivity of the overlap, and then $(\ref{com:thm1:proof:eq1}),$ we obtain
\begin{align*}
\mathbb{E}\left[X_N(1-X_N)\right]
&= \mathbb{E}\left[\left<I(m_1\geq u)\right>\left(\left<I(m_2\leq -u)\right>+\left<I(|m_2|<u)\right>\right)\right]\\
&\leq \mathbb{E}\left[\left<I(m_1\geq u)\right>\left<I(m_2\leq -u)\right>\right]
+\mathbb{E}\left[\left<I(|m_2|<u)\right>\right]\\
&\leq \mathbb{E}\left<I(R_{1,2}\leq 1-2u)\right>+\mathbb{E}\left[\left<I(|m_2|<u)\right>\right]\\
& \rightarrow 0.
\end{align*}
On the other hand, since $h$ is centered, it is easy to derive $\mathbb{E}\left<I(m\geq u)\right>=\mathbb{E}\left<I(m\leq -u)\right>$
and from $(\ref{com:thm1:proof:eq1})$, we deduce $\mathbb{E}X_N\rightarrow{1}/{2}.$ Consequently, from Lemma $\ref{lem3},$ $(X_N)$
converges weakly to a Bernoulli$\left(1/2\right)$ r.v. Write $X_N=\left<I(|m-\mu|\leq \varepsilon)\right>+Y_N$
for
$$
Y_N:=-\left<I(|m-\mu|\leq \varepsilon,m< u)\right>+\left<I(|m-\mu|>\varepsilon,m\geq u)\right>.
$$
If $0<\varepsilon<\mu,$ then
$$
|Y_N|\leq\left<I(0\leq m\leq u)\right>+\left<I(|m-\mu|>\varepsilon,m\geq u)\right>\rightarrow 0\,\,\mbox{a.s.}
$$
and it follows that $\left<I(|m-\mu|\leq \varepsilon)\right>$ converges weakly to a Bernoulli$\left(1/2\right)$ r.v.
Since $$\lim_{N\rightarrow\infty}\left<I(|m-\mu|\leq \varepsilon)+I(|m+\mu|\leq \varepsilon)\right>=1$$ a.s., we also obtain that
$\left<I(|m+\mu|\leq \varepsilon)\right>$ converges weakly to a Bernoulli$\left(1/2\right)$ r.v. and this completes the proof of the second announced result.
\end{Proof of proposition}
\end{document} |
\begin{document}
\begin{center}
{\large \bf Uniformly $S$-Noetherian rings}
Wei Qi$^{a}$,\ Hwankoo Kim$^{b}$,\ Fanggui Wang$^{c}$,\ Mingzhao Chen$^{d}$,\ Wei Zhao$^{e}$
{\footnotesize a.\ School of Mathematics and Statistics, Shandong University of Technology, Zibo 255049, China\\
b.\ Division of Computer and Information Engineering, Hoseo University, Asan 31499, Republic of Korea\\
c.\ School of Mathematical Sciences, Sichuan Normal University, Chengdu 610068, China\\
d.\ College of Mathematics and Information Science, Leshan Normal University, Leshan 614000, China\\
e.\ School of Mathematics, ABa Teachers University, Wenchuan 623002, China
}
\end{center}
\centerline { \bf Abstract}
\leftskip10truemm \rightskip10truemm \noindent
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Then $R$ is called a uniformly $S$-Noetherian ($u$-$S$-Noetherian for abbreviation) ring provided there exists an element $s\in S$ such that for any ideal $I$ of $R$, $sI \subseteq K$ for some finitely generated sub-ideal $K$ of $I$. We give the Eakin-Nagata-Formanek Theorem for $u$-$S$-Noetherian rings. Besides, the $u$-$S$-Noetherian properties on several ring constructions are given. The notion of $u$-$S$-injective modules is also introduced and studied. Finally, we obtain the Cartan-Eilenberg-Bass Theorem for uniformly $S$-Noetherian rings.
\vbox to 0.3cm{}\\
{\it Key Words:} $u$-$S$-Noetherian rings, $S$-Noetherian rings, $u$-$S$-injective modules, ring constructions.\\
{\it 2010 Mathematics Subject Classification:} 13E05, 13A15.
\leftskip0truemm \rightskip0truemm
\section{Introduction}
Throughout this article, $R$ is always a commutative ring with identity. For a subset $U$ of an $R$-module $M$, we denote by $\langle U\rangle$ the submodule of $M$ generated by $U$. A subset $S$ of $R$ is called a multiplicative subset of $R$ if $1\in S$ and $s_1s_2\in S$ for any $s_1\in S$, $s_2\in S$. Recall from Anderson and Dumitrescu \cite{ad02} that a ring $R$ is called an \emph{$S$-Noetherian ring} if for any ideal $I$ of $R$, there is a finitely generated sub-ideal $K$ of $I$ such that $sI\subseteq K$ for some $s\in S$. Cohen's Theorem, Eakin-Nagata Theorem and Hilbert Basis Theorem for $S$-Noetherian rings are given in \cite{ad02}. Many algebraists have paid considerable attention to the notion of $S$-Noetherian rings, especially in the $S$-Noetherian properties of ring constructions. In 2007, Liu \cite{l07} characterized a ring $R$ when the generalized power series ring $[[R^{M,\leq}]]$ is an $S$-Noetherian ring under some additional conditions. In 2014, Lim and Oh \cite{lO14} obtained some $S$-Noetherian properties on amalgamated algebras along an ideal. They \cite{lO15} also studied $S$-Noetherian properties on the composite semigroup rings and the composite generalized power series rings the next year. In 2016, Ahmed and Sana \cite{as16} gave an $S$-version of the Eakin-Nagata-Formanek Theorem for $S$-Noetherian rings in the case where $S$ is finite. Very recently, Kim, Mahdou, and Zahir \cite{kmz21} established a necessary and sufficient condition for a bi-amalgamation to inherit the $S$-Noetherian property. Some generalizations of $S$-Noetherian rings can be found in \cite{bh18,kkl14}.
However, in the definition of $S$-Noetherian rings, the choice of $s\in S$ such that $sI\subseteq K\subseteq I$ with $K$ finitely generated is dependent on the ideal $I$. This dependence sets many obstacles to the further study of $S$-Noetherian rings. The main motivation of this article is to introduce and study a ``uniform'' version of $S$-Noetherian rings. In fact, we say a ring $R$ is \emph{uniformly $S$-Noetherian} ($u$-$S$-Noetherian for abbreviation) provided there exists an element $s\in S$ such that for any ideal $I$ of $R$, $sI \subseteq K$ for some finitely generated sub-ideal $K$ of $I$. Trivially, Noetherian rings are $u$-$S$-Noetherian, and $u$-$S$-Noetherian rings are $S$-Noetherian. Some counterexamples are given in Example \ref{exam-not-ut} and Example \ref{exam-not-ut-1}. We also consider the notion of $u$-$S$-Noetherian modules (see Definition \ref{us-no-module}), and then obtain the Eakin-Nagata-Formanek Theorem for $u$-$S$-Noetherian modules (see Theorem \ref{u-s-noe-char}) which generalizes some part of the result in \cite[Corollary 2.1]{as16}. The $S$-extension property of $S$-Noetherian modules is given in Proposition \ref{s-u-noe-s-exact}. In Section $3$, we mainly consider the $u$-$S$-Noetherian properties on some ring constructions, including trivial extensions, pullbacks and amalgamated algebras along an ideal (see Proposition \ref{trivial extension-usn}, Proposition \ref{pullback-usn} and Proposition \ref{amag-usn}). In Section $4$, we first introduce the notion of $u$-$S$-injective modules $E$ for which ${\rm Hom}_R(-,E)$ preserves $u$-$S$-exact sequences (see Definition \ref{u-S-tor-ext}), and then characterize it by $u$-$S$-torsion properties of the ``Ext'' functor in Theorem \ref{s-inj-ext}. The Baer's Criterion for $u$-$S$-injective modules is given in Proposition \ref{s-inj-baer}. Finally, we obtain the Cartan-Eilenberg-Bass Theorem for uniformly $S$-Noetherian rings as follows (see Theorem \ref{s-injective-ext}):
\begin{theo}\label{s-injective-ext}
Let $R$ be a ring, $S$ a multiplicative subset of $R$ consisting of non-zero-divisors. Then the following assertions are equivalent:
\begin{enumerate}
\item $R$ is $u$-$S$-Noetherian;
\item any direct sum of injective modules is $u$-$S$-injective;
\item any direct union of injective modules is $u$-$S$-injective.
\end{enumerate}
\end{theo}
\section{$u$-$S$-Noetherian rings and $u$-$S$-Noetherian modules}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Recall from \cite{ad02} that $R$ is called an $S$-Noetherian ring (resp., $S$-PIR) provided that for any ideal $I$ of $R$ there exists an element $s\in S$ and a finitely (resp., principally) generated sub-ideal $K$ of $I$ such that $sI\subseteq K$. Note that the choice of $s$ is decided by the ideal $I$. Now we introduce some ``uniform'' versions of $S$-Noetherian rings and $S$-PIRs.
\begin{definition} Let $R$ be a ring and $S$ a multiplicative subset of $R$.
\begin{enumerate}
\item $R$ is called a $u$-$S$-Noetherian ring provided there exists an element $s\in S$ such that for any ideal $I$ of $R$, $sI \subseteq K$ for some finitely generated sub-ideal $K$ of $I$.
\item $R$ is called a $u$-$S$-Principal ideal ring $($$u$-$S$-PIR for short$)$ provided there exists an element $s\in S$ such that for any ideal $I$ of $R$, $sI \subseteq (a)$ for some element $a\in I$.
\end{enumerate}
\end{definition}
If the element $s$ can be chosen to be the identity in the definition of $u$-$S$-Noetherian rings, then $u$-$S$-Noetherian rings are exactly Noetherian rings. Thus, every Noetherian ring is $u$-$S$-Noetherian. However, the converse does not hold generally.
\begin{example}\label{exam-not-ut}
Let $R=\prod\limits_{i=1}^{\infty}\mathbb{Z}_2$ be the countably infinite direct product of $\mathbb{Z}_2$; then $R$ is not Noetherian. Let $e_i$ be the element in $R$ whose $i$-th component is $1$ and whose other components are $0$. Denote $S=\{1,e_i\mid i\geq 1\}$. Then $R$ is $u$-$S$-Noetherian. Indeed, let $I$ be an ideal of $R$. If all elements in $I$ have first component equal to $0$, we have $e_1I=0$; otherwise $e_1I=e_1R$. Thus $e_1I$ is principally generated. Consequently $R$ is a $u$-$S$-PIR, and so is $u$-$S$-Noetherian.
\end{example}
Let $R$ be a ring, $M$ an $R$-module and $S$ a multiplicative subset of $R$. For any $s\in S$, there is a multiplicative subset $S_s=\{1,s,s^2,\dots\}$ of $S$. We denote by $M_s$ the localization of $M$ at $S_s$. Certainly, $M_s\cong M\otimes_RR_s$.
\begin{lemma} \label{s-loc-u-noe}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. If $R$ is a $u$-$S$-Noetherian ring $($resp., $u$-$S$-PIR$)$, then there exists an element $s\in S$ such that $R_{s}$ is a Noetherian ring $($resp., PIR$)$.
\end{lemma}
\begin{proof} Since $R$ is $u$-$S$-Noetherian, there exists an element $s\in S$ such that for any ideal $I$ of $R$ there is a finitely (resp., principally) generated sub-ideal $K$ of $I$ with $sI\subseteq K$. Let $J$ be an ideal of $R_{s}$. Then there exists an ideal $I'$ of $R$ such that $J= I'_s$, and hence $sI' \subseteq K'$ for some finitely (resp., principally) generated sub-ideal $K'$ of $I'$. So $J= I'_s= K'_s$ is a finitely (resp., principally) generated ideal of $R_{s}$. Consequently, $R_{s}$ is a Noetherian ring (resp., PIR).
\end{proof}
\begin{proposition} \label{s-loc-u-noe-fini}
Let $R$ be a ring and $S$ a multiplicative subset of $R$ consisting of finitely many elements. Then $R$ is a $u$-$S$-Noetherian ring $($resp., $u$-$S$-PIR$)$ if and only if $R$ is an $S$-Noetherian ring $($resp., $S$-PIR$)$.
\end{proposition}
\begin{proof} If $R$ is a $u$-$S$-Noetherian ring $($resp., $u$-$S$-PIR$)$, then trivially $R$ is an $S$-Noetherian ring (resp., $S$-PIR). Conversely, suppose $R$ is an $S$-Noetherian ring (resp., $S$-PIR). Write $S=\{s_1,...,s_n\}$ and set $s=s_1\cdots s_n$. Then for any ideal $I$ of $R$, there is a finitely (resp., principally) generated sub-ideal $J$ of $I$
such that $s_II\subseteq J$ for some $s_I\in S$. Then $sI\subseteq s_II\subseteq J$. So $R$ is a $u$-$S$-Noetherian ring $($resp., $u$-$S$-PIR$)$.
\end{proof}
The following example shows $S$-Noetherian rings are not $u$-$S$-Noetherian in general.
\begin{example}\label{exam-not-ut-1}
Let $R=k[x_1,x_2,\dots]$ be the polynomial ring in countably infinitely many variables over a field $k$. Set $S=R-\{0\}$. Then $R$ is an $S$-Noetherian ring. However, $R$ is not $u$-$S$-Noetherian.
\end{example}
\begin{proof}
Certainly, $R$ is an $S$-Noetherian ring. Indeed, let $I$ be a non-zero ideal of $R$. Suppose $0\not=s\in I$. Then $sI\subseteq sR\subseteq I$. Thus $I$ is $S$-principally generated. So $R$ is an $S$-PIR and thus an $S$-Noetherian ring.
We claim that $R$ is not $u$-$S$-Noetherian. Assume on the contrary that $R$ is $u$-$S$-Noetherian. Then $R_{s}$ is a Noetherian ring for some $s\in S$ by Lemma \ref{s-loc-u-noe}. Let $n$ be the minimal number such that $x_m$ does not divide any monomial of $s$ for any $m\geq n$. Then $R_{s}\cong T[x_n,x_{n+1},\dots]$ where $T=k[x_1,x_2,\dots,x_{n-1}]_s$. Obviously, $R_{s}\cong T[x_n,x_{n+1},\dots]$ is not Noetherian since the ideal generated by $\{x_n,x_{n+1},\dots\}$ is not a finitely generated ideal of $T[x_n,x_{n+1},\dots]$. So $R$ is not $u$-$S$-Noetherian.
\end{proof}
Recall from \cite{ad02} that an $R$-module $M$ is called an $S$-Noetherian module if every submodule of $M$ is $S$-finite, that is, for any submodule $N$ of $M$, there is an element $s\in S$ and a finitely generated $R$-module $F$ such that $sN\subseteq F\subseteq N$. Note that the choice of $s$ is decided by the submodule $N$. The rest of this section mainly studies a ``uniform'' version of $S$-Noetherian modules. Let $\{M_j\}_{j\in \Gamma}$ be a family of $R$-modules and $N_j$ a submodule of $M_j$ generated by $\{m_{i,j}\}_{i\in \Lambda_j}\subseteq M_j$ for each $j\in \Gamma$. Recall from \cite{z21} that a family of $R$-modules $\{M_j\}_{j\in \Gamma}$ is \emph{$u$-$S$-generated} (with respect to $s$) by $\{\{m_{i,j}\}_{i\in \Lambda_j}\}_{j\in \Gamma}$ provided that there exists an element $s\in S$ such that $sM_j\subseteq N_j$ for each $j\in \Gamma$, where $N_j=\langle \{m_{i,j}\}_{i\in \Lambda_j}\rangle$. We say a family of $R$-modules $\{M_j\}_{j\in \Gamma}$ is \emph{$u$-$S$-finite} (with respect to $s$) if the set $\{m_{i,j}\}_{i\in \Lambda_j}$ can be chosen as a finite set for each $j\in \Gamma$.
\begin{definition}\label{us-no-module} Let $R$ be a ring and $S$ a multiplicative subset of $R$. An $R$-module $M$ is called a $u$-$S$-Noetherian $R$-module provided the set of all submodules of $M$ is $u$-$S$-finite.
\end{definition}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Recall from \cite{z21} that an $R$-module $T$ is called a \emph{$u$-$S$-torsion module} provided that there exists an element $s\in S$ such that $sT=0$. Obviously, $u$-$S$-torsion modules are $u$-$S$-Noetherian. A ring $R$ is $u$-$S$-Noetherian if and only if it is $u$-$S$-Noetherian as an $R$-module. It is well known that an $R$-module $M$ is Noetherian if and only if $M$ satisfies the ascending chain condition on submodules, if and only if $M$ satisfies the maximal condition (see \cite{n93}). In 2016, Ahmed et al. \cite{as16} obtained an $S$-version of this result provided $S$ is a finite set and called it the $S$-version of the Eakin-Nagata-Formanek Theorem. Next we will give a uniform $S$-version of the Eakin-Nagata-Formanek Theorem for any multiplicative subset $S$ of $R$.
First, we recall from \cite[Definition 2.1]{as16} some modified notions of $S$-stationary ascending chains of $R$-modules and $S$-maximal elements of a family of $R$-modules. Let $R$ be a ring, $S$ a multiplicative subset of $R$ and $M$ an $R$-module. Denote by $M^{\bullet}$ an ascending chain $M_1\subseteq M_2\subseteq \dots $ of submodules of $M$. An ascending chain $M^{\bullet}$ is called \emph{stationary with respect to $s$} if there exists $k\geq 1$ such that $sM_n\subseteq M_k$ for any $n\geq k$. Let $\{M_i\}_{i\in \Lambda}$ be a family of submodules of $M$. We say an $R$-module $M_0\in \{M_i\}_{i\in \Lambda}$ is \emph{maximal with respect to $s$} provided that if $M_0\subseteq M_i$ for some $M_i\in \{M_i\}_{i\in \Lambda}$, then $sM_i\subseteq M_0$.
\begin{theorem} \label{u-s-noe-char} {\bf (Eakin-Nagata-Formanek Theorem for uniformly $S$-Noetherian rings)}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Let $M$ be an $R$-module. Then the following assertions are equivalent:
\begin{enumerate}
\item $M$ is $u$-$S$-Noetherian;
\item there exists an element $s\in S$ such that any ascending chain of submodules of $M$ is stationary with respect to $s$;
\item there exists an element $s\in S$ such that any non-empty family of submodules of $M$ has a maximal element with respect to $s$.
\end{enumerate}
\end{theorem}
\begin{proof} $(1)\Rightarrow (2):$ Let $M_1\subseteq M_2\subseteq ... $ be an ascending chain of submodules of $M$. Set $M_0=\bigcup\limits_{i=1}^{\infty}M_i$. Then there exist an element $s\in S$ and a finitely generated submodule $N_i$ of $M_i$ such that $sM_i\subseteq N_i$ for each $i\geq 0$.
Since $N_0$ is finitely generated, there exists $k\geq 1$ such that $N_0\subseteq M_k$. Thus $sM_0\subseteq M_k$. So $sM_n\subseteq M_k$ for any $n\geq k$.
$(2)\Rightarrow (3):$ Let $\Gamma$ be a nonempty family of submodules of $M$. On the contrary, take any $M_1\in \Gamma$. Then $M_1$ is not a maximal element with respect to $s$ for any $s\in S$. Thus there is $M_2\in \Gamma$ such that $s M_2\not\subseteq M_1$. Since $M_2$ is not a maximal element with respect to $s$, there is $M_3\in \Gamma$ such that $s M_3\not\subseteq M_2$. Similarly, we can get an ascending chain $M_1\subseteq M_2\subseteq \dots\subseteq M_n\subseteq M_{n+1}\subseteq \dots$ such that $sM_{n+1}\not\subseteq M_n$ for any $n\geq 1$. Obviously, this ascending chain is not stationary with respect to any $s\in S$.
$(3)\Rightarrow (1):$ Let $N$ be a submodule of $M$ and $s\in S$ the element in $(3)$. Set $\Gamma=\{A\subseteq N\mid $ there is a finitely generated submodule $F_A$ of $A$ satisfying $sA\subseteq F_A\}$. Since $0\in \Gamma$, $\Gamma$ is nonempty. Thus $\Gamma$ has a maximal element $A$. If $A\not=N$, then there is an element $x\in N-A$. Since $F_1=F_A+Rx$ is a finitely generated submodule of $A_1=A+Rx$ such that $sA_1\subseteq F_1$, we have $A_1\in \Gamma$, which contradicts the maximality of $A$.
\end{proof}
\begin{corollary} \label{u-s-noe-ring-char}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Then the following assertions are equivalent:
\begin{enumerate}
\item $R$ is $u$-$S$-Noetherian;
\item there exists an element $s\in S$ such that any ascending chain of ideals of $R$ is stationary with respect to $s$;
\item there exists an element $s\in S$ such that any nonempty family of ideals of $R$ has a maximal element with respect to $s$.
\end{enumerate}
\end{corollary}
We can rediscover the following result by Proposition \ref{s-loc-u-noe-fini}.
\begin{corollary} \cite[Corollary 2.1]{as16}\label{u-s-noe-char-s}
Let $R$ be a ring and $S$ a multiplicative subset of $R$ consisting of finitely many elements. Then the following assertions are equivalent:
\begin{enumerate}
\item $R$ is an $S$-Noetherian ring;
\item every increasing sequence of ideals of $R$ is $S$-stationary;
\item every nonempty set of ideals of $R$ has an $S$-maximal element.
\end{enumerate}
\end{corollary}
Recall from \cite{z21} that an $R$-sequence $M\xrightarrow{f} N\xrightarrow{g} L$ is called \emph{$u$-$S$-exact} provided that there is an element $s\in S$ such that $s{\rm Ker}(g)\subseteq {\rm Im}(f)$ and $s{\rm Im}(f)\subseteq {\rm Ker}(g)$. An $R$-homomorphism $f:M\rightarrow N$ is a \emph{$u$-$S$-monomorphism} $($resp., \emph{$u$-$S$-epimorphism}, \emph{$u$-$S$-isomorphism}$)$ provided $0\rightarrow M\xrightarrow{f} N$ $($resp., $M\xrightarrow{f} N\rightarrow 0$, $0\rightarrow M\xrightarrow{f} N\rightarrow 0$ $)$ is $u$-$S$-exact. It is easy to verify that an $R$-homomorphism $f:M\rightarrow N$ is a $u$-$S$-monomorphism $($resp., $u$-$S$-epimorphism$)$ if and only if ${\rm Ker}(f)$ $($resp., ${\rm Coker}(f))$ is a $u$-$S$-torsion module.
\begin{lemma}\label{s-exct-tor}\cite[Proposition 2.8]{z21}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Let $0\rightarrow A\xrightarrow{f} B\xrightarrow{g} C\rightarrow 0$ be a $u$-$S$-exact sequence. Then $B$ is $u$-$S$-torsion if and only if $A$ and $C$ are $u$-$S$-torsion.
\end{lemma}
\begin{lemma}\label{s-exct-diag}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Let
$$\xymatrix@R=20pt@C=25pt{
0 \ar[r]^{}&A_1\ar@{^{(}->}[d]^{i_A}\ar[r]& B_1 \ar[r]^{\pi_1}\ar@{^{(}->}[d]^{i_B}&C_1\ar[r] \ar@{^{(}->}[d]^{i_C} &0\\
0 \ar[r]^{}&A_2\ar[r]&B_2 \ar[r]^{\pi_2}&C_2\ar[r] &0\\}$$
be a commutative diagram with exact rows, where $i_A, i_B$ and $i_C$ are embedding maps. Suppose $s_A A_2\subseteq A_1$ and $s_C C_2\subseteq C_1$ for some $s_A\in S, s_C\in S$. Then $s_As_CB_2\subseteq B_1$.
\end{lemma}
\begin{proof} Let $x\in B_2$. Then $\pi_2(x)\in C_2$. Thus $s_C\pi_2(x)=\pi_2(s_Cx)\in C_1$. So we have $\pi_1(y)=\pi_2(y)=\pi_2(s_Cx)$ for some $y\in B_1$. Thus $s_Cx-y=a_2$ for some $a_2\in A_2$. It follows that $s_As_Cx=s_Ay+s_Aa_2\in B_1$. Consequently, $s_As_CB_2\subseteq B_1$.
\end{proof}
\begin{lemma} \label{s-u-noe-exact}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Let $0\rightarrow A\rightarrow B\rightarrow C\rightarrow 0$ be an exact sequence. Then $B$ is $u$-$S$-Noetherian if and only if $A$ and $C$ are $u$-$S$-Noetherian.
\end{lemma}
\begin{proof} It is easy to verify that if $B$ is $u$-$S$-Noetherian, so are $A$ and $C$. Suppose $A$ and $C$ are $u$-$S$-Noetherian. Let $\{B_i\}_{i\in \Lambda}$ be the set of all submodules of $B$. Then there exists an element $s_1\in S$ such that $s_1(A\cap B_i)\subseteq K_i\subseteq A\cap B_i$ for some finitely generated $R$-module $K_i$ and any $i\in \Lambda$, since $A$ is $u$-$S$-Noetherian. There also exists an element $s_2\in S$ such that $s_2(B_i+A)/A\subseteq L_i\subseteq (B_i+A)/A$ for some finitely generated $R$-module $L_i$ and any $i\in \Lambda$, since $C$ is $u$-$S$-Noetherian. Let $N_i$ be the finitely generated submodule of $B_i$ generated by the finite generators of $K_i$ and finite pre-images of generators of $L_i$. Consider the following natural commutative diagram with exact rows: $$\xymatrix@R=20pt@C=25pt{
0 \ar[r]^{}&K_i\ar@{^{(}->}[d]\ar[r]&N_i \ar[r]\ar@{^{(}->}[d]&L_i\ar[r] \ar@{^{(}->}[d] &0\\
0 \ar[r]^{}&A\cap B_i\ar[r]&B_i \ar[r]&(B_i+A)/A \ar[r] &0.\\}$$
Set $s=s_1s_2\in S$. We have $sB_i\subseteq N_i\subseteq B_i$ by Lemma \ref{s-exct-diag}. So $B$ is $u$-$S$-Noetherian.
\end{proof}
\begin{proposition} \label{s-u-noe-s-exact}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Let $0\rightarrow A\rightarrow B\rightarrow C\rightarrow 0$ be a $u$-$S$-exact sequence. Then $B$ is $u$-$S$-Noetherian if and only if $A$ and $C$ are $u$-$S$-Noetherian.
\end{proposition}
\begin{proof} Let $0\rightarrow A\xrightarrow{f} B\xrightarrow{g} C\rightarrow 0$ be a $u$-$S$-exact sequence. Then there exists an element $s\in S$ such that $ s{\rm Ker}(g)\subseteq {\rm Im}(f)$ and $ s{\rm Im}(f)\subseteq {\rm Ker}(g)$. Note that ${\rm Im}(f)/s{\rm Ker}(g)$ and ${\rm Ker}(g)/s{\rm Im}(f)$ are $u$-$S$-torsion. If ${\rm Im}(f)$ is $u$-$S$-Noetherian, then the submodule $s{\rm Im}(f)$ of ${\rm Im}(f)$ is $u$-$S$-Noetherian. Thus ${\rm Ker}(g)$ is $u$-$S$-Noetherian by Lemma \ref{s-u-noe-exact}. Similarly, if ${\rm Ker}(g)$ is $u$-$S$-Noetherian, then ${\rm Im}(f)$ is $u$-$S$-Noetherian. Consider the following three exact sequences:
$0\rightarrow{\rm Ker}(g) \rightarrow B\rightarrow {\rm Im}(g)\rightarrow 0,\quad 0\rightarrow{\rm Im}(g) \rightarrow C\rightarrow {\rm Coker}(g)\rightarrow 0,$ and $0\rightarrow{\rm Ker}(f) \rightarrow A\rightarrow {\rm Im}(f)\rightarrow 0$
with ${\rm Ker}(f)$ and ${\rm Coker}(g)$ $u$-$S$-torsion. It is easy to verify that $B$ is $u$-$S$-Noetherian if and only if $A$ and $C$ are $u$-$S$-Noetherian by Lemma \ref{s-u-noe-exact}.
\end{proof}
\begin{corollary} \label{s-u-noe-u-iso}
Let $R$ be a ring, $S$ a multiplicative subset of $R$ and $ M\xrightarrow{f} N$ a $u$-$S$-isomorphism. If one of $M$ and $N$ is $u$-$S$-Noetherian, so is the other.
\end{corollary}
\begin{proof} It follows from Proposition \ref{s-u-noe-s-exact} since $0\rightarrow M\xrightarrow{f} N\rightarrow 0\rightarrow 0$ is a $u$-$S$-exact sequence.
\end{proof}
Let $\frak p$ be a prime ideal of $R$. We say an $R$-module $M$ is \emph{$u$-$\frak p$-Noetherian} provided that $M$ is $u$-$(R\setminus\frak p)$-Noetherian. The next result gives a local characterization of Noetherian modules.
\begin{proposition}\label{s-noe-m-loc-char}
Let $R$ be a ring and $M$ an $R$-module. Then the following statements are equivalent:
\begin{enumerate}
\item $M$ is Noetherian;
\item $M$ is $u$-$\frak p$-Noetherian for any $\frak p\in {\rm Spec}(R)$;
\item $M$ is $u$-$\frak m$-Noetherian for any $\frak m\in {\rm Max}(R)$.
\end{enumerate}
\end{proposition}
\begin{proof} $(1)\Rightarrow (2)\Rightarrow (3):$ Trivial.
$(3)\Rightarrow (1):$ Let $N$ be a submodule of $M$. Then for each $\frak m\in {\rm Max}(R)$, there exists an element $s^{\frak m}\in R\setminus\frak m$ and a finitely generated submodule $F^{\frak m}$ of $N$ such that $s^{\frak m}N\subseteq F^{\frak m}$. Since $\{s^{\frak m} \mid \frak m \in {\rm Max}(R)\}$ generates $R$, there exist finitely many elements $\{s^{\frak m_1},...,s^{\frak m_n}\}$ such that $N=\langle s^{\frak m_1},...,s^{\frak m_n}\rangle N\subseteq F^{\frak m_1}+...+F^{\frak m_n}\subseteq N$. So $N=F^{\frak m_1}+...+F^{\frak m_n}$. It follows that $N$ is finitely generated, and thus $M$ is Noetherian.
\end{proof}
\begin{corollary}\label{s-noe-r-loc-char}
Let $R$ be a ring. Then the following statements are equivalent:
\begin{enumerate}
\item $R$ is a Noetherian ring;
\item $R$ is a $u$-$\frak p$-Noetherian ring for any $\frak p\in {\rm Spec}(R)$;
\item $R$ is a $u$-$\frak m$-Noetherian ring for any $\frak m\in {\rm Max}(R)$.
\end{enumerate}
\end{corollary}
\section{$u$-$S$-Noetherian properties on some ring constructions}
In this section, we mainly consider the $u$-$S$-Noetherian properties on trivial extensions, pullbacks and amalgamated algebras along an ideal. For more on these ring constructions, one can refer to \cite{DW09,lO14}.
Let $R$ be a commutative ring and $M$ be an $R$-module. Then the \emph{trivial extension} of $R$ by $M$ denoted by $R(+)M$ is equal to $R\bigoplus M$ as $R$-modules with coordinate-wise addition and multiplication $(r_1,m_1)(r_2,m_2)=(r_1r_2,r_1m_2+r_2m_1)$. It is easy to verify that $R(+)M$ is a commutative ring with identity $(1,0)$. Let $S$ be a multiplicative subset of $R$. Then it is easy to verify that $S(+)M=\{(s,m)|s\in S, m\in M\}$ is a multiplicative subset of $R(+)M$. Now, we give a $u$-$S$-Noetherian property on the trivial extension.
\begin{proposition}\label{trivial extension-usn} Let $R$ be a commutative ring, $S$ a multiplicative subset of $R$ and $M$ an $R$-module. Then $R(+)M$ is a $u$-$S(+)M$-Noetherian ring if and only if $R$ is a $u$-$S$-Noetherian ring and $M$ is a $u$-$S$-Noetherian $R$-module.
\end{proposition}
\begin{proof} Note that we have an exact sequence of $R(+)M$-modules: $$0\rightarrow 0(+)M\xrightarrow{i} R(+)M\xrightarrow{\pi} R\rightarrow 0.$$ Suppose $R(+)M$ is a $u$-$S(+)M$-Noetherian ring. Let $\{I_i\}_{i\in \Lambda}$ be the set of all ideals of $R$. Then $\{I_i(+)M\}_{i\in \Lambda}$ is a set of ideals of $R(+)M$. So there is an element $(s,m)\in S(+)M$ and finitely generated sub-ideals $O_i$ of $I_i(+)M$ such that $(s,m)I_i(+)M\subseteq O_i$. Thus $sI_i\subseteq \pi(O_i)\subseteq I_i$. Suppose $O_i$ is generated by $\{(r_{1,i},m_{1,i}),...,(r_{n,i},m_{n,i})\}$. Then it is easy to verify that $\pi(O_i)$ is generated by $\{r_{1,i},...,r_{n,i}\}$. So $R$ is a $u$-$S$-Noetherian ring. Let $\{M_i\}_{i\in \Gamma}$ be the set of all submodules of $M$. Then $\{0(+)M_i\}_{i\in \Gamma}$ is a set of ideals of $R(+)M$. Thus there is an element $(s',m')\in S(+)M$ and finitely generated sub-ideals $O'_i$ of $0(+)M_i$ such that $(s',m')0(+)M_i\subseteq O'_i$. So $s'M_i\subseteq N_i\subseteq M_i$ where $0(+)N_i=O'_i$. Suppose that $O'_i$ is generated by $\{(r'_{1,i},m'_{1,i}),...,(r'_{n,i},m'_{n,i})\}$. Then it is easy to verify that $N_i$ is generated by $\{m'_{1,i},...,m'_{n,i}\}$. Thus $M$ is a $u$-$S$-Noetherian $R$-module.
Suppose $R$ is a $u$-$S$-Noetherian ring and $M$ is a $u$-$S$-Noetherian $R$-module. Let $O^{\bullet}: O_1\subseteq O_2\subseteq ...$ be an ascending chain of ideals of $R(+)M$. Then there is an ascending chain of ideals of $R$: $\pi(O^{\bullet}): \pi(O_1)\subseteq \pi(O_2)\subseteq ...$. Thus there is an element $s\in S$ which is independent of $O^{\bullet}$ satisfying that there exists $k\in \mathbb{Z}^{+}$ such that $s\pi(O_n)\subseteq \pi(O_k)$ for any $n\geq k$. Similarly, $O^{\bullet}\cap 0(+)M: O_1\cap 0(+)M\subseteq O_2\cap 0(+)M\subseteq ...$ is an ascending chain of sub-ideals of $0(+)M$ which are equivalent to some submodules of $M$. So there is an element $s'\in S$ satisfying that there exists $k'\in \mathbb{Z}^{+}$ such that $s'(O_n\cap 0(+)M)\subseteq O_{k'}\cap 0(+)M$ for any $n\geq k'$. Let $l=\max(k,k')$ and $n\geq l$. Consider the following natural commutative diagram with exact rows:
$$\xymatrix@R=20pt@C=25pt{
0 \ar[r]^{}&O_l\cap 0(+)M \ar@{^{(}->}[d]\ar[r]&O_l \ar[r]\ar@{^{(}->}[d]&\pi(O_l)\ar[r] \ar@{^{(}->}[d] &0\\
0 \ar[r]^{}&O_n\cap 0(+)M \ar[r]&O_n \ar[r]&\pi(O_n) \ar[r] &0.\\}$$
Set $t=ss'$. Then we have $tO_n\subseteq O_l$ for any $n\geq l$ by Lemma \ref{s-exct-diag}. So $R(+)M$ is a $u$-$S(+)M$-Noetherian ring by Theorem \ref{u-s-noe-char}.
\end{proof}
Let $\alpha: A\rightarrow C$ and $\beta: B\rightarrow C$ be ring homomorphisms. Then the subring $$D:= \alpha \times_C \beta:= \{(a, b)\in A\times B | \alpha(a) =\beta(b)\}$$ of $A\times B$ is called the \emph{pullback} of $\alpha$ and $\beta$. Let $D$ be a pullback of $\alpha$ and $\beta$. Then there is a pullback diagram in the category of commutative rings:
$$\xymatrix@R=20pt@C=25pt{
D\ar[d]^{p_B}\ar[r]^{p_A}& A \ar[d]^{\alpha}\\
B\ar[r]^{\beta}&C. \\
}$$
If $S$ is a multiplicative subset of $D$, then it is easy to verify that $p_A(S):=\{p_A(s)\in A|s\in S\}$ is a multiplicative subset of $A$. Now, we give a $u$-$S$-Noetherian property on the pullback diagram.
\begin{proposition}\label{pullback-usn} Let $\alpha: A\rightarrow C$ be a ring homomorphism and $\beta: B\rightarrow C$ a surjective ring homomorphism. Let $D$ be the pullback of $\alpha$ and $\beta$. If $S$ is a multiplicative subset of $D$, then
the following assertions are equivalent:
\begin{enumerate}
\item $D$ is a $u$-$S$-Noetherian ring;
\item $A$ is a $u$-$p_A(S)$-Noetherian ring and ${\rm Ker}(\beta)$ is a $u$-$S$-Noetherian $D$-module.
\end{enumerate}
\end{proposition}
\begin{proof} Let $D$ be the pullback of $\alpha$ and $\beta$. Since $\beta$ is a surjective ring homomorphism, so is $p_A$. Then there is a short exact sequence of $D$-modules:
$$0\rightarrow {\rm Ker}(\beta)\rightarrow D\rightarrow A\rightarrow 0.$$ By Proposition \ref{s-u-noe-s-exact}, $D$ is a $u$-$S$-Noetherian $D$-module if and only if ${\rm Ker}(\beta)$ and $A$ are $u$-$S$-Noetherian $D$-modules. Since $p_A$ is surjective, the $D$-submodules of $A$ are exactly the ideals of the ring $A$. Thus $A$ is a $u$-$S$-Noetherian $D$-module if and only if $A$ is a $u$-$p_A(S)$-Noetherian ring.
\end{proof}
Let $f:A\rightarrow B$ be a ring homomorphism and $J$ an ideal of $B$. Following from \cite{df09} the \emph{amalgamation} of $A$ with $B$ along $J$ with respect to $f$, denoted by $A\bowtie^fJ$, is defined as $$A\bowtie^fJ=\{(a,f(a)+j)|a\in A,j\in J\},$$ which is a subring of $A \times B$. Following from \cite[Proposition 4.2]{df09}, $A\bowtie^fJ$ is the pullback $\widehat{f}\times_{B/J}\pi$,
where $\pi:B\rightarrow B/J$ is the natural epimorphism and $\widehat{f}=\pi\circ f$:
$$\xymatrix@R=20pt@C=25pt{
A\bowtie^fJ\ar[d]^{p_B}\ar[r]_{p_A}& A\ar[d]^{\widehat{f}}\\
B\ar[r]^{\pi}&B/J. \\
}$$
For a multiplicative subset $S$ of $A$, set $S^\prime:= \{(s, f (s)) | s \in S\},$ and $f(S):=\{f(s)\in B|s\in S\}$. Then it is easy to verify that $S^\prime$ and $f(S)$ are multiplicative subsets of $A\bowtie^fJ$ and $B$ respectively.
\begin{lemma} \label{s-u-noe-epi}
Let $\alpha:R\rightarrow R^\prime$ be a surjective ring homomorphism and $S$ a multiplicative subset of $R$. If $R$ is a $u$-$S$-Noetherian ring, then $R^\prime$ is a $u$-$\alpha(S)$-Noetherian ring.
\end{lemma}
\begin{proof} Since $R$ is $u$-$S$-Noetherian, there is an element $s\in S$ such that for any ideal $J$ of $R$, there exists a finitely generated sub-ideal $F_J$ of $J$ satisfying $sJ\subseteq F_J$. Let $I$ be an ideal of $R^\prime$. Since $\alpha:R\rightarrow R^\prime$ is a surjective ring homomorphism, there exists an ideal $\alpha^{-1}(I)$ of $R$ such that $\alpha(\alpha^{-1}(I))=I$. Thus there exists a finitely generated sub-ideal $F_{\alpha^{-1}(I)}$ of $\alpha^{-1}(I)$ satisfying $s\alpha^{-1}(I)\subseteq F_{\alpha^{-1}(I)}$. So $\alpha(F_{\alpha^{-1}(I)})$ is a finitely generated sub-ideal of $I$ satisfying $\alpha(s)I\subseteq \alpha(F_{\alpha^{-1}(I)})$.
\end{proof}
\begin{proposition}\label{amag-usn} Let $f :A\rightarrow B$ be a ring homomorphism, $J$ an ideal of $B$ and $S$ a multiplicative subset of $A$. Set $S^\prime= \{(s, f (s)) | s \in S\}$ and $f(S)=\{f(s)\in B|s\in S\}$. Then the following statements
are equivalent:
\begin{enumerate}
\item $A\bowtie^fJ$ is a $u$-$S^\prime$-Noetherian ring;
\item $A$ is a $u$-$S$-Noetherian ring and $J$ is a $u$-$S^\prime$-Noetherian $A\bowtie^fJ$-module $($with the $A\bowtie^fJ$-module structure naturally induced by $p_B$, where $p_B : A\bowtie^fJ\rightarrow B$ defined by $(a,f(a)+j)\mapsto f(a)+j)$;
\item $A$ is a $u$-$S$-Noetherian ring and $f(A)+J$ is a $u$-$f(S)$-Noetherian ring.
\end{enumerate}
\end{proposition}
\begin{proof} $(1)\Leftrightarrow(2)$ Follows from Proposition \ref{pullback-usn}.
$(1)\Rightarrow(3)$: By Proposition \ref{pullback-usn}, $A$ is a $u$-$S$-Noetherian ring. By \cite[Proposition 5.1]{df09}, there is a short exact sequence $0\rightarrow f^{-1}(J)\times \{0\}\rightarrow A\bowtie^fJ \rightarrow f(A)+J\rightarrow 0$ of $A\bowtie^fJ$-modules. Note that any $A\bowtie^fJ$-submodule of $f(A)+J$ is exactly an ideal of $f(A)+J$. Since $p_B(S^\prime)=f(S),$ we conclude that $f(A)+J$ is a $u$-$f(S)$-Noetherian ring by Proposition \ref{s-u-noe-s-exact}.
$(3)\Rightarrow(2)$: Let $f(s)$ be an element in $f(S)$ such that every ideal of $f(A)+J$ is $u$-$f(S)$-Noetherian with respect to $f(s)$. Then for any $A\bowtie^fJ$-submodule $J_0$ of $J$, $J_0$ is an ideal of $f(A)+J$ since every $A\bowtie^fJ$-submodule of $J$ is an ideal of $f(A)+J$. Since $f(A)+J$ is $u$-$f(S)$-Noetherian, there exist $j_1,...,j_k\in J_0$ such that $f(s)J_0\subseteq \langle j_1,...,j_k\rangle (f(A)+J)\subseteq J_0$. Hence we obtain
$$(s,f(s))J_0\subseteq A\bowtie^fJ j_1+...+A\bowtie^fJ j_k\subseteq J_0.$$
Thus $J$ is $u$-$S^\prime$-Noetherian with respect to $(s,f(s)).$
\end{proof}
\section{Cartan-Eilenberg-Bass Theorem for uniformly $S$-Noetherian rings}
It is well known that an $R$-module $E$ is \emph{injective} provided that the induced sequence $0\rightarrow {\rm Hom}_R(C,E)\rightarrow {\rm Hom}_R(B,E)\rightarrow {\rm Hom}_R(A,E)\rightarrow 0$ is exact for any exact sequence $0\rightarrow A\rightarrow B\rightarrow C\rightarrow 0$. The well-known Cartan-Eilenberg-Bass Theorem says that a ring $R$ is Noetherian if and only if any direct sum of injective modules is injective (see \cite[Theorem 3.1.17]{ej11}). In order to obtain the Cartan-Eilenberg-Bass Theorem for uniformly $S$-Noetherian rings, we first introduce the $S$-analogue of injective modules.
\begin{definition} Let $R$ be a ring and $S$ a multiplicative subset of $R$. An $R$-module $E$ is called $u$-$S$-injective $($abbreviates uniformly $S$-injective$)$ provided that the induced sequence $$0\rightarrow {\rm Hom}_R(C,E)\rightarrow {\rm Hom}_R(B,E)\rightarrow {\rm Hom}_R(A,E)\rightarrow 0$$ is $u$-$S$-exact for any $u$-$S$-exact sequence $0\rightarrow A\rightarrow B\rightarrow C\rightarrow 0$.
\end{definition}
\begin{lemma}\label{u-S-tor-ext} Let $R$ be a ring and $S$ a multiplicative subset of $R$. If $T$ is a $u$-$S$-torsion module, then $\frak mathcal{E}xt_R^{n}(T,M)$ and $\frak mathcal{E}xt_R^{n}(M,T)$ are $u$-$S$-torsion for any $R$-module $M$ and any $n\geq 0$.
\end{lemma}
\begin{proof} We only prove ${\rm Ext}_R^{n}(T,M)$ is $u$-$S$-torsion, since the case of ${\rm Ext}_R^{n}(M,T)$ is similar. Let $T$ be a $u$-$S$-torsion module with $sT=0$. If $n=0$, then for any $f\in {\rm Hom}_R(T,M)$, we have $sf(t)=f(st)=0$ for any $t\in T$. Thus $sf=0$ and so $s{\rm Hom}_R(T,M)=0$. Let $0\rightarrow M\rightarrow E\rightarrow \Omega^{-1} (M)\rightarrow0$ be a short exact sequence with $E$ injective and $\Omega^{-1} (M)$ the $1$-st cosyzygy of $M$. Then ${\rm Ext}_R^{1}(T,M)$ is a quotient of ${\rm Hom}_R(T,\Omega^{-1} (M))$ which is $u$-$S$-torsion. Thus ${\rm Ext}_R^{1}(T,M)$ is $u$-$S$-torsion. For $n\geq 2$, we have an isomorphism ${\rm Ext}_R^{n}(T,M)\cong {\rm Ext}_R^{1}(T,\Omega^{-(n-1)}(M))$ where $\Omega^{-(n-1)}(M)$ is the $(n-1)$-th cosyzygy of $M$. Since ${\rm Ext}_R^{1}(T,\Omega^{-(n-1)}(M))$ is $u$-$S$-torsion by induction, ${\rm Ext}_R^{n}(T,M)$ is $u$-$S$-torsion.
\end{proof}
\begin{theorem}\label{s-inj-ext}
Let $R$ be a ring, $S$ a multiplicative subset of $R$ and $E$ an $R$-module. Then the following assertions are equivalent:
\begin{enumerate}
\item $E$ is $u$-$S$-injective;
\item for any short exact sequence $0\rightarrow A\xrightarrow{f} B\xrightarrow{g} C\rightarrow 0$, the induced sequence $0\rightarrow {\rm Hom}_R(C,E)\xrightarrow{g^\ast} {\rm Hom}_R(B,E)\xrightarrow{f^\ast} {\rm Hom}_R(A,E)\rightarrow 0$ is $u$-$S$-exact;
\item ${\rm Ext}_R^1(M,E)$ is $u$-$S$-torsion for any $R$-module $M$;
\item ${\rm Ext}_R^n(M,E)$ is $u$-$S$-torsion for any $R$-module $M$ and $n\geq 1$.
\end{enumerate}
\end{theorem}
\begin{proof} $(1)\Rightarrow(2)$ and $(4)\Rightarrow(3)$: Trivial.
$(2)\Rightarrow(3)$: Let $0\rightarrow L\rightarrow P\rightarrow M\rightarrow 0$ be a short exact sequence with $P$ projective. Then there exists a long exact sequence $0\rightarrow {\rm Hom}_R(M,E)\rightarrow {\rm Hom}_R(P,E)\rightarrow {\rm Hom}_R(L,E)\rightarrow {\rm Ext}_R^1(M,E) \rightarrow 0$. Thus ${\rm Ext}_R^1(M,E)$ is $u$-$S$-torsion by $(2)$.
$(3)\Rightarrow (2)$: Let $0\rightarrow A\xrightarrow{f} B\xrightarrow{g} C\rightarrow 0$ be a short exact sequence. Then we have a long exact sequence $0\rightarrow {\rm Hom}_R(C,E)\xrightarrow{g^\ast} {\rm Hom}_R(B,E)\xrightarrow{f^\ast} {\rm Hom}_R(A,E)\xrightarrow{\delta} {\rm Ext}_R^1(C,E) \rightarrow 0$. By $(3)$, ${\rm Ext}_R^1(C,E)$ is $u$-$S$-torsion, and so $0\rightarrow {\rm Hom}_R(C,E)\xrightarrow{g^\ast} {\rm Hom}_R(B,E)\xrightarrow{f^\ast} {\rm Hom}_R(A,E)\rightarrow 0$ is $u$-$S$-exact.
$(3)\Rightarrow(4)$: Let $M$ be an $R$-module. Denote by $\Omega^{n-1}(M)$ the $(n-1)$-th syzygy of $M$. Then ${\rm Ext}_R^n(M,E)\cong {\rm Ext}_R^1(\Omega^{n-1}(M),E)$ is $u$-$S$-torsion by $(3)$.
$(2)\Rightarrow(1)$: Let $E$ be an $R$-module satisfying $(2)$. Suppose $0\rightarrow A\xrightarrow{f} B\xrightarrow{g} C\rightarrow 0$ is a $u$-$S$-exact sequence. Then there is an exact sequence $B\xrightarrow{g} C\rightarrow T\rightarrow 0 $ where $T={\rm Coker}(g)$ is $u$-$S$-torsion. Then we have an exact sequence $$0\rightarrow {\rm Hom}_R(T,E)\rightarrow {\rm Hom}_R(C,E)\rightarrow {\rm Hom}_R(B,E).$$
By Lemma \ref{u-S-tor-ext}, we have $ {\rm Hom}_R(T,E)$ is $u$-$S$-torsion. So $0\rightarrow {\rm Hom}_R(C,E)\xrightarrow{g^\ast} {\rm Hom}_R(B,E)\xrightarrow{f^\ast} {\rm Hom}_R(A,E)\rightarrow 0$ is $u$-$S$-exact at ${\rm Hom}_R(C,E)$.
There are also two short exact sequences:
\begin{center}
$0\rightarrow {\rm Ker}(f)\xrightarrow{i_{A}} A\xrightarrow{\frak pi_{{\rm Im}(f)}} {\rm Im}(f)\rightarrow 0$ and $0\rightarrow {\rm Im}(f)\xrightarrow{i_B} B\rightarrow {\rm Coker}(f)\rightarrow 0,$
\end{center}
where ${\rm Ker}(f)$ is $u$-$S$-torsion. Consider the induced exact sequences $$0\rightarrow {\rm Hom}_R({\rm Im}(f),E)\xrightarrow{\frak pi_{{\rm Im}(f)}^\ast} {\rm Hom}_R(A,E)\xrightarrow{i_{A}^\ast} {\rm Hom}_R({\rm Ker}(f),E)$$ and
$$0\rightarrow {\rm Hom}_R({\rm Coker}(f),E)\rightarrow {\rm Hom}_R(B,E)\xrightarrow{i_{B}^\ast} {\rm Hom}_R({\rm Im}(f),E).$$ Then ${\rm Im}(i_{A}^\ast)$ and ${\rm Coker}(i_{B}^\ast)$ are all $u$-$S$-torsion.
We have the following pushout diagram:
$$\xymatrix@R=20pt@C=25pt{ & 0\ar[d]&0\ar[d]&&\\
& {\rm Im}(i_{B}^\ast)\ar[d]\ar@{=}[r]^{} &{\rm Im}(i_{B}^\ast)\ar[d]& & \\
0 \ar[r]^{}& {\rm Hom}_R({\rm Im}(f),E)\ar[d]\ar[r]& {\rm Hom}_R(A,E) \ar[r]\ar[d]&{\rm Im}(i_{A}^\ast)\ar[r] \ar@{=}[d] &0\\
0 \ar[r]^{}&{\rm Coker}(i_{B}^\ast)\ar[d]\ar[r]&Y \ar[r]\ar[d]&{\rm Im}(i_{A}^\ast)\ar[r] &0\\
& 0 &0 & &\\}$$
Since ${\rm Im}(i_{A}^\ast)$ and ${\rm Coker}(i_{B}^\ast)$ are all $u$-$S$-torsion, $Y$ is also $u$-$S$-torsion by Lemma \ref{s-exct-tor}. Thus the natural composition $f^\ast: {\rm Hom}_R(B,E)\rightarrow {\rm Im}(i_{B}^\ast)\rightarrow {\rm Hom}_R(A,E)$ is a $u$-$S$-epimorphism. So $0\rightarrow {\rm Hom}_R(C,E)\xrightarrow{g^\ast} {\rm Hom}_R(B,E)\xrightarrow{f^\ast} {\rm Hom}_R(A,E)\rightarrow 0$ is $u$-$S$-exact at ${\rm Hom}_R(A,E)$.
Since the sequence $0\rightarrow A\xrightarrow{f} B\xrightarrow{g} C\rightarrow 0$ is $u$-$S$-exact at $B$ and $C$, there exists $s\in S$ such that $s{\rm Ker}(g)\subseteq {\rm Im}(f)$, $s{\rm Im}(f)\subseteq {\rm Ker}(g)$ and $s{\rm Coker}(g)=0$. We claim that $s^2{\rm Im} (g^\ast)\subseteq {\rm Ker}(f^\ast)$ and $s^2{\rm Ker}(f^\ast)\subseteq {\rm Im} (g^\ast)$. Indeed, consider the following diagram:
$$\xymatrix@R=20pt@C=25pt{
& & E& &\\
0 \ar[r]^{}&A\ar[r]^{f}&B \ar[u]^{h}\ar[r]^{g}&C\ar[r] &0\\}$$
Suppose $h\in {\rm Im} (g^\ast)$. Then there exists $u\in {\rm Hom}_R(C,E)$ such that $h=u\circ g$. Thus for any $a\in A$, $sh\circ f (a)=su\circ g\circ f(a)=u\circ g\circ sf(a)=0$ since $s{\rm Im}(f)\subseteq {\rm Ker}(g)$. So $sh\circ f=0$ and then $s{\rm Im} (g^\ast)\subseteq {\rm Ker}(f^\ast)$. Thus $s^2{\rm Im} (g^\ast)\subseteq {\rm Ker}(f^\ast)$. Now, suppose $h\in {\rm Ker}(f^\ast)$. Then $h\circ f=0$. Thus ${\rm Ker}(h)\supseteq {\rm Im}(f)\supseteq s{\rm Ker}(g)$. So $sh\circ i_{{\rm Ker}(g)}=0$ where $i_{{\rm Ker}(g)}:{\rm Ker}(g)\hookrightarrow B$ is the natural embedding map. There is a well-defined $R$-homomorphism $v:{\rm Im}(g)\rightarrow E$ such that $v\circ \pi_B=sh$, where $\pi_B$ is the natural epimorphism $B\twoheadrightarrow {\rm Im}(g)$. Consider the exact sequence ${\rm Hom}_R({\rm Coker}(g),E)\rightarrow {\rm Hom}_R(C,E)\rightarrow {\rm Hom}_R({\rm Im}(g),E)\rightarrow {\rm Ext}_R^1({\rm Coker}(g),E)$ induced by $0\rightarrow {\rm Im}(g)\rightarrow C\rightarrow {\rm Coker}(g)\rightarrow 0$. Since $s{\rm Hom}_R({\rm Coker}(g),E)=s {\rm Ext}_R^1({\rm Coker}(g),E)=0$, $s{\rm Hom}_R({\rm Im}(g),E)\subseteq i_{{\rm Im}(g)}^\ast({\rm Hom}_R(C,E))$. Thus there is a homomorphism $u:C\rightarrow E$ such that $s^2h=u\circ g$. Then we have $s^2{\rm Ker}(f^\ast)\subseteq {\rm Im} (g^\ast)$. So $0\rightarrow {\rm Hom}_R(C,E)\xrightarrow{g^\ast} {\rm Hom}_R(B,E)\xrightarrow{f^\ast} {\rm Hom}_R(A,E)\rightarrow 0$ is $u$-$S$-exact at ${\rm Hom}_R(B,E)$.
\end{proof}
It follows from Theorem \ref{s-inj-ext} that $u$-$S$-torsion modules and injective modules are $u$-$S$-injective.
\begin{corollary}\label{inj-ust-s-inj}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Suppose $E$ is a $u$-$S$-torsion $R$-module or an injective $R$-module. Then $E$ is $u$-$S$-injective.
\end{corollary}
The following example shows that the condition ``${\rm Ext}_R^1(M,E)$ is $u$-$S$-torsion for any $R$-module $M$'' in Theorem \ref{s-inj-ext} cannot be replaced by ``${\rm Ext}_R^1(R/I,E)$ is $u$-$S$-torsion for any ideal $I$ of $R$''.
\begin{example}\label{uf not-extsion}
Let $R=\mathbb{Z}$ be the ring of integers, $p$ a prime in $\mathbb{Z}$ and $S=\{p^n|n\geq 0\}$. Let $J_p$ be the additive group of all $p$-adic integers $($see \cite{FS15} for example$)$. Then ${\rm Ext}_{R}^1(R/I,J_p)$ is $u$-$S$-torsion for any ideal $I$ of $R$. However, $J_p$ is not $u$-$S$-injective.
\end{example}
\begin{proof}
Let $\langle n\rangle$ be an ideal of $\mathbb{Z}$. Suppose $n=p^km$ with $(p,m)=1$. Then ${\rm Ext}_{\mathbb{Z}}^1(\mathbb{Z}/\langle n\rangle, J_p)\cong J_p/nJ_p\cong \mathbb{Z}/\langle p^k\rangle$ by \cite[Exercise 1.3(10)]{FS15}. So ${\rm Ext}_{\mathbb{Z}}^1(\mathbb{Z}/\langle n\rangle, J_p)$ is $u$-$S$-torsion for any ideal $\langle n\rangle$ of $\mathbb{Z}$. However, $J_p$ is not $u$-$S$-injective. Indeed, let $\mathbb{Z}(p^{\infty})$ be the quasi-cyclic group (see \cite{FS15} for example). Then $\mathbb{Z}(p^{\infty})$ is a divisible group and $J_p\cong{\rm Hom}_{\mathbb{Z}}(\mathbb{Z}(p^{\infty}),\mathbb{Z}(p^{\infty}))$. So
\begin{align*}
&{\rm Ext}_{\mathbb{Z}}^1(\mathbb{Z}(p^{\infty}), J_p) \\
\cong &{\rm Ext}_{\mathbb{Z}}^1(\mathbb{Z}(p^{\infty}), {\rm Hom}_{\mathbb{Z}}(\mathbb{Z}(p^{\infty}),\mathbb{Z}(p^{\infty}))) \\
\cong&{\rm Hom}_{\mathbb{Z}}({\rm Tor}_1^{\mathbb{Z}}(\mathbb{Z}(p^{\infty}),\mathbb{Z}(p^{\infty})),\mathbb{Z}(p^{\infty}))\\
\cong&{\rm Hom}_{\mathbb{Z}}(\mathbb{Z}(p^{\infty}),\mathbb{Z}(p^{\infty}))\cong J_p.
\end{align*}
Note that for any $p^k\in S$, we have $p^kJ_p\not=0$. So $J_p$ is not $u$-$S$-injective.
\end{proof}
\begin{remark}\label{uf not-dprod} It is well known that any direct product of injective modules is injective. However, the direct product of $u$-$S$-injective modules need not be $u$-$S$-injective. Indeed, let $R$ and $S$ be as in Example \ref{uf not-extsion}. Let $\mathbb{Z}/\langle p^k\rangle$ be the cyclic group of order $p^k$ ($k\geq 1$). Then each $\mathbb{Z}/\langle p^k\rangle$ is $u$-$S$-torsion, and thus is $u$-$S$-injective. Let $\mathbb{Q}$ be the rational number group. Then, by \cite[Chapter 9 Theorem 6.2]{FS15}, we have
${\rm Ext}_{\mathbb{Z}}^1(\mathbb{Q}/\mathbb{Z},\prod\limits_{k=1}^\infty \mathbb{Z}/\langle p^k\rangle)\cong \prod\limits_{k=1}^\infty {\rm Ext}_{\mathbb{Z}}^1(\mathbb{Q}/\mathbb{Z}, \mathbb{Z}/\langle p^k\rangle)\cong \prod\limits_{k=1}^\infty \mathbb{Z}/\langle p^k\rangle$ since each $\mathbb{Z}/\langle p^k\rangle$ is a reduced cotorsion group. It is easy to verify that $\prod\limits_{k=1}^\infty \mathbb{Z}/\langle p^k\rangle$ is not $u$-$S$-torsion. So $\prod\limits_{k=1}^\infty \mathbb{Z}/\langle p^k\rangle$ is not $u$-$S$-injective.
\end{remark}
\begin{proposition}\label{s-inj-prop}
Let $R$ be a ring and $S$ a multiplicative subset of $R$. Then the following assertions hold.
\begin{enumerate}
\item Any finite direct sum of $u$-$S$-injective modules is $u$-$S$-injective.
\item Let $0\rightarrow A\xrightarrow{f} B\xrightarrow{g} C\rightarrow 0$ be a $u$-$S$-exact sequence. If $A$ and $C$ are $u$-$S$-injective modules, so is $B$.
\item Let $A\rightarrow B$ be a $u$-$S$-isomorphism. If one of $A$ and $B$ is $u$-$S$-injective, so is the other.
\item Let $0\rightarrow A\xrightarrow{f} B\xrightarrow{g} C\rightarrow 0$ be a $u$-$S$-exact sequence. If $A$ and $B$ are $u$-$S$-injective, then $C$ is $u$-$S$-injective.
\end{enumerate}
\end{proposition}
\begin{proof}
$(1)$ Suppose $E_1,...,E_n$ are $u$-$S$-injective modules. Let $M$ be an $R$-module. Then there exists $s_i\in S$ such that $s_i{\rm Ext}_R^1(M,E_i)=0$ for each $i=1,...,n$. Set $s=s_1...s_n$. Then $s{\rm Ext}_R^1(M,\bigoplus\limits_{i=1}^n E_i)\cong \bigoplus\limits_{i=1}^ns{\rm Ext}_R^1(M, E_i)=0$. Thus $\bigoplus\limits_{i=1}^n E_i$ is $u$-$S$-injective.
$(2)$ Suppose $A$ and $C$ are $u$-$S$-injective modules and $0\rightarrow A\xrightarrow{f} B\xrightarrow{g} C\rightarrow 0$ is a $u$-$S$-exact sequence. Then there are three short exact sequences: $0\rightarrow {\rm Ker}(f)\rightarrow A\rightarrow {\rm Im}(f)\rightarrow 0$, $0\rightarrow {\rm Ker}(g)\rightarrow B\rightarrow {\rm Im}(g)\rightarrow 0$ and $0\rightarrow {\rm Im}(g)\rightarrow C\rightarrow {\rm Coker}(g)\rightarrow 0$. Then ${\rm Ker}(f)$ and ${\rm Coker}(g)$ are all $u$-$S$-torsion and $s{\rm Ker}(g)\subseteq {\rm Im}(f)$ and $s{\rm Im}(f)\subseteq {\rm Ker}(g)$ for some $s\in S$. Let $M$ be an $R$-module. Then $$ {\rm Ext}_R^1(M,A)\rightarrow {\rm Ext}_R^1(M,{\rm Im}(f))\rightarrow {\rm Ext}_R^2(M,{\rm Ker}(f))$$ is exact. Since ${\rm Ker}(f)$ is $u$-$S$-torsion and $A$ is $u$-$S$-injective, ${\rm Ext}_R^1(M,{\rm Im}(f))$ is $u$-$S$-torsion.
Note $${\rm Hom}_R(M,{\rm Coker}(g))\rightarrow {\rm Ext}_R^1(M,{\rm Im}(g))\rightarrow {\rm Ext}_R^1(M,C)$$ is exact. Since ${\rm Coker}(g)$ is $u$-$S$-torsion, ${\rm Hom}_R(M,{\rm Coker}(g))$ is $u$-$S$-torsion by Lemma \ref{u-S-tor-ext}. Thus ${\rm Ext}_R^1(M,{\rm Im}(g))$ is $u$-$S$-torsion as ${\rm Ext}_R^1(M,C)$ is $u$-$S$-torsion. We also note that
$${\rm Ext}_R^1(M,{\rm Ker}(g))\rightarrow {\rm Ext}_R^1(M,B) \rightarrow {\rm Ext}_R^1(M,{\rm Im}(g))$$ is exact. Thus to verify that ${\rm Ext}_R^1(M,B)$ is $u$-$S$-torsion, we just need to show ${\rm Ext}_R^1(M,{\rm Ker}(g))$ is $u$-$S$-torsion. Denote $N= {\rm Ker}(g)+{\rm Im}(f)$.
Consider the following two exact sequences
\begin{center}
$0\rightarrow {\rm Ker}(g)\rightarrow N\rightarrow N/{\rm Ker}(g)\rightarrow 0$ and $0\rightarrow {\rm Im}(f)\rightarrow N\rightarrow N/{\rm Im}(f)\rightarrow 0.$
\end{center}
Then it is easy to verify $N/{\rm Ker}(g)$ and $N/{\rm Im}(f)$ are all $u$-$S$-torsion. Consider the following induced two exact sequences
$${\rm Hom}_R(M,N/{\rm Im}(f))\rightarrow {\rm Ext}_R^1(M,{\rm Ker}(g)) \rightarrow {\rm Ext}_R^1(M,N) \rightarrow {\rm Ext}_R^1(M, N/{\rm Im}(f)),$$ $${\rm Hom}_R(M,N/{\rm Ker}(g)) \rightarrow {\rm Ext}_R^1(M,{\rm Im}(f)) \rightarrow {\rm Ext}_R^1(M,N) \rightarrow {\rm Ext}_R^1(M, N/{\rm Ker}(g)).$$ Thus ${\rm Ext}_R^1(M,{\rm Ker}(g))$ is $u$-$S$-torsion if and only if ${\rm Ext}_R^1(M,{\rm Im}(f))$ is $u$-$S$-torsion. Consequently, $B$ is $u$-$S$-injective since ${\rm Ext}_R^1(M,{\rm Im}(f))$ is $u$-$S$-torsion.
$(3)$ Considering the $u$-$S$-exact sequences $0\rightarrow A\rightarrow B\rightarrow 0\rightarrow 0$ and $0 \rightarrow 0\rightarrow A\rightarrow B\rightarrow 0$, we have $A$ is $u$-$S$-injective if and only if $B$ is $u$-$S$-injective by $(2)$.
(4) Suppose $0\rightarrow A\xrightarrow{f} B\xrightarrow{g} C\rightarrow 0$ is a $u$-$S$-exact sequence. Then, as in the proof of $(2)$, there are three short exact sequences: $0\rightarrow {\rm Ker}(f)\rightarrow A\rightarrow {\rm Im}(f)\rightarrow 0$, $0\rightarrow {\rm Ker}(g)\rightarrow B\rightarrow {\rm Im}(g)\rightarrow 0$ and $0\rightarrow {\rm Im}(g)\rightarrow C\rightarrow {\rm Coker}(g)\rightarrow 0$. Then ${\rm Ker}(f)$ and ${\rm Coker}(g)$ are all $u$-$S$-torsion and $s{\rm Ker}(g)\subseteq {\rm Im}(f)$ and $s{\rm Im}(f)\subseteq {\rm Ker}(g)$ for some $s\in S$.
Let $M$ be an $R$-module. Note that $${\rm Hom}_R(M,{\rm Coker}(g))\rightarrow{\rm Ext}_R^1(M,{\rm Im}(g))\rightarrow{\rm Ext}_R^1(M,C)\rightarrow {\rm Ext}_R^1(M,{\rm Coker}(g)) $$ is exact. Since ${\rm Coker}(g)$ is $u$-$S$-torsion, we have ${\rm Hom}_R(M,{\rm Coker}(g))$ and ${\rm Ext}_R^1(M,{\rm Coker}(g))$ are $u$-$S$-torsion by Lemma \ref{u-S-tor-ext}. We just need to verify ${\rm Ext}_R^1(M,{\rm Im}(g))$ is $u$-$S$-torsion. Note that $${\rm Ext}_R^1(M,B)\rightarrow {\rm Ext}_R^1(M,{\rm Im}(g)) \rightarrow {\rm Ext}_R^2(M,{\rm Ker}(g))$$ is exact. Since ${\rm Ext}_R^1(M,B)$ is $u$-$S$-torsion, we just need to verify that ${\rm Ext}_R^2(M,{\rm Ker}(g))$ is $u$-$S$-torsion. By the proof of $(2)$, we just need to show that ${\rm Ext}_R^2(M,{\rm Im}(f))$ is $u$-$S$-torsion. Note that $$ {\rm Ext}_R^2(M,A)\rightarrow {\rm Ext}_R^2(M,{\rm Im}(f))\rightarrow {\rm Ext}_R^3(M,{\rm Ker}(f))$$ is exact. Since ${\rm Ext}_R^2(M,A)$ and ${\rm Ext}_R^3(M,{\rm Ker}(f))$ are $u$-$S$-torsion, we have ${\rm Ext}_R^2(M,{\rm Im}(f))$ is $u$-$S$-torsion. So $C$ is $u$-$S$-injective.
\end{proof}
Let $\frak p$ be a prime ideal of $R$. We say an $R$-module $E$ is \emph{$u$-$\frak p$-injective} shortly provided that $E$ is $u$-$(R\setminus\frak p)$-injective. The next result gives a local characterization of injective modules.
\begin{proposition}\label{s-injective-loc-char}
Let $R$ be a ring and $E$ an $R$-module. Then the following statements are equivalent:
\begin{enumerate}
\item $E$ is injective;
\item $E$ is $u$-$\frak p$-injective for any $\frak p\in {\rm Spec}(R)$;
\item $E$ is $u$-$\frak m$-injective for any $\frak m\in {\rm Max}(R)$.
\end{enumerate}
\end{proposition}
\begin{proof} $(1)\Rightarrow (2)$ It follows from Corollary \ref{inj-ust-s-inj}.
$(2)\Rightarrow (3):$ Trivial.
$(3)\Rightarrow (1):$ Let $M$ be an $R$-module. Then ${\rm Ext}_R^1(M,E)$ is $u$-$(R\setminus\frak m)$-torsion. Thus for any $\frak m\in {\rm Max}(R)$, there exists $s_{\frak m}\in R\setminus\frak m$ such that $s_{\frak m}{\rm Ext}_R^1(M,E)=0$. Since the ideal generated by all $s_{\frak m}$ is $R$, ${\rm Ext}_R^1(M,E)=0$. So $E$ is injective.
\end{proof}
We say an $R$-module $M$ is $S$-divisible if $M=sM$ for any $s\in S$. The well known Baer's Criterion states that an $R$-module $E$ is injective if and only if ${\rm Ext}_R^1(R/I,E)=0$ for any ideal $I$ of $R$. The next result gives a uniformly $S$-version of Baer's Criterion.
\begin{proposition}\label{s-inj-baer}{\bf (Baer's Criterion for $u$-$S$-injective modules)}
Let $R$ be a ring, $S$ a multiplicative subset of $R$ and $E$ an $R$-module. If $E$ is a $u$-$S$-injective module, then there exists an element $s\in S$ such that $s{\rm Ext}_R^1(R/I,E)=0$ for any ideal $I$ of $R$. Moreover, if $E=sE$, then the converse also holds.
\end{proposition}
\begin{proof}
If $E$ is a $u$-$S$-injective module, then ${\rm Ext}_R^1(\bigoplus\limits_{I\unlhd R}R/I,E)$ is $u$-$S$-torsion by Theorem \ref{s-inj-ext}. Thus there is an element $s\in S$ such that $s{\rm Ext}_R^1(\bigoplus\limits_{I\unlhd R}R/I,E)=s\prod\limits_{I\unlhd R}{\rm Ext}_R^1(R/I,E)=0$. So $s{\rm Ext}_R^1(R/I,E)=0$ for any ideal $I$ of $R$.
Suppose $E$ is an $S$-divisible $R$-module. Let $B$ be an $R$-module, $A$ a submodule of $B$ and $s$ an element in $S$ satisfying the necessity. Let $f:A\rightarrow E$ be an $R$-homomorphism. Set
\begin{center}
$\Gamma=\{(C,d)|C$ is a submodule of $B$ containing $A$ and $d|_A=sf\}.$
\end{center}
Since $(A,sf)\in \Gamma$, $\Gamma$ is nonempty. Set $(C_1,d_1)\leq (C_2,d_2)$ if and only if $C_1\subseteq C_2$ and $d_2|_{C_1}=d_1$. Then $\Gamma$ is a partial order. For any chain $(C_j,d_j)$, let $C_0=\bigcup\limits_{j}C_j$ and $d_0(c)=d_j(c)$ if $c\in C_j$. Then $(C_0,d_0)$ is the upper bound of the chain $(C_j,d_j)$. By Zorn's Lemma, there is a maximal element $(C,d)$ in $\Gamma$.
We claim that $C=B$. On the contrary, let $x\in B-C$. Denote $I=\{r\in R|rx\in C\}$. Then $I$ is an ideal of $R$. Since $E=sE$, there exists a homomorphism $h:I\rightarrow E$ such that $sh(r)=d(rx)$. Then there is an $R$-homomorphism $g: R\rightarrow E$ such that $g(r)=sh(r)=d(rx)$ for any $r\in I$. Let $C_1=C+Rx$ and $d_1(c+rx)=d(c)+g(r)$ where $c\in C$ and $r\in R$. If $c+rx=0$, then $r\in I$ and thus $d(c)+g(r)=d(c)+sh(r)=d(c)+d(rx)=d(c+rx)=0$. Hence $d_1$ is a well-defined homomorphism such that $d_1|_A=sf$. So $(C_1,d_1)\in \Gamma$. However, $(C_1,d_1)> (C,d)$ which contradicts the maximality of $(C,d)$.
\end{proof}
Now, we give the main result of this section.
\begin{theorem}\label{s-injective-ext} {\bf (Cartan-Eilenberg-Bass Theorem for uniformly $S$-Noetherian rings)}
Let $R$ be a ring, $S$ a regular multiplicative subset of $R$. Then the following assertions are equivalent:
\begin{enumerate}
\item $R$ is $u$-$S$-Noetherian;
\item any direct sum of injective modules is $u$-$S$-injective;
\item any direct union of injective modules is $u$-$S$-injective.
\end{enumerate}
\end{theorem}
\begin{proof} $(1)\Rightarrow(3):$ Let $\{E_i,f_{i,j}\}_{i<j\in \Lambda}$ be a direct system of injective modules where each $f_{i,j}$ is the embedding map. Let $\lim\limits_{\longrightarrow}{E_i}$ be its direct limit. Let $s$ be an element in $S$ such that for any ideal $I$ of $R$ there exists a finitely generated sub-ideal $K$ of $I$ such that $sI\subseteq K$. Considering the short exact sequence $0\rightarrow I/K\rightarrow R/K\rightarrow R/I\rightarrow 0$, we have the following long exact sequence: $${\rm Hom}_R(I/K,\lim\limits_{\longrightarrow}{E_i})\rightarrow {\rm Ext}_R^1(R/I,\lim\limits_{\longrightarrow}{E_i})\rightarrow {\rm Ext}_R^1(R/K,\lim\limits_{\longrightarrow}{E_i})\rightarrow {\rm Ext}_R^1(I/K,\lim\limits_{\longrightarrow}{E_i}).$$ Since $R/K$ is finitely presented, we have ${\rm Ext}_R^1(R/K,\lim\limits_{\longrightarrow}{E_i})\cong \lim\limits_{\longrightarrow}{\rm Ext}_R^1(R/K,{E_i})=0$ by the Five Lemma and \cite[Theorem 24.10]{w}. By the proof of Lemma \ref{u-S-tor-ext}, one can show that $s{\rm Hom}_R(I/K,\lim\limits_{\longrightarrow}{E_i})=0$. Thus $s{\rm Ext}_R^1(R/I,\lim\limits_{\longrightarrow}{E_i})=0$ for any ideal $I$ of $R$. Since $S$ is composed of non-zero-divisors, each $E_i$ is $S$-divisible by the proof of \cite[Theorem 2.4.5]{fk16}. Thus $\lim\limits_{\longrightarrow}{E_i}$ is also $S$-divisible. So $\lim\limits_{\longrightarrow}{E_i}$ is $u$-$S$-injective by Proposition \ref{s-inj-baer}.
$(3)\Rightarrow(2):$ Trivial.
$(2)\Rightarrow(1):$ Assume $R$ is not a $u$-$S$-Noetherian ring. By Theorem \ref{u-s-noe-char}, for any $s\in S$, there exists a strictly ascending chain $I_1\subset I_2\subset ...$ of ideals of $R$ such that for any $k\geq 1$ there is $n\geq k$ satisfying $sI_n\not\subseteq I_k$. Set $I=\bigcup\limits_{i=1}^{\infty}I_i$. Then $I$ is an ideal of $R$ and $I/I_i\not=0$ for any $i\geq 1$. Denote by $E(I/I_i)$ the injective envelope of $I/I_i$. Let $f_i$ be the natural composition $I\twoheadrightarrow I/I_i\rightarrowtail E(I/I_i)$. Since $sI_n\not\subseteq I_i$ for any $i\geq 1$ and some $n\geq i$, we have $sf_i\not=0$ for any $i\geq 1$. We define $f:I\rightarrow \bigoplus_{i=1}^{\infty}E(I/I_i)$ by $f(a)=(f_i(a))$. Note that for each $a\in I$, we have $a\in I_i$ for some $i\geq 1$. So $f$ is a well-defined $R$-homomorphism. Let $\pi_i:\bigoplus_{i=1}^{\infty}E(I/I_i)\twoheadrightarrow E(I/I_i)$ be the projection. The embedding map $i: I\rightarrow R$ induces an exact sequence $${\rm Hom}_R(R,\bigoplus_{i=1}^{\infty}E(I/I_i))\xrightarrow{i^{\ast}} {\rm Hom}_R(I,\bigoplus_{i=1}^{\infty}E(I/I_i))\xrightarrow{\delta} {\rm Ext}_R^1(R/I,\bigoplus_{i=1}^{\infty}E(I/I_i))\rightarrow 0.$$Since $\bigoplus_{i=1}^{\infty}E(I/I_i)$ is $u$-$S$-injective, there is an $s\in S$ such that $$s{\rm Ext}_R^1(R/I,\bigoplus_{i=1}^{\infty}E(I/I_i))=0.$$
Thus there exists a homomorphism $g:R\rightarrow \bigoplus_{i=1}^{\infty}E(I/I_i)$ such that $sf=i^{\ast}(g)$.
Thus for sufficiently large $i$, we have $s\pi_if(a)=\pi_ii^{\ast}(g)(a)=a\pi_ii^{\ast}(g)(1)=0$ for any $a\in I$.
So for such $i$, $sf_i=s\pi_if:I\rightarrow E(I/I_i)$ is a zero homomorphism, which is a contradiction. Hence $R$ is $u$-$S$-Noetherian.
\end{proof}
\end{document} |
\begin{document}
\title {Elliptic functions from $F(\frac{1}{3}, \frac{2}{3} ; \frac{1}{2} ; \bullet)$ }
\date{}
\author[P.L. Robinson]{P.L. Robinson}
\address{Department of Mathematics \\ University of Florida \\ Gainesville FL 32611 USA }
\email[]{paulr@ufl.edu}
\subjclass{} \keywords{}
\begin{abstract}
Li-Chien Shen developed a family of elliptic functions from the hypergeometric function $_2F_1(\frac{1}{3}, \frac{2}{3} ; \frac{1}{2} ; \bullet)$. We comment on this development, offering some new proofs.
\end{abstract}
\maketitle
\medbreak
\medbreak
Shen [1] has presented an interesting construction of elliptic functions based on the hypergeometric function $F(\frac{1}{3}, \frac{2}{3} ; \frac{1}{2} ; \bullet)$. We open our commentary with a brief review of his construction, making some minor notational changes (most of which amount to the dropping of suffixes).
\medbreak
Fix $0 < k < 1$ and write
$$u = \int_0^{\sin \phi} F(\frac{1}{3}, \frac{2}{3} ; \frac{1}{2} ; k^2 t^2) \, \frac{{\rm d} t}{\sqrt{1 - t^2}}$$
so that
$$\frac{{\rm d} u}{{\rm d} \phi} = F(\frac{1}{3}, \frac{2}{3} ; \frac{1}{2} ; k^2 \sin^2 \phi).$$
\medbreak
\noindent
In a (connected) neighbourhood of the origin, the relation $\phi \mapsto u$ inverts to $u \mapsto \phi$ fixing $0$. Define functions $s, c, d$ by
$$s(u) = \sin \phi(u)$$
$$c(u) = \cos \phi(u)$$
and
$$d(u) = \phi\,'(u) = 1/F(\frac{1}{3}, \frac{2}{3} ; \frac{1}{2} ; k^2 s^2 (u)).$$
Plainly, $s$ and $c$ satisfy the Pythagorean relation
$$s^2 + c^2 = 1.$$
Shen uses the hypergeometric identity
$$F(\frac{1}{3}, \frac{2}{3} ; \frac{1}{2} ; \sin^2 z) = \frac{\cos \tfrac{1}{3} z}{\cos z}$$
and the trigonometric triplication formula
$$4 \cos^3 \tfrac{1}{3} z - 3 \cos \tfrac{1}{3} z = \cos z$$
to show that $s$ and $d$ satisfy the relation
$$d^3 + 3 d^2 = 4(1 - k^2 s^2)$$
or equivalently
$$4 k^2 s^2 = (1 - d) (2 + d)^2.$$
By differentiation,
$$s\,' = c \, d$$
$$c\,' = - s \,d$$
and
$$d\,' = - \frac{8}{3} k^2 \frac{s \, c}{2 + d}.$$
By means of the $(s, d)$ and $(s, c)$ relations, it follows that $d$ satisfies the differential equation
$$(d\,')^2 = \frac{4}{9} (1 - d) (d^3 + 3 d^2 + 4 k^2 - 4).$$
\medbreak
\begin{theorem} \label{d}
The function $d$ is $1 - \frac{4}{9} k^2 (\wp + \tfrac{1}{3})^{-1}$ where $\wp$ is the Weierstrass function with invariants
$$g_2 = \frac{4}{27} (9 - 8 k^2)$$
and
$$g_3 = \frac{8}{27^2} (8 k^4 - 36 k^2 + 27).$$
\end{theorem}
\begin{proof}
Of course, we mean that $d = \phi'$ extends to the stated rational function of $\wp$. In [1] this is proved by appealing to a standard formula for the integral of $f^{-1/2}$ when $f$ is a quartic. Instead, we may work with the differential equation itself, as follows. First, the form of the differential equation
$$(d\,')^2 = \frac{4}{9} (1 - d) (d^3 + 3 d^2 + 4 k^2 - 4)$$
suggests the substitution $r = (1 - d)^{-1}$: this has the effect of removing the explicit linear factor, thus
$$(r\,')^2 = \frac{4 k^2}{9} \Big(4 r^3 - \frac{9}{k^2} r^2 + \frac{6}{k^2} r - \frac{1}{k^2}\Big).$$
Next, the rescaling $q = \frac{4 k^2}{9} r$ leads to
$$(q\,')^2 = 4 q^3 - 4 q^2 + \frac{2^5}{3^3} k^2 q - \frac{2^6}{3^6} k^4$$
and the shift $p = q - \frac{1}{3}$ removes the quadratic term on the right side, yielding
$$(p\,')^2 = 4 p^3 - g_2 p - g_3$$
with $g_2$ and $g_3$ as stated in the theorem. Finally, the initial condition $d(0) = 1$ gives $p$ a pole at $0$; thus $p$ is the Weierstrass function $\wp$ and so $d$ is as claimed.
\end{proof}
\medbreak
We remark that $\wp$ has discriminant
$$g_2^3 - 27 g_3^2 = \frac{16^3}{27^3} k^6 (1 - k^2).$$
\bigbreak
Now, recall that $0 < k < 1$. It follows that the Weierstrass function $\wp$ has real invariants and positive discriminant. Consequently, the period lattice of $\wp$ is rectangular; let $2 K$ and $2 {\rm i} K'$ be fundamental periods, with $K > 0$ and $K' > 0$. We may take the period parallelogram to have vertices $0, 2 K, 2 K + 2 {\rm i} K', 2 {\rm i} K'$ (in counter-clockwise order); alternatively, we may take it to have vertices $\pm K \pm {\rm i} K'$ (with all four choices of sign). The values of $\wp$ around the rectangle $0 \to K \to K + {\rm i} K' \to {\rm i} K' \to 0$ strictly decrease from $+ \infty$ to $- \infty$; moreover, the extreme `midpoint values' satisfy $\wp(K) > 0 > \wp({\rm i} K')$. In particular, $\wp$ is strictly negative along the purely imaginary interval $(0, {\rm i} K')$.
\medbreak
As the Weierstrass function $\wp$ is elliptic of order two, with $2 K$ and $2 {\rm i} K'$ as fundamental periods, the same is true of the function
$$d = 1 - \tfrac{4}{9} k^2 (\wp + \tfrac{1}{3})^{-1}.$$
\medbreak
One of the most substantial efforts undertaken in [1] is the task of locating the poles of $d$. As a preliminary step, in [1] Lemma 3.1 it is shown (using conformal mapping theory) that $d$ has a pole in the interval $(0, {\rm i} K')$; as $d$ is even, it also has a pole in $(- {\rm i} K', 0)$. The precise location of the poles of $d$ is announced in [1] Lemma 3.2; the proof of this Lemma is prepared in [1] Section 4 and takes up essentially the whole of [1] Section 5. The approach taken in [1] rests heavily on the theory of theta functions and does more than just locate the poles of $d$. Our approach to locating the poles of $d$ will be more direct: we work with $\wp$ alone, without the need for theta functions. The following is our version of [1] Lemma 3.2.
\medbreak
\begin{theorem} \label{poles}
The elliptic function $d$ has a pole at $\frac{2}{3} {\rm i} K'$.
\end{theorem}
\begin{proof}
Theorem \ref{d} makes it plain that $d$ has a pole precisely where $\wp = - 1/3$. For convenience, write $a = \frac{2}{3} {\rm i} K'$; our task is to establish that $\wp(a) = -1/3$. Recall the Weierstrassian duplication formula
$$\wp(2 a) + 2 \, \wp(a) = \frac{1}{4} \Big\{ \frac{\wp''(a)}{\wp'(a)}\Big\}^2$$
where
$$\wp''(a) = 6 \, \wp(a)^2 - \frac{1}{2} \, g_2$$
and
$$\wp'(a)^2 = 4 \, \wp(a)^2 - g_2 \, \wp(a) - g_3.$$
Here,
$$2 a = \frac{4}{3} {\rm i} K' \equiv - \frac{2}{3} {\rm i} K' = - a$$
where the middle congruence is modulo the period $2 {\rm i} K'$ of $\wp$; consequently,
$$\wp(2 a) = \wp (- a) = \wp (a)$$
because $\wp$ is even. The left side of the duplication formula thus reduces to $3 \, \wp(a)$ and we deduce that $b = \wp(a)$ satisfies
$$3 b = \frac{1}{4} \frac{(6 b^2 - \frac{1}{2} g_2)^2}{4 b^3 - g_2 b - g_3}.$$
Otherwise said, $\wp(a)$ is a zero of the quartic $f$ defined by
$$f(z) = 12 z (4 z^3 - g_2 z - g_3) - (6 z^2 - \tfrac{1}{2} g_2)^2.$$
For convenience we work with
$$\frac{27}{4} f(\frac{w}{3}) = w^4 - \frac{2}{3} (9 - 8 k^2) w^2 - \frac{8}{27} (8 k^4 - 36 k^2 + 27) w - \frac{1}{27} (9 - 8 k^2)^2$$
which factorizes as
$$\frac{27}{4} f(\frac{w}{3}) = (w + 1) \Big(w^3 - w^2 + \frac{1}{3} (16 k^2 - 15) w - \frac{1}{27} (9 - 8 k^2)^2\Big).$$
Here, the cubic factor has discriminant
$$- \frac{4096}{27} k^4 (1 - k^2)^2 < 0$$
and so has just one real zero, which is clearly positive. Thus, $f$ has four zeros: a conjugate pair of non-real zeros, a positive zero and $-1/3$.
As the values of $\wp$ along $(0, {\rm i} K')$ are strictly negative, it follows that $\wp(a) = - 1/3$ as claimed.
\end{proof}
\medbreak
As an even function, $d$ also has a pole at $- \frac{2}{3} {\rm i} K'$. Both of the poles $\pm \frac{2}{3} {\rm i} K'$ lie in the period parallelogram with vertices $\pm K \pm {\rm i} K'$ and $d$ has order two, so each pole is simple and the accounting of poles (modulo periods) is complete; of course, this may be verified otherwise.
\medbreak
We close our commentary with a couple of remarks.
\medbreak
In [1] it is mentioned that the squares $s^2$ and $c^2$ are elliptic: as $d$ is elliptic, these facts follow in turn from the $(s, d)$ relation $4 k^2 s^2 = (1 - d) (2 + d)^2$ and the $(s, c)$ relation $c^2 = 1 - s^2$. As $d$ has simple poles, it follows that the poles of $s^2$ and $c^2$ are triple; thus, $s$ and $c$ themselves are not elliptic, as is also mentioned in [1]. Beyond this, the product $s \, c$ is elliptic with triple poles: indeed,
$$s\, c = - \frac{3}{8 k^2} (2 + d) d\,' = - \frac{3}{16 k^2} \{ (d + 2)^2\}'.$$
\medbreak
\section*{}
\bigbreak
\begin{center}
{\small R}{\footnotesize EFERENCES}
\end{center}
\medbreak
[1] Li-Chien Shen, {\it On the theory of elliptic functions based on $_2F_1(\frac{1}{3}, \frac{2}{3} ; \frac{1}{2} ; z)$}, Transactions of the American Mathematical Society {\bf 357} (2004) 2043-2058.
\medbreak
\end{document} |
\begin{document}
\title{Greenberger-Horne-Zeilinger-like proof of Bell's theorem
involving observers who do not share a reference frame}
\author{Ad\'{a}n Cabello}
\email{adan@us.es} \affiliation{Departamento de F\'{\i}sica
Aplicada II, Universidad de Sevilla, 41012 Sevilla, Spain}
\date{\today}
\begin{abstract}
Vaidman described how a team of three players, each of them
isolated in a remote booth, could use a three-qubit
Greenberger-Horne-Zeilinger state to always win a game which would
be impossible to always win without quantum resources. However,
Vaidman's method requires all three players to share a common
reference frame; it does not work if the adversary is allowed to
disorientate one player. Here we show how to always win the game,
even if the players do not share any reference frame. The
introduced method uses a 12-qubit state which is invariant under
any transformation $R_a \otimes R_b \otimes R_c$ (where $R_a = U_a
\otimes U_a \otimes U_a \otimes U_a$, where $U_j$ is a unitary operation
on a single qubit) and requires only single-qubit measurements. A
number of further applications of this 12-qubit state are
described.
\end{abstract}
\pacs{03.65.Ud,
03.65.Ta}
\maketitle
\section{Introduction}
\label{Sec1}
In 1991, after months of patient ``work'' and based on a study of
$20\,000$ events, a gang of players reached an amazing conclusion:
in eight roulette wheels of the Gran Casino of Madrid, six numbers
($1$ and its two neighbors, $20$ and $33$, and the opposite number
in the roulette wheel, $4$, and its two neighbors, $19$ and $21$)
occurred with an unexpectedly high frequency (assuming that each
of the $37$ numbers of the roulette wheel appears with the same
frequency), while four numbers ($11$, $12$, $28$, and $36$) rarely
occurred. The gang won a large amount of money by betting in these
roulette wheels. The casino never realized where the problem was,
never understood the ``method'' used by the gang but, after many
attempts, found its own method to defeat the gang: the casino
started to regularly exchange the pieces of the roulette wheels
and switch the numbers' positions. This altered the roulette
wheels' original ``defects'' and the gang stopped
winning~\cite{GG03}. The moral is that any winning strategy
usually has an antidote.
In 1999, Vaidman~\cite{Vaidman99} converted
Mermin's~\cite{Mermin90a,Mermin90b} version of the proof of Bell's
theorem without inequalities discovered by Greenberger, Horne, and
Zeilinger (GHZ)~\cite{GHZ89,GHZ90,GHSZ90} into a game involving a
team (a gang) of three players, each of them completely isolated
in a booth, and an opponent (a casino). Under some assumptions,
and using only classical resources, the maximum probability for
the team to win Vaidman's game is 75\% (thus a casino makes a profit
by exploiting the remaining 25\%). Thanks to the fact that rules
of the game do not forbid the players to share qubits prepared in
some entangled state, there is a method which allows them to
always win the game. However, there is a simple manipulation that
nullifies the quantum advantage. A hidden assumption of the method
is that all three players share a common reference frame. If the
casino disorientates one of the players so that all three of them
do not share a reference frame, then the advantage of the method
is lost. The term ``unspeakable information'' was coined by Peres
and Scudo~\cite{PS02} to designate information that cannot be
represented by a sequence of discrete symbols, such as a direction
in space or a reference frame. In this paper we show that there is
a method to always win Vaidman's game without it being necessary
that the players share unspeakable information.
In Sec.~\ref{Sec2} we review the rules of Vaidman's game and the
original quantum method for always winning. In Sec.~\ref{Sec3} we
propose a quantum method for always winning, even if the players
do not share any reference frame. This method requires more
qubits, and thus one might think that it must require collective
measurements on several qubits, instead of single-qubit
measurements, as in the original method; in Sec.~\ref{Sec4} we
shall see that this is not the case. In Sec.~\ref{Sec5} we show
other applications of the method.
\section{Vaidman's game}
\label{Sec2}
\subsection{Rules}
Vaidman proposed the following game~\cite{Vaidman99}. Consider a
team of three players, who are allowed to agree on a common
strategy and make any preparation before they are taken to three
remote and isolated booths. Then, each player is asked one of the
two possible questions: ``What is $Z$?'' or ``What is $X$?'' Each
player must give an answer which is limited to one of only two
possibilities:~``$0$'' or~``$1$.'' One of the rules of the game is
that either all three players are asked the~$Z$ question or only
one player is asked the~$Z$ question and the other two are asked
the~$X$ question. The team wins if the number of~$0$ answers is
odd (one or three) in the case of three~$Z$~questions, and is even
(zero or two) in the case of one~$Z$ and two~$X$~questions.
Assuming that the four possible combinations of questions (i.e.,
$Z_1,Z_2,Z_3$; $Z_1,X_2,X_3$; $X_1,Z_2,X_3$; and~$X_1,X_2,Z_3$)
are asked with the same frequency, no classical protocol allows
the players to win the game in more than~75\% of the runs. For
instance, a simple strategy that allows them to win in 75\% of
the runs is that each player always answers~$1$ to the~$Z$
question and~$0$ to the~$X$ question. However, quantum
mechanics provides a method to always win the game.
\subsection{GHZ-assisted quantum always winning strategy}
The method for always winning is the following. Before entering
the isolated booths, the players prepare a large number of
three-qubit systems in the GHZ
state~\cite{Mermin90a,Mermin90b,GHZ89,GHZ90,GHSZ90,Svetlichny87}
\begin{equation}
|{\rm GHZ}\rangle = {1 \over \sqrt{2}}
(|y_0,y_0,y_0\rangle+|y_1,y_1,y_1\rangle). \label{GHZ03}
\end{equation}
Here $|y_0,y_0,y_0\rangle = |y_0\rangle \otimes |y_0\rangle
\otimes |y_0\rangle$, where $|y_0\rangle = {1 \over \sqrt{2}}
(|z_0\rangle + i|z_1\rangle)$ and $|y_1\rangle = {1 \over
\sqrt{2}} (|z_0\rangle - i|z_1\rangle)$, where $|z_0\rangle =
\left(
\begin{array}{c} 1 \\ 0
\end{array} \right)$ and $|z_1\rangle = \left( \begin{array}{c} 0 \\ 1
\end{array} \right)$.
Then, for each three-qubit system, each of the players takes one
of the qubits with him. In case a player is asked ``What is
$Z$?,'' he performs a measurement on his qubit of the observable
represented by
\begin{equation}
Z=|z_0\rangle \langle z_0|-|z_1\rangle \langle z_1|,
\end{equation}
and gives the answer~$0$, if the outcome corresponds
to~$|z_0\rangle$, or the answer~$1$, if the outcome corresponds
to~$|z_1\rangle$.
In case a player is asked ``What is~$X$?,'' he performs a
measurement of the observable represented by
\begin{equation}
X=|x_0\rangle \langle x_0|-|x_1\rangle \langle x_1|,
\end{equation}
where~$|x_0\rangle = {1 \over \sqrt{2}} (|z_0\rangle +
|z_1\rangle)$ and $|x_1\rangle = {1 \over \sqrt{2}} (|z_0\rangle -
|z_1\rangle)$, and gives the answer~$0$, if the outcome
corresponds to~$|x_0\rangle$, or the answer~$1$, if the outcome
corresponds to~$|x_1\rangle$.
The protocol described above allows the team to always win the
game, because the state defined in Eq.~(\ref{GHZ03}) can also be
expressed in the following four forms:
\begin{eqnarray}
|{\rm GHZ}\rangle & = & {1 \over 2} (|z_0,z_0,z_0\rangle-|z_0,z_1,z_1\rangle \nonumber \\
& & -|z_1,z_0,z_1\rangle-|z_1,z_1,z_0\rangle)
\label{zzz} \\
& = & {1 \over 2} (|z_0,x_0,x_1\rangle+|z_0,x_1,x_0\rangle \nonumber \\
& & -|z_1,x_0,x_0\rangle+|z_1,x_1,x_1\rangle)
\label{zxx} \\
& = & {1 \over 2} (|x_0,z_0,x_1\rangle-|x_0,z_1,x_0\rangle \nonumber \\
& & +|x_1,z_0,x_0\rangle+|x_1,z_1,x_1\rangle)
\label{xzx} \\
& = & {1 \over 2} (-|x_0,x_0,z_1\rangle+|x_0,x_1,z_0\rangle \nonumber \\
& & +|x_1,x_0,z_0\rangle+|x_1,x_1,z_1\rangle).
\label{xxz}
\end{eqnarray}
It can be inferred from Eq.~(\ref{zzz}) that, if all players
measure~$Z$, then either all of them will obtain~$z_0$ or one will
obtain~$z_0$ and the other two will obtain~$z_1$. Analogously, it
can be inferred from Eqs.~(\ref{zxx})--(\ref{xxz}) that, if one
player measures~$Z$ and the other two measure~$X$, then either all
of them will obtain~$1$, or one will obtain~$1$ and the other
two will obtain~$0$.
\section{Quantum always winning strategy without unspeakable information}
\label{Sec3}
The method described above has one drawback that the adversary
could use to keep the players from always winning. If the qubits
are spin states of spin-${1 \over 2}$ particles, then the
observables~$Z$ and $X$ can be identified, respectively, with the
spin components along two orthogonal directions $z$ and $x$. Such
directions are determined by the preparation of the GHZ
state~(\ref{GHZ03}). This method requires all players to share the
directions $z$ and $x$ for the duration of the game. However, if
the opponent finds a way to confuse one of them, then the local
measurements performed by the players will not be adequately
correlated and thus the advantage provided by the GHZ state is
lost.
Fortunately, there is a method which is still valid even if the
players do not share two directions. Now, before entering the
booths, the players prepare a large number of 12-qubit systems in
the state
\begin{equation}
| \Psi\rangle={1 \over \sqrt{2}}
(|\eta_0,\eta_0,\eta_0\rangle+|\eta_1,\eta_1,\eta_1\rangle),
\label{GHZ12}
\end{equation}
where
$|\eta_0\rangle={1 \over \sqrt{2}} (|\phi_0\rangle+i
|\phi_1\rangle)$ and $|\eta_1\rangle={1 \over \sqrt{2}}
(|\phi_0\rangle-i |\phi_1\rangle)$, where
$|\phi_0\rangle$ and $|\phi_1\rangle$
are the four-qubit states
\begin{eqnarray}
|\phi_0\rangle & = & {1 \over 2}
(|z_0,z_1,z_0,z_1\rangle-|z_0,z_1,z_1,z_0\rangle
\nonumber \\ & &
-|z_1,z_0,z_0,z_1\rangle +|z_1,z_0,z_1,z_0\rangle),
\label{P0}
\\
|\phi_1\rangle & = & {1 \over 2 \sqrt{3}}
(2|z_0,z_0,z_1,z_1\rangle-|z_0,z_1,z_0,z_1\rangle
\nonumber \\ & &
-|z_0,z_1,z_1,z_0\rangle -|z_1,z_0,z_0,z_1\rangle
\nonumber \\ & &
-|z_1,z_0,z_1,z_0\rangle+2|z_1,z_1,z_0,z_0\rangle),
\label{P1}
\end{eqnarray}
introduced by Kempe {\em et al.}~\cite{KBLW01} in the context of
decoherence-free fault-tolerant universal quantum
computation~\cite{ZR97a,LCW98}, and recently obtained
experimentally using parametric down-converted
polarization-entangled photons~\cite{BEGKCW03}.
Then, for each 12-qubit system, the first player takes the first
{\em four} qubits with him, the second player takes the next four
qubits, and the third player takes the last four qubits. In case a
player is asked ``What is $Z$?,'' he performs on his four qubits a
measurement of the observable represented by
\begin{equation}
{\cal Z} = |\phi_0\rangle \langle\phi_0|- |\phi_1\rangle
\langle\phi_1|.
\label{calZ}
\end{equation}
The observable ${\cal Z}$ has {\em three} possible outcomes
(corresponding to its three eigenvalues, $-1$, $0$, and $1$).
However, if the qubits have been prepared in the state
$|\Psi\rangle$ given in Eq.~(\ref{GHZ12}), then only two outcomes can
occur (those corresponding to the eigenvalues $-1$ and $1$).
Measuring the observable ${\cal Z}$ on a system prepared in the
state $|\Psi\rangle$ is then equivalent to reliably discriminating
between the states~$|\phi_0\rangle$ and~$|\phi_1\rangle$. The
player gives the answer~$0$, if the outcome corresponds
to~$|\phi_0\rangle$, and the answer~$1$, if the outcome
corresponds to~$|\phi_1\rangle$.
In case a player is asked ``What is~$X$?,'' he performs a
measurement of the observable represented by
\begin{equation}
{\cal X} = |\psi_0\rangle \langle\psi_0|- |\psi_1\rangle
\langle\psi_1|,
\label{calX}
\end{equation}
where
\begin{eqnarray}
|\psi_0\rangle & = & {1 \over \sqrt{2}}
(|\phi_0\rangle+|\phi_1\rangle), \label{S0}
\\
|\psi_1\rangle & = & {1 \over \sqrt{2}}
(|\phi_0\rangle-|\phi_1\rangle). \label{S1}
\end{eqnarray}
Measuring ${\cal X}$ on a system prepared in the state~$|\Psi
\rangle$ is equivalent to reliably discriminating
between~$|\psi_0\rangle$ and~$|\psi_1\rangle$. The player gives
the answer~$0$, if the outcome corresponds to~$|\psi_0\rangle$,
or the answer~$1$, if the outcome corresponds
to~$|\psi_1\rangle$.
The state $|\Psi\rangle$ can be expressed in the following four
forms:
\begin{eqnarray}
| \Psi\rangle
& = & {1 \over 2} (|\phi_0,\phi_0,\phi_0\rangle-|\phi_0,\phi_1,\phi_1\rangle \nonumber \\
& & -|\phi_1,\phi_0,\phi_1\rangle-|\phi_1,\phi_1,\phi_0\rangle)
\label{ZZZ} \\
& = & {1 \over 2} (|\phi_0,\psi_0,\psi_1\rangle+|\phi_0,\psi_1,\psi_0\rangle \nonumber \\
& & -|\phi_1,\psi_0,\psi_0\rangle+|\phi_1,\psi_1,\psi_1\rangle)
\label{ZXX} \\
& = & {1 \over 2} (|\psi_0,\phi_0,\psi_1\rangle-|\psi_0,\phi_1,\psi_0\rangle \nonumber \\
& & +|\psi_1,\phi_0,\psi_0\rangle+|\psi_1,\phi_1,\psi_1\rangle)
\label{XZX} \\
& = & {1 \over 2} (-|\psi_0,\psi_0,\phi_1\rangle+|\psi_0,\psi_1,\phi_0\rangle \nonumber \\
& & +|\psi_1,\psi_0,\phi_0\rangle+|\psi_1,\psi_1,\phi_1\rangle).
\label{XXZ}
\end{eqnarray}
From Eq.~(\ref{ZZZ}), it can be inferred that if the three players
perform measurements to discriminate between~$|\phi_0\rangle$
and~$|\phi_1\rangle$, then they will always obtain an odd number
of states~$|\phi_0\rangle$. From Eqs.~(\ref{ZXX}) to (\ref{XXZ}),
it can be inferred that, if two players perform measurements to
discriminate between~$|\psi_0\rangle$ and~$|\psi_1\rangle$, and
the third performs measurements to discriminate
between~$|\phi_0\rangle$ and~$|\phi_1\rangle$, then they will
always obtain an odd number of states~$|\psi_1\rangle$
and~$|\phi_1\rangle$.
For our purposes, the fundamental property of the state $|\Psi
\rangle$ is that it is invariant under any transformation $R_a
\otimes R_b \otimes R_c$ (where $R_a = U_a \otimes U_a \otimes U_a
\otimes U_a$, where $U_j$ is a unitary operation on a single
qubit). This property derives from the fact that $|\phi_0\rangle$
and $|\phi_1\rangle$ and any linear combination thereof (such as
$|\psi_0\rangle$ and $|\psi_1\rangle$) are invariant under the
tensor product of four equal unitary operators, $U_j \otimes U_j
\otimes U_j \otimes U_j$. This means that the state $|\Psi
\rangle$ is invariant under local rotations, and the local
observables ${\cal Z}$ and ${\cal X}$ are invariant under $U_j
\otimes U_j \otimes U_j \otimes U_j$ and thus under rotations of
the local setups \cite{Cabello03a}. Therefore,
expressions~(\ref{ZZZ})--(\ref{XXZ}) remain unchanged after local
rotations. This implies that even if the adversary disorientates
one or more players, the outcomes of the local measurements still
possess the desired correlations, because the involved local
measurements are rotationally invariant.
\section{Measuring the observables by using single-qubit measurements}
\label{Sec4}
One might think that measuring ${\cal Z}$ (i.e., distinguishing
between $|\phi_0\rangle$ and $|\phi_1\rangle$) and ${\cal X}$
(i.e., distinguishing between $|\psi_0\rangle$ and $|\psi_1
\rangle$) could require collective measurements on each player's
four qubits. However, as in the original method, only single-qubit
measurements are needed.
\subsection{Distinguishing between $|\phi_0\rangle$ and $|\phi_1\rangle$}
The states $|\phi_0\rangle$ and $|\phi_1\rangle$ are reliably
distinguishable using single-qubit measurements because they can
be expressed as
\begin{eqnarray}
|\phi_0\rangle & = & {1 \over 2}
(-|z_0,z_1,x_0,x_1\rangle+|z_0,z_1,x_1,x_0\rangle \nonumber \\ & &
+|z_1,z_0,x_0,x_1\rangle-|z_1,z_0,x_1,x_0\rangle),
\\
|\phi_1\rangle & = & {1 \over 2 \sqrt{3}}
(|z_0,z_0,x_0,x_0\rangle-|z_0,z_0,x_0,x_1\rangle \nonumber \\ & &
-|z_0,z_0,x_1,x_0\rangle+|z_0,z_0,x_1,x_1\rangle \nonumber \\ & &
-|z_0,z_1,x_0,x_0\rangle+|z_0,z_1,x_1,x_1\rangle \nonumber \\ & &
-|z_1,z_0,x_0,x_0\rangle+|z_1,z_0,x_1,x_1\rangle \nonumber \\ & &
+|z_1,z_1,x_0,x_0\rangle+|z_1,z_1,x_0,x_1\rangle \nonumber \\ & &
+|z_1,z_1,x_1,x_0\rangle+|z_1,z_1,x_1,x_1\rangle).
\end{eqnarray}
Therefore, if the local measurements are $Z_1$ (i.e., the
component along the $z$~direction of the first qubit), $Z_2$
(i.e., the component along the $z$~direction of the second qubit),
$X_3$ (i.e., the component along the $x$~direction of the third
qubit), and $X_4$ (i.e., the component along the $x$~direction of
the fourth qubit) then, among the~$16$ possible outcomes,~$4$
occur (with equal probability) only if the qubits were in the
state $|\phi_0\rangle$, and the other~$12$ outcomes occur (with
equal probability) only if the qubits were in the state
$|\phi_1\rangle$. Note that now $z$ and $x$ are not fixed
directions, but any two orthogonal directions instead. This scheme
to distinguish between $|\phi_0\rangle$ and $|\phi_1\rangle$ using
only single-qubit measurements has recently been experimentally
implemented~\cite{BEGKCW03}.
\subsection{Distinguishing between $|\psi_0\rangle$ and $|\psi_1\rangle$}
The states $|\psi_0\rangle$ and $|\psi_1\rangle$ are not
distinguishable using {\em fixed} single-qubit measurements.
However, any two orthogonal states are distinguishable by
single-qubit measurements {\em assisted by classical
communication}~\cite{WSHV00}. This means that there is a {\em
sequence} of single-qubit measurements which allows us to reliably
distinguish between $|\psi_0\rangle$ and $|\psi_1\rangle$. In this
sequence, what is measured on one qubit could depend on the result
of a prior measurement on a different qubit. A sequence of
single-qubit measurements which allows us to reliably distinguish
between $|\psi_0\rangle$ and $|\psi_1\rangle$ follows from the
fact that these states can be expressed as
\begin{eqnarray}
|\psi_0\rangle & = & \alpha |z_0,x_0,a_0,c_0\rangle + \beta
|z_0,x_0,a_1,d_1\rangle \nonumber \\ & & + \alpha
|z_0,x_1,b_0,e_0\rangle + \beta |z_0,x_1,b_1,f_1\rangle \nonumber
\\ & & + \beta |z_1,x_0,b_0,f_0\rangle + \alpha
|z_1,x_0,b_1,e_1\rangle \nonumber \\ & & - \beta
|z_1,x_1,a_0,d_0\rangle + \alpha |z_1,x_1,a_1,c_1\rangle,
\label{psi0loc}
\\
|\psi_1\rangle & = & \beta |z_0,x_0,a_0,c_1\rangle + \alpha
|z_0,x_0,a_1,d_0\rangle \nonumber \\ & & + \beta
|z_0,x_1,b_0,e_1\rangle - \alpha |z_0,x_1,b_1,f_0\rangle \nonumber
\\ & & + \alpha |z_1,x_0,b_0,f_1\rangle - \beta
|z_1,x_0,b_1,e_0\rangle \nonumber \\ & & + \alpha
|z_1,x_1,a_0,d_1\rangle - \beta |z_1,x_1,a_1,c_0\rangle,
\label{psi1loc}
\end{eqnarray}
where
\begin{eqnarray}
\alpha & = & {\sqrt{3+\sqrt{6}} \over 2 \sqrt{6}}, \\
\beta & = & {\sqrt{3-\sqrt{6}} \over 2 \sqrt{6}},
\end{eqnarray}
and
\begin{eqnarray}
|a_0\rangle & = & p |z_0\rangle + q |z_1\rangle,\;\;\;\;
|a_1\rangle = q |z_0\rangle - p |z_1\rangle, \\
|b_0\rangle & = & -p |z_0\rangle + q |z_1\rangle,\;\;\;\;
|b_1\rangle = q |z_0\rangle + p |z_1\rangle, \\
|c_0\rangle & = & -r |z_0\rangle + s |z_1\rangle,\;\;\;\;
|c_1\rangle = -s |z_0\rangle - r |z_1\rangle, \\
|d_0\rangle & = & t |z_0\rangle + u |z_1\rangle,\;\;\;\;
|d_1\rangle = u |z_0\rangle - t |z_1\rangle, \\
|e_0\rangle & = & r |z_0\rangle + s |z_1\rangle,\;\;\;\;
|e_1\rangle = s |z_0\rangle - r |z_1\rangle, \\
|f_0\rangle & = & -t |z_0\rangle + u |z_1\rangle,\;\;\;\;
|f_1\rangle = u |z_0\rangle + t |z_1\rangle,
\end{eqnarray}
where
\begin{eqnarray}
p & = & {\sqrt{2-\sqrt{2}} \over 2}, \\
q & = & {\sqrt{2+\sqrt{2}} \over 2}, \\
r & = & {(3+\sqrt{3}) q \over 12 \alpha}, \\
s & = & {(3-\sqrt{3}) q \over 12 \beta}, \\
t & = & {(3-\sqrt{3}) p \over 12 \alpha}, \\
u & = & {(3+\sqrt{3}) p \over 12 \beta}.
\end{eqnarray}
Note that, for instance, the state $|b_0\rangle$ is {\em not}
orthogonal to $|a_0\rangle$ or $|a_1\rangle$. The comparison
between expressions~(\ref{psi0loc}) and~(\ref{psi1loc}) leads us
to a simple protocol for reliably distinguishing between $|\psi_0
\rangle$ and $|\psi_1\rangle$ using a sequence of single-qubit
measurements. This protocol is shown in Fig.~\ref{Flow02}.
\begin{figure}
\caption{Protocol, using a sequence of single-qubit measurements,
for reliably distinguishing between the states $|\psi_0\rangle$
and $|\psi_1\rangle$ given in Eqs.~(\ref{psi0loc})
and~(\ref{psi1loc}).}
\label{Flow02}
\end{figure}
\section{Other applications}
\label{Sec5}
\subsection{No-hidden-variables theorems}
Vaidman's aim was to reformulate the GHZ proof of Bell's theorem
into a game ``which can convert laymen into admirers of quantum
theory'' by showing its ``miraculous power''~\cite{Vaidman99}. One
obvious application of the method for always winning Vaidman's
game introduced in this paper is thus to prove Bell's theorem
without inequalities when the local observers do not share any
reference frame. According to Eqs.~(\ref{ZZZ})--(\ref{XXZ}), one
can predict with certainty the value of either ${\cal Z}_j$ or
${\cal X}_j$ (with $j = 1, 2, 3$) from the results of spacelike
separated measurements on the other two four-qubit systems.
Therefore, for any $j$, ${\cal Z}_j$ and ${\cal X}_j$ can be
considered ``elements of reality,'' as defined by Einstein,
Podolsky, and Rosen~\cite{EPR35}. However, it is impossible to
assign predefined values, either $0$ or $1$, to the six
observables ${\cal Z}_j$ and ${\cal X}_j$ satisfying all
predictions given by Eqs.~(\ref{ZZZ})--(\ref{XXZ}).
This proof is of interest, since it shows that a perfect alignment
between the source of entangled states and the local detectors
does not play a fundamental role in Bell's theorem. For instance,
in 1988 Yuval Ne'eman argued that the answer to the puzzle posed
by Bell's theorem was to be found in the implicit assumption that
the detectors were aligned. Ne'eman apparently believed that the
two detectors were connected through the space-time affine
connection of general relativity~\cite{Neeman}. A proof of Bell's
theorem without inequalities and without alignments involving two
observers, eight-qubit states, and only fixed single-qubit
measurements (i.e., without requiring a protocol like the one in
Fig.~\ref{Flow02}) has been introduced in Ref.~\cite{Cabello03c}.
The interest of the proof of Bell's theorem without inequalities
for the state~$|\Psi\rangle$, given in Eq.~(\ref{GHZ12}), and the
local measurements of ${\cal Z}$ and ${\cal X}$, defined
respectively in Eqs.~(\ref{calZ}) and (\ref{calX}), is that such a
proof is valid for 100\% of the events prepared in the
state~$|\Psi\rangle$, instead of only for a small (8\%) subset of
the events in Ref.~\cite{Cabello03c}.
Another interesting application of the state $|\Psi\rangle$ and the
local observables ${\cal Z}$ and ${\cal X}$ is the Kochen-Specker
(KS) theorem of impossibility of noncontextual hidden variables in
quantum mechanics~\cite{KS67}. Mermin showed how the GHZ proof of
Bell's theorem could be converted into a proof of the KS
theorem~\cite{Mermin90d,Mermin93}. Analogously, the proof of
Bell's theorem using $|\Psi\rangle$, ${\cal Z}$, and ${\cal X}$
could be converted into a (subspace-dependent) proof of the KS
theorem, valid even for measurements along imperfectly defined
directions. This is of interest, because it sheds some extra light
on a recent debate about whether or not the KS theorem is still
valid when ideal measurements are replaced by imperfect
measurements~\cite{Meyer99,Kent99,CK00,HKSS99,Mermin99,Appleby02,Cabello02,Larsson02,Breuer02}.
\subsection{Reducing the communication complexity with prior entanglement}
Vaidman's game can also be seen as a scenario in which the
communication complexity of a certain task can be reduced if the
players are allowed to share some prior entangled state. In
Vaidman's game the task is to always win the game. Without quantum
resources, this task requires at least one of the players to send
$1$~bit to another player after the question ($Z$ or $X$) has been
posed to him. However, if they initially share a GHZ state, the
task does not require any transmission of classical information
between the players.
A similar example of reduction of the communication complexity
needed for a task if the parties share a GHZ state was discovered
by Cleve and Buhrman~\cite{CB97}, reformulated by Buhrman {\em et
al.}~\cite{BvHT99}, and attractively presented by Steane and van
Dam~\cite{Sv00} as follows: a secret integer number $n_A+n_B+n_C$
of apples, where $n_j=0$, $\frac{1}{2}$, $1$, or $\frac{3}{2}$, is
distributed among three players, Alice, Bob, and Charlie, of the
same team. Each of them is in an isolated booth. The team wins if
one of the players, Alice, can ascertain whether the total number
of distributed apples is even or odd. The only communication
allowed is that each of the other two players can send $1$~bit to
Alice after seeing the number of apples each of them got. Assuming
that each of the~$32$ possible variations of apples occurs with
the same probability and using only classical communication, Alice
cannot guess the correct answer in more than~75\% of the cases.
However, the players can always win if each has a qubit of a trio
prepared in the state $|{\rm GHZ}\rangle$ given in
Eq.~(\ref{GHZ03}), and each player~$j$ applies to his qubit the
rotation
\begin{equation}
R(n_j)=|y_0\rangle\langle y_0|+e^{i n_j \pi}|y_1\rangle\langle
y_1|,
\end{equation}
where $n_j$ is his number of apples, and then measures the spin of
his qubit along the $z$~direction. Finally, Bob and Charlie send
their outcomes to Alice. The success of the method is guaranteed
by the following property:
\begin{eqnarray}
\lefteqn{R(n_A) \otimes R(n_B) \otimes R(n_C) |{\rm GHZ}\rangle =} \nonumber \\
& & \left\{\begin{array}{ll}
|{\rm GHZ}\rangle & \mbox{if $n_A+n_B+n_C$ is even} \\
|{\rm GHZ}^\perp\rangle & \mbox{if $n_A+n_B+n_C$ is odd,}
\end{array} \right.
\end{eqnarray}
where
\begin{eqnarray}
|{\rm GHZ}^\perp\rangle & = & {i \over 2} (|z_0,z_0,z_1\rangle+|z_0,z_1,z_0\rangle \nonumber \\
& & +|z_1,z_0,z_0\rangle-|z_1,z_1,z_1\rangle),
\label{GHZ03perp}
\end{eqnarray}
can be reliably distinguished from $|{\rm GHZ}\rangle$ by local
measurements along the $z$~direction. This method assumes that all
players {\em share a reference frame} during the protocol.
However, such an assumption is not needed if each player replaces
his qubit belonging to a trio prepared in~$|{\rm GHZ}\rangle$ by
four qubits belonging to a dozen prepared in~$|\Psi\rangle$. The
local operations [i.e., the rotation $R(n_j)$ and the measurement
along the $z$~direction] are replaced by a protocol, using only
single-qubit measurements, for reliably distinguishing between two
four-particle states which are invariant under $U_j \otimes U_j
\otimes U_j \otimes U_j$.
\subsection{Quantum cryptography}
Another application in which the use of GHZ states provides
advantages over any classical protocol is the secret sharing
scenario~\cite{HBB99,KKI99,TZG99,Cabello00}: Alice wishes to
convey a cryptographic key to Bob and Charlie in such a way that
they both can read it only if they cooperate. In addition, they
wish to prevent any eavesdropper from acquiring any information
without being detected. It is assumed that the players share no
previous secret information nor any secure classical channel but,
although it is not usually explicitly stated, it is assumed that
all three parties {\em share a reference frame}. Once more, such a
requirement can be removed if we replace the GHZ state with the
state~$|\Psi\rangle$, and the measurements of~$Z$ and~$X$ with
measurements of ${\cal Z}$ and ${\cal X}$.
\subsection{Conclusion}
To sum up, the interest in rotationally invariant states (i.e.,
those invariant under $U \otimes \ldots \otimes U$, where $U$ is a
unitary operation) goes beyond their use for decoherence-free
fault-tolerant universal quantum
computation~\cite{KBLW01,ZR97a,LCW98,BEGKCW03}, solving the
Byzantine agreement problem~\cite{FGM01,Cabello02b,Cabello03b},
and transmitting classical and quantum information between parties
who do not share a reference frame~\cite{BEGKCW03,BRS03}. {\em
Entangled} rotationally invariant states (i.e., those invariant
under $U_A \otimes \ldots \otimes U_A \otimes \ldots \otimes U_N
\otimes \ldots \otimes U_N$), like the state $|\Psi\rangle$ given
in Eq.~(\ref{GHZ12}), can be used to overcome certain assumptions in
the proofs of nonexistence of hidden variables, can be applied to
reduce the communication complexity of certain tasks, even if the
parties do not share any reference frame, and to distribute secret
keys among parties who do not share unspeakable information.
\begin{acknowledgments}
This paper was sparked by a question raised by H.~Weinfurter about
the possibility of developing a GHZ-like proof of Bell's theorem
involving observers who do not share a reference frame and using
only single-qubit measurements. I would like to thank him and
M.~Bourennane for useful discussions on this and related subjects,
N.~D.~Mermin and A.~Peres for their comments on a preliminary
version, and I.~Garc\'{\i}a-Pelayo for sending me the proofs of
Ref.~\cite{GG03}. This work was supported by the Spanish
Ministerio de Ciencia y Tecnolog\'{\i}a Project No.~BFM2002-02815
and the Junta de Andaluc\'{\i}a Project No.~FQM-239.
\end{acknowledgments}
\end{document}
\begin{document}
\title{Global existence and non-existence of stochastic parabolic equations }
\begin{abstract}
This paper is concerned with the blowup phenomenon of stochastic parabolic equations both on
bounded domain and in the whole space.
We introduce a new method to study the blowup phenomenon on bounded domain.
Comparing with the existing results, we delete the assumption that the solutions to
stochastic heat equations are non-negative. Then the blowup phenomenon in the whole space is
obtained by using the properties of heat kernel. We obtain that
the solutions will blow up in finite time for nontrivial initial data.
{\bf Keywords}: It\^{o}'s formula; Blowup; Stochastic heat equation; Impact of noise.
AMS subject classifications (2010): 35K20, 60H15, 60H40.
\end{abstract}
\baselineskip=15pt
\section{Introduction}
\setcounter{equation}{0}
For deterministic partial differential equations,
finite time blowup phenomenon has been studied by many authors, see the book \cite{Hubook2018}.
There are two cases to study this problem. One is bounded domain and the other is whole space.
On the bounded domain, the $L^p$-norm of solutions ($p>1$) will blow up in finite time.
The methods used for bounded domain include: Kaplan's first eigenvalue method, concavity method and
comparison method, see Chapter 5 of \cite{Hubook2018}. The main result is the following: under the assumptions that
the initial data is suitable large and that the nonlinear term $f(u)$ satisfies $f(u)\geq u^{1+\alpha}$
with $\alpha>0$, the solution of $u_t-\Delta u=f(u)$ with Dirichlet boundary condition will blow up in finite time.
For the whole space, the following ``Fujita phenomenon'' has attracted much attention
in the literature. Consider the following Cauchy problem
\begin{eqnarray}\left\{\begin{array}{lll}
u_t=\Delta u+u^p,\ \ \ &x\in\mathbb{R}^d, \ \ t>0,\ \ p>0,\\
u(0,x)=u_0(x), \ \ \ &x\in\mathbb{R}^d.
\end{array}\right.\lbl{1.1}\end{eqnarray}
It has been proved that:
\begin{quote}
(i) if $0<p<1$, then every nonnegative solution is global, but not necessarily
unique;
(ii) if $1<p\leq1+\frac{2}{d}$, then any nontrivial, nonnegative solution
blows up in finite time;
(iii) if $p>1+\frac{2}{d}$, then $u_0\in\mathcal{U}$ implies that
$u(t,x,u_0)$ exists globally;
(iv) if $p>1+\frac{2}{d}$, then $u_0\in\mathcal{U_1}$ implies that
$u(t,x,u_0)$ blows up in finite time,
\end{quote}
where $\mathcal{U}$ and $\mathcal{U_1}$ are defined as follows
\begin{eqnarray*}
\mathcal{U}&=&\left\{v(x)\,|\,v(x)\in BC(\mathbb{R}^d,\mathbb{R}_+),\
v(x)\leq \delta e^{-k|x|^2},\ k>0,\ \delta=\delta(k)>0\right\},\\
\mathcal{U_1}&=&\left\{v(x)\,|\,v(x)\in BC(\mathbb{R}^d,\mathbb{R}_+),\
v(x)\geq c e^{-k|x|^2},\ k>0,\ c\gg1\right\}.
\end{eqnarray*}
Here $BC=\{$ bounded and uniformly continuous functions $\}$, see
Fujita \cite{F1966,F1970} and Hayakawa \cite{kH1973}.
It is easy to see that for the whole space, there are four types of behaviours for problem (\ref{1.1}), namely,
(1) global existence unconditionally but uniqueness fails in certain solutions, (2) global existence with restricted
initial data, (3) blowing up unconditionally, and (4) blowing up with restricted initial data. The
occurrence of these behaviors depends on the combination effect of the nonlinearity represented by the parameter $p$, the size of the initial datum $u_0(x)$, represented by the choice of $\mathcal{U}$
or $\mathcal{U_1}$, and the dimension of the space.
Now, we recall some known results of stochastic partial differential equations (SPDEs).
In this paper, we only focus on the stochastic parabolic equations.
It is known that the existence and uniqueness of
global solutions to SPDEs can be established under appropriate conditions (\cite{Cb2007,DW2014,LR2010,L2013,T2009}).
For the finite time blowup phenomenon of stochastic parabolic equations, we first consider
the case on bounded domain. Consider the following equation
\begin{eqnarray}\left\{\begin{array}{lll}
du=(\Delta u+f(u))dt+\sigma(u)dW_t, \ \ \qquad t>0,&x\in D,\\[1.5mm]
u(x,0)=u_0(x)\geq0, \ \ \ &x\in D,\\[1.5mm]
u(x,t)=0, \qquad \qquad \qquad \qquad \qquad \qquad t>0, &x\in\partial D.
\end{array}\right.\lbl{1.2}\end{eqnarray}
Da Prato-Zabczyk \cite{PZ1992} considered the existence of global solutions of (\ref{1.2}) with
additive noise ($\sigma$ is constant). Manthey-Zausinger \cite{MZ1999} considered (\ref{1.2}),
where $\sigma$ satisfied the global Lipschitz condition. Dozzi and L\'{o}pez-Mimbela
\cite{DL2010} studied equation (\ref{1.2}) with
$\sigma(u)=u$ and proved that if $f(u)\geq u^{1+\alpha}$ ($\alpha>0$) and initial data is large enough, the solution will blow up in finite time,
and that if $f(u)\leq u^{1+\beta}$ ($\beta$ is a certain positive constant) and the initial data is
small enough, the solution will exist globally, also see \cite{NX2012}. A natural question arises: If
$\sigma$ does not satisfy the global Lipschitz condition, what can we say about the solution?
Will it blow up in finite time or exist globally? Chow \cite{C2009,C2011} answered part of this question.
Lv-Duan \cite{LD2015} described the competition between the nonlinear term and noise term for equation (\ref{1.2}).
Bao-Yuan \cite{BY2014} and Li et al.\cite{LPJ2016} obtained the existence of local solutions of (\ref{1.2}) with jump process
and L\'{e}vy process, respectively. For blowup phenomenon of stochastic functional
parabolic equations, see \cite{CL2012,LWW2016} for details.
In a somewhat different case, Mueller \cite{M1991} and, later,
Mueller-Sowers \cite{MuS1993} investigated the problem of a noise-induced explosion for a
special case of equation (\ref{1.2}), where $f(u)\equiv0,\,\sigma(u)=u^\gamma$ with
$\gamma>0$ and $W(x,t)$ is a space-time white noise. It was shown that the solution will
explode in finite time with positive probability for some $\gamma>3/2$.
We remark that the method used to prove the finite time blowup on bounded domain is
the stochastic Kaplan's first eigenvalue method. In order to make sure the inner product
$(u,\phi)$ is positive, the authors firstly proved the solutions of (\ref{1.2}) keep positive under some
assumptions, see \cite{BY2014,C2009,C2011,LPJ2016,LD2015}.
We find that in some special cases the positivity assumption on the solution can be removed.
What's more, in the present paper, we will give
a new method (a stochastic concavity method) to prove that the solutions blow up in finite time.
The advantage of this method is that we do not need the positivity of the solution.
For the whole space, Foondun et al. \cite{FLN2018} considered the finite time blowup phenomenon
for the Cauchy problem of stochastic parabolic equations.
Comparing with the deterministic parabolic equations, they only obtained
the result similar to type (4).
In this paper, we establish the similar results to types (1)
and (3). The method used here is comparison principle and the properties of heat kernel.
We obtain some different phenomenon with or without noise. Moreover,
many types of noise are considered.
Comparing with the results of deterministic partial differential equations,
there are a lot of work to do and we will study this issue in our further paper.
This paper is arranged as follows. In Sections 2 and 3,
we will consider the global existence and non-existence of stochastic parabolic equations
on bounded domain and in the whole space, respectively.
This paper ends with a short discussion in Section 4.
Throughout this paper, we write $C$ as a general positive constant and $C_i$, $i=1,2,\cdots$ as
a concrete positive constant.
\section{Bounded domain}
\setcounter{equation}{0}
In this section, we first recall some known results on bounded domain, and then
give some non-trivial generalizations. Consider the following SPDE
\begin{eqnarray}\left\{\begin{array}{lll}
du=(\Delta u+f(u,x,t))dt+\sigma(u,\nabla u,x,t)dW_t, \ \ \qquad &t>0,\ x\in D,\\[1.5mm]
u(x,0)=u_0(x), \ \ \ &\qquad\ \ x\in D,\\[1.5mm]
u(x,t)=0, \ \ \ \ &t>0,\ x\in\partial D,
\end{array}\right.\lbl{2.1}\end{eqnarray}
where $\sigma$ is a given function, and $W(x,t)$ is a Wiener random field defined in a complete probability
space $(\Omega,\mathcal {F},\mathbb{P})$ with a filtration $\mathcal {F}_t$. The Wiener random field has mean
$\mathbb{E}W(x,t)=0$ and its covariance function $q(x,y)$ is defined by
\begin{eqnarray*}
\mathbb{E}W(x,t)W(y,s)=(t\wedge s)q(x,y), \ \ \ x,y\in\mathbb{R}^n,
\end{eqnarray*}
where $(t\wedge s)=\min\{t,s\}$ for $0\leq t,s\leq T$. The existence
of strong solutions of (\ref{2.1}) has been studied by many authors \cite{Cb2007,PZ1992}.
To consider positive solutions, they start with the unique
solution $u\in C(\bar D\times[0,T])\cap L^2((0,T);H^2)$ for equation (\ref{2.1}).
Chow \cite{C2009,C2011} considered the finite time blowup problem of (\ref{2.1}).
They used the positivity of solution to prove the finite time blowup.
Under the following conditions
\begin{quote}
(P1) There exists a constant $\delta\geq0$ such that
\begin{eqnarray*}
\frac{1}{2} q(x,x)\sigma^2(r,\xi,x,t)-\sum_{i,j=1}^na_{ij}\xi_i\xi_j\leq\delta r^2
\end{eqnarray*}
for all $r\in\mathbb{R},x\in\bar D,\xi
\in\mathbb{R}^n$ and $t\in[0,T]$;\\
(P2) The function $f(r,x,t)$ is continuous on $\mathbb{R}\times\bar D\times[0,T]$ and such that
$f(r,x,t)\geq0$ for $r\leq0$ and $x\in\bar D$, $t\in[0,T]$; and \\
(P3) The initial datum $u_0(x)$ on $\bar D$ is positive and continuous.
\end{quote}
\begin{prop}\lbl{p1.1}{\rm\cite[Theorem 3.3]{C2009}} Suppose that the conditions
{\rm (P1),(P2)} and {\rm (P3)} hold true. Then the solution of the initial-boundary problem
for the parabolic It\^{o}'s equation {\rm(\ref{2.1})} remains positive, i.e.,
$u(x,t)\geq 0$, a.s. for almost every $x\in D$ and for all $ t\in[0,T]$.
\end{prop}
Let $\phi$ be the eigenfunction with respect to the first eigenvalue $\lambda_1$ on the bounded
domain, i.e.,
\begin{eqnarray*}\left\{\begin{array}{llll}
-\Delta \phi=\lambda_1 \phi, \ \ \ \ \ \ \ \ {\rm in} \ D,\\
\phi=0, \ \ \qquad\ \qquad {\rm on}\ \partial D.
\end{array}\right.\end{eqnarray*}
And we normalize it in such a way that
\begin{eqnarray*}
\phi(x)\geq0,\ \ \ \ \int_ D \phi(x)dx=1.
\end{eqnarray*}
In paper \cite{C2011}, Chow assumed that the following conditions hold
\begin{quote}
(N1) There exist a continuous function $F(r)$ and a constant $r_1>0$ such that
$F$ is positive, convex and strictly increasing for $r\geq r_1$ and satisfies
\begin{eqnarray*}
f(r,x,t)\geq F(r)
\end{eqnarray*}
for $r\geq r_1$, $x\in\bar D$, $t\in[0,\infty)$;\\
(N2) There exists a constant $M_1>r_1$ such that $F(r)>\lambda_1r$ for $r\geq M_1$;\\
(N3) The positive initial datum satisfies the condition
\begin{eqnarray*}
(\phi,u_0)=\int_ D u_0(x)\phi(x)dx>M_1;
\end{eqnarray*}
(N4) The following condition holds
\begin{eqnarray*}
\int_{M_1}^\infty\frac{dr}{F(r)-\lambda_1r}<\infty.
\end{eqnarray*}
\end{quote}
Alternatively, he imposes the following conditions $S$ on the noise term:
\begin{quote}
(S1) The correlation function $q(x,y)$ is continuous and positive for $x,y\in\bar D$
such that
\begin{eqnarray*}
\int_ D\int_ D q(x,y)v(x)v(y)dxdy\geq q_1\int_ D v^2(x)dx
\end{eqnarray*}
for any positive $v\in H$ and for some $q_1>0$;\\
(S2) There exist a positive constant $r_2$, continuous functions $\sigma_0(r)$ and $G(r)$ such that
they are both positive, convex and strictly increasing for $r\geq r_2$ and satisfy
\begin{eqnarray*}
\sigma(r,x,t)\geq \sigma_0(r)\ \ \ \ {\rm and} \ \ \ \ \sigma_0^2(r)\geq2G(r^2)
\end{eqnarray*}
for $x\in\bar D$, $t\in[0,\infty)$;\\
(S3) There exists a constant $M_2>r_2$ such that $q_1G(r)>\lambda_1r$ for $r\geq M_2$;\\
(S4) The positive initial datum satisfies the condition
\begin{eqnarray*}
(\phi,u_0)=\int_ D u_0(x)\phi(x)dx>M_2;
\end{eqnarray*}
(S5) The following integral is convergent so that
\begin{eqnarray*}
\int_{M_2}^\infty\frac{dr}{q_1G(r)-\lambda_1r}<\infty.
\end{eqnarray*}
\end{quote}
\begin{prop}\lbl{p1.2} {\rm\cite[Theorem 3.1]{C2011}}
Suppose the initial-boundary value problem {\rm(\ref{2.1})} has a unique local solution
and the conditions {\rm(P1)-(P3)} are satisfied, where $\sigma$ does not depend on
$\nabla u$. In addition, we assume that either
the conditions {\rm(N1)-(N4)} or the alternative conditions {\rm(S1)-(S5)} given above
hold true. Then, for a real number $p>0$, there exists a constant $T_p>0$ such that
\begin{eqnarray*}
\lim\limits_{t\rightarrow T_p-}\mathbb{E}\|u \|_p
=\lim\limits_{t\rightarrow T_p-}\mathbb{E}\left(\int_ D|u(x,t)|^pdx\right)^\frac{1}{p}=\infty,
\end{eqnarray*}
where $p\geq1$ under conditions $N$, while $p\geq2$ under conditions $S$.
\end{prop}
The positivity of solutions is needed for the case that the nonlinear term induces the
finite time blowup.
But for a special case, we can show that the positivity assumption on the solutions can be removed. Now, we consider
the following SPDEs
\begin{eqnarray}\left\{\begin{array}{lll}
du=\Delta udt+\sigma(u,x,t)dW(x,t), \ \ \qquad &t>0,\ x\in D,\\[1.5mm]
u(x,0)=u_0(x), \ \ \ &\qquad\ \ x\in D,\\[1.5mm]
u(x,t)=0, \ \ \ \ &t>0,\ x\in\partial D,
\end{array}\right.\lbl{2.3}\end{eqnarray}
where $W(x,t)$ is time-space white noise and $D\subset\mathbb{R}$ is an interval in $\mathbb{R}$.
\begin{theo}\lbl{t2.1} Assume that the initial-boundary problem (\ref{2.3}) has
a unique local solution.
Assume further that $C_1|u|^\gamma\leq|\sigma(u,x,t)|\leq C_2|u|^\gamma_1$ with $C_1>0$ and $\gamma_1\geq\gamma>1$, $u_0\geq0$ and
\begin{eqnarray*}
\left(\int_ D u_0(x)\phi(x)dx\right)^{2(\gamma-1)}\geq\frac{\lambda_1}{q_1C_1^2}.
\end{eqnarray*}
Then there exist constants $T^*>0$ and $p\geq2\gamma_1$ such that
\begin{eqnarray*}
\lim\limits_{t\rightarrow T^*-}\mathbb{E}\|u_t\|^p_{L^p}
=\lim\limits_{t\rightarrow T^*-}\mathbb{E}\int_D|u(x,t)|^pdx=\infty.
\end{eqnarray*}
\end{theo}
{\bf Proof.}
We will prove the theorem by contradiction.
Suppose finite time blowup is false. Then there exist a global positive solution $u$ and $p\geq2\gamma_1$ such
that for any $T>0$
\begin{eqnarray*}
\sup_{0\leq t\leq T}\mathbb{E}\|u(\cdot,t)\|^p_{L^p}<\infty,
\end{eqnarray*}
which implies that
\begin{eqnarray*}
\sup_{0\leq t\leq T}\mathbb{E}\Big|\int_ D u(x,t)\phi(x)dx\Big|^2\leq\|\phi\|^2_{L^q(D)}\sup_{0\leq t\leq T}\mathbb{E}\|u(\cdot,t)\|^p_{L^p}<\infty,
\end{eqnarray*}
where $1/p+1/q=1$, $\phi$ is defined as below Proposition \ref{p1.1} and satisfies $\int_ D\phi(x)dx=1$.
Define
\begin{eqnarray*}
\hat u(t):=\int_ D u(x,t)\phi(x)dx.
\end{eqnarray*}
By applying It\^{o}'s formula to $\hat u^2(t)$, we get
\begin{eqnarray}
\hat u^2(t)&=&(u_0,\phi)^2-2\lambda_1\int_0^t\hat u^2(s)ds+2\int_0^t\int_ D \hat u(s)\sigma(u,x,t)\phi(x)d W(x,s)dx\nonumber\\
&&+\int_0^t\int_ D\sigma^2(u,x,s)\phi^2(x)dxds.
\lbl{2.4}\end{eqnarray}
We note that the stochastic term is usually a local martingale. Thus we need to use the technique of stopping times.
Let
\begin{eqnarray*}
\tau_n=\inf\{t\geq0:\ \int_0^t\int_ D\sigma^2(u,x,s)\phi^2(x)dxds\geq n\}.
\end{eqnarray*}
Let $\eta(t\wedge\tau_n)=\mathbb{E}\hat u^2(t\wedge\tau_n)$. By taking an expectation over (\ref{2.4}), we obtain
\begin{eqnarray*}
\eta(t\wedge\tau_n)&=&(u_0,\phi)^2-2\lambda_1\int_0^{t\wedge\tau_n}\eta(s)ds
+\int_0^{t\wedge\tau_n}\mathbb{E}\int_ D\sigma^2(u,x,s)\phi^2(x)dxds.
\end{eqnarray*}
Noting that
\begin{eqnarray*}
\eta(t\wedge\tau_n)\leq(u_0,\phi)^2
+\int_0^{t}\mathbb{E}\int_ D\sigma^2(u,x,s)\phi^2(x)dxds,
\end{eqnarray*}
and letting $n\to\infty$, we have
\begin{eqnarray*}
\eta(t)=(u_0,\phi)^2-2\lambda_1\int_0^{t}\eta(s)ds
+\int_0^{t}\mathbb{E}\int_ D\sigma^2(u,x,s)\phi^2(x)dxds.
\end{eqnarray*}
Using the assumptions $\inf_{x,y\in D}q(x,y)\geq q_1>0$ and $\sigma^2(u,x,s)\geq C_1|u|^{2\gamma}$ with $\gamma>1$ and
Jensen's inequality, we have
\begin{eqnarray*}
\eta(t)&\geq&\eta(0)-2\lambda_1\int_0^t\eta(s)ds
+2q_1C_1^2\int_0^t\eta^\gamma(s)ds,
\end{eqnarray*}
or, in the differential form,
\begin{eqnarray*}\left\{\begin{array}{llll}
\displaystyle\frac{d\eta(t)}{dt}=-2\lambda_1\eta(t)+2q_1C_1^2 \eta^\gamma(t)\\[1.5mm]
\eta(0)=\eta_0.
\end{array}\right. \end{eqnarray*}
Noting that
\begin{eqnarray*}
\eta(0)=\left(\int_ D u_0(x)\phi(x)dx\right)^{2}\geq\left(\frac{\lambda_1}{q_1C_1^2}\right)^{\frac{1}{(\gamma-1)}},
\end{eqnarray*}
we have $\eta'(0)\geq0$. This implies that $\eta(t)>0$. An integration of the differential equation gives that
\begin{eqnarray*}
T\leq\int_{\eta_0}^{\eta(T)}\frac{dr}{C^2_1q_1r^{\gamma}-\lambda_1r}
\leq\int_{\eta_0}^\infty\frac{dr}{C^2_1q_1r^{\gamma}-\lambda_1r}<\infty,
\end{eqnarray*}
which implies $\eta(t)$ must blow up at a time $T^*\leq\int_{\eta_0}^\infty\frac{dr}{C^2_1q_1r^{\gamma}-\lambda_1r}$.
Hence this is a contradiction. This completes the proof. $\Box$
The advantage of Theorem \ref{t2.1} is that the positivity of the solution is not needed.
In the above theorem, we assumed that the initial-boundary problem (\ref{2.3}) has
a unique local solution. In fact, if $\sigma$ satisfies the local Lipschitz condition,
one can follow the method of \cite{T2009} to obtain the existence and uniqueness of local solution, also see \cite{eP1979}.
In \cite{eP1979,T2009}, the authors established the existence and uniqueness of energy solution, where
the solutions belong to $H_0^1(D)$ for any fixed time almost surely. Noting that
$H^{\frac{1}{2}+}(D)\hookrightarrow L^\infty(D)$ for $D\subset\mathbb{R}$, our assumptions are valid.
If we only consider the case
$\sigma$ does not depend on $\xi$, that is,
$\sigma:=\sigma(u,x,t)$. Then it follows the assumption (P1) that
$\sigma(0,x,t)=0$, which implies that for additive noise, the solutions maybe not
keep positive. Hence the first eigenvalue method will fail. Next, we introduce another method.
For simplicity, we consider the following SPDEs
\begin{eqnarray}\left\{\begin{array}{lll}
du=[\Delta u+|u|^{p-1}u]dt+\sigma(x,t)dB_t, \ \ \qquad &t>0,\ x\in D,\\[1.5mm]
u(x,0)=u_0(x), \ \ \ &\qquad\ \ x\in D,\\[1.5mm]
u(x,t)=0, \ \ \ \ &t>0,\ x\in\partial D,
\end{array}\right.\lbl{2.5}\end{eqnarray}
where $B_t$ is an one-dimensional Brownian motion. If the initial data belongs to $H^1(D)$,
Debussche et al. \cite{DMH2015} proved the solution of (\ref{2.5}) belongs to $H^3_0(D)$ during the
lifespan.
\begin{theo}\lbl{t2.2} Suppose that $p>1$ and $u_0$ satisfies
\begin{eqnarray*}
-\frac{1}{2}\int_D|\nabla u_0(x)|^2dx+\frac{1}{p+1}\int_D|u_0(x)|^{p+1}dx-\frac{1}{2}\int_0^\infty\mathbb{E}\int_D|\nabla\sigma(x,t)|^2dxdt>0,
\end{eqnarray*}
then the solution of (\ref{2.5}) must blow up in finite time in sense of mean square.
\end{theo}
{\bf Proof.} We will
prove the theorem by contradiction.
First we suppose there exist a global solution $u$ such that
\begin{eqnarray*}
\sup_{t\in[0,T]}\mathbb{E}\int_Du^2dx<\infty
\end{eqnarray*}
for any $T>0$. Similar to the proof of Theorem \ref{t2.1}, by using It\^{o} formula, we have
\begin{eqnarray*}
\mathbb{E}\int_Du^2-\int_Du^2_0=-2\mathbb{E}\int_0^t\int_D|\nabla u|^2
+2\mathbb{E}\int_0^t\int_D|u|^{p+1}+\mathbb{E}\int_0^t\int_D|\sigma(x,s)|^2.
\end{eqnarray*}
Denote
\begin{eqnarray*}
v(t)=\mathbb{E}\int_Du^2, \ \ \ h(t)=\mathbb{E}\int_D\left(-2|\nabla u|^2
+2|u|^{p+1}+|\sigma(x,t)|^2\right),
\end{eqnarray*}
then we have
\begin{eqnarray*}
v(t)-v(0)=\int_0^th(s)ds.
\end{eqnarray*}
Let
\begin{eqnarray*}
I(t)=\int_0^tv(s)ds+A,\ \ \ A\ {\rm is \ a\ positive\ constant},
\end{eqnarray*}
then we have $I'(t)=v(t),\ I''(t)=h(t)$. Set
\begin{eqnarray*}
J(t)= \mathbb{E}\int_D\left(-\frac{1}{2}|\nabla u|^2
+\frac{1}{p+1}|u|^{p+1}\right).
\end{eqnarray*}
It\^{o} formula implies that
\begin{eqnarray*}
&&\frac{1}{2}\mathbb{E}\int_D|\nabla u|^2-\frac{1}{2}\mathbb{E}\int_D|\nabla u_0|^2\\
&=&-\int_0^t\mathbb{E}\int_D\Delta u(\Delta u+|u|^{p-1}u)+\frac{1}{2}\int_0^t\mathbb{E}\int_D|\nabla\sigma(x,t)|^2,
\end{eqnarray*}
and
\begin{eqnarray*}
&&\frac{1}{p+1}\mathbb{E}\int_D|u|^{p+1}-\frac{1}{p+1}\mathbb{E}\int_D|u_0|^{p+1}\\
&=&\int_0^t\mathbb{E}\int_D|u|^{p-1}u(\Delta u+|u|^{p-1}u)+\frac{p}{2}\int_0^t\mathbb{E}\int_D|u|^{p-1}\sigma^2(x,t).
\end{eqnarray*}
Therefore, we have
\begin{eqnarray*}
J(t)=J(0)+\int_0^t\mathbb{E}\int_D(\Delta u+|u|^{p-1}u)^2-\frac{1}{2}\int_0^t\mathbb{E}\int_D|\nabla\sigma(x,s)|^2
+\frac{p}{2}\int_0^t\mathbb{E}\int_D|u|^{p-1}\sigma^2(x,s).
\end{eqnarray*}
By comparing $I''(t)$ and $J(t)$, we have, for $1<\delta<\frac{p+1}{2}$,
\begin{eqnarray*}
I''(t)=h(t)\geq4(1+\delta)J(t).
\end{eqnarray*}
Clearly,
\begin{eqnarray*}
I'(t)=v(t)&=&v(0)+\int_0^th(s)ds\\
&=&v(0)+\int_0^t\mathbb{E}\int_D|\sigma(x,t)|^2+\int_0^t\mathbb{E}\int_D\left(-2|\nabla u|^2
+2|u|^{p+1}\right)dxds\\
&=&v(0)+\int_0^t\mathbb{E}\int_D|\sigma(x,t)|^2+\int_0^t\mathbb{E}\int_D\left(2u\Delta u
+2|u|^{p+1}\right)dxds.
\end{eqnarray*}
It follows that, for any $\varepsilon>0$,
\begin{eqnarray*}
I'(t)^2&\leq&4(1+\varepsilon)\left[\int_0^t\mathbb{E}\int_D\left(\Delta u
+|u|^{p-1}u\right)^2dxds\right]\left[\int_0^t\mathbb{E}\int_Du^2dxds\right]\\
&&+\frac{1}{1+\varepsilon}\left[v(0)+\int_0^t\mathbb{E}\int_D|\sigma(x,t)|^2\right]^2.
\end{eqnarray*}
Combining the above estimates, we obtain
\begin{eqnarray*}
&&I''(t)I(t)-(1+\alpha)I'(t)^2\\
&\geq&4(1+\delta)\left[J(0)+\int_0^t\mathbb{E}\int_D(\Delta u+|u|^{p-1}u)^2-\frac{1}{2}\int_0^t\mathbb{E}\int_D|\nabla\sigma(x,s)|^2\right.\\
&&\left.+\frac{p}{2}\int_0^t\mathbb{E}\int_D|u|^{p-1}\sigma^2(x,s)\right]
\times\left[\int_0^t\int_Du^2dxds+A\right]\\
&&-4(1+\alpha)(1+\varepsilon)\left[\int_0^t\mathbb{E}\int_D\left(\Delta u
+|u|^{p-1}u\right)^2dxds\right]\left[\int_0^t\mathbb{E}\int_Du^2dxds\right]\\
&&-\frac{(1+\alpha)}{1+\varepsilon}\left[v(0)+\int_0^t\mathbb{E}\int_D|\sigma(x,t)|^2\right]^2.
\end{eqnarray*}
Now we choose $\varepsilon$ and $\alpha$ small enough such that
\begin{eqnarray*}
1+\delta>(1+\alpha)(1+\varepsilon).
\end{eqnarray*}
By assumption,
\begin{eqnarray*}
J(0)-\frac{1}{2}\int_0^t\mathbb{E}\int_D|\nabla\sigma(x,s)|^2>0.
\end{eqnarray*}
We can choose $A$ large enough such that
\begin{eqnarray*}
I''(t)I(t)-(1+\alpha)I'(t)^2>0,
\end{eqnarray*}
which implies that
\begin{eqnarray*}
\frac{d}{dt}\left(\frac{I'(t)}{I^{1+\alpha}(t)}\right)>0.
\end{eqnarray*}
Then we have
\begin{eqnarray*}
\frac{I'(t)}{I^{1+\alpha}(t)}>\frac{I'(0)}{I^{1+\alpha}(0)}\ \ \ {\rm for}\ t>0.
\end{eqnarray*}
It follows that $I(t)$ cannot remain finite for all $t$. This is a contradiction. The proof is complete. $\Box$
\begin{remark}\lbl{r2.1} The advantage of the concavity method is that we do not use the positivity
of solutions.
Meanwhile, the disadvantage of Theorem \ref{t2.2} is that we only deal with the additive noise.
For multiplicative noise, when we deal with the term $\mathbb{E}\int_D|\nabla u|^2$, by using
It\^{o} formula, we will have the term $-\frac{1}{2}\int_0^t\mathbb{E}\int_D|\nabla\sigma(u)|^2$, and
we cannot control this term.
\end{remark}
\begin{remark}\lbl{r2.2} The effect of noise on the blowup problem can be described as follows:
(i) for an additive noise, without help of the nonlinear term, the solutions will not blow up in
finite time; but if the solutions blow up in finite time without noise, the additive noise
can make the finite time blowup hard to happen. In other words,
the assumption on initial data will be stronger if we add the additive noise.
(ii) for multiplicative noise, without the help of nonlinear term, the solutions blow up
in finite time under some assumptions on initial data.
\end{remark}
Looking back at Proposition \ref{p1.2} and Theorems \ref{t2.1} and \ref{t2.2}, we find that
the finite time blowup appears in the $L^p$-norm of the solutions, $p>1$. One may
ask what happens in the case $0<p<1$. The following result answers this question. Consider the
following stochastic parabolic equations
\begin{eqnarray}\left\{\begin{array}{lll}
du=[\Delta u+f(u)]dt+\sigma(u) dW(x,t), \ \ \qquad &t>0,\ x\in D,\\[1.5mm]
u(x,0)=u_0(x), \ \ \ &\qquad\ \ x\in D,\\[1.5mm]
u(x,t)=0, \ \ \ \ &t>0,\ x\in\partial D.
\end{array}\right.\lbl{2.5b}\end{eqnarray}
\begin{theo}\lbl{t2.3}
Assume $f(r)\geq0$ for $r\leq0$. Then we have:
(i) Assume further that $f(r)\geq C_0r^p$, $q(x,y)\leq q_0$ for $x,y\in D$ and $\sigma^2(u)\leq C_1u^2$.
If the initial data satisfies
\begin{eqnarray*}
\left(\int_Du_0(x)\phi(x)dx\right)^{p-1}>\frac{\hat\lambda}{C_0\epsilon}, \ \
\hat\lambda=\epsilon\lambda_1+\frac{\epsilon}{2}(1-\epsilon)q_0C_1^2,
\end{eqnarray*}
then the solution $u(x,t)$ of (\ref{2.5b}) will blow up in finite time in $L^1$-norm and $\epsilon$-order moment,
where $0<\epsilon<1$ and $p>1$, i.e., there exists a positive $T>0$ such that
\begin{eqnarray*}
\mathbb{E}\|u(\cdot,t)\|^\epsilon_{L^1(D)}\to\infty,\ \ \ {\rm as}\ \ \ t\to T;
\end{eqnarray*}
(ii) Assume further that $f(r)\leq C_0r^p$, $q(x,y)\geq q_1$ for $x,y\in D$ and $\frac{1}{C_1}u^m\leq\sigma^2(u)\leq C_2u^m$.
Then, if $m>p>1$, $(m-p)(2m-1)>mp$ and the initial data are bounded,
then the solution $u(x,t)$ of (\ref{2.5b}) will exist globally in the following sense:
$\mathbb{E}[|(u,\phi)|^\epsilon]<\infty$ for any $t>0$.
\end{theo}
{\bf Proof.} (i) It follows from Proposition \ref{p1.1} that
(\ref{2.5b}) has a unique positive solution. Similar to the proof of Theorem \ref{t2.1}, we will prove the theorem by contradiction.
Suppose the claim is false. Then there exists a global positive solution $u$ such
that for any $T>0$
\begin{eqnarray*}
\sup_{0\leq t\leq T}\mathbb{E}\|u(\cdot,t)\|^\epsilon_{L^1(D)}<\infty,
\end{eqnarray*}
which implies that
\begin{eqnarray*}
\sup_{0\leq t\leq T}\mathbb{E}\left(\int_ D u(x,t)\phi(x)dx\right)^\epsilon\leq\|\phi\|_{L^\infty(D)}\sup_{0\leq t\leq
T}\mathbb{E}\|u(\cdot,t)\|^\epsilon_{L^1(D)}<\infty.
\end{eqnarray*}
Set $\hat u=(u,\phi)$. It\^{o} formula gives that
\begin{eqnarray}
\hat u^\epsilon(t)&=&(u_0,\phi)^\epsilon-\epsilon\lambda_1\int_0^t\hat u^\epsilon(s)ds
+\epsilon\int_0^t\hat u(s)^{\epsilon-1}\int_ Df(u)\phi dxds\nonumber\\
&&+
\epsilon\int_0^t\int_ D \hat u(s)^{\epsilon-1}\sigma(u)\phi(x)d W(x,s)dx\nonumber\\
&&+\frac{\epsilon(\epsilon-1)}{2}\int_0^tu(s)^{\epsilon-2}\int_ D\int_ D q(x,y) \sigma(u)\phi(x)\sigma(u)\phi(y)dxdyds.
\lbl{2.6}\end{eqnarray}
Let $\eta(t)=\mathbb{E}\hat u^\epsilon(t)$. Similar to the proof of Theorem \ref{t2.1}, by taking an expectation over (\ref{2.6}), we obtain
\begin{eqnarray*}
\eta(t)&=&(u_0,\phi)^\epsilon-\epsilon\lambda_1\int_0^t\eta(s)ds+
\epsilon\int_0^t\mathbb{E}\hat u(s)^{\epsilon-1}\int_ Df(u)\phi dxds\nonumber\\
&&+\frac{\epsilon(\epsilon-1)}{2}\int_0^t\mathbb{E}u(s)^{\epsilon-2}\int_ D\int_ D q(x,y) \sigma(u)\phi(x)\sigma(u)\phi(y)dxdyds.
\end{eqnarray*}
Using the assumptions $\sup_{x,y\in D}q(x,y)\leq q_0$ and $\sigma^2(u)\leq C_1u^2$ and
Jensen's inequality, we have
\begin{eqnarray*}
\eta(t)&\geq&\eta(0)-\varepsilon\lambda_1\int_0^t\eta(s)ds+C_0\epsilon\int_0^t\eta^{\frac{p+\epsilon-1}{\epsilon}}(s)ds
-\frac{\epsilon}{2}(1-\epsilon)q_0C_1^2\int_0^t\eta(s)ds,
\end{eqnarray*}
or, in the differential form,
\begin{eqnarray*}\left\{\begin{array}{llll}
\displaystyle\frac{d\eta(t)}{dt}=-\hat\lambda\eta(t)+C_0\epsilon\eta^{\frac{p+\epsilon-1}{\epsilon}}(t)\\[1.5mm]
\eta(0)=\eta_0.
\end{array}\right. \end{eqnarray*}
Noting that $\eta'(0)>0$. This implies that $\eta(t)>0$. An integration of the differential equation gives that
\begin{eqnarray*}
T\leq\int_{\eta_0}^{\eta(T)}\frac{dr}{C_0\epsilon r^{\frac{p+\epsilon-1}{\epsilon}}-\hat\lambda r}
\leq\int_{\eta_0}^\infty\frac{dr}{C_0\epsilon r^{\frac{p+\epsilon-1}{\epsilon}}-\hat\lambda r}<\infty,
\end{eqnarray*}
which implies $\eta(t)$ must blow up at a time
$T^*\leq\int_{\eta_0}^\infty\frac{dr}{C_0\epsilon r^{\frac{p+\epsilon-1}{\epsilon}}-\hat\lambda r}$.
Hence this is a contradiction. Thus we obtain the desired result.
(ii) Define
\begin{eqnarray*}
\tau_n=\inf\{t>0, \ \ (u,\phi)^\epsilon>n\}.
\end{eqnarray*}
Set $\hat u=(u,\phi)$. By using It\^{o} formula, for $t\leq\tau_n$, we have
\begin{eqnarray}
\hat u^\epsilon(t)&=&(u_0,\phi)^\epsilon-\epsilon\lambda_1\int_0^t\hat u^\epsilon(s)ds
+\epsilon\int_0^t\hat u(s)^{\epsilon-1}\int_ Df(u)\phi dxds\nonumber\\
&&+
\epsilon\int_0^t\int_ D \hat u(s)^{\epsilon-1}\sigma(u)\phi(x)d W(x,s)dx\nonumber\\
&&+\frac{\epsilon(\epsilon-1)}{2}\int_0^tu(s)^{\epsilon-2}\int_ D\int_ D q(x,y) \sigma(u)\phi(x)\sigma(u)\phi(y)dxdyds.
\lbl{2.7}\end{eqnarray}
Let $\eta(t)=\mathbb{E}\hat u^\epsilon(t)$. By taking an expectation over (\ref{2.7}), we obtain
\begin{eqnarray}
\eta(t)&=&(u_0,\phi)^\epsilon-\epsilon\lambda_1\int_0^t\eta(s)ds+
\epsilon\int_0^t\mathbb{E}\hat u(s)^{\epsilon-1}\int_ Df(u)\phi dxds\nonumber\\
&&+\frac{\epsilon(\epsilon-1)}{2}\int_0^t\mathbb{E}u(s)^{\epsilon-2}\int_ D\int_ D q(x,y) \sigma(u)\phi(x)\sigma(u)\phi(y)dxdyds
\nonumber\\
&\leq&\eta(0)-\epsilon\lambda_1\int_0^t\eta(s)ds+C_0
\epsilon\int_0^t\mathbb{E}\hat u(s)^{\epsilon-1}\int_ D|u|^p\phi dxds\nonumber\\
&&+\frac{\epsilon(\epsilon-1)}{2}\int_0^t\mathbb{E}u(s)^{\epsilon-2}\int_ D\int_ D q(x,y) \sigma(u)\phi(x)\sigma(u)\phi(y)dxdyds.
\lbl{2.8}\end{eqnarray}
H\"{o}lder's inequality and the $\varepsilon$-Young inequality yield that
\begin{eqnarray*}
&&C_0\epsilon\hat u(s)^{\epsilon-1}\int_ D|u|^p\phi dx\\
&\leq& C_0\epsilon\hat u(s)^{\epsilon-1}\left(\int_ D|u|^m\phi dx\right)^{\frac{p}{m}}\\
&\leq&\frac{\epsilon q_1(1-\epsilon)}{4C_1}u(s)^{\epsilon-2}\left(\int_ D|u|^m\phi dx\right)^2+C
u(s)^{\frac{2m}{2m-p}(2p-p\epsilon-1+\epsilon)}.
\end{eqnarray*}
Substituting the above inequality into (\ref{2.8}), and using the assumptions on $\sigma$, we have
\begin{eqnarray}
\eta(t)
&\leq&\eta(0)-\epsilon\lambda_1\int_0^t\eta(s)ds+C\int_0^tu(s)^{\frac{2m}{2m-p}(2p-p\epsilon-1+\epsilon)}ds\nonumber\\
&&
-\int_0^t\frac{\epsilon q_1(1-\epsilon)}{2C_1}u(s)^{\epsilon-2}\left(\int_D|u|^m\phi dx\right)^2ds\nonumber\\
&\leq&\eta(0)-\epsilon\lambda_1\int_0^t\eta(s)ds+C\int_0^tu(s)^{\frac{2m}{2m-p}(2p-p\epsilon-1+\epsilon)}ds\nonumber\\
&&
-\int_0^t\frac{\epsilon q_1(1-\epsilon)}{2C_1}u(s)^{2m+\epsilon-2}ds.
\lbl{2.9} \end{eqnarray}
The assumption $(m-p)(2m-1)>mp$ gives
\begin{eqnarray*}
\epsilon<\frac{2m}{2m-p}(2p-p\epsilon-1+\epsilon)<2m+\epsilon-2.
\end{eqnarray*}
Noting that for any $r<m<n$ and $u>0$, we have
\begin{eqnarray}
u^m=u^\beta u^{m-\beta}\leq \varepsilon u^n+C(\varepsilon)u^r, \ \ \ \beta=\frac{r(n-m)}{n-r}.
\lbl{2.10}\end{eqnarray}
So we can use (\ref{2.10}) to deal with the second last term on the right-hand side of (\ref{2.9}).
Eventually, we get for $t\leq\tau_n$
\begin{eqnarray*}
\eta(t)\leq\eta(0)+C\int_0^t\eta(s)ds.
\lbl{2.11} \end{eqnarray*}
We remark that the constant $C$ does not depend on $t$. Gronwall's lemma implies that
\begin{eqnarray*}
\eta(t)\leq C+Ce^{Ct}, \ \ \ t\leq\tau_n.
\end{eqnarray*}
Letting $n\to\infty$, the above inequality implies that $\mathbb{P}\{\tau_\infty<\infty\}=0$.
The proof is complete. $\Box$
\section{Whole space}
\setcounter{equation}{0}
In this section, we consider stochastic parabolic equations in whole space.
Our aim is to establish the global existence and non-existence under some assumptions.
We first recall the results of Foondun et al. \cite{FLN2018}, where the
authors considered the following equation
\begin{eqnarray}
\partial_tu_t(x)=\mathcal{L}u_t(x)+\sigma(u_t(x))\dot{F}(x,t),
\ t>0,\ x\in\mathbb{R}^d.
\lbl{3.1} \end{eqnarray}
Here $\mathcal{L}$ denotes the fractional Laplacian, the generator of an
$\alpha$-stable process and $\dot{F}$ is the random forcing term which they took to be white in time and possibly colored in space. They obtained the
following results.
\begin{prop}\lbl{p3.1}\cite[Theorems 1.2, 1.5, 1.6, 1.8, 1.9]{FLN2018}
(i) Noise white both in time and space, i.e.,
\begin{eqnarray*}
\mathbb{E}[\dot{F}(x,t)\dot{F}(y,s)]=\delta_0(t-s)\delta_0(x-y).
\end{eqnarray*}
Assume that there exists a $\gamma>0$ such that
\begin{eqnarray*}
\sigma(x)\geq|x|^{1+\gamma}\ \ \ {\rm for\ all\ }\ x\in\mathbb{R}^d,
\end{eqnarray*}
and that there is a positive constant $\kappa$ such that $\inf_{x\in\mathbb{R}^d}u_0(x)=:\kappa$.
Then
there exists a $t_0>0$ such that for all $x\in\mathbb{R}^d$, the solution
$u_t(x)$ of (\ref{3.1}) blows up in finite time, i.e.,
\begin{eqnarray}
\mathbb{E}|u_t(x)|^2=\infty\ \ \ {\rm whenever}\ \ t\geq t_0.
\lbl{3.2}\end{eqnarray}
Furthermore, the initial condition can be weakened as follows:
\begin{eqnarray}
\int_{B(0,1)}u_0(x)dx=:K_{u_0}>0,
\lbl{3.3}\end{eqnarray}
where $B(0,1)$ is the ball centred at the point $0$ with radius $1$.
The solution
$u_t(x)$ of (\ref{3.1}) also blows up in finite time whenever $K_{u_0}\geq K$, where $K$ is some positive constant.
(ii) Noise white in time and correlated in space, i.e.,
\begin{eqnarray*}
\mathbb{E}[\dot{F}(x,t)\dot{F}(y,s)]=\delta_0(t-s)f(x,y).
\end{eqnarray*}
Assume that for fixed $R>0$, there exists some positive number $K_f$ such
that
\begin{eqnarray}
\inf_{x,y\in B(0,R)}f(x,y)\geq K_f.
\lbl{3.4}\end{eqnarray}
Then, for fixed $t_0>0$ there exists a positive number $\kappa_0$ such
that for all $\kappa\geq\kappa_0$ and $x\in\mathbb{R}^d$, (\ref{3.2}) holds.
In particular, suppose that the correlation function $f$ is
given by
\begin{eqnarray*}
f(x,y)=\frac{1}{|x-y|^\beta} \ \ {\rm with}\ \ \beta<\alpha\wedge d.
\end{eqnarray*}
Then for $\kappa>0$ there exists a $t_0>0$ such that (\ref{3.2}) holds.
Furthermore, under the assumptions (\ref{3.3}) and (\ref{3.4}), there
exists a $t_0>0$ such that (\ref{3.2}) holds for all $x\in\mathbb{R}^d$.
\end{prop}
In the above proposition, Foondun et al. \cite{FLN2018} only considered
the finite time blowup phenomenon driven by noise. Our aim in this paper
is to find the effect of noise, including additive noise and multiplicative noise. And we are also very interested in the type (3) as introduction
said.
We first consider the global existence of the following stochastic parabolic equations
\begin{eqnarray} \left\{\begin{array}{llll}
du_t=(\Delta u+f(u,x,t))dt+\sigma(u,x,t)dB_t,\ \ t>0,\ &x\in\mathbb{R}^d,\\
u(x,0)=u_0(x)\gneqq0, \ \ &x\in\mathbb{R}^d,
\end{array}\right.\lbl{3.5}\end{eqnarray}
where $B_t$ is one-dimensional Brownian motion.
A mild solution to (\ref{3.5}) in sense of Walsh \cite{walsh1986} is any
$u$ which is adapted to the filtration generated by the
white noise and satisfies the following evolution equation
\begin{eqnarray*}
u(x,t)&=&\int_{\mathbb{R}^d}K(t,x-y)u_0(y)dy
+\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y)f(u,y,s)dyds\\
&&
+\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y)\sigma(u,y,s)dydB_s,
\end{eqnarray*}
where $K(t,x)$ denotes the heat kernel of Laplacian operator, i.e.,
\begin{eqnarray*}
K(t,x)=\frac{1}{(2\pi t)^{d/2}}\exp\left(-\frac{|x|^2}{2t} \right)
\end{eqnarray*}
satisfies
\begin{eqnarray*}
\left(\frac{\partial}{\partial t}-\Delta\right)K(t,x)=0\ \ \ {\rm for}\ \
(x,t)\neq(0,0).
\end{eqnarray*}
We get the following results.
\begin{theo}\lbl{t3.1}
Suppose that there exist positive constants $C_0$ and $0<p<1$ such that
\begin{eqnarray*}
|h(u,x,t)|\leq C_0|u|^p,\ \ \
h=f \ {\rm or}\ \sigma.
\end{eqnarray*}
Then the solutions of (\ref{3.5}) with bounded continuous
initial data $u_0$ exist globally in any
$r$-order moment, $r\geq1$.
\end{theo}
{\bf Proof.} By taking the second moment and using the Walsh isometry,
we get for any $T>0$
\begin{eqnarray*}
\mathbb{E}|u(x,t)|^2&=&\mathbb{E}\left(\int_{\mathbb{R}^d}K(t,x-y)u_0(y)dy
+\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y)f(u,y,s)dyds\right.\\
&&\left.
+\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y)\sigma(u,y,s)dydB_s\right)^2\\
&\leq&4\int_{\mathbb{R}^d}K(t,x-y)u^2_0(y)dy+4C_0^2
\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y) [\mathbb{E}|u(y,s)|^2]^pdyds\\
&&
+4C_0^2\int_0^t\mathbb{E}\left(\int_{\mathbb{R}^d}K(t-s,x-y) |u(y,s)|^pdy\right)^2ds\\
&\leq&4 \sup_{x\in\mathbb{R}^d}|u_0(x)|^2+8C_0^2\sup_{t\in[0,T],x\in\mathbb{R}^d}[\mathbb{E}|u(y,s)|^2]^p
\int_0^T\int_{\mathbb{R}^d}K(t,x) dtdx.
\end{eqnarray*}
Then taking the supremum for $(t,x)$ over $[0,T]\times\mathbb{R}^d$ (the right-hand side is independent of $t$ and $x$), we get
\begin{eqnarray*}
\sup_{t\in[0,T],x\in\mathbb{R}^d} \mathbb{E}|u(x,t)|^2\leq 4 \sup_{x\in\mathbb{R}^d}|u_0(x)|^2+8C_0^2T\sup_{t\in[0,T],x\in\mathbb{R}^d}[\mathbb{E}|u(y,s)|^2]^p.
\end{eqnarray*}
Noticing that $0<p<1$, we have for any $T>0$
\begin{eqnarray*}
\sup_{t\in[0,T],x\in\mathbb{R}^d} \mathbb{E}|u(x,t)|^2\leq C(T)<\infty,
\end{eqnarray*}
which implies that $\mathbb{P}\{|u(x,t)|=\infty\}=0$. The proof is complete. $\Box$
We remark that the heat kernel $K$ belongs to $L^1(\mathbb{R}^d)$ but not $L^2(\mathbb{R}^d)$.
Hence this result does not hold for the noise white in both time and space. Meanwhile, if we assume
the covariance function $q(x,y)$ is uniformly bounded, then the above result also hold for
the noise white in time and correlated in space.
Next, we establish the result similar to the case of type (3). In order to do that,
we will consider the following Cauchy problem
\begin{eqnarray} \left\{\begin{array}{llll}
du_t=\Delta udt+\sigma(u,x,t)dW(x,t),\ \ t>0,\ &x\in\mathbb{R}^d,\\
u(x,0)=u_0(x)\gneqq0, \ \ &x\in\mathbb{R}^d,
\end{array}\right.\lbl{3.6}\end{eqnarray}
where $W(t,x)$ is white noise both in time and space.
In the rest of paper, we always assume that the initial data is nonnegative continuous function.
A mild solution to (\ref{3.6}) in sense of Walsh \cite{walsh1986} is any
$u$ which is adapted to the filtration generated by the
white noise and satisfies the following evolution equation
\begin{eqnarray*}
u(x,t)=\int_{\mathbb{R}^d}K(t,x-y)u_0(y)dy
+\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y)\sigma(u,y,s)W(dy,ds),
\end{eqnarray*}
where $K(t,x)$ denotes the heat kernel of Laplacian operator.
We get the following results.
\begin{theo}\lbl{t3.2}
Suppose $d=1$ and $\sigma^2(u,x,t)\geq C_0u^{2m}$, $C_0>0$. Then for $1<m\leq\frac{3}{2}$, the solutions of (\ref{3.6}) blow up in finite
time for any nontrivial nonnegative initial data $u_0$. That is to say, there exists a positive constant $T$ such
that for all $x\in\mathbb{R}$
\begin{eqnarray*}
\mathbb{E}u^2(x,t)=\infty\ \ {\rm for}\ t\geq T.
\end{eqnarray*}
\end{theo}
{\bf Proof.} We assume that the solution remains finite for all finite $t$ almost
surely and want to derive a contradiction.
By taking the second moment and using the Walsh isometry,
we get
\begin{eqnarray*}
\mathbb{E}|u(x,t)|^2&=&\left(\int_{\mathbb{R}^d}K(t,x-y)u_0(y)dy\right)^2
+\int_0^t\int_{\mathbb{R}^d}K^2(t-s,x-y)\mathbb{E}\sigma^2(u,y,s)dyds\\
&=:&I^2_1(x,t)+I_2(x,t).
\end{eqnarray*}
We may assume without loss of generality that $u_0(x)\geq C_1>0$ for $|x|<1$ by the
assumption. A direct computation shows that
\begin{eqnarray}
I_1(x,t)&\geq&\frac{C_1}{(2\pi t)^{d/2}}\int_{B_1(0)}\exp\left(-\frac{|x|^2+|y|^2}{2t}\right)dy\nonumber\\
&\geq& \frac{C_1}{(2\pi t)^{d/2}}\exp\left(-\frac{|x|^2}{2t}\right)\int_{|y|\leq\frac{1}{\sqrt{t}}}
\exp\left(-\frac{|y|^2}{2}\right)dy\nonumber\\
&\geq& \frac{C}{(2\pi t)^{d/2}}\exp\left(-\frac{|x|^2}{2t}\right)
\lbl{3.7} \end{eqnarray}
for $t>1$ and $C>0$.
It is easy to see that
\begin{eqnarray*}
I_2(x,t)&\geq& C_0\int_0^t\int_{\mathbb{R}^d}K^2(t-s,x-y)\mathbb{E}|u(y,s)|^{2m}dyds\\
&\geq& C_0\int_0^t\int_{\mathbb{R}^d}K^2(t-s,x-y)[\mathbb{E}|u(y,s)|^{2}]^mdyds.
\end{eqnarray*}
Denote $v(x,t)=\mathbb{E}|u(x,t)|^{2}$. Let
\begin{eqnarray*}
G(t)=\int_{\mathbb{R}^d}K(t,x) v(x,t)dx.
\end{eqnarray*}
Then for $t>1$,
\begin{eqnarray}
G(t)&=&\int_{\mathbb{R}^d}I^2_1(x,t)K(t,x)dx+\int_{\mathbb{R}^d}I_2(x,t)K(t,x)dx\nonumber\\
&\geq&\frac{C_2}{t^d}+\int_0^t\int_{\mathbb{R}^d}\int_{\mathbb{R}^d}K(t,x)K^2(t-s,x-y)v^m(y,s)dydxds.
\lbl{3.8}\end{eqnarray}
It is clear that
\begin{eqnarray*}
&&\int_{\mathbb{R}^d}K(t,x)K^2(t-s,x-y)dx\\
&=&\frac{1}{(2\pi t)^{d/2}[2\pi (t-s)]^{d}}\int_{\mathbb{R}^d}
\exp\left(-\frac{|x|^2}{2t}-\frac{|x-y|^2}{t-s}\right)dx\\
&=&K(s,y)\frac{(2\pi s)^{d/2}}{(2\pi t)^{d/2}[2\pi (t-s)]^{d}}\int_{\mathbb{R}^d}
\exp\left(\frac{|y|^2}{2s}-\frac{|x|^2}{2t}-\frac{|x-y|^2}{t-s}\right)dx.
\end{eqnarray*}
Since
\begin{eqnarray*}
&&\frac{|y|^2}{2s}-\frac{|x|^2}{2t}-\frac{|x-y|^2}{t-s}\\
&\geq&\frac{|y|^2}{2s}-\frac{|x-y|^2+|y|^2+2|x-y||y|}{2t}-\frac{|x-y|^2}{t-s}\\
&=&\frac{1}{2t}\left(-2|x-y||y|+\frac{t-s}{s}|y|^2\right)-\frac{|x-y|^2}{2t}-\frac{|x-y|^2}{t-s}\\
&\geq&-\frac{s|x-y|^2}{2t(t-s)}-\frac{|x-y|^2}{2t}-\frac{|x-y|^2}{t-s}\\
&\geq& -\frac{2|x-y|^2}{t-s}\ \ {\rm for}\ 0<s<t,
\end{eqnarray*}
we get for $0<s<t$
\begin{eqnarray*}
\int_{\mathbb{R}^d}
\exp\left(\frac{|y|^2}{2s}-\frac{|x|^2}{2t}-\frac{|x-y|^2}{t-s}\right)dx
\geq \int_{\mathbb{R}^d}
\exp\left(-\frac{2|x-y|^2}{t-s}\right)dx
=C_3(t-s)^{d/2}.
\end{eqnarray*}
Substituting the above estimate into (\ref{3.8}) and applying Jensen's inequality, we obtain
\begin{eqnarray*}
G(t)&\geq&\frac{C_2}{t^d}+C_4\int_0^t\frac{s^{d/2}}{t^d}\int_{\mathbb{R}^d}K(s,y)v^m(y,s)dyds\\
&\geq&\frac{C_2}{t^d}+C_4\int_0^t\frac{s^{d/2}}{t^d}G^m(s)ds.
\end{eqnarray*}
We can rewrite the above inequality as
\begin{eqnarray}
t^d G(t)&\geq& C_2+C_4\int_0^ts^{d/2}G^m(s)ds\lbl{3.9}\\
&=:&g(t).\nonumber
\end{eqnarray}
Then for $t>1$, we have
\begin{eqnarray*}
&&g(t)\geq C_2,\\
&&g'(t)\geq C_4t^{d/2}G^m(t)\geq C_4t^{d/2}\left(\frac{1}{t^d}g(t)\right)^m=C_4t^{\frac{d}{2}-dm}g^m(t),
\end{eqnarray*}
which implies
\begin{eqnarray*}
\frac{C_2^{1-m}}{m-1}\geq \frac{1}{m-1}g^{1-m}(t)\geq C_4\int_t^Ts^{\frac{d}{2}-dm}ds\ \ {\rm for}\
T>t\geq1.
\end{eqnarray*}
If $m\leq\frac{d+2}{2d}$, that is, $\frac{d}{2}-dm+1\geq0$, the right-hand side of the above
inequality is unbounded as $T\to\infty$, which gives a contradiction. Noting that we must
let $m>1$ because we used the Jensen's inequality, thus we get $1<m\leq\frac{3}{2}$ and $d=1$.
And thus we complete
the proof. $\Box$
If the noise is just one-dimensional Brownian motion, the result
will be different. For this, we consider the following stochastic
\begin{eqnarray} \left\{\begin{array}{llll}
du_t=\Delta udt+\sigma(u,x,t)dB_t,\ \ t>0,\ &x\in\mathbb{R}^d,\\
u(x,0)=u_0(x)\gneqq0, \ \ &x\in\mathbb{R}^d,
\end{array}\right.\lbl{3.10}\end{eqnarray}
where $B_t$ is one-dimensional Brownian motion.
A mild solution to (\ref{3.10}) in sense of Walsh \cite{walsh1986} is any
$u$ which is adapted to the filtration generated by the
white noise and satisfies the following evolution equation
\begin{eqnarray*}
u(x,t)=\int_{\mathbb{R}^d}K(t,x-y)u_0(y)dy
+\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y)\sigma(u,y,s)dydB_s,
\end{eqnarray*}
where $K(t,x)$ denotes the heat kernel of Laplacian operator.
\begin{theo}\lbl{t3.3}
Suppose $d=1$ and $\sigma^2(u,x,t)\geq C_0u^{2}$, $C_0>0$. Then the solutions of (\ref{3.10}) blow up in finite
time for any nontrivial nonnegative initial data $u_0$.
\end{theo}
{\bf Proof.} Similar to the proof of Theorem \ref{t3.2}, we assume that the solution remains finite for all finite $t$ almost
surely.
By taking the second moment and using the Walsh isometry,
we get
\begin{eqnarray*}
\mathbb{E}|u(x,t)|^2&=&\left(\int_{\mathbb{R}^d}K(t,x-y)u_0(y)dy\right)^2
+\int_0^t\left(\int_{\mathbb{R}^d}K(t-s,x-y)\mathbb{E}\sigma(u,y,s)dy\right)^2ds\\
&=:&u_1(x,t)+u_2(x,t).
\end{eqnarray*}
We may assume without loss of generality that $u_0(x)\geq C_1>0$ for $|x|<1$ by the
assumption.
The estimate (\ref{3.7}) also holds, i.e.,
\begin{eqnarray*}
u_1(x,t)\geq \frac{C}{(2\pi t)^{d/2}}\exp\left(-\frac{|x|^2}{2t}\right)
\end{eqnarray*}
for $t>1$ and $C>0$.
It is easy to see that, for $m\geq2$,
\begin{eqnarray*}
u_2(x,t)&\geq& C_0\int_0^t\left(\int_{\mathbb{R}^d}K(t-s,x-y)\mathbb{E}|u(y,s)|^{m}dy\right)^2ds\\
&\geq& C_0\int_0^t\left(\int_{\mathbb{R}^d}K(t-s,x-y)[\mathbb{E}|u(y,s)|^{2}]^{m/2}dy\right)^2ds\\
&\geq& C_0\int_0^t\left(\int_{\mathbb{R}^d}K(t-s,x-y)\mathbb{E}|u(y,s)|^{2}dy\right)^mds.
\end{eqnarray*}
Denote $v(x,t)=\mathbb{E}|u(x,t)|^{2}$. Let
\begin{eqnarray*}
G(t)=\int_{\mathbb{R}^d}K(t,x) v(x,t)dx.
\end{eqnarray*}
Then for $t>1$,
\begin{eqnarray}
G(t)&=&\int_{\mathbb{R}^d}u_1(x,t)K(t,x)dx+\int_{\mathbb{R}^d}u_2(x,t)K(t,x)dx\nonumber\\
&\geq&\frac{C_2}{t^d}+\int_0^t\left(\int_{\mathbb{R}^d}\int_{\mathbb{R}^d}K(t,x)K(t-s,x-y)v(y,s)dydx\right)^mds.
\lbl{3.11}\end{eqnarray}
It is clear that (see \cite[Page 42]{Hubook2018})
\begin{eqnarray*}
\int_{\mathbb{R}^d}K(t,x)K(t-s,x-y)dx\geq C_3K(s,y)\left(\frac{s}{t}\right)^{d/2}.
\end{eqnarray*}
Substituting the above estimate into (\ref{3.11}) and applying Jensen's inequality, we obtain
\begin{eqnarray*}
G(t)\geq\frac{C_2}{t^d}+C_3\int_0^t\left(\frac{s^{d/2}}{t^{d/2}}\right)^mG^m(s)ds.
\end{eqnarray*}
We can rewrite the above inequality as
\begin{eqnarray}
t^{md/2} G(t)&\geq& C_2t^{(m-2)d/2}+C_3\int_0^ts^{dm/2}G^m(s)ds\lbl{3.12}\\
&=:&g(t).\nonumber
\end{eqnarray}
Then for $t>1$, we have
\begin{eqnarray*}
&&g(t)\geq C_2t^{(m-2)d/2},\\
&&g'(t)\geq C_3t^{dm/2}G^m(t)\geq C_3t^{dm/2}\left(\frac{1}{t^{dm/2}}g(t)\right)^m=C_3t^{(1-m)md/2}g^m(t),
\end{eqnarray*}
which implies
\begin{eqnarray*}
\frac{C_2^{1-m}}{m-1}t^{-d(m-1)(m-2)/2}\geq \frac{1}{m-1}g^{1-m}(t)\geq C_4\int_t^Ts^{(1-m)md/2}ds\ \ {\rm for}\
T>t\geq1.
\end{eqnarray*}
If $(m-1)md/2\leq1$, we will get a contradiction by letting $T\to\infty$. If
$\frac{d(m-1)(m-2)}{2}>-1+ \frac{(m-1)md}{2}$, then we will get a contradiction by letting
$T\to\infty$ and then taking $t\gg1$. Noting that when $m=2,\ d=1$, we have
$(m-1)md/2=1$ and $\frac{d(m-1)(m-2)}{2}>-1+ \frac{(m-1)md}{2}$ is equivalent to $m<1+\frac{1}{d}$.
Since $m\geq2$, we get a contradiction for the case that $m=2,\ d=1$. The proof is complete. $\Box$
\begin{remark} \lbl{r3.1}
Comparing Theorem \ref{t3.2} with Proposition \ref{p3.1},
the assumptions of Proposition \ref{p3.1} on initial data need the lower bound,
but in Theorem \ref{t3.2} we did not.
Theorems \ref{t3.2} and \ref{t3.3} show that the time-space white noise and
Brownian motion are different. But the method used here
is not suitable to fractional Laplacian operator.
Sugitani \cite{Sug1975} established the Fujita index for
Cauchy problem of fractional Laplacian operator.
The main difficult is that we can not get the exact estimate of
$\int_{\mathbb{R}^d}p^2(t,x)p(t-s,x-y)dx$, where $p(t,x)$ is the
heat kernel of fractional Laplacian operator.
\end{remark}
\section{Discussion}
\setcounter{equation}{0}
An interesting issue of stochastic partial differential equations is to find
the difference when we add the noise, i.e., the impact of
noise. For stochastic partial differential equations, we want to know whether the solutions keep positive.
In this section, we first consider the positivity of the solutions of stochastic parabolic
equations in the whole space, and then consider the impact of noise.
In the following, we will select a test function $\beta_\varepsilon(r)$.
Define
\begin{eqnarray*}
&&\beta_\varepsilon(r)=\int_r^\infty\rho_\varepsilon(s)ds,\ \ \
\rho_\varepsilon(r)=\int_{r+\varepsilon}^\infty J_\varepsilon(s)ds,\ \ \ r\in\mathbb{R},\nonumber\\
&&J_\varepsilon(|x|)=\varepsilon^{-d}J\left(\frac{|x|}{\varepsilon}\right),\ \ \
J(x)=\left\{\begin{array}{llll}
C\exp\left(\frac{1}{|x|^2-1}\right),\ \ \ &|x|<1,\\
0, \ \ \ &|x|\geq1.
\end{array}\right. \end{eqnarray*}
Then by direct verification, we have the following result.
\begin{lem}\lbl{l4.1} The above constructed functions $\rho_\varepsilon,\beta_\varepsilon$ are in $C^\infty(\mathbb{R})$ and
have the following properties:
$\rho_\varepsilon$ is a non-increasing function and
\begin{eqnarray*}
\beta_\varepsilon'(r)=-\rho_\varepsilon(r)=\left\{\begin{array}{llll}
0,\ \ \ &r\geq0,\\
-1,\ \ \ &r\leq-2\varepsilon.
\end{array}\right.\end{eqnarray*}
Additionally, $\beta_\varepsilon$ is convex and
\begin{eqnarray*}
\beta_\varepsilon(r)=\left\{\begin{array}{llll}
0,\ \ \ \ \ &r\geq0,\\
-2\varepsilon-r+\varepsilon \hat C, \ \ &r\leq-2\varepsilon,
\end{array}\right.\end{eqnarray*}
where $\hat C=\int^0_{-2}\int_{t+1}^1J(s)dsdt<2$. Furthermore,
\begin{eqnarray*}
0\leq\beta_\varepsilon''(r)=J_\varepsilon(r+\varepsilon)\leq \varepsilon^{-d}C, \ \ -2\varepsilon\leq r\leq0,
\end{eqnarray*}
which implies that
\begin{eqnarray*}
&&-2^dC\leq r^d\beta_\varepsilon''(r)\leq0 \ \ \ \ {\rm for}\ -2\varepsilon\leq r\leq0,\ {\rm and}\ d\ {\rm is}\ {\rm odd};\\
&&0\leq r^d\beta_\varepsilon''(r)\leq2^dC\ \ \ \ {\rm for}\ -2\varepsilon\leq r\leq0,\ {\rm and}\ d\ {\rm is}\ {\rm even}.
\end{eqnarray*}
\end{lem}
Now, we consider the following stochastic parabolic equations
\begin{eqnarray}\left\{\begin{array}{lll}
du=(\Delta u+f(u,x,t))dt+g(u,x,t)dW(x,t), \ \ \qquad t>0,\ &x\in \mathbb{R},\\[1.5mm]
u(x,0)=u_0(x), \ \ \ \ \ &x\in \mathbb{R},
\end{array}\right.\lbl{4.1}\end{eqnarray}
where $W(x,t)$ is time-space white noise.
\begin{theo}\lbl{t4.1} Assume that (i) the function $f(r,x,t)$ is continuous on
$\mathbb{R}\times\mathbb{R}\times[0,T]$; (ii) $f(r,x,t)\geq0$ for $r\leq0$, $x\in\mathbb{R}$
and $t\in[0,T]$; and (iii) $ g^2(u,x,t)\leq ku^{2m}$,
where $k>0$, $2m>1$ and $(-1)^{2m-1}\in\mathbb{R}$. Then the solution of
the initial value problem {\rm(\ref{4.1})} with nonnegative initial datum remains positive:
$u(x,t)\geq0$, a.s. for almost every $x\in \mathbb{R}$ and for all $ t\in[0,T]$.
\end{theo}
{\bf Proof.} Define
\begin{eqnarray*}
\Phi_\varepsilon(u_t)=(1,\beta_\varepsilon(u_t))=\int_{\mathbb{R}} \beta_\varepsilon(u(x,t))dx.
\end{eqnarray*}
By It\^{o}'s formula, we have
\begin{eqnarray*}
\Phi_\varepsilon(u_t)&=&\Phi_\varepsilon(u_0)+\int_0^t\int_{\mathbb{R}} \beta_\varepsilon'(u(x,s))\Delta u(x,s)dxds\\
&&+\int_0^t\int_ {\mathbb{R}} \beta_\varepsilon'(u(x,s))f(u(x,s),x,s)dxds\\
&&+\int_0^t\int_ {\mathbb{R}} \beta_\varepsilon'(u(x,s))g(u(x,s),x,s)dW(x,s)dx\\
&&+\frac{1}{2}\int_0^t\int_{\mathbb{R}}\beta_\varepsilon''(u(x,s))g^2(u(x,s),x,s)dxds\\
&=&\Phi_\varepsilon(u_0)+\int_0^t\int_{\mathbb{R}}\beta_\varepsilon''(u(x,s))\left(\frac{1}{2}g^2(u(x,s),x,s)-|\nabla u|^2\right)dxds\\
&&+\int_0^t\int_ {\mathbb{R}} \beta_\varepsilon'(u(x,s))f(u(x,s),x,s)dxds\\
&&+\int_0^t\int_ {\mathbb{R}} \beta_\varepsilon'(u(x,s))g(u(x,s),x,s)dW(x,s)dx.
\end{eqnarray*}
Taking expectation of the above equality and using Lemma \ref{l4.1}, we get
\begin{eqnarray*}
\mathbb{E}\Phi_\varepsilon(u_t)
&=&\mathbb{E}\Phi_\varepsilon(u_0)+\mathbb{E}\int_0^t\int_{\mathbb{R}} \beta_\varepsilon''(u(x,s))\\
&&\times\left(\frac{1}{2}g^2(u(x,s),x,s)-|\nabla u|^2\right)dxds\\
&&+\mathbb{E}\int_0^t\int_{\mathbb{R}} \beta_\varepsilon'(u(x,s))f(u(x,s),x,s)dxds\\
&\leq&\mathbb{E}\Phi_\varepsilon(u_0)+\frac{k}{2}\mathbb{E}\int_0^t\int_{\mathbb{R}} \beta_\varepsilon''(u(x,s))
u(x,s)^{2m}dxds\\
&&+\mathbb{E}\int_0^t\int_{\mathbb{R}}\beta_\varepsilon'(u(x,s))f(u(x,s),x,s)dxds.
\end{eqnarray*}
Here and after, we denote $\|\cdot\|_{L^1}$ by $\|\cdot\|_1$. Let $\eta(u)=u^-$
denote the negative part of $u$ for $u\in\mathbb{R}$.
Then we have $\lim\limits_{\varepsilon\rightarrow0}\mathbb{E}\Phi_\varepsilon(u_t)=\mathbb{E}\|\eta(u_t)\|_1$.
It follows from Lemma \ref{l4.1} that
\begin{eqnarray*}
0\geq u^{2m}\beta''_\varepsilon(u)\geq\left\{\begin{array}{llll}
0,\ \ \ \ & u\geq0\ {\rm or}\ u\leq-2\varepsilon,\\
-2Cu^{2m-1},\ \ \ \ &-2\varepsilon\leq u\leq0,\ {\rm and}\ u^{2m-1}\geq0,
\end{array}\right.
\end{eqnarray*}
or
\begin{eqnarray*}
0\leq u^{2m}\beta''_\varepsilon(u)\leq\left\{\begin{array}{llll}
0,\ \ \ \ & u\geq0\ {\rm or}\ u\leq-2\varepsilon,\\
-2Cu^{2m-1},\ \ \ \ &-2\varepsilon\leq u\leq0,\ {\rm and}\ u^{2m-1}\leq0
\end{array}\right.
\end{eqnarray*}
which implies that
$\lim\limits_{\varepsilon\rightarrow0}u^{2m}\beta_\varepsilon''(u)=0$ provided that $2m>1$.
By taking the limits termwise as $\varepsilon\rightarrow0$ and using Lemma \ref{l4.1}, we get
\begin{eqnarray*}
\mathbb{E}\|\eta(u_t)\|_1&\leq&\mathbb{E}\|\eta(u_0)\|_1
-\mathbb{E}\int_0^t\int_{\mathbb{R}} \eta'(u(x,s))f(u(x,s),x,s)dxds\nonumber\\
&\leq&0,
\end{eqnarray*}
which implies that $u^-=0$ a.s. for a.e. $x\in \mathbb{R}$, $\forall t\in[0,T]$.
This completes the proof.
$\Box$
If $W(x,t)$ is replaced by $B_t$ in (\ref{4.1}), then Theorem \ref{t4.1} holds for any dimension.
The reason why we only consider one dimension in Theorem \ref{t4.1} is that the It\^{o} formula
only holds for one-dimensional time-space white noise.
In order to find the impact of noise, we first recall a well-known result of
deterministic parabolic equations. Consider the Cauchy problem
\begin{eqnarray} \left\{\begin{array}{llll}
\frac{\partial}{\partial t}u_t=\Delta u+u^p,\ \ t>0,\ &x\in\mathbb{R}^d,\\
u(x,0)=u_0(x)\gneqq0, \ \ &x\in\mathbb{R}^d.
\end{array}\right.\lbl{4.2}\end{eqnarray}
\begin{prop}\lbl{p4.1} (i) If $p>1+\frac{2}{d}$, then the solution of
(\ref{4.2}) is global in time, provided the initial datum satisfies,
for some small $\varepsilon>0$,
\begin{eqnarray*}
u_0(x)\leq\varepsilon K(1,x), \ \ \ x\in\mathbb{R}^d.
\end{eqnarray*}
(ii) If $1<p\leq1+\frac{2}{d}$, then all nontrivial solutions of
(\ref{4.2}) blow up in finite time.
\end{prop}
Next we consider the stochastic parabolic equation
\begin{eqnarray} \left\{\begin{array}{llll}
du_t=[\Delta u+|u|^{p-1}u]dt+\sigma(u)dW(x,t),\ \ t>0,\ &x\in\mathbb{R}^d,\\
u(x,0)=u_0(x)\gneqq0, \ \ &x\in\mathbb{R}^d.
\end{array}\right.\lbl{4.3}\end{eqnarray}
It is well known that the mild solution of (\ref{4.3}) can be written as
\begin{eqnarray*}
u(x,t)&=&\int_{\mathbb{R}^d}K(t,x-y)u_0(y)dy+\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y)|u|^{p-1}udyds\\
&&
+\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y)\sigma(u,y,s)W(dy,ds).
\end{eqnarray*}
\begin{theo}\lbl{t4.2} Assume all the assumptions of Theorem
\ref{t4.1} hold. If $1<p\leq1+\frac{2}{d}$, then the expectation of all nontrivial solutions of
(\ref{4.3}) blows up in finite time. That is to say, there exists a positive constant $t_0>0$
such that $\mathbb{E}u(x,t)=\infty,\ \ t\geq t_0$ for all $x\in\mathbb{R}^d$. When $m>1$, the mean square of
solutions to (\ref{4.3}) will blow up in finite time under the
condition that the initial data is suitably large.
\end{theo}
{\bf Proof.} It follows from Theorem \ref{t4.1} that the solutions of
(\ref{4.3}) keep positive. Following the representation of mild solution, we have
\begin{eqnarray*}
\mathbb{E}u(x,t)=\int_{\mathbb{R}^d}K(t,x-y)\mathbb{E}u_0(y)dy+\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y)\mathbb{E}|u|^pdyds,
\end{eqnarray*}
which implies that
\begin{eqnarray*}
\mathbb{E}u(x,t)\geq\int_{\mathbb{R}^d}K(t,x-y)\mathbb{E}u_0(y)dy+\int_0^t\int_{\mathbb{R}^d}K(t-s,x-y)[\mathbb{E}u]^pdyds.
\end{eqnarray*}
Denoting $v(x,t)=\mathbb{E}u(x,t)$, we have that $v(x,t)$ is a super-solution of (\ref{4.2}).
By the results of Proposition \ref{p4.1} and comparison principle, we obtain that there exists a positive constant $t_0>0$
such that $\mathbb{E}u(x,t)=\infty,\ \ t\geq t_0$ for all $x\in\mathbb{R}^d$. Meanwhile,
noting that
\begin{eqnarray*}
\mathbb{E}u(x,t)\leq\left(\mathbb{E}u^p(x,t)\right)^{\frac{1}{p}},\ \ p>1,
\end{eqnarray*}
we have that $\mathbb{E}u^p(x,t)$, $p>1$, will blow up in finite time.
When $m>1$, we have
\begin{eqnarray*}
\mathbb{E}|u(x,t)|^2&\geq&\left(\int_{\mathbb{R}^d}K(t,x-y)u_0(y)dy\right)^2
+\int_0^t\int_{\mathbb{R}^d}K^2(t-s,x-y)\mathbb{E}\sigma^2(u,y,s)dyds\\
&=:&w(x,t).
\end{eqnarray*}
Foondun \cite{FLN2018} proved the mean square of function $w(x,t)$ will blow up in finite
time under the condition that the initial data is suitable large. So the solution $u$ will
also blow up in finite time.
The proof is complete. $\Box$
\noindent {\bf Acknowledgment} The first author was supported in part
by NSFC of China grants 11771123. The authors thanks Prof. Feng-yu Wang for discussing
this manuscript.
\begin{thebibliography}{99}
\bibitem{BY2014} J. Bao and C. Yuan, {\em Blow-up for stochastic reaction-diffusion equations with jumps},
J. Theor. Probab. {\bf29} (2016) 617-631.
\bibitem{Cb2007} P-L. Chow, {\em Stochastic partial differential equations},
Chapman Hall/CRC Applied Mathematics and Nonlinear Science Series. Chapman Hall/CRC,
Boca Raton, FL, 2007. x+281 pp. ISBN: 978-1-58488-443-9.
\bibitem{C2009} P-L. Chow, {\em Unbounded positive solutions of nonlinear parabolic It\^{o}
equations}, Communications on Stochastic Analysis {\bf 3} (2009) 211-222.
\bibitem{C2011} P-L. Chow, {\em Explosive solutions of stochastic reaction-diffusion
equations in mean $L^p$-norm}, J. Differential Equations {\bf 250} (2011) 2567-2580.
\bibitem{CL2012} Pao-Liu Chow and K. Liu, {\em Positivity and explosion in mean $L^p$-norm
of stochastic functional parabolic equations of retarded type}, Stochastic Processes and their
Applications {\bf 122} (2012) 1709-1729.
\bibitem{DMH2015}A. Debussche, S. de Moor and M. Hofmanov$\acute{a}$, {\em A regularity result for quasilinear stochastic
partial differential equations of parabolic type}, SIAM J. Math. Anal. {\bf47} (2015) 1590-1614.
\bibitem{DW2014} J. Duan and W. Wang, {\em Effective Dynamics of Stochastic
Partial Differential Equations}, Elsevier, 2014.
\bibitem{DL2010} M. Dozzi and J. A. L\'{o}pez-Mimbela, {\em
Finite-time blowup and existence of global positive solutions of a semi-linear spde},
Stochastic Process. Appl., {\bf120} (2010) 767-776.
\bibitem{FF2013} E. Fedrizzi and F. Flandoli, {\em Noise prevents singularities in linear transport equations},
Journal of Functional Analysis {\bf264} (2013) 1329-1354.
\bibitem{FLN2018} M. Foondun, W. Liu and E. Nane {\em Some non-existence
results for a class of stochastic partial differential equations}, J. Differential Equations in press.
\bibitem{F1966} H. Fujita, {\em On the blowing up of solutions of the Cauchy problem for
$u_t-\Delta u=u^{1+\alpha}$}, J. Fac. Sci. Univ. Tokyo Sect.
IA Math. {\bf 13} (1966) 109-124.
\bibitem{F1970} H. Fujita, {\em On some nonexistence and nonuniqueness theorems for nonlinear parabolic equations},
Proc. Symp. Pure Math. {\bf XVIII} (1970) 105-113.
\bibitem{Hubook2018} B. Hu, {\em Blow-up Theories for Semilinear Parabolic Equations},
Lecture Notes in Mathematics ISSN print edition: 0075-8434, Springer Heidelberg Dordrecht London New York, 2018.
\bibitem{kH1973} K. Hayakawa, {\em On nonexistence of global solutions of some semilinear parabolic equations}, Proc. Japan Acad. Ser.
A Math. {\bf49} (1973) 503-505.
\bibitem{LPJ2016} K. Li, J. Peng and J. Jia, {\em Explosive solutions
of parabolic stochastic partial differential equations with L$\acute{e}$vy
noise}, arXiv:1306.01676.
\bibitem{LR2010} W. Liu and M. R\"{o}ckner, {\em SPDE in Hilbert space with locally monotone
coefficients}, J. of Functional Analysis {\bf 259} (2010) 2902-2922.
\bibitem{L2013} W. Liu, {\em Well-posedness of stochastic partial differential equations with
Lyapunov condition}, J. Differential Equations {\bf 254} (2013) 725-755.
\bibitem{LD2015} G. Lv and J. Duan, {\em Impacts of Noise on a Class of Partial Differential Equations},
J. Differential Equations, {\bf258} (2015) 2196-2220.
\bibitem{LWW2016}G. Lv, L. Wang and X. Wang, {\em Positive and unbounded solution of stochastic delayed evolution equations}, Stoch. Anal. Appl. {\bf34} (2016) 927-939.
\bibitem{MZ1999} R. Manthey and T. Zausinger, {\em Stochastic evolution equations in $L^{2\nu}_\rho$},
Stochastics and Stochastic Report {\bf 66} (1999) 37-65.
\bibitem{M1991} C. Mueller, {\em Long time existence for the heat equation with a noise term},
Probab. Theory Related Fields {\bf 90} (1991) 505-517.
\bibitem{MuS1993} C. Mueller and R. Sowers, {\em Blowup for the heat equation with a noise term},
Probab. Theory Related Fields {\bf 93} (1993) 287-320.
\bibitem{NX2012} M. Niu and B. Xin, {\em Impacts of Gaussian noises on the blow-up times of nonlinear stochastic
partial differential equations}, Nonlinear Analysis: Real World Applications {\bf13} (2012) 1346-1352.
\bibitem{eP1979} E. Pardoux, {\em Stochastic partial differential equations and filtering of diffusion processes}, Stochastic, {\bf3} (1979) 127-167.
\bibitem{PZ1992} G. Da Prato and J. Zabczyk, {\em Stochastic equations in infinite dimensions},
Encyclopedia of Mathematics and its applications, Cambridge University Press (1992).
\bibitem{PZ1992} G. Da Prato and J. Zabczyk, {\em Nonexplosion, boundedness and ergodicity
for stochastic semilinear equations}, J. Differential Equations {\bf 98} (1992) 181-195.
\bibitem{SGKM} A. Samarskii, V. Galaktionov, S. Kurdyumov and S. Mikhailov, {\em
Blow-up in quasilinear parabolic equations}, Walter de Gruyter, Berlin, New York, 1995.
\bibitem{Shiga} T. Shiga {\em Some properties of solutions for one-dimensional SPDE's associated
with space-time white noise}, Gaussian random fields (Nagoya, 1990), 354-363.
\bibitem{Sug1975} S. Sugitani, {\em On nonexistence of global
solutions for some nonlinear integral equations}, Osaka J. Math.,
{\bf12} (1975) 35-51.
\bibitem{T2009} T. Taniguchi, {\em The existence and uniqueness of energy solutions to local
non-Lipschitz stochastic evolution equations}, J. Math. Anal. Appl. {\bf 360} (2009) 245-253.
\bibitem{walsh1986} John B. Walsh, {\em
An introduction to Stochastic Partial Differential Equations},
volume 1180 of Lecture Notes in Math., pages 265-439, Springer
Berlin, 1986.
\end{thebibliography}
\end{document} |
\begin{document}
\title{Lower bounds on weighted moments of primes in short intervals in number fields}
\begin{abstract}
We consider an analog of a conjecture of Montgomery and Soundararajan on the moments of primes in short intervals in number fields; this analog was discussed and heuristically derived in a paper of the second author, Rodgers, and Roditty-Gershon. Adapting work of the first author and Fiorilli in the integer case, we establish lower bounds on a weighted version of these moments which agree with the conjectured values.
\end{abstract}
\section{Introduction}
Goldston and Montgomery \cite{MR1018376-goldston-montgomery} conjectured that for $\delta \in (0,1)$, as $X \to \infty$, the variance of the number of primes in short intervals of width $\delta X$ is given by
\begin{equation}\label{eq:goldston-montgomery-conjecture}
\frac 1X \int_X^{2X} \big(\psi(x+\delta X) - \psi(x) - \delta X\big)^2\mathrm{d}x \sim \delta X \log(\delta^{-1}),
\end{equation}
where $\psi(x) := \sum_{n \le x} \Lambda(n)$ is the Chebyshev function. This conjecture was extended by Montgomery and Soundararajan \cite{MR2104891-montgomery-sound} when they conjectured that the distribution of primes in short intervals is Gaussian. More precisely, they conjectured that for fixed $\varepsilon > 0$ and $n \in \mathbb N$, uniformly for $\frac{(\log X)^{1+\varepsilon}}{X} \le \delta \le \frac{1}{X^{\varepsilon}}$, the $n$th moment of this distribution is
\begin{equation*}
\frac 1X \int_{X}^{2X} \frac{(\psi(x+\delta X)-\psi(x)-\delta X)^{n}}{X^{n/2}} \mathrm{d}x = (\mu_n + o(1))\big(\delta \log(\delta^{-1})\big)^{n/2},
\end{equation*}
where $\mu_n$ are the Gaussian moment constants: $\mu_n = 1 \cdot 3 \cdots (n-1)$ if $n$ is even and $\mu_n = 0$ if $n$ is odd.
Montgomery and Soundararajan showed that their conjecture follows from a strong form of the Hardy--Littlewood $k$-tuples conjecture. By using similar, albeit heuristic, reasoning building off of Gross and Smith's~\cite{MR1763807-gross-smith-hardy-littlewood} generalization of the Hardy--Littlewood conjecture to number fields, the second author along with Rodgers and Roditty-Gershon \cite{MR4421937-kuperberg-rodgers-roditty-gershon} derived an analog of Goldston and Montgomery's conjecture in the number field setting. They consider a broad generalization of short intervals: for a fixed number field $K/\Q$, an embedding $m:K\rightarrow \mathbb R^n$, and a norm $\|\cdot\|$ on $\mathbb R^n$, for parameters $x\in \mathbb R^n$ and $H \in \mathbb R$, they consider the number of primes $\alpha \in K$ with $\|m(\alpha)-x\| \le H$. They conjecture that the variance of the number of primes in short intervals in $K$ is asymptotically proportional to $(1-\delta)$ times the expected number of primes, where~$H = X^\delta$ and $x$ varies over points $\|x\| \le X$; the conjecture for the variance in this setting is identical to the analogous conjecture in the integer setting.
{\color{black} As a natural related question, one can consider prime ideals with norms lying in short intervals, that is to say, small sets of prime ideals determined by their norm. } In this setting, one might expect the analog of Goldston and Montgomery's conjecture to be
\begin{equation}\label{eq:goldston-montgomery-conjecture-in-number-fields}
\frac 1X \int_X^{2X} \big(\psi_K(x+\delta X) - \psi_K(x) - \delta X\big)^2\mathrm{d}x \sim C_K \delta X \log (\delta^{-1}),
\end{equation}
where~$C_K$ is a constant depending only on $K/\Q$. This is identical to \eqref{eq:goldston-montgomery-conjecture} except that the prime-counting function $\psi_K(x)$ is given by $\psi_K(x) := \sum_{\substack{\mathfrak n \subset K \\ N_K(\mathfrak n) \le x}} \Lambda_K(\mathfrak n),$ where $\Lambda_K(\mathfrak n)$ are the coefficients of $-\frac{\zeta_K'(s)}{\zeta_K(s)}$ and where $\zeta_K(s)$ is the Dedekind zeta function.
In \cite{MR4322621-conjecture-Montgomery-Soundararajan-integers}, the first author and Fiorilli proved a lower bound, dependent on the Riemann Hypothesis, for a weighted version of Montgomery and Soundararajan's conjecture. For fixed $\kappa > 0$ and an even differentiable test function $\eta:\mathbb R \to \mathbb R$ such that $\eta(t),\eta'(t) \ll \mathrm{e}^{-\kappa t}$, they define
\begin{equation*}
\psi_{\eta}(x,\delta) := \sum_{n \ge 1} \frac{\Lambda(n)}{n^{1/2}} \eta\left(\delta^{-1} \log \left(\frac nx \right) \right).
\end{equation*}
{\color{black}We observe that if $\eta$ is chosen to be a smooth approximation of $\eta_0(t)=\mathrm{e}^{\delta t}1_{[-1,1]}$, we have
\[\psi_{\eta}(x,\delta)\approx \psi_{\eta_0}(x,\delta)=\frac{\psi(x\mathrm{e}^{\delta})-\psi(x\mathrm{e}^{-\delta})}{\sqrt{x} } ,\]
noting that the function $\eta_0$ itself does not satisfy our hypothesis.}
For $\delta > 0$ and a non-trivial even integrable function $\Phi: \mathbb R \to \mathbb R$ with $\Phi,$ $\widehat\Phi \ge 0$, they consider a weighted $n$-th moment
\begin{equation*}
M_{n}(X,\delta;\eta,\Phi):=\frac 1{(\log X) \int_0^\infty \Phi} \int_1^\infty \Phi\left(\frac{\log x}{\log X}\right)\big(\psi_\eta(x,\delta) - x^{\tfrac 12} \delta \mathcal L_\eta(\tfrac{\delta}{2})\big)^n\frac{\mathrm{d}x}{x},
\end{equation*}
where $x^{\tfrac 12}\delta\mathcal L_\eta(\frac{\delta}{2})$ is an explicit function, defined in \eqref{eq:def-of-L-eta}, equal to the expected main term of $\psi_\eta(x,\delta)$. In particular, they show for even moments that there exists $C_\eta>0$ such that
\begin{align*}
M_{2n}(X,\delta;\eta,\Phi) \ge \mu_{2n} \delta^n \left(\alpha(\eta)\log(\delta^{-1}) + \beta(\eta)\right)^n& \left(1 + O_{\kappa,\eta} \left(\frac{n^2\delta}{\log(\delta^{-1} + 2)}\right)\right)
+ O_\Phi\left(\delta\frac{(C_\eta \log(\delta^{-1} + 2))^{2n}}{\log X}\right).
\end{align*}
Here $\alpha$ and $\beta$ are defined as
\begin{equation}\label{def:alphabeta}
\alpha(\eta)=\int_{\mathbb R} \widehat \eta^2(\xi){\rm d}\xi=\int_{\mathbb R} \eta^2(\xi){\rm d}\xi,\qquad \beta(\eta)=\int_{\mathbb R} \widehat \eta^2(\xi)\log |\xi|{\rm d}\xi.
\end{equation} where, for $\eta \in \mathcal L^1(\mathbb R)$, we recall the usual definition of the Fourier transform
\[\widehat \eta(\xi) := \int_{\R} \eta(t)\mathrm{e}^{-2\pi i \xi t} {\rm d} t. \]
Their proof relies on a positivity argument in the explicit formula for the Riemann zeta function; since they consider an expansion with only positive terms, they can discard any ``off-diagonal'' terms without endangering the lower bound. For example, instead of assuming that the zeroes of $\zeta$ are simple, this technique allows one to freely discard any terms arising from possible multiplicities of the zeroes.
The goal of this work is to extend the results of \cite{MR4322621-conjecture-Montgomery-Soundararajan-integers} to all extensions of $\mathbb Q$, just as the results of \cite{MR4421937-kuperberg-rodgers-roditty-gershon} extend the conjecture of Goldston and Montgomery. Many of the ideas are very similar to those of \cite{MR4322621-conjecture-Montgomery-Soundararajan-integers}, showing the flexibility of their technique. {\color{black}Although the averages $\psi_\eta(x,\delta)$ used in \cite{MR4322621-conjecture-Montgomery-Soundararajan-integers} are weighted, their results allow them to prove unconditional lower bounds on counts of primes in short intervals in integers. In the same way, our results on the weighted functions $\psi_{\eta,K}(x,\delta)$ imply unconditional lower bounds on counts of prime ideals with norms lying in a short interval. These derivations are identical to the arguments in \cite{MR4322621-conjecture-Montgomery-Soundararajan-integers}, so we omit them.}
Throughout, fix a finite Galois extension $K/\Q$. Denote by $\mathcal O_K$ the ring of integers of $K$, and for an ideal~$\mathfrak n \subset \mathcal O_K$, let $N_K \mathfrak n$ denote the absolute norm of the ideal $\mathfrak n$, so that $N_K\mathfrak n = [\mathcal O_K:\mathfrak n]$.
For fixed $\kappa > 0$, define $\mathcal E_\kappa \subset \mathcal L^1(\mathbb R)$ to be the set of differentiable even test functions $\eta:\R \to \R$ such that~$\widehat{\eta}(0) > 0$, such that for all $t \in \R$,
\begin{equation*}
\label{bound eta}
\eta(t),\eta'(t) \ll \mathrm{e}^{-\kappa|t|},\end{equation*}
and such that for all $\xi \in \R$,
\[0 \le \widehat{\eta}(\xi) \ll (|\xi|+1)^{-1}\log(|\xi|+2)^{-2-\kappa}.\]
This is the same class of test functions as defined in \cite{MR4322621-conjecture-Montgomery-Soundararajan-integers}. For $\eta \in \mathcal E_\kappa$ and $\delta < 2\kappa$, define
\[\psi_{\eta,K}(x,\delta):=\sum_{\substack{\mathfrak n \subset \mathcal O_K\\ \mathfrak n \text{ ideal}}} \frac{\Lambda_K(\mathfrak n)}{N_K\mathfrak n^{1/2}} \eta\Big(\delta^{-1}\log\Big(\frac {N_K\mathfrak n}x \Big)\Big),\]
where $\Lambda_K$ is defined by
$$\sum_{\substack{\mathfrak n \subset \mathcal O_K\\ \mathfrak n \text{ ideal}}}\frac{\Lambda_K(\mathfrak n)}{N_K\mathfrak n^s}=-\frac{\zeta'_K}{\zeta_K}(s).$$
{\color{black}In the case when $K=\Q$, the quantity $\psi_{\eta,K}(x,\delta)$ is precisely the quantity $\psi_\eta(x,\delta)$ defined in \cite{MR4322621-conjecture-Montgomery-Soundararajan-integers}.}
We consider the $n$th moment
\begin{equation}\label{eq:weighted-nth-moment-definition}
M_{n,K}(X,\delta;\eta,\Phi) := \frac 1{(\log X)\int_0^\infty \Phi}\int_1^\infty \Phi\Big(\frac{\log x}{\log X}\Big) \Big(\psi_{\eta,K}(x,\delta)-\delta x^{1/2}\mathcal L_{\eta}(\delta/2)+\delta\widehat \eta(0) {\rm ord}_{s=\frac 12}\zeta_K(s)\Big)^n \frac{\mathrm{d}x}{x},
\end{equation}
where $\mathcal L_\eta$ is the expected main term for $\psi_{\eta,K}(x,\delta)$, given by
\begin{equation}\label{eq:def-of-L-eta}
\int_0^\infty \frac{\eta(\delta^{-1}\log(\tfrac tx ))}{t^{\tfrac 12}} \mathrm{d}t = x^{\tfrac 12} \delta \int_{\mathbb R} \mathrm{e}^{\tfrac{\delta w}2} \eta(w)\mathrm{d}w =: x^{\tfrac 12}\delta \mathcal L_\eta(\tfrac \delta 2).
\end{equation}
The second subtracted term, $-\delta \widehat{\eta}(0) \,{\rm ord}_{s=\tfrac 12}\zeta_K(s)$, is the contribution from a zero of $\zeta_K(s)$ at the central point~$s = \tfrac 12$, if any such zero exists. In many cases, it is expected that no such zero exists: for example, if the Galois group of $K/\mathbb Q$ is abelian, then the Dedekind zeta function is a product of Dirichlet $L$-functions, which are conjectured to never vanish at the central value (see for example \cite{MR2978845-bui-non-vanishing} where it is shown that 34.1\% of Dirichlet $L$-functions are non-vanishing at the central point). However, there are some extensions where~$\zeta_K(\tfrac 12) = 0$; see \cite{MR291122-armitage-central-zeroes} and \cite{MR4433141-kandhil} for a construction and further discussion. Vanishing of the Dedekind zeta function at the central point is a key counterexample to the philosophy that the nontrivial zeroes of~$\zeta_K(s)$ should be linearly independent, which is why we separate this contribution.
The function $\psi_{\eta,K}(x,\delta)$ morally counts powers of prime ideals $\mathfrak n$ with $N_K\mathfrak n$ lying in the interval $[x(1-O(\delta)),x(1+O(\delta))]$; for prime ideals in this interval, the weight $(N_K\mathfrak n)^{-1/2}$ is $x^{-1/2}(1+O(\delta))$. Based on~\eqref{eq:goldston-montgomery-conjecture-in-number-fields}, the analog of the conjecture of Goldston and Montgomery in number fields explored in \cite{MR4421937-kuperberg-rodgers-roditty-gershon}, one might expect~$M_{2,K}(X,\delta;\eta,\Phi)$ to have a main term $\delta \log (\delta^{-1})$ (in terms of $\delta$), with some additional factors relating to the choice of test function $\eta$ and the field extension $K/\mathbb Q$.
Before stating our main result, we introduce some notation relating to the field extension $K/\Q$. Let $n_K$ denote the degree $[K:\Q]$, let $\Delta_K$ denote the absolute discriminant of $K$, and let ${\rm rd}_K$ denote the root discriminant defined by ${\rm rd}_K := \Delta_K^{1/n_K}$.
For $j\geq 1$ and a finite group $G$, we define the character sum
\begin{equation}\label{def:lambda}
\lambda_j(G):=\sum_{\chi\in\mathrm{Irr}(G)}\chi(1)^j.
\end{equation}
In our applications, $G$ will always be the Galois group $\mathrm{Gal}(K/\Q)$. For a Galois group $G = \mathrm{Gal}(K/\Q)$, the degree $n_K$ satisfies $n_K = \sum_{\chi\in\mathrm{Irr}(\mathrm{Gal}(K/\Q))} \chi(1)^2 = \lambda_2(G)$. Moreover, $\lambda_3(G)/{ n_K }\geq 1,$ and $\lambda_5(G) \le \lambda_3(G)^2$.
We also define $q_j(K)$ and $z_j(K)$, two additional sums over characters of the Galois group, which will arise in our computations. For $\mathfrak f(K/\Q,\chi)$ the Artin conductor of $\chi$, let $q_j(K)$ and $z_j(K)$ be given by
\begin{equation}\label{def:qz}\begin{split}
q_j(K)&:= \sum_{\chi\in\mathrm{Irr}(\mathrm{Gal}(K/\Q))}\chi(1)^{j-1} \log\big( \mathfrak f(K/\mathbb Q,\chi)2^{-2r_{2,K}\chi(1)}\big),\cr
z(K/\Q,\chi)&:= {\rm ord}_{s=\frac 12}L(s,K/\Q,\chi),
\cr z_j(K )&:= \sum_{\chi\in\mathrm{Irr}(\mathrm{Gal}(K/\Q))}\chi(1)^{j-1}z(K/\Q,\chi)=\sum_{\chi\in\mathrm{Irr}(\mathrm{Gal}(K/\Q))}\chi(1)^{j-1}{\rm ord}_{s=\frac 12}L(s,K/\Q,\chi).
\end{split}
\end{equation}
We will show in \eqref{encadrementj}
that $ z_j(K)\ll q_j(K)\leq 2\lambda_j(G) \log ({\rm rd}_K). $
With these definitions in hand, we are ready to state our main result.
\begin{theorem}\label{thm:main-theorem}
Let $K/\Q$ be a Galois extension of number fields with $G:=\mathrm{Gal}(K/\Q)$. Assume the Artin holomorphy conjecture and GRH. Let $0<\kappa<\tfrac 12$ and $\eta \in \mathcal E_\kappa$.
There exist positive constants $c_\eta$ and $C_\eta$, depending only on $\eta$, as well as a function $V(K;\eta,\delta)$ satisfying
$$
V(K;\eta,\delta) = {\delta}\Big(
\lambda_{3}(G)\big(\alpha(\eta) \log(\delta^{-1})+ \beta(\eta)\big)+\alpha(\eta) q_{3}(K)-\alpha(\eta)z_{3}(K) \Big) +O_\eta\big( \lambda_{3}(G) \delta ^2 \big) $$ such that for $\delta\ll {\rm rd}_K^{-c_\eta}$ we have uniformly in $n\geq 1$
\begin{align*}
(-1)^nM_{n,K}(X, \delta; \eta,\Phi) \geq & \mu_{n}V(K;\eta,\delta)^{n/2} \Big\{1+O\Big(\frac{n^2 n!\delta}{\log (1/\delta)}\frac{\lambda_5(G)}{ \lambda_3(G)^2}\Big)\Big\}\cr&+
O\Big( V(K;\eta,\delta)^{n/2} \Big( \frac{C_{\eta}n_K^2 \log(\delta^{-1} )}{\lambda_3(G)} \Big)^{n/2}\frac{ \delta^{1-n/2} }{\log X} \Big).
\end{align*}
\par In particular, for fixed $\eta$, $M_{2,K}(X, \delta; \eta,\Phi) \geq V(K;\eta,\delta)(1+o(1)) $ whenever
$\log (1/\delta)=o(\lambda_3(G)(\log X) ).$
For fixed $m\geq 2$ and fixed $\eta$,
$M_{2m,K}(X, \delta; \eta,\Phi) \geq \mu_{2m}V(K;\eta,\delta)^m (1+o(1)) $ whenever
\begin{equation}\label{eq:main-theorem-delta-bounds}
\delta^{-1 } =o\Big(\frac{\lambda_3(G)^{m/(m-1)}(\log X)^{1/(m-1)}}{ n_K^{2/(m-1)} (\log(\lambda_3(G)\log X))^{m/(m-1)} }\Big).
\end{equation}
\end{theorem}
In the range when $\delta$ satisfies \eqref{eq:main-theorem-delta-bounds}, the lower bound $M_{2m,K}(X,\delta;\eta,\Phi) \geq \mu_{2m}V(K;\eta,\delta)^m(1+o(1))$ has a main term of size $(\delta \log(\delta^{-1}))^m$; this lower bound is consistent both with the Goldston--Montgomery conjectures in this setting and with the predictions of Montgomery and Soundararajan that the distribution is normal. This lower bound on the variance indicates that the constant $C_K$ in \eqref{eq:goldston-montgomery-conjecture-in-number-fields} is given by $ \lambda_3(G)$; the factor $\lambda_3(G)$ comes from the multiplicity of the zeroes of the Dedekind zeta function when the Galois group $G$ is nonabelian. In the case when $G$ is abelian, $\lambda_3(G) = n_K$, so the constant is simply the degree of the extension. However, the presence of a more complicated character sum in the nonabelian case is a notable aspect of Theorem \ref{thm:main-theorem}.
When $n=2m+1$ is odd, Theorem \ref{thm:main-theorem} shows that
\begin{equation*}
M_{2m+1,K}(X,\delta;\eta,\Phi) \le O\left(V(K;\eta,\delta)^{n/2} \left(\frac{C_\eta \log(\delta^{-1})}{\lambda_3(G)}\right)^{n/2} \frac{\delta^{1-n/2}}{\log X}\right).
\end{equation*}
In particular, the upper bound for $|M_{2m+1,K}|^{1/(2m+1)}$ is smaller than the lower bound for $M_{2m,K}^{1/2m}$; this is consistent with the fact that the odd moments of a normal distribution are zero. However, Theorem \ref{thm:main-theorem} does not rule out the possibility that the odd moments are large and negative.
The condition \eqref{eq:main-theorem-delta-bounds} on the size of $\delta$ is fairly restrictive; Theorem \ref{thm:main-theorem} is meaningful when $\delta$ is reasonably close to $1$. In this range, the expected main term for $\psi_{\eta,K}(x,\delta)$ is large, so that Theorem \ref{thm:main-theorem} implies lower bounds not only for the asymptotic main term but also for second-order terms.
The proof of Theorem \ref{thm:main-theorem} follows closely the ideas of \cite{MR4322621-conjecture-Montgomery-Soundararajan-integers}. The key difference arising in this setting is that general Dedekind zeta functions can have zeroes with multiplicity. As we discuss further in
Section \ref{sec:number-fields-background}, each zero of the Dedekind zeta function $\zeta_K(s)$ of a Galois extension $K/\Q$ is a zero of some Artin $L$-function of a character $\chi$ of the Galois group. Assuming Artin's holomorphy conjecture as well as independence and simplicity of zeroes for Artin $L$-functions, the multiplicity of each zero is expected to be precisely equal to the dimension of the character $\chi$. A lower bound of the correct order of magnitude must therefore take into account the expected multiplicity of the zeroes of $\zeta_K(s)$. Thus, our estimates incorporate more involved combinatorial arguments in order to handle zeroes with higher multiplicity.
Our methods generalize to the relative setting of Galois extensions $L/K$, where the base field is not necessarily $\mathbb Q$. However, for clarity, we state our arguments in the case of an extension of $\mathbb Q$. This setting has the advantage that $\psi_{\mathrm{e}ta,K}$ is straightforwardly a weighted count of prime ideals in $K$.
\section{Background and notation for Dedekind zeta functions}\label{sec:number-fields-background}
For a Galois extension $L/K$ of number fields,
\begin{equation}\label{eq:Dedekind-zeta-decomposition}
\zeta_L(s) = \zeta_K(s) \prod_{\chi} L(s,\chi,L/K)^{\chi(1)},
\end{equation}
where the product is taken over all nontrivial irreducible characters $\chi$ of the Galois group $\mathrm{Gal}(L/K)$, and $L(s,\chi,L/K)$ is defined for $\Re e(s)>1$ by the Euler product
\begin{equation}\label{eq:Galois-L-function-definition}
L(s,\chi,L/K) := \prod_{\substack{\mathfrak p \lhd \mathcal O_K \\ \mathfrak p \text{ prime}}} L_{\mathfrak p}(s,\chi), \quad \Big(L_{\mathfrak p}(s,\chi) := \det\big(\mathrm{Id} - N_K\mathfrak p^{-s} \rho(\phi_{\mathfrak p})|_{V^{I_{\mathfrak p}}}\big)^{-1}, \ \mathfrak p \lhd \mathcal O_K \text{ prime}\Big).
\end{equation}
Here $V^{I_{\mathfrak p}}$ is the subspace of the representation space $V$ which is invariant under the inertia group $I_{\mathfrak p}$; see \cite{MR0218327-zeta-and-L-functions}.
Artin's holomorphy conjecture (see for example \cite[Chapter VII, Section 10]{MR1697859-neukirch}) states that each Artin $L$-function $L(s,\chi,L/K)$ for a nontrivial character $\chi$ can be extended to an entire function, and when $\chi$ is trivial, $L(s,\chi,L/K)$ is entire except for a simple pole at $s=1$. Assuming Artin's conjecture, formula \mathrm{e}qref{eq:Dedekind-zeta-decomposition} implies that the zeroes of $\zeta_L(s)$ correspond to zeroes of $\zeta_K(s)$ or zeroes of some $L(s,\chi,L/K)^{ \chi(1)}$, and the multiplicity of a zero (assuming independence and simplicity of zeroes) is governed by the dimension of the corresponding representation of $\mathrm{Gal}(L/K)$.
For an Artin $L$-function $L(s,\chi,L/K)$, define the completed Artin $L$-function of the character $\chi$ via
\begin{equation}\label{defLambda}
\begin{split}
\Lambda(s,\chi,L/K) := \big(s(s-1)\big)^{E_0(\chi)}&\mathfrak f(L/K,\chi)^{s/2}\left( \prod_{\mathfrak p|\infty \text{ real}}\Gamma_{\mathbb R}(s)^{n_+}\Gamma_{\mathbb R}(s+1)^{n_-} \right)\\
&\times\Gamma_{\mathbb C}(s)^{\chi(1)r_{2,K}}L(s,\chi,L/K),
\end{split}\end{equation}
where
\begin{equation}
\label{defGamma}
\Gamma_{\mathbb R}(s) := \pi^{-s/2}\Gamma\left(\frac s2\right), \quad
\Gamma_{\mathbb C}(s) := 2(2\pi)^{-s}\Gamma(s),
\end{equation}
$r_{1,K}$ is the number of real places of $K$, $r_{2,K}$ is the number of complex places of $K$, and if $\phi_{\mathfrak P}$ denotes the distinguished generator of $\mathrm{Gal}(L_{\mathfrak P}/K_{\mathfrak p})$, then $n_+(\mathfrak p) := \frac{\chi(1) + \chi(\phi_{\mathfrak P})}{2}$ and $n_-(\mathfrak p) := \frac{\chi(1)-\chi(\phi_{\mathfrak P})}{2}$. Note that $n_+(\mathfrak p)$ and $n_-(\mathfrak p)$ do not depend on the choice of $\mathfrak P$, although they do depend on $\mathfrak p$. Finally, $\mathfrak f(L/K,\chi)$ is the Artin conductor of $\chi$; we denote by $\mathfrak f(L/K,\chi)$ both the ideal which is the Artin conductor and its norm. The exponent $E_0(\chi)$ is $0$ if $\chi$ is nontrivial, and $1$ if $\chi$ is trivial.
The relative discriminant $\Delta_{L/K}$ and the Artin conductors of the character $\chi$ are related via the Conductor-discriminant formula
\begin{equation}\label{conductor discriminant}
\Delta_{L/K}=\prod_{\chi\in\mathrm{Irr}(\mathrm{Gal}(L /K ))} \mathfrak f(L/K,\chi)^{\chi(1)}
.
\end{equation}
In the case of an extension $K/\Q$, this formula becomes
\begin{equation*}
\log\big(\Delta_{K}\big)=\sum_{\chi\in\mathrm{Irr}(\mathrm{Gal}(K/\Q ))} \chi(1)\log\big(\mathfrak f(K/\Q,\chi)\big),
\end{equation*}
where $\Delta_K$ is the absolute discriminant.
Finally, formula \eqref{eq:Dedekind-zeta-decomposition} implies the following identity concerning the order of vanishing of $\zeta_K$ at the central point:
\begin{equation*}
{\rm ord}_{s=\frac 12}\zeta_K(s)=\sum_{\chi\in\mathrm{Irr}(\mathrm{Gal}(K/\mathbb Q ))} \chi(1) \ {\rm ord}_{s=\frac 12}L(s,K/\Q,\chi).\end{equation*}
\section{Weil's explicit formula and auxiliary estimates}\label{sec:auxiliary-lemmas}
For $\delta_0 > 0$, define ${\mathcal F}(\delta_0)$ to be the set of measurable functions $F(x)$ satisfying
\begin{equation}\label{defPhiF}
\int_{-\infty}^\infty \mathrm{e}^{(\tfrac 12 + \delta_0)2\pi|x|}|F(x)|\mathrm{d}x < \infty,
\end{equation}
\begin{equation*}
\int_{-\infty}^\infty \mathrm{e}^{(\tfrac 12 + \delta_0)2\pi|x|}|\mathrm{d}F(x)| < \infty,
\end{equation*} and such that $F(x) = \frac 12(F(x^{-}) + F(x^{+}))$ for all $x$ and $F(x) + F(-x) = 2F(0) + O(|x|)$.
For $F\in {\mathcal F}(\delta_0)$ with $\delta_0 > 0$, define
\begin{equation*}
\Phi_F(s) = \int_{-\infty}^\infty F(x) \mathrm{e}^{-(s-1/2)2\pi x}\mathrm{d}x,
\end{equation*}
where $-\delta_0 < \Re e(s) < 1 + \delta_0$.
\begin{lemma}\label{lem:weil-explicit-Galois-L-functions}
Assume Artin's holomorphy conjecture. Let $F\in {\mathcal F}(\delta_0)$ with $\delta_0 > 0$ and $\Phi_F$ defined in~\eqref{defPhiF}. Let~$L/K$ be a Galois extension of number fields, let $\chi$ be an irreducible character of the Galois group~$\mathrm{Gal}(L/K)$, and define $L(s,\chi,L/K)$ by \eqref{eq:Galois-L-function-definition}. Assume that $K$ has $r_{1,K}$ real embeddings and $r_{2,K}$ complex embeddings. Then
\begin{align*}
\sum_{\gamma}\Phi_F(\rho) = &E_0(\chi)(\Phi_F(0) + \Phi_F(1)) + \frac {F(0)}{\pi}\Bigg[\log \big(\mathfrak f(L/K,\chi)^{1/2}\pi^{-r_{1,K}\chi(1)/2}(2\pi)^{-r_{2,K}\chi(1)}\big) \\
&+r_{2,K} \chi(1) \frac{\Gamma'}{\Gamma}\Big(\frac 12\Big) +\frac 12 \sum_{\mathfrak p|\infty \text{ real}} \Big(n_+(\mathfrak p) \frac{\Gamma'}{\Gamma}\Big(\frac 14\Big) + n_-(\mathfrak p)\frac{\Gamma'}{\Gamma}\Big(\frac 34 \Big)\Big)\Bigg] \\
&-\frac 1{2\pi} \sum_{\mathfrak p \subset \mathcal O_K} \sum_{m=1}^\infty \frac{\log N_K\mathfrak p}{(N_K\mathfrak p)^{m/2}}\left(\chi(\sigma_\mathfrak p^m)F\left(\frac{-m\log N_K\mathfrak p}{2\pi}\right) + \bar{\chi}(\sigma_{\mathfrak p}^m)F\left(\frac{m\log N_K\mathfrak p}{2\pi}\right)\right) \\
&+ \int_0^\infty \big(2F(0)-F( x)-F(-x)\big)\Big( \sum_{\mathfrak p|\infty \text{ real}}\frac{n_+(\mathfrak p)\mathrm{e}^{-\pi x}+n_-(\mathfrak p)\mathrm{e}^{-3\pi x}}{1-\mathrm{e}^{-4\pi x}}
+ r_{2,K}\frac{\chi(1)\mathrm{e}^{- \pi x }}{1-\mathrm{e}^{-{2\pi x}}}\Big)\mathrm{d}x,
\end{align*}
where the sum on the left-hand side is taken over all nontrivial zeroes of $L(s,\chi,L/K)$.
\end{lemma}
Lemma \ref{lem:weil-explicit-Galois-L-functions} is more general than the same result for Dedekind zeta functions, stated in the following corollary.
\begin{corollary}[Weil's explicit formula for Dedekind zeta functions]\label{cor:weil-explicit-dedekind}
Let $K/\Q$ be a number field of degree~$n_K$, norm $N_K$, and discriminant $\Delta_K$, and assume that $K$ has $r_{1,K}$ real embeddings and $r_{2,K}$ conjugate pairs of complex embeddings. Then
\begin{align*}
\sum_{\gamma}\Phi_F(\rho) = &\Phi_F(0) + \Phi_F(1) + \frac {F(0)}{\pi}\Big( \log\big(\Delta_K^{1/2}\pi^{-r_{1,K}/2}(2\pi)^{-r_{2,K}}\big) + \frac{r_{1,K}}{2 } \frac{\Gamma'}{\Gamma}\Big(\frac14\Big) + {r_{2,K}} \frac{\Gamma'}{\Gamma}\Big(\frac12\Big)\Big) \\
&-\frac 1{2\pi} \sum_{\mathfrak n \subset \mathcal O_K} \frac{\Lambda_K(\mathfrak n)}{(N_K\mathfrak n)^{1/2}} \Big(F\Big(\frac{-\log N_K\mathfrak n}{2\pi}\Big) + F\Big(\frac{\log N_K\mathfrak n}{2\pi}\Big)\Big) \\
&+ \int_0^\infty \big(2F(0)-F( x)-F(-x)\big)\Big( \frac{r_{1,K}\mathrm{e}^{-\pi x}}{1-\mathrm{e}^{-4\pi x}}
+ \frac{ r_{2,K}\mathrm{e}^{- \pi x }}{1-\mathrm{e}^{-{2\pi x}}}\Big)\mathrm{d}x,
\end{align*}
where the sum on the left-hand side is taken over all nontrivial zeroes of $\zeta_K$.
\end{corollary}
\begin{remark}
Corollary \ref{cor:weil-explicit-dedekind} follows from Lemma \ref{lem:weil-explicit-Galois-L-functions} by taking $\chi$ to be the trivial character. However, the statement of Corollary \ref{cor:weil-explicit-dedekind} can also be obtained by noting that
\[\sum_{\substack{\gamma \\ \zeta_K(\tfrac 12 + i\gamma) = 0}} \Phi_F(\tfrac 12 + i\gamma) = \sum_{\chi} \chi(1) \sum_{\substack{\gamma \\ L(\tfrac 12 + i\gamma, \chi,K/\Q) = 0}} \Phi_F(\tfrac 12 + i\gamma), \]
where the outside sum on the right is taken over all characters $\chi$ of $\mathrm{Gal}(K/\Q)$, and then applying Lemma
\ref{lem:weil-explicit-Galois-L-functions} to every term on the right. This latter formula simplifies to the statement of Corollary \ref{cor:weil-explicit-dedekind} by applying the Conductor-discriminant formula \eqref{conductor discriminant} and the fact that for a Galois extension $K/\Q$ (which must be either totally real or totally complex),
\[\sum_{\chi} \chi(1) n_+ = r_{1,K} + r_{2,K}, \qquad \sum_{\chi} \chi(1) n_- = r_{2,K}.\]
\end{remark}
\begin{proof}[Proof of Lemma \ref{lem:weil-explicit-Galois-L-functions}]
We begin with the functional equation for Artin $L$-functions, which can be found in \cite[Chapter VII]{MR1697859-neukirch}.
Artin's holomorphy conjecture states that $\Lambda(s,\chi,L/K)$, defined in \eqref{defLambda}, is an entire function for all $\chi$. By \cite[Theorem 12.6]{MR1697859-neukirch},
\begin{equation*}
\Lambda(s,\chi,L/K) = W(\chi)\Lambda(1-s,\bar{\chi},L/K),
\end{equation*}
where $W(\chi) \in \mathbb C$ is a constant with absolute value $1$. Then
\begin{equation*}
\sum_{\gamma} \Phi_F(\rho) = \frac 1{2\pi i} \int_{(1+\delta_1)} \Big(\Phi_F(s) \frac{\Lambda'}{\Lambda}(s,\chi,L/K) + \Phi_F(1-s)\frac{\Lambda'}{\Lambda}(s,\bar{\chi},L/K) \Big)\mathrm{d}s,
\end{equation*}
since when moving the line of integration of $\Phi_F(s) \frac{\Lambda'}{\Lambda}(s,\chi,L/K)$ from $1 + \delta_1$ to $-\delta_1$, one picks up contributions from the zeroes (and can then obtain the integral on the right hand side by applying the functional equation for $\frac{\Lambda'}{\Lambda}(s,\chi,L/K)$).
The logarithmic derivative of $\Lambda(s,\chi,L/K)$ is given by
\begin{align*}
\frac{\Lambda'}{\Lambda}(s,\chi,L/K) = &E_0(\chi)\left(\frac 1s + \frac 1{s-1}\right) + \frac 12 \log \mathfrak f(L/K,\chi) - \frac{n_{K/\mathbb Q}\chi(1)}{2}\log \pi
-\chi(1)r_{2,K}\log 2\\
&+ \frac 12 \sum_{\mathfrak p|\infty \text{ real}} \left(n_+(\mathfrak p) \frac{\Gamma'}{\Gamma}\left(\frac s2\right) + n_-(\mathfrak p) \frac{\Gamma'}{\Gamma}\left(\frac{s+1}{2}\right)\right)
+ r_{2,K} \chi(1)\frac{\Gamma'}{\Gamma}(s) + \frac{L'}{L}(s,\chi,L/K).
\end{align*}
As shown in \cite[Proposition 2.3.1]{nathan-ng-thesis}, the logarithmic derivative $\frac{L'}{L}(s,\chi,L/K)$ is given by
\begin{equation*}
\frac{L'}{L}(s,\chi,L/K) = -\sum_{\mathfrak p \subset \mathcal O_K} \sum_{m=1}^\infty \frac{\chi(\sigma_{\mathfrak p}^m)\log N_K\mathfrak p}{(N_K\mathfrak p)^{ms}},
\end{equation*}
where $\sigma_{\mathfrak p}$ is the conjugacy class of the Frobenius elements. For $\mathfrak p$ unramified, $\chi(\sigma_{\mathfrak p}^m)$ is well-defined, and if $\mathfrak p$ is ramified, let $\mathfrak q|\mathfrak p$ and define $\chi(\sigma_{\mathfrak p}^m) = \frac 1{|I_{\mathfrak q}|}\sum_{\tau \in I_{\mathfrak q}} \chi(\sigma_{\mathfrak q}^m \tau).$
As in the proof of Theorem 12.13 in \cite{MR2378655-montgomery-vaughan},
\begin{align*}
&\frac 1{2\pi i}\int_{(1+\delta_1)}\Big(\Phi_F(s) +\Phi_F(1-s)\Big)\frac{L'}{L}(s,\chi,L/K) \mathrm{d}s \\
&= -\frac 1{2\pi} \sum_{\mathfrak p \subset \mathcal O_K} \sum_{m=1}^\infty \frac{\log N_K\mathfrak p}{(N_K\mathfrak p)^{m/2}}\left(\chi(\sigma_\mathfrak p^m)F\left(\frac{-m\log N_K\mathfrak p}{2\pi}\right) + \bar{\chi}(\sigma_{\mathfrak p}^m)F\left(\frac{m\log N_K\mathfrak p}{2\pi}\right)\right).
\end{align*}
The remaining contribution to the integral can be computed via the residue theorem and applying Lemma~12.14 from~\cite{MR2378655-montgomery-vaughan}.
\end{proof}
\begin{lemma}\label{lemma explicit formula}
Assume Artin's holomorphy conjecture and GRH. Let $0<\kappa<\tfrac 12$ and $\eta \in \mathcal E_\kappa$. For $t \ge 0$ and $0 < \delta < \kappa$,
\begin{equation*}
\psi_{\eta,K}(\mathrm{e}^t,\delta)-\mathrm{e}^{\tfrac t2}\delta\mathcal L_{\eta}(\tfrac \delta{2}) +\delta\widehat \eta(0) {\rm ord}_{s=\frac 12}\zeta_K(s )= -\delta \sum_{\rho} \mathrm{e}^{(\rho-1/2)t}\widehat{\eta}\Big(\frac{\delta}{2\pi}\frac{\rho-1/2}{i}\Big) + O_{\kappa,\eta}\big(n_K E_{\kappa,\eta}(t,\delta)\big),
\end{equation*}
where $\rho\neq \tfrac12$ runs over the nontrivial zeroes of $\zeta_K(s)$, and
\begin{equation*}
E_{\kappa,\eta}(t,\delta) := \begin{cases} \delta \mathrm{e}^{- t/2} + \log(\delta^{-1} + 2)\mathrm{e}^{- {\kappa t}/{\delta}} &\text{ if } t \ge 1, \\
\tfrac{\delta}{t} + \log(\delta^{-1} + 2)\mathrm{e}^{- {\kappa t}/{\delta}} &\text{ if }\delta \le t \le 1, \\
\log(\delta^{-1} + 2) &\text{ if }0 \le t \le \delta.\end{cases}
\end{equation*}
\end{lemma}
\begin{proof} We follow the proof of Lemma 2.1 of \cite{MR4322621-conjecture-Montgomery-Soundararajan-integers}.
Apply Corollary \ref{cor:weil-explicit-dedekind} with $F(u) := \mathrm{e}ta\Big(\tfrac{t+2\pi u}{\delta}\Big)$, so that $\widehat{F}(\xi) = \mathrm{e}^{i\xi t}\frac{\delta}{2\pi} \widehat{\mathrm{e}ta}\Big(\frac{\delta\xi}{2\pi}\Big).$ Then
\begin{equation*}
\begin{split}
\psi_{\mathrm{e}ta,K}(\mathrm{e}^t,\delta) &- \mathrm{e}^{t/2}\delta \mathcal L_{\mathrm{e}ta}\Big(\frac{\delta}{2}\Big) = -\delta \sum_{\rho} \mathrm{e}^{(\rho -1/2)t} \widehat{\mathrm{e}ta}\Big(\frac{\delta}{2\pi}\frac{\rho-1/2}{i}\Big) \\
&+ \mathrm{e}^{-t/2}\delta \int_{-\infty}^{\infty} \mathrm{e}^{\delta x/2}\mathrm{e}ta(x)\mathrm{d}x - \sum_{\substack{\mathfrak n \subset \mathcal O_K \\ \mathfrak n \text{ ideal}}} \frac{\Lambda(\mathfrak n)}{N_K\mathfrak n^{1/2}} \mathrm{e}ta\Big(\frac{t+\log N_K\mathfrak n}{\delta}\Big) \\
&+\mathrm{e}ta\Big(\frac t{\delta}\Big)\Big(r_{1,K} \frac{\Gamma'}{\Gamma}\Big(\frac14\Big) + 2r_{2,K} \frac{\Gamma'}{\Gamma}\Big(\frac12\Big) + \log\big(|\Delta_K|\pi^{-r_{1,K}}(2\pi)^{-2r_{2,K}}\big)\Big) \\
&+ \int_0^\infty \Big(2\mathrm{e}ta\Big(\frac t{\delta}\Big) - \mathrm{e}ta\Big(\frac{t+x}{\delta}\Big) -\mathrm{e}ta\Big(\frac{t-x}{\delta}\Big)\Big)\Big( \frac{r_{1,K}\mathrm{e}^{-x/2}}{1-\mathrm{e}^{-2x}} + \frac{r_{2,K}\mathrm{e}^{-x/2}}{1-\mathrm{e}^{-x}} \Big) \mathrm{d}x.
\mathrm{e}nd{split}
\mathrm{e}nd{equation*}
Performing the same careful analysis of the final integral as in the integer case, and noting that the final integral is bounded by $n_K$ times its integer analogue, gives the result.
\mathrm{e}nd{proof}
For $\chi\in\mathrm{Irr}({\mathrm{Gal}}( L/K ))$, $h \in \mathcal E_\mathbf kappa$, and $\delta > 0$,
define
\begin{equation*}
b (\chi;h,\delta):=\sum_{\rho}
h\Big(\frac{\delta}{2\pi}\frac{\rho-\tfrac 12}{i}\Big)
\mathrm{e}nd{equation*}
where (assuming GRH) the sum on the right-hand side is taken over all nontrivial zeroes of $L(s,\chi,L/K)$ and
\begin{equation}
\label{def:b0hdelta}b_0 (\chi;h,\delta):=\sum_{\rho\neq 1/2}
h\Big(\frac{\delta}{2\pi}\frac{\rho-\tfrac 12}{i}\Big).
\mathrm{e}nd{equation}
In Lemma \ref{lemma3.4}, we apply Lemma \ref{lem:weil-explicit-Galois-L-functions} in order to estimate $b(\chi;h,\delta).$
\begin{lemma}\label{lemma3.4} Let $L/K$ be a Galois extension of number fields, let $\chi$ be an irreducible character of the Galois group $\mathrm{Gal}(L/K)$. Assume the Artin holomorphy conjecture and GRH.
Let $0<\mathbf kappa<\tfrac 12$ and let $h:\R\to\R$ be a measurable function such that for all $\xi \in \R$, $0 \le h(\xi) \ll (|\xi|+1)^{-2}(\log(|\xi|+2))^{-2-\mathbf kappa}$, and for all $t \in \R$, $\widehat h(t), \widehat h'(t)\ll \mathrm{e}^{-\mathbf kappa|t|}$. Fix an Artin $L$-function $L(s,\chi,L/K)$ for the extension $L/K$.
For $0<\delta < 2\mathbf kappa$,
\begin{align*}
\delta b (\chi;h,\delta) = &
n_{K}\chi(1)\widehat{h}(0) \log(\delta^{-1})+n_{K}\chi(1)\int_{\mathbb R}h(\xi)\log |\xi|{\rm d}\xi +\widehat{h}(0) \log\big( \mathfrak f(L/K,\chi)2^{-2r_{2,K}\chi(1)}\big)\\
&+O_h\big(n_{K}\chi(1)\delta\big).
\mathrm{e}nd{align*}
\mathrm{e}nd{lemma}
\begin{proof}[Proof of Lemma \ref{lemma3.4}]
Applying Lemma \ref{lem:weil-explicit-Galois-L-functions} with $F(x) = 2\pi \delta^{-1}\widehat{h}(-2\pi\delta^{-1}x)$ yields
\begin{equation*}
\sum_{\rho} h\Big(\frac{\delta}{2\pi}\frac{\rho-\tfrac 12}{i}\Big) = \delta^{-1}\big(b_1(h) + b_2(h) + I(h)\big) + E_0(\chi)\Big(h\Big(\frac{\delta i }{4\pi}\Big) + h\Big(-\frac{\delta i}{4\pi}\Big)\Big),
\mathrm{e}nd{equation*}
where
\begin{align*}
b_1(h) &:= \widehat{h}(0) \Big(\log\big(\mathfrak f(L/K,\chi)\pi^{-n_{K}\chi(1)}2^{-r_{2,K}\chi(1)}\big) \cr&\quad+ \sum_{\mathfrak p|\infty \text{ real}}\Big\{n_+(\mathfrak p) \frac{\Gamma'}{\Gamma}\Big(\frac14\Big)+n_-(\mathfrak p)\frac{\Gamma'}{\Gamma}\Big(\frac 34\Big)\Big\} + 2r_{2,K}\chi(1)\frac{\Gamma'}{\Gamma}\Big(\frac12\Big)\Big),
\cr
b_2(h) &:= -\sum_{\mathfrak p \in \mathcal O_K}\sum_{m=1}^\infty \frac{\log N_{L/K}\mathfrak p}{N_{L/K}\mathfrak p^{m/2}} \big(\chi(\sigma_{\mathfrak p}^m)\widehat{h}(\delta^{-1}m\log N_{L/K}\mathfrak p) +\bar{\chi}(\sigma_{\mathfrak p}^m) \widehat{h}(-\delta^{-1}m\log N_{L/K}\mathfrak p)\big),
\mathrm{e}nd{align*}
and
\begin{equation*}
I(h):=\int_0^\infty \big(2\widehat{h}(0) - \widehat{h}(\delta^{-1}x) - \widehat{h}(-\delta^{-1}x)\big)\Big(\sum_{\mathfrak p|\infty \text{ real}}\frac{n_+(\mathfrak p)\mathrm{e}^{-x/2}+n_-(\mathfrak p)\mathrm{e}^{-3x/2}}{1-\mathrm{e}^{-2x}} + r_{2,K}\frac{\chi(1)\mathrm{e}^{-x/2}}{1-\mathrm{e}^{-x}}\Big) \mathrm{d}x.
\mathrm{e}nd{equation*}
Split the integral $I(h)$ into three ranges, $[0,\delta]$, $[\delta,1]$, and $[1,+\infty)$, and denote by $I_1(h)$, $I_2(h)$, and $I_3(h)$ the respective integrals. Then, using that $\widehat{h}(t) \ll e^{-\mathbf kappa|t|}, $
\begin{equation*}
I_3(h) = \widehat{h}(0) \int_1^\infty \Big(\sum_{\mathfrak p|\infty \text{ real}}\frac{2(n_+(\mathfrak p)\mathrm{e}^{-x/2}+n_-(\mathfrak p)\mathrm{e}^{-3x/2})}{1-\mathrm{e}^{-2x}} + \frac{2r_{2,K}\chi(1)\mathrm{e}^{-x/2}}{1-\mathrm{e}^{-x}}\Big)\mathrm{d}x + O_h(n_{K}\mathrm{e}^{-\mathbf kappa/\delta}).
\mathrm{e}nd{equation*}
Moreover,
\begin{equation*}\begin{split}
I_2&(h) = n_{K}\chi(1)\widehat{h}(0) \log(\delta^{-1})\\
\cr
&+\widehat{h}(0)
\int_0^1 \Bigg(\sum_{\mathfrak p|\infty \text{ real}}\Big\{n_+(\mathfrak p) \Big(\frac{2\mathrm{e}^{-x/2} }{1-\mathrm{e}^{-2x}}-\frac1x \Big) +n_-(\mathfrak p)
\Big(\frac{\mathrm{e}^{-3x/2}}{1-\mathrm{e}^{-2x}}- \frac1x \Big)\Big\}+
2r_{2,K}\chi(1) \Big(\frac{\mathrm{e}^{- x/2 }}{1-\mathrm{e}^{- x}}-\frac1{x}\Big)\Bigg)\mathrm{d}x
\cr&
-n_{K}\chi(1)\int_{\mathbb R}h(\xi)\int_1^\infty \cos (2\pi x\xi)\frac{\mathrm{d}x}{x}{\rm d}\xi+ O_h\big(n_{K}\chi(1)\delta \big)
\mathrm{e}nd{split}
\mathrm{e}nd{equation*}
and
\begin{equation*}\begin{split}
I_1(h) = & n_{K}\chi(1)\int_{\mathbb R}h(\xi)\int_0^1 \big(1-\cos (2\pi x\xi)\big)\frac{\mathrm{d}x}{x}{\rm d}\xi+ O_h\big(n_{K}\chi(1)\delta \big)
\mathrm{e}nd{split}\mathrm{e}nd{equation*}
with
$$n_{K}\chi(1)= \sum_{\mathfrak p|\infty \text{ real}}(n_+(\mathfrak p)+n_-(\mathfrak p))+2r_{2,K}\chi(1).$$
Gathering these calculations and using
$$ \int_0^1 \big(1-\cos (2\pi x\xi)\big)\frac{\mathrm{d}x}{x}-\int_1^\infty \cos (2\pi x\xi)\frac{\mathrm{d}x}{x}
=\log (\pi |\xi|)
+\int_0^1 \big(1-\cos (2 x )\big)\frac{\mathrm{d}x}{x}-\int_1^\infty \cos (2 x )\frac{\mathrm{d}x}{x} ,
$$ we get
\begin{align*}
\delta\sum_{\rho} h\Big(\frac{\delta}{2\pi}\frac{\rho-\tfrac 12}{i}\Big) = &
n_{K}\chi(1)\widehat{h}(0) \log(\delta^{-1})+n_{K}\chi(1)\int_{\mathbb R}h(\xi)\log |\xi|{\rm d}\xi +\widehat{h}(0) \log\big(\mathfrak f(L/K,\chi)2^{-r_{2,K}\chi(1)}\big)\\
&+\widehat{h}(0)\Big(\sum_{\mathfrak p|\infty \text{ real}}(n_+(\mathfrak p)C_{1,+} + n_-(\mathfrak p)C_{1,-})+{2}r_{2,K}\chi(1)C_2\Big)+O_h\big(n_{K}\chi(1)\delta\big)
\mathrm{e}nd{align*}
with
\begin{align*}
C_{1,+} &:= \int_0 ^1\Big( \frac{2\mathrm{e}^{- x/2 }}{1-\mathrm{e}^{-2 x}}-\frac 1x\Big)\mathrm{d}x +\int_1^\infty \frac{2\mathrm{e}^{- x/2 }}{1-\mathrm{e}^{-2 x}}\mathrm{d}x +
\frac{\Gamma'}{\Gamma}\Big(\frac 14\Big) + \int_0^1 (1-\cos (2 x ))\frac{\mathrm{d}x}{x}-\int_1^\infty \cos (2 x )\frac{\mathrm{d}x}{x}\\
C_{1,-} &:= \int_0 ^1\Big( \frac{2\mathrm{e}^{- 3x/2 }}{1-\mathrm{e}^{-2 x}}-\frac 1x\Big)\mathrm{d}x +\int_1^\infty \frac{2\mathrm{e}^{-3x/2 }}{1-\mathrm{e}^{-2 x}}\mathrm{d}x +
\frac{\Gamma'}{\Gamma}\Big(\frac 34\Big) + \int_0^1 (1-\cos (2 x ))\frac{\mathrm{d}x}{x}-\int_1^\infty \cos (2 x )\frac{\mathrm{d}x}{x}\\
C_2 &:= \int_0 ^1\Big( \frac{ \mathrm{e}^{- x/2 }}{1-\mathrm{e}^{- x}}-\frac 1x\Big)\mathrm{d}x +\int_1^\infty \frac{ \mathrm{e}^{- x/2 }}{1-\mathrm{e}^{- x}}\mathrm{d}x +
\frac{\Gamma'}{\Gamma}\Big(\frac 12\Big) + \int_0^1 (1-\cos (2 x ))\frac{\mathrm{d}x}{x}-\int_1^\infty \cos (2 x )\frac{\mathrm{d}x}{x}.
\mathrm{e}nd{align*}
In \cite{MR4322621-conjecture-Montgomery-Soundararajan-integers}, it was shown that $C_{1,+} = 0$. To calculate $C_{1,-}$, note that by \cite[\S II.0, Exercise 149]{MR3363366-tenenbaum},
\[\frac{\Gamma'}{\Gamma}\Big(\frac 34\Big)=
\int_0^\infty \Big(\frac{ \mathrm{e}^{-2x }}{x}- \frac{2\mathrm{e}^{- 3x/2 }}{1-\mathrm{e}^{-2 x}}\Big) \mathrm{d}x,\]
and thus
\[C_{1,-} = \int_0^\infty \frac{\mathrm{e}^{-2x}- \cos(2x)}{x} \mathrm{d}x = 0.\]
To calculate $C_2$, we use the identity \cite[\S II.0, Exercise 149]{MR3363366-tenenbaum}
$$
\frac{\Gamma'}{\Gamma}\Big(\frac 12\Big)=
\int_0^\infty \Big(\frac{ \mathrm{e}^{-2x }}{x}- \frac{2 \mathrm{e}^{- x }}{1-\mathrm{e}^{-2 x}}\Big) \mathrm{d}x
=
\int_0^\infty \Big(\frac{ \mathrm{e}^{-2x }}{x}- \frac{ \mathrm{e}^{- x/2 }}{1-\mathrm{e}^{- x}}\Big) \mathrm{d}x
$$
and we get
\begin{align*}
C_2&=\int_0 ^1 \frac {\mathrm{e}^{-2x}-1}x \mathrm{d}x +\int_1^\infty \frac {\mathrm{e}^{-2x}}x \mathrm{d}x + \int_0^1 (1-\cos (2 x ))\frac{\mathrm{d}x}{x}-\int_1^\infty \cos (2 x )\frac{\mathrm{d}x}{x}
=\int_0 ^\infty \frac {\mathrm{e}^{-2x}-\cos(2x)}x \mathrm{d}x=0.
\mathrm{e}nd{align*}
The claimed estimate follows.
\mathrm{e}nd{proof}
We can apply Lemma \ref{lemma3.4} to estimate the sums
\begin{equation}\label{defnuj}
\nu_j(K;\mathrm{e}ta,\delta):=\sum_{\chi\in\mathrm{Irr}(\mathrm{Gal}(K/\Q))} \chi(1)^j b_0(\chi;|\widehat \mathrm{e}ta|^2,\delta) .
\mathrm{e}nd{equation}
In order to do so, we recall the definitions \mathrm{e}qref{def:lambda}, \mathrm{e}qref{def:qz} and \mathrm{e}qref{def:alphabeta} concerning character sums.
\begin{lemma}\label{lemma:estnuj} Let $K/\Q$ be a Galois extension of number fields with $G:=\mathrm{Gal}(K/\Q)$. Assume the Artin holomorphy conjecture and GRH.
Let $j\geq 2,$ $\mathbf kappa > 0$, $\mathrm{e}ta\in\mathcal E_\mathbf kappa $. We have
\begin{align*}
\nu_j(K;\mathrm{e}ta,\delta) = &\frac{1}{\delta}\Big(
\lambda_{j+1}(G)\big(\alpha(\mathrm{e}ta) \log(\delta^{-1})+ \beta(\mathrm{e}ta)\big)+\alpha(\mathrm{e}ta) q_{j+1}(K)-\alpha(\mathrm{e}ta)z_{j+1}(K) \Big) +O_\mathrm{e}ta\big(\lambda_{j+1}(G) \big).
\mathrm{e}nd{align*}
In particular, if $\delta\ll {\rm rd}_K^{-c}$ for a suitably large constant $c>0$, we have
\begin{align*}
\nu_j(K;\mathrm{e}ta,\delta) = &\frac{\lambda_{j+1}(G)\alpha(\mathrm{e}ta)}{\delta}\Big(
\log(\delta^{-1})+ O_\mathrm{e}ta\big(\log ({\rm rd}_K)\big)\Big)\asymp
\frac{\lambda_{j+1}(G)\alpha(\mathrm{e}ta)}{\delta}
\log(\delta^{-1}) .
\mathrm{e}nd{align*}
\mathrm{e}nd{lemma}
\begin{proof}
Following \cite[Proposition 5.21]{MR2061214-iwaniec-kowalski}, under GRH, we have
$$z(K/\Q,\chi)\ll \frac{\log \mathfrak f(K/\Q,\chi)}{\log (3\log \mathfrak f(K/\Q,\chi)/n_{K})}\ll \log \mathfrak f(K/\Q,\chi).$$
In fact, the bound $z(K/\Q,\chi)\ll \log \mathfrak f(K/\Q,\chi) $ is unconditional (see \cite[(5.27)]{MR2061214-iwaniec-kowalski}). For what follows, we will only need the unconditional bound.
Thus
$ z_j(K)\ll q_j(K) $.
Using $\mathfrak f(L/K,\chi)\leq 2\chi(1)n_{K} \log ({\rm rd}_K) $ (see \cite[Lemma 4.1]{MR4400872-fiorilli-jouve-chebyshev}),
we get
\begin{equation}\label{encadrementj}
z_j(K)\ll q_j(K)\leq 2\lambda_j(G) \log ({\rm rd}_K),
\mathrm{e}nd{equation}
where the factor of $n_{\mathbb Q}$ is simply $1$.
Now apply Lemma \ref{lemma3.4} for the extension $K/\Q$ (in place of $L/K$) and for $h=\widehat\mathrm{e}ta^2$ with $\widehat{h}(0)=\alpha(\mathrm{e}ta)$ to get
\begin{align*}b_0(\chi;|\widehat \mathrm{e}ta|^2,\delta)&=\frac{1}{\delta}\Big(
\chi(1)\big(\alpha(\mathrm{e}ta) \log(\delta^{-1})+ \beta(\mathrm{e}ta)\big) +\alpha(\mathrm{e}ta) \log\big( \mathfrak f(K/\Q,\chi)\big)-\alpha(\mathrm{e}ta)z(K/\Q,\chi)\Big)
+O_h\big( \chi(1) \big).
\mathrm{e}nd{align*}
Summing over $\chi\in\mathrm{Irr}(G)$ with weights $\chi(1)^j$ gives
\begin{align*}
\nu_j(K;\mathrm{e}ta,\delta) = &\frac{1}{\delta}\Big(\lambda_{j+1}(G)\big(\alpha(\mathrm{e}ta) \log(\delta^{-1})+ \beta(\mathrm{e}ta)\big)+\alpha(\mathrm{e}ta) q_{j+1}(K)-\alpha(\mathrm{e}ta)z_{j+1}(K) \Big) +O_\mathrm{e}ta\big(\lambda_{j+1}(G) \big).
\mathrm{e}nd{align*}
This is precisely the first statement of Lemma \ref{lemma:estnuj}. The second statement follows by determining a range of $\delta$ for which the main term dominates. In particular, \mathrm{e}qref{encadrementj} bounds nearly all of the terms in our expansion of $\nu_j$ by $\log({\rm rd}_K)$, and the remaining terms are bounded in terms of $\delta$. Thus for any fixed $\mathrm{e}ta$, if $\delta \ll {\rm rd}_K^{-c}$ for a suitable large constant $c > 0$, then
\begin{align*}
\nu_j(K;\mathrm{e}ta,\delta) = &\frac{ \lambda_{j+1}(G)\alpha(\mathrm{e}ta)}{\delta}\Big(
\log(\delta^{-1})+ O_\mathrm{e}ta\big(\log ({\rm rd}_K)\big)\Big)\asymp
\frac{ \lambda_{j+1}(G)\alpha(\mathrm{e}ta)}{\delta}
\log(\delta^{-1}) .
\mathrm{e}nd{align*}
\mathrm{e}nd{proof}
\section{Proof of Theorem \ref{thm:main-theorem}}
Assume the Artin holomorphy conjecture and GRH. Beginning with \mathrm{e}qref{eq:weighted-nth-moment-definition}, apply Lemma \ref{lemma explicit formula}. Any zero $\rho $ can be written $\rho=\tfrac 12+i\gamma$. Since $\widehat \Phi$ and $ \Phi$ are even and real-valued,
\begin{align*}
(-1)^n M_{n,K}(&X, \delta; \eta,\Phi)=\frac {(-1)^n}{ \log X\int_0^\infty \Phi}\int_{0}^{\infty} \Phi\Big( \frac {t}{\log X}\Big)\big(\psi_{\eta,K}(\mathrm{e}^t,\delta)-\mathrm{e}^{\frac t2}\delta \mathcal L_\eta( \tfrac \delta 2 ) +\delta\widehat \eta(0) {\rm ord}_{s=\frac 12}\zeta_K(s)\big)^{n} {\rm d} t \\
&= \frac{\delta^n}{\int_0^\infty \Phi} \sum_{\gamma_1,\dots,\gamma_n\neq 0} \widehat \mathrm{e}ta\Big(\frac{\delta\gamma_1}{2\pi}\Big)\cdots \widehat\mathrm{e}ta\Big(\frac{\delta\gamma_n}{2\pi}\Big)\int_0^{\infty} \mathrm{e}^{i t (\gamma_1+\dots+\gamma_n)\log X }\Phi(t) {\rm d} t +O\Big( \frac{ \delta (C_{\mathrm{e}ta}n_K \log(\delta^{-1}+2))^n}{\log X }\Big)\\
&= \frac{ \delta^n}{2\int_0^\infty \Phi} \sum_{\gamma_1,\dots,\gamma_n\neq 0} \widehat \Phi\Big(\frac { (\gamma_1+\dots+\gamma_n)\log X}{2\pi}\Big)\widehat \mathrm{e}ta\Big(\frac{\delta\gamma_1}{2\pi}\Big)\cdots \widehat\mathrm{e}ta\Big(\frac{\delta\gamma_n}{2\pi}\Big) +O\Big( \frac{ \delta (C_{\mathrm{e}ta}n_K \log(\delta^{-1}+2))^n}{\log X} \Big).
\mathrm{e}nd{align*}
For even $n=2m$, by positivity of $\widehat \Phi$ and $\widehat \mathrm{e}ta$, we have
\begin{equation}
\label{geqM2m}
M_{2m,K}(X, \delta; \mathrm{e}ta,\Phi)\geq { \delta^{2m}} S_{2m} +O\Big( \frac{ \delta (C_{\mathrm{e}ta}n_K \log(\delta^{-1}+2))^{2m}}{\log X} \Big),
\mathrm{e}nd{equation}
with
\begin{equation}\label{eq:S2m-definition}
S_{2m}:=\sum_{\substack{\gamma_1, \dots, \gamma_{2m}\neq 0 \\ \gamma_1 + \cdots + \gamma_{2m} = 0}} \widehat{\mathrm{e}ta}\Big(\frac{\delta\gamma_1}{2\pi}\Big)\cdots \widehat{\mathrm{e}ta}\Big(\frac{\delta\gamma_{2m}}{2\pi}\Big),
\mathrm{e}nd{equation}
whereas for odd $n=2m+1$, by positivity of $\widehat \Phi$ and $\widehat \mathrm{e}ta$,
\begin{equation}
\label{geqM2m+1}
-M_{2m+1,K}(X, \delta; \mathrm{e}ta,\Phi)\geq O\Big( \frac{ \delta (C_{\mathrm{e}ta} n_K\log(\delta^{-1}+2))^{n}}{\log X} \Big).
\mathrm{e}nd{equation}
We shall prove the following lower bound to study the even case.
\begin{lemma}
\label{lemma S2mgeq}
Let $K/\Q$ be a Galois extension of number fields with $G:=\mathrm{Gal}(K/\Q)$. Assume the Artin holomorphy conjecture and GRH. Let $\eta\in\mathcal E_\kappa$ be fixed. Then, for $m\geq 1$ and $\delta>0$,
$$S_{2m}\geq \mu_{2m}\nu_2(K;\mathrm{e}ta,\delta)^m \Big(1+O_\mathrm{e}ta\Big(m^2 m!\frac{\nu_4(K;\mathrm{e}ta,\delta)}{ \nu_2(K;\mathrm{e}ta,\delta)^2} \Big)\Big),$$ where $S_{2m}$ is defined by \mathrm{e}qref{eq:S2m-definition}.
\mathrm{e}nd{lemma}
Its proof, which we present in Section~\ref{sec:pfoflemmaS2mgeq}, relies on an application of Lemma~5.5 of~\cite{dfjouve-general-class-fcns}.
Applying Lemma \ref{lemma S2mgeq} to \mathrm{e}qref{geqM2m} gives
\begin{align*}
M_{2m,K}(X, \delta; \mathrm{e}ta,\Phi)&\geq \mu_{2m}\big(\delta^2\nu_2(K;\mathrm{e}ta,\delta)\big)^m \Big(1+O\Big(m^2 m!\frac{\nu_4(K;\mathrm{e}ta,\delta)}{ \nu_2(K;\mathrm{e}ta,\delta)^2} \Big)\Big) +O\Big(n_K^{2m}\frac{ \delta (C'_{\mathrm{e}ta} \log(\delta^{-1} ))^{2m}}{\log X} \Big).
\mathrm{e}nd{align*}
Lemma \ref{lemma:estnuj} implies that
when $\delta \leq {\rm rd}_K^{-c_\mathrm{e}ta}$
$$\delta \nu_2(K;\mathrm{e}ta,\delta) \asymp
{ \lambda_{3}(G)\alpha(\mathrm{e}ta)}
\log(\delta^{-1}),\qquad \frac{\nu_4(K;\mathrm{e}ta,\delta)}{ \nu_2(K;\mathrm{e}ta,\delta)^2}\asymp \frac{\delta}{\log (1/\delta)}\frac{\lambda_5(G)}{ \lambda_3(G)^2}
.$$
Thus,
\begin{align*}
M_{2m,K}(X, \delta; \mathrm{e}ta,\Phi)&\geq \big(\delta^2\nu_2(K;\mathrm{e}ta,\delta))^m \Big\{\mu_{2m}+O\Big(\frac{m^2 m!\mu_{2m}\delta}{\log (1/\delta)}\frac{\lambda_5(G)}{\lambda_3(G)^2} \Big)\Big\} +O\Big(\frac{ \delta (C'_{\mathrm{e}ta}n_K \log(\delta^{-1} ))^{2m}}{\log X} \Big)\cr
&\geq V(K;\mathrm{e}ta,\delta)^m \Big\{\mu_{2m}+O\Big(\frac{m^2 m!\mu_{2m}\delta}{\log (1/\delta)}\frac{\lambda_5(G)}{\lambda_3(G)^2} +\Big( \frac{C_{\mathrm{e}ta}n_K^2 \log(\delta^{-1} )}{\lambda_3(G)} \Big)^{ m}\frac{ \delta^{1-m} }{\log X} \Big)\Big\}
\mathrm{e}nd{align*}
where $C_{\mathrm{e}ta}=(C'_{\mathrm{e}ta})^2/\alpha(\mathrm{e}ta)$ and $V(K;\mathrm{e}ta,\delta):=\delta^2\nu_2(K;\mathrm{e}ta,\delta).$ By Lemma \ref{lemma:estnuj}, this completes the proof of Theorem~\ref{thm:main-theorem}.
\section{Proof of Lemma \ref{lemma S2mgeq}}\label{sec:pfoflemmaS2mgeq}
The following lemma is an application of Lemma~5.5 of \cite{dfjouve-general-class-fcns}. It makes use of the classification of irreducible characters $\chi$ of $G$ according to their \emph{Frobenius--Schur indicator} $\varepsilon_2(\chi)$ (see
for example \cite[Theorem 8.7]{MR1645304-Huppert}).
\begin{lemma}
\label{lemma pre combinatoire}
For $\mathrm{e}ll\in \mathbb N$, let $\mathrm{e}ta\in \mathcal S_\delta$, $\psi\in \mathrm{Irr}(G)$, and let $\chi_1,\dots,\chi_{2\mathrm{e}ll} \in \{ \psi, \overline{\psi} \}$.
If $\psi$ is unitary (that is, $\varepsilon_2(\psi)=0$) then there exists a constant $C_\mathrm{e}ta$ such that we have the estimate
\begin{equation*}
\sum_{\substack{\gamma_{\chi_1},...,\gamma_{\chi_{\mathrm{e}ll }}> 0 \\
\gamma_{\chi_{\mathrm{e}ll+1}},...,\gamma_{\chi_{2\mathrm{e}ll }}< 0 \\ \forall \gamma\in \R,\\ \# \{ k\leq 2\mathrm{e}ll : \chi_k\in \{ \psi, \overline{\psi} \} , \gamma_{\chi_k}= \gamma \} = \\ \# \{ k\leq 2\mathrm{e}ll : \chi_k\in \{ \psi, \overline{\psi} \}, \gamma_{\chi_k}= -\gamma \} }} \!\!\!\!\!\!\!\!\! \prod_{k=1}^{2\mathrm{e}ll}\widehat \mathrm{e}ta\Big(\frac{\delta \gamma_{\chi_k}}{2\pi}\Big)
\geq \max\Big\{
\mathrm{e}ll! b_0(\psi;|\widehat \mathrm{e}ta|^2,\delta)^\mathrm{e}ll -C_\mathrm{e}ta \mathrm{e}ll!^2\mathrm{e}ll(\mathrm{e}ll-1) b_0(\psi;|\widehat \mathrm{e}ta|^2,\delta)^{\mathrm{e}ll-1} ,0\Big\},
\mathrm{e}nd{equation*}
where the $\gamma_{\chi_j}$ run through the multiset of imaginary parts of the zeros of $L(s,L/F,\psi)L(s,L/F,\overline\psi)$ (with multiplicity).
If $\psi$ is either orthogonal or symplectic (that is, $\varepsilon_2(\psi)\in \{ \pm 1\}$), there exists a constant $C_\mathrm{e}ta$ such that
$$ \sum_{\substack{\gamma_{1},\dots,\gamma_\mathrm{e}ll>0 \\ \gamma'_{1},\dots, \gamma'_{\mathrm{e}ll} < 0 \\ \forall \gamma\in \R,\\ \# \{ k\leq \mathrm{e}ll : \gamma_{ k}= \gamma \} = \\ \# \{ k\leq \mathrm{e}ll : \gamma'_{ k}= -\gamma \} }} \prod_{k=1}^{\mathrm{e}ll}\widehat \mathrm{e}ta\Big(\frac{\delta \gamma_{ k}}{2\pi}\Big)\widehat \mathrm{e}ta\Big(\frac{\delta \gamma'_{ k}}{2\pi}\Big)
\geq \max\Big\{2^{-\mathrm{e}ll}\mathrm{e}ll! b_0(\psi;|\widehat \mathrm{e}ta|^2,\delta)^\mathrm{e}ll -C_\mathrm{e}ta 2^{-\mathrm{e}ll}\mathrm{e}ll!^2\mathrm{e}ll(\mathrm{e}ll-1) b_0(\psi;|\widehat \mathrm{e}ta|^2,\delta)^{\mathrm{e}ll-1} ,0\Big\},
$$
where the $\gamma_1,\dots,\gamma_\mathrm{e}ll, \gamma'_1,\dots ,\gamma'_\mathrm{e}ll$ run through the imaginary parts of the zeros of $L(s,L/F,\psi)$ (with multiplicity).
\mathrm{e}nd{lemma}
\begin{proof}[Proof of Lemma~\ref{lemma S2mgeq}]
Let
$$\mathrm{Irr}(G)=\big\{ \psi_1,\psi_2,\ldots,\psi_{r_1},\psi_{r_1+1},\overline{\psi_{r_1+1}},\psi_{r_1+2}, \ldots,\psi_{r_1+r_2},\overline{\psi_{r_1+r_2}}\big\},$$
where $\psi_1,\dots \psi_{r_1}$ are real and $\psi_{r_1+1},\dots ,\psi_{r_1+r_2}$ are complex.
Using the methods of \cite{dfjouve-general-class-fcns}, we prove a lower bound of the sum $S_{2m}.$
The main additional difficulties come from the fact that the zeroes of $\zeta_K(s)$ are not always simple. However, assuming the Artin holomorphy conjecture we can nevertheless find a lower bound by considering only ``expected'' multiplicities of the zeroes of $\zeta_K(s)$. To begin with,
$$S_{2m} =\sum_{\boldsymbol{\chi}=(\chi_1,\dots \chi_{2m})\in \mathrm{Irr}(G)^{2m}}\Big(\prod_{j=1}^{2m} \chi_j(1)\Big)
\sum_{\substack{\gamma_1, \dots, \gamma_{2m}\neq 0 \\ \gamma_1 + \cdots + \gamma_{2m} = 0\\ L( 1/2+i\gamma_j,\chi_j,L/K)=0}} \widehat{\mathrm{e}ta}\Big(\frac{\delta\gamma_1}{2\pi}\Big)\cdots \widehat{\mathrm{e}ta}\Big(\frac{\delta\gamma_{2m}}{2\pi}\Big) .$$
In the inner sum the zeroes of $L(\tfrac 12+i\gamma_j,\chi_j,L/K)$ are counted with multiplicity, whereas in the definition of $S_{2m}$ the multiplicity was
that of $L(\tfrac 12+i\gamma_j,\chi_j,L/K)^{\chi_j(1)}.$
Given a vector $\boldsymbol{\chi}=(\chi_1,\dots \chi_{2m}) \in \mathrm{Irr} (G)^{2m}$ and $1 \le j \le r_1+r_2$, define
$$ E_j(\boldsymbol{\chi}):= \big\{1\leq k \leq 2m \colon \chi_k \in\{ \psi_j,\overline{\psi_j} \} \big\}, $$
and define $\mathrm{e}ll_j(\boldsymbol{\chi}):=|E_j(\boldsymbol{\chi})|$ to be the size of $E_j(\boldsymbol{\chi})$. Note that $ \sum_{j=1}^{r_1+r_2} \mathrm{e}ll_j(\boldsymbol{\chi}) = 2m$.
All terms in the sum are positive, so we can get a lower bound by restricting the sum to certain tuples of characters and zeroes. In particular, we consider the sum over characters restricted to those $\boldsymbol{\chi}=(\chi_1,\dots ,\chi_{2m})$ that are elements of $\mathrm{Irr} (G)^{2m}$ and $(\gamma_{\chi_1},\dots,\gamma_{\chi_{2m}})$ which appear in conjugate pairs; that is, for which
for any $j\leq r_1+r_2$ and $ \gamma \in \mathbb R$ we have
$$
\big|\big\{ k\in E_j(\boldsymbol{\chi}) \colon \chi_k\in\{\psi_j,\overline{\psi_j}\}, \gamma_{\chi_k}= \gamma \big\}\big| =\big|\big\{ k\in E_j(\boldsymbol{\chi}) \colon \chi_k\in\{\psi_j,\overline{\psi_j}\}, \gamma_{\chi_k}= -\gamma \big\}\big|.$$
Finally, we may further impose that $k_j(\boldsymbol{\chi}):=\tfrac12\mathrm{e}ll_j(\boldsymbol{\chi}) \in \mathbb N$, and restrict the sum over characters to the subset $\mathrm{Irr}_{2m}$ of vectors of characters $\boldsymbol{\chi} = (\chi_1,\dots,\chi_{2m}) \in \mathrm{Irr} (G)^{2m}$ for which for every $r_1+1\leq j\leq r_1+r_2 $, $|\{ \mathrm{e}ll \leq 2m : \chi_\mathrm{e}ll = \psi_j\}|=|\{ \mathrm{e}ll \leq 2m: \chi_\mathrm{e}ll = \overline{\psi_j}\}|$.
Now stratify the outside sum according to the values assumed by
$k_j(\boldsymbol{\chi})$. Given an $(r_1+r_2)$-tuple $\mathbf k=(k_1,\dots,k_{r_1+r_2}) \in \mathbb N^{r_1+r_2}$ such that $k_1+\dots+k_{r_1+r_2}=m$, it remains to evaluate the sum
$$
D(\mathbf k ):= \sum_{ \substack{\boldsymbol{\chi}=(\chi_1,...,\chi_{2m}) \in \mathrm{Irr}(G)^{2m}\\ \forall j,\, k_j(\boldsymbol{\chi}) =k_j }} \Big(\prod_{j=1}^{2m} \chi_j(1)\Big)\sum_{\substack{\gamma_{\chi_1},...,\gamma_{\chi_{2m}}\neq 0 \\ \forall j\leq r_1+r_2,\forall \gamma\in \R,\\ \# \{ k\in E_j(\boldsymbol{\chi}) : \chi_k{ \in\{\psi_j,\overline{\psi_j}\}}, \gamma_{\chi_k}= \gamma \} = \\ \# \{ k\in E_j(\boldsymbol{\chi}) : \chi_k{\in\{\psi_j,\overline{\psi_j}\}}, \gamma_{\chi_k}= -\gamma \} }} \prod_{j=1}^{2m}\widehat \mathrm{e}ta\Big(\frac{\delta\gamma_{\chi_j}}{2\pi}\Big).
$$
After reindexing, $D(\mathbf k)$ becomes
\begin{equation}\label{Dk}
D(\mathbf k )= \binom{2m}{2k_1,\dots,2k_{r_1+r_2}} \prod_{j=1}^{r_1+r_2}\big( \psi_j(1)^{2k_j}\sigma_j(k_j,\delta)\big)
\mathrm{e}nd{equation}
with
$$\sigma_j(k_j,\delta):=\sum_{\substack{\gamma_{\chi_1},...,\gamma_{\chi_{2k_j}}\neq 0 \\ \forall \gamma\in \R,\\ \# \{ k\leq 2k_j :
\gamma_{\chi_k}= \gamma \} = \\ \# \{ k\leq 2k_j :
\gamma_{\chi_k}= -\gamma \} }} \!\! \prod_{k=1}^{2k_j}\widehat \mathrm{e}ta\Big(\frac{\delta\gamma_{\chi_k}}{2\pi}\Big)
=\binom{2k_j}{k_j}\sum_{\substack{\gamma_{\chi_1},\ldots,\gamma_{\chi_{k_j}}>0\\ \gamma_{\chi'_{1}},\ldots,\gamma_{\chi'_{ k_j}}<0\\ \forall \gamma\in \R_{>0},\\ \# \{ k\leq k_j :
\gamma_{\chi_k}= \gamma \} = \\ \# \{ k\leq k_j :
\gamma_{\chi'_k}= -\gamma \} }} \!\! \prod_{k=1}^{ k_j}\widehat \mathrm{e}ta\Big(\frac{\delta\gamma_{\chi_k}}{2\pi}\Big)\widehat \mathrm{e}ta\Big(\frac{\delta\gamma_{\chi'_k}}{2\pi}\Big).$$
For~$j\geq r_1+1$ (i.e. $\psi_j$ is unitary), applying Lemma~\ref{lemma pre combinatoire} shows that
$$\sigma_j(k_j,\delta)
\geq 2^{k_j}\mu_{2k_j} b_0(\psi_j;|\widehat \mathrm{e}ta|^2,\delta)^{k_j}\max\Big\{ 1 -C_\mathrm{e}ta \frac{{k_j}!{k_j}({k_j}-1)}{ b_0(\psi_j;|\widehat \mathrm{e}ta|^2,\delta)} ,0\Big\},
$$
since
$$\binom{2k_j}{k_j}k_j!=2^{k_j}\mu_{2k_j}.$$
If instead $j \le r_1$ (i.e. $\psi_j$ is either orthogonal or symplectic), then we may fix the sign of the imaginary parts $\gamma_{\chi_j}$ and deduce that
$$ \sigma_j(k_j,\delta)=\binom{2k_j}{k_j} \sum_{\substack{\gamma_{1},\ldots,\gamma_{k_j}>0\\ \gamma'_{1} ,\dots ,\gamma'_{k_j}< 0 \\ \forall \gamma\in \R,\\ \# \{ k\leq k_j : \gamma_{k}= \gamma \} = \\ \# \{ k\leq k_j : \gamma'_{k}= -\gamma \} }} \prod_{k=1}^{ k_j}\widehat \eta\Big(\frac{\delta\gamma_{k}}{2\pi}\Big)\widehat \eta\Big(\frac{\delta\gamma'_{k}}{2\pi}\Big).
$$
Applying Lemma~\ref{lemma pre combinatoire} once more yields the bound
$$\sigma_j(k_j,\delta)
\geq \mu_{2k_j}b_0(\psi_j;|\widehat \eta|^2,\delta)^{k_j}\max\Big\{ 1 -C_\eta \frac{{k_j}!{k_j}({k_j}-1)}{ b_0(\psi_j;|\widehat \eta|^2,\delta)} ,0\Big\}.
$$
We continue to follow the proof of Lemma 5.5 of \cite{dfjouve-general-class-fcns}, reporting these estimates in \eqref{Dk} and using
$$
\prod_{\mathrm{e}ll=1}^{r_1+r_2} \max\big\{ 1-x_\mathrm{e}ll ,0\big\}
\geq 1-\sum_{j=1}^{r_1+r_2} x_j \qquad (x_\mathrm{e}ll \geq 0). $$
The main term is equal to
$\mu_{2m} \nu_2(K;\mathrm{e}ta,\delta)^{m} , $
with $\nu_2(K;\mathrm{e}ta,\delta)$ defined in \mathrm{e}qref{defnuj}
and the error term is
\begin{align*}
&\ll {m^2}m!\mu_{2m} \Big(\sum_{j =1}^{r_1+r_2} \psi_j(1)^4b_0(\psi_j;|\widehat \eta|^2,\delta) \Big) \Big( \sum_{j=1}^{r_1} \psi_j(1)^2 b_0(\psi_j;|\widehat \eta|^2,\delta) + 2\sum_{j =r_1+1}^{r_1+r_2} \psi_j(1)^2 b_0(\psi_j;|\widehat \eta|^2,\delta) \Big)^{m-2} \\
&\ll\mu_{2m} \nu_2(K;\mathrm{e}ta,\delta)^{m-2} {m^2}m! \nu_4(K;\mathrm{e}ta,\delta)
. \mathrm{e}nd{align*}
with
$
\nu_4(K;\mathrm{e}ta,\delta)$ defined in \mathrm{e}qref{defnuj}.
Then we have
$$S_{2m}\geq \mu_{2m}\nu_2(K;\mathrm{e}ta,\delta)^m \Big(1+O\Big(m^2 m!\frac{\nu_4(K;\mathrm{e}ta,\delta)}{ \nu_2(K;\mathrm{e}ta,\delta)^2} \Big)\Big).$$
\mathrm{e}nd{proof}
\mathrm{e}nd{document} |
\begin{document}
\author{G. Boyadzhiev}
\title{Comparison principle for non - cooperative elliptic systems}
\date{28.02.2007}
\maketitle
\section{Introduction}
In this paper are considered weakly coupled linear elliptic systems
of the form
\ \\(1)\qquad $
L_Mu=0$ in a bounded domain $\Omega \subset R^n$ with smooth boundary
\ \\and boundary data $u(x)=g(x)$ on $\partial \Omega $,
where $L_M=L+M$, $L$ is a matrix operator with null off-diagonal
elements $L=diag\left(L_1, L_2, ... L_N\right) $, and matrix
$M=\{m_{ik}(x)\}_{i,k=1}^{N}$. Scalar operators
\ \\ \qquad $L_ku_k = -\sum_{i,j=1}^{n}D_j
\left( a_k^{ij}(x)D_iu_k \right) +\sum_{i=1}^{n}b_k^i(x)D_iu_k+c_ku_k$ in $
\Omega $
\ \\are uniformly elliptic ones for $k=1,2,...N$, i.e. there are constants
$\lambda,\Lambda >0$ such that
\ \\(2) \qquad $\lambda \left| \xi \right|
^2\leq \sum_{i,j=1}^{n}a_k^{ij}(x) \xi _i\xi _j\leq \Lambda \left|
\xi \right| ^2$
\ \\for every $k$ and any $\xi =(\xi _1,...\xi _n)\in R^n$.
Coefficients $c_k$ and $m_{ik}$ in (1) are supposed continuous in
$\overline{\Omega}$, and $a_k^{ij}(x),b_k^i(x)\in
W^{1,\infty}(\Omega)\cap C(\overline{\Omega})$.
Quasi-linear weakly coupled elliptic systems
\ \\(3) \qquad $Q^l(u)=-diva^l(x,u^l,Du^l)+F^l(x,u^1,...u^N,Du^l)=f^l(x)$
in $\Omega $
\ \\(4) \qquad $u^l(x)=g^l(x)$ on $\partial \Omega $
\ \\$l=1,...N$ are considered as well.
System (3) is supposed uniformly elliptic one, i.e. there are
continuous and positive functions $\lambda (\left| u\right|
),\Lambda (\left| u\right| )$, $\left| u\right| =\left( \left(
u^1\right) ^2+...+\left( u^N\right) ^2\right) ^{1/2}$, such that
$\lambda (s)$ is monotone-decreasing one, $\Lambda (s)$ is monotone
increasing one and
\ \\ \qquad $\lambda (\left| u\right| )\left| \xi ^l\right|
^2\leq \sum_{i,j=1}^{n}\frac{\partial a^{li}}{
\partial p_j^l}(x,t,u^1,...u^N,p^l)\xi _i^l\xi _j^l\leq \Lambda (\left|
u\right| )\left| \xi ^l\right| ^2$
\ \\for every $u^l$ and $\xi ^l=(\xi _1^l,...\xi _n^l)\in R^n$, $
l=1,2,...N.$
The coefficients $a^l(x,u,p)$, $
F^l(x,u,p)$, $f^l(x)$, $g^l(x)$ are supposed to be at least
measurable functions with respect to the $x$ variable and locally
Lipschitz continuous on $u^l,u$ and $p$, i.e.
\ \\ $
\begin{array}{l}
\left| F^l(x,u,p)-F^l(x,v,q)\right| \leq C(K)\left( \left|
u-v\right|
+\left| p-q\right| \right) , \\
\\
\left| a^l(x,u^l,p)-a^l(x,v^l,q)\right| \leq C(K)\left( \left|
u^l-v^l\right| +\left| p-q\right| \right)
\end{array}$
\ \\for every $(x)\in \Omega$, $\left| u\right| +\left| v\right|
+\left| p\right| +\left| q\right| \leq K$, $l=1,...N.$
Hereafter by $f^-(x)=\min(f(x),0)$ and $f^+(x)=\max(f(x),0)$ are
denoted the non-positive and, respectively, the non-negative part of
the function $f$. The same convention is valid for matrices as well.
For instance, we denote by $M^+$ the non-negative part of $M$,
i.e. $M^+={\{ m^+_{ij}(x)\} }_{i,j=1}^{N} $.
\ \\This paper concerns the validity of the comparison principle for
weakly-coupled elliptic systems. Let us briefly recall the
definition of the comparison principle in a weak sense for linear
systems.
{\it \ The comparison principle holds in a weak sense for the
operator $L_M$ if $(L_Mu,v)\leq 0$ and $u|_{\partial \Omega}\leq 0$
imply $(u,v)\leq 0$ in $\Omega$ for every $v>0$, $v\in
\left(W^{1,\infty}(\Omega)\cap C_0(\overline{\Omega})\right)^N $ and $u\in
\left(W^{1,\infty}(\Omega)\cap C(\overline{\Omega})\right)^N $.}
\ \\As it is well-known, there is no comparison principle for an
arbitrary elliptic system /see Theorem 6 below/. On the other hand,
there are broad classes of elliptic systems, such that the
comparison principle holds for their members. According to Theorem 1
below, one of these classes can be constructed using the following
condition:
\ \\(6) {\it \qquad There is real-valued principal eigenvalue $\lambda_{\Omega_0}$
of $L_M$ and its adjoint operator ${L^*}_M$ for every
$\Omega_0\subseteq\Omega$, such that the corresponding
eigenfunctions $\tilde{w_{\Omega_0}},w_{\Omega_0}\in {\left(
W_{loc}^{2}(\Omega_0)\bigcap C_0(\overline{\Omega_0}) \right)}^N$
are positive ones}.$\Box$
\ \\\emph{{Remark 1: By adjoint operator we mean ${L^*}_M=L^{*}+M^{t}$,
$L^{*}=diag\left(L^{*}_1, L^{*}_2,..., L^{*}_N\right) $, and
$L^{*}_{k}$ are $L^2$-adjoint operators to $L_{k}$}. The principal
eigenvalue is the first one, or the smallest eigenvalue.}
\ \\More precisely, the class is $C^6=\{L_M$ satisfies (6) and $\lambda_{\Omega_0}>0$
for every $\Omega_0\subseteq\Omega\}$ i.e. $C^6$ contains the
elliptic systems possessing a positive principal eigenvalue with
positive corresponding eigenfunction in $\Omega_0$. In this case the
necessary and sufficient condition for the validity of the
comparison principle for systems (Theorem 1) is the same as the one
for a single equation (See [2]).
{\bf Theorem 1}{\it : Assume that (2) and (6) are satisfied. The
comparison principle holds for system (1) if the principal
eigenvalue $\lambda_{\Omega_0} >0$, where $\lambda_{\Omega_0}$ is
the principal eigenvalue of the operator $L_{M}$ on
$\Omega_0\subseteq\Omega$. If the principal eigenvalue
$\lambda=\lambda_{\Omega}\leq 0$, then the comparison principle does
not hold.}
If we consider classical solutions, then the comparison principle holds
if and only if $\lambda=\lambda_{\Omega}> 0$.
Proof: 1.Assume that the comparison principle does not hold for
$L_M$. Let $\underline{u}, \overline{u}\in
\left(W^{1,\infty}(\Omega)\cap C(\overline{\Omega})\right)^N$ be an
arbitrary weak sub- and super-solution of $L_M$. Then
$u=\underline{u}- \overline{u}\in \left(W^{1,\infty}(\Omega)\cap
C(\overline{\Omega})\right)^N$ is a weak sub-solution of $L_M$, i.e.
$(L_M(u),v)\leq 0$ in $\Omega$ for any $v\in \left(
W^{1,\infty}(\Omega)\cap C_0(\overline{\Omega})\right)^N, v>0$ and
$u^+\equiv 0$ on $\partial\Omega$. Suppose $u^+\neq 0$. Then
$$0\geq \left(L_Mu^+,w_{\Omega_0}\right)=\left(u^+,L^*_Mw_{\Omega_0}\right)=\lambda\left(u^+,w_{\Omega_0}\right)>0$$
\ \\for $\lambda_{\Omega_0}$, $w_{\Omega_0}$ defined in (6).
Therefore $u^+\equiv 0$, i.e for any sub- and
super-solution of $L_M$ we obtain $\underline{u}\leq \overline{u}$.
2. Suppose $\lambda\leq 0$ and $\tilde{w}$ is the corresponding
positive eigenfunction of $L_M$. Then $\tilde{w}>0$ but $L_M(\tilde{w})=\lambda
\tilde{w} \leq 0$. Therefore the comparison principle does not hold for (1).$\Box$
Unfortunately, there are some obstacles in the application of this
general theorem since the condition (6) is not easy to check. First of
all, the system (1) may have no principal eigenvalue at all (See
[10]). Another obstacle is the computation of $\lambda$ even when it
exists.
Comparison principle holds for members of another broad class,
so-called cooperative elliptic systems, i.e. the systems with
$m_{ij}(x)\leq 0$ for $i\neq j$ (See [9]). Most results on the
positivity of the classical solutions of linear elliptic systems
with non-negative boundary data are obtained for the cooperative
systems (See [6,7,13,15,16,18,19,21]). As it is well known, the
positiveness and the comparison principle are equivalent for linear
systems. As for the non-linear ones, the positiveness of the
solutions is a weaker statement than the comparison principle;
positiveness can hold without ordering of sub-and super-solutions or
uniqueness of the solutions at all.
Comparison principle for the diffraction problem for weakly coupled
quasi-linear elliptic systems is proved in [3].
The spectrum properties of the cooperative $L_M$ are studied as
well. A powerful tool in the cooperative case is the theory of the
positive operators (See [17]) since the inverse operator of the
cooperative $L_{M^-}$ is positive in the weak sense. Unfortunately,
this approach cannot be applied to the general case $M\neq M^-$
since $(L_M)^{-1}$ is not a positive operator at all. Nevertheless
in [20] is proved the validity of the comparison principle for
non-cooperative systems obtained by small perturbations of
cooperative ones.
Using unconventional approach, an interesting result is obtained in
[14] for two-dimensional system (1) with $m_{11}=m_{22}=0$ and
$m_{ij}=p_i(x)>0$ for $i\neq j$, $i=1,2$. Theorem 6.5 [14] states
the existence of a principal eigenvalue with positive principal
eigenfunction in the cone $C_U=P_U\times (-P_U)$, where $P_U$ is the
cone of the positive functions in $W^1_{\infty}(\Omega)$. In the
same paper, Theorem 6.3, are provided sharp conditions for the
validity of the comparison principle with respect to the order in
$C_U=P_U\times (-P_U)$, i.e. $(u_1,u_2)\leq (v_1,v_2)$ if and only
if $u_1\leq v_1$ and $u_2\geq v_2$.
In [12] are studied existence and local stability of positive
solutions of systems with $L_k=-d_k \Delta$, linear cooperative and
non-linear competitive part, and Neumann boundary conditions.
Theorem 2.4 in [12] is similar to Theorem 2 in the present article
for $L_k=-d_k \Delta$.
Let us recall that the comparison principle was proved in [11] for
the viscosity sub-and super-solutions of general fully non-linear
elliptic systems $ G^l(x,u^1,...u^N,Du^l,D^2u^l)=0$, $l=1,...N$ /See
also the references there/. The systems considered in [11] are
degenerate elliptic ones and satisfy the same structure-smoothness
condition as the one for a single equation. The first main
assumption in [11] guarantees the quasi-monotonicity of the system.
Quasi-monotonicity in the non-linear case is an equivalent condition
to the cooperativeness in the linear one.
The second main assumption in [11] comes from the method of doubling
of the variables in the proof.
\ \\This work extends the results obtained for cooperative systems to
the non-cooperative ones. The general idea is the separation of the
cooperative and competitive part of system (1). Then using the
appropriate spectral properties of the cooperative part, in Theorems
3 and 4 are derived conditions for the validity of the comparison
principle for the initial system. In particular in Theorem 3 is
employed the fact that irreducible cooperative system possesses a
principal eigenvalue and the corresponding eigenfunction is a
positive one, i.e. condition (6) holds. This way are obtained some
sufficient conditions for validity of the comparison principle for
the non-cooperative system as well. Analogously, in Theorem 4 are
derived the corresponding conditions for the validity of comparison
principle for competitive systems. The conditions derived in
Theorems 3 and 4 are not sharp.
Since predator-prey systems are basic model example for
non-cooperative systems, in Theorem 5 is adapted the main idea of
Theorem 4 to systems which cooperative part is a triangular matrix.
Sufficient condition for the validity of comparison principle for
predator-prey systems is derived in Theorem 5.
In Theorems 6 and 7 are given conditions for failure of the
comparison principle.
The results of Theorems 3 and 4 are adapted to quasi-linear systems
in Theorem 8.
\section{Comparison principle for linear elliptic systems}
As a preliminary statement we need the following well known fact
{\bf Theorem 2}{\it : Every irreducible cooperative system $L_{M^-}$
has unique principal eigenvalue and the corresponding eigenfunction
is positive }.
The principal eigenfunction for linear operators is unique up to
positive multiplicative constants, but for our purpose the
positiveness is of importance.
In fact, Theorem 2 is in the scope of Theorems 11 and 12 in [1].
Theorems 11 and 12 in [1] concern second order cooperative linear
elliptic systems with cooperative boundary conditions and are more
general than Theorem 2. For the sake of completeness, a sketch of the
proof of Theorem 2 follows. It is based on the idea of adding a big
positive constant to the operator. The same idea appears for
instance in [16] and many other works.
Sketch of the proof: Let us consider the operator $L_c= L_{M^-}+cI$
where $c\in R$ is a constant and $I$ is the identity matrix in
$R^N$. Then $L_c$ satisfies the conditions of Theorem 1.1.1 [16] if
$c$ is large enough, namely
1. $L_c$ is a cooperative one;
2. $L_c$ is a fully coupled;
3. There is a super-solution $\varphi$ of $L_c \varphi =0$.
Conditions 1 and 2 above are obviously fulfilled by $L_c$, since
$L_{M^-}$ is a cooperative and a fully coupled one, and $L_c$
inherits these properties from $L_{M^-}$.
As for the condition 3, we construct the super – solution $\varphi$
using the principal eigenfunctions of the operators $L_k-c_k$. More
precisely, $\varphi = (\varphi_1, \varphi_2,..., \varphi_N)$, where
$ \left( L_k-c_k\right) \varphi_k={\lambda}_k \varphi_k$, and
${\lambda}_k,\varphi_k>0$ in $\Omega$. The existence of $\varphi_k$
is a well - known fact.
We claim that if $c$ is large enough then $\varphi$ is a super -
solution
of $L_c$ , i.e. $\varphi \in {\left( W_{loc}^{2,n}(\Omega)\bigcap
C(\overline{\Omega})\right)}^N$ and $\varphi\geq 0$, $L_c \varphi
\geq 0$ and $\varphi$ is not identical to null in $\Omega$.
Since we have chosen ${\varphi}_k$ being the principal
eigenfunctions of $L_k-c_k$, we have ${\varphi}_k \in {\left(
C^{2}(\Omega)\bigcap C(\overline{\Omega})\right)}$ and ${\varphi}_k>
0$. It remains to prove that $L_c \varphi \geq 0$.
Let $$A_k = {\left(L_c\varphi \right)}_k = -\sum_{i,j=1}^{n}D_j
\left( a_k^{ij}(x)D_i{\varphi}_k \right)
+\sum_{i=1}^{n}b_k^i(x)D_i{\varphi}_k+
\sum_{i=1}^{n}m_{ki}(x){\varphi}_i +(c_k+c){\varphi}_k =$$
$$=({\lambda}_k+c_k+c){\varphi}_k+
\sum_{i=1}^{n}m_{ki}(x){\varphi}_i.
$$
Then $A_k\geq 0$ for every $k$.
First of all, if we denote by $n$ the outer unitary normal vector to
$\partial\Omega$, then
$${\frac{dA_k}{dn}}|_{\partial\Omega}=
({\lambda}_k+c_k+c)\frac{d{\varphi}_k}{dn}
+\sum_{i=1}^{n}m_{ki}(x)\frac{d{\varphi}_i}{dn}$$ since
${\varphi_i}|_{\partial\Omega}=0$. Therefore there is a constant
$c'$, such that ${\frac{dA_k}{dn}}|_{\partial\Omega}<0$ for $c>c'$
since $\frac{{d\varphi}_i}{dn}<0$ on $\partial\Omega$ (See [14],
Theorem 7, p.65) and $\lambda_i$ is independent of $c$.
Hence there is a neighbourhood ${\Omega}_\varepsilon =
\{x\in\overline{\Omega}:dist(x,\partial\Omega)<\varepsilon\}$ for
some $\varepsilon>0$, such that
$${\frac{dA_k}{dn}}|_{{\Omega}_\varepsilon}<0$$.
Since $A_k=0$ on $\partial\Omega$, then $A_k>0$ in
${\Omega}_\varepsilon $
The set $\Omega \setminus {\Omega }_\varepsilon$ is compact,
therefore there is $c''>0$ such that $A_k>0$ in the compact set
$\Omega \setminus {\Omega}_\varepsilon$ for $c>c''$, since
${\varphi}_k>0$ in ${\Omega} \setminus {\Omega}_{\varepsilon}$.
Considering $c>\max(c',c'')$ we obtain $A_k>0$ in $\Omega$, therefore
$\varphi$ is indeed a super - solution of $L_c$.
The rest of the proof follows the proof of Theorem 1.1.1 [16].$\Box$
A reasonable question is: could the non-cooperative part of the
system "improve" the spectral facilities of the cooperative system?
In other words, if the cooperative part of the system has
non-positive principal eigenvalue, what are conditions on the
competitive part, such that the comparison principle holds for the
system? An answer of this question is given in the following
{\bf Theorem 3}{\it : Let (1) be a weakly coupled system with
irreducible cooperative part of $L^*_{M^-}$ such that (2) is
satisfied. Then the comparison principle holds for system (1) if
there is $x_0\in \Omega$ such that
\ \\(7)\qquad $
\left(\lambda+\sum_{k=1}^{N}m_{kj}^{+}(x_0)\right)>0$ for
$j=1...N$
\ \\and
\ \\(8)\qquad $
\lambda+ m_{jj}^{+}(x)\geq 0$ for every $x\in \Omega$ and
$j=1...N$
\ \\where $\lambda=\inf_{\Omega_0\subseteq \Omega}\{\lambda_{\Omega_0}$ : $\lambda_{\Omega_0}$ is the
principal eigenvalue of the operator $L_{M^-}$ on $\Omega_0$\}}.
It is obvious, that if $ \lambda> 0$, then the comparison principle
holds. More interesting case is $ \lambda < 0$. Then $m^+_{kj}$ can
"improve" the properties of $L_M$ with respect to the validity of
the comparison principle. Furthermore, if $ \lambda+ m_{jj}^{+}(x)>
0$, then (7) is consequence of (8). Condition (7) is important when
$ \lambda+ m_{jj}^{+}(x)\equiv 0$.
\emph{Remark 2: If $L^*_{M^-}$ is irreducible, then $L_{M^-}$ is
irreducible as well. In fact $L^*_{M^-}=L^*+{M^-}^t$ and if
${M^-}^t$ is irreducible, then such is $M^-$.}
Proof: Suppose all conditions of Theorem 3 are satisfied by $L_M$
but the comparison principle does not hold for $L_M$. Let
$\underline{u}, \overline{u}\in \left(W^{1,\infty}(\Omega)\cap
C(\overline{\Omega})\right)^N$ be an arbitrary weak sub- and
super-solution of $L_M$. Then $u=\underline{u}- \overline{u}\in
\left(W^{1,\infty}(\Omega)\cap C(\overline{\Omega})\right)^N$ is a
weak sub-solution of $L_M$ as well, i.e. $(L_M(u),v)\leq 0$ in
$\Omega$ for any $v\in \left(W^{1,\infty}(\Omega)\cap
C_0(\overline{\Omega})\right)^N, v>0$ and $u^+\equiv 0$ on
$\partial\Omega$.
Assume $u^+\neq 0$. Let $\Omega_{supp(u^+)}\subseteq supp(u^+)$ has
smooth boundary. Then for any $v> 0$, $v\in
\left(W^{1,\infty}(\Omega_{supp(u^+)})\cap
C(\overline{\Omega_{supp(u^+)}})\right)^N$
\ \\(9)\qquad $0\geq \left( L_{M}u^+,v\right)
=\left(u^+,L^*_{M^-}v\right)+\left(M^+u^+,v\right)$
\ \\is satisfied since $L_{M}(u^+)\leq 0$.
Since $L_{M^-}$ is a cooperative operator, such is ${\left(
L_{M^-}\right)}^{*}=L^{*}+(M^-)^{t}$ as well. According to Theorem 2
above, there is a unique positive eigenfunction $w\in {\left(
W_{loc}^{2,n}(\Omega_{supp(u^+)})\bigcap
C_0(\overline{\Omega_{supp(u^+)}}) \right)}^N$ such that $w>0$ and
$L^*_{M^-}w=\lambda w$ for some $\lambda >0$.
Then $w$ is a suitable test-function for (9). Rewriting the
inequality (9) for $v=w$ we obtain
\ \\$0\geq \left(u^+,L^*_{M^-}w\right)+\left(M^+u^+,w\right)=
\left(u^+,\lambda w\right)+\left(M^+u^+,w\right)$
\ \\or componentwise
\ \\(10)\qquad
$0\geq \left(u^{+}_{k},\lambda
w_k\right)+\left({\sum}_{j=1}^{N}m^{+}_{kj}u^{+}_{j},w_k\right)$
\ \\for $k=1,...N$.
The sum of inequalities (10) is
\ $0\geq {\sum}_{k=1}^{N}\left(
\left(u^{+}_{k},{\lambda}w_k\right)
+\left({\sum}_{j=1}^{N}m^{+}_{kj}u^{+}_{j},w_k\right)\right)=$
\ \\
$={\sum}_{k=1}^{N}\left(u^{+}_{k},{\lambda}w_k\right)
+{\sum}_{k,j=1}^{N}\left(u^{+}_{j},m^{+}_{kj}w_k\right)=$
\ \\
$={\sum}_{j=1}^{N}\left(u^{+}_{j},{\sum}_{k=1}^{N}\left(
{{\delta}_{jk}\lambda}+m^{+}_{kj}\right)w_k\right)>0$
\ \\since $u^{+}> 0$, $w_k>0$, (7) and (8). Condition (8)
is used in $\left(u^{+}_{k},({\lambda}+m^{+}_{kk})w_k\right)\geq 0$.
\ \\The above contradiction proves that $u^+\equiv 0$
and therefore the comparison principle holds for operator
$L_M$.$\Box$
Since in [1] and [18] are considered only systems with irreducible
cooperative part, the ones with reducible $L_{M^-}$ are excluded of
the range of Theorem 3. Nevertheless the same idea is applicable to
some systems with reducible cooperative part as well, as it is given
in Theorem 4.
{\bf Theorem 4}{\it : Assume $m^-_{ij}\equiv 0$ for $i\neq j$ and
(2) is satisfied. Then the comparison principle holds for system (1)
if there is $x_0\in \Omega$ such that
\ \\(11)\qquad $
\left(\lambda_{j}+\sum_{k=1}^{N} m_{kj}^{+}(x_0)\right)>0$ for
$j=1...N$
\ \\and
\ \\(12)\qquad $
\lambda_{j}+ m_{jj}^{+}(x)\geq 0$ for every $x\in \Omega$
$j=1...N$,
\ \\where $\lambda_{j}=\inf_{\Omega_0\subseteq\Omega}\{\lambda_{j\Omega_0}$ : $\lambda_{j\Omega_0}$ is the
principal eigenvalue of the operator $L_j+m^-_{jj}$ on
$\Omega_0$\}}.
Theorem 4 is formulated for diagonal matrix $M^-$. The statement is
valid with obvious modification if $M^-$ has block structure, i.e.
$$M^-=\left(
\begin {array}{cccccccc}
M^-_1 & & 0 & & ... & & 0 \\
& & & & & & \\
0 & & M^-_2 & & ... & & 0
\\ & & & & & &
\\... & & ... & & ... & & ... & \\
& & & & & & \\
0 & & 0 & & ... & &M^-_r
\end{array}
\right) $$
\ \\where $M^-_k$ are $d_k$-dimensional square matrices,
$\sum d_k\leq N$ .
Proof: Let all conditions of Theorem 4 be satisfied by $L_M$ but the
comparison principle does not hold for $\tilde{L}_{M^+}$. Let
$\underline{u}, \overline{u}\in \left(W^{1,\infty}(\Omega)\cap
C(\overline{\Omega})\right)^N$ be an arbitrary weak sub- and
super-solution of $\tilde{L}_{M^+}$. Then $u=\underline{u}-
\overline{u}\in \left(W^{1,\infty}(\Omega)\cap
C(\overline{\Omega})\right)^N$ is a weak sub-solution of
$\tilde{L}_{M^+}$ as well, i.e. $(\tilde{L}_{M^+}(u),v)\leq 0$ in
$\Omega$ for any $v\in \left(W^{1,\infty}(\Omega)\cap
C_0(\overline{\Omega})\right)^N, v>0$ and $u^+\equiv 0$ on
$\partial\Omega$.
Suppose that $u^+\neq 0$. Let $\Omega_{supp(u^+)}\subseteq
supp(u^+)$ has smooth boundary. Then for any $v> 0$, $v\in
W_2^{1,\infty}(\Omega_{supp(u^+)})\cap
C(\overline{\Omega_{supp(u^+)}})$
\ \\(13)\qquad $0\geq \left( \tilde{L}_{M^+}u^+,v\right)
=\left(u^+,\tilde{L}^*v\right)+\left(M^+u^+,v\right)$
\ \\is satisfied since $\tilde{L}_{M^+}u^+\leq 0$.
According to Theorem 2.1 in [2], there is a positive principal
eigenfunction for the operator ${\tilde{L}}^*_k$ in
$\Omega_{supp(u^+)}$, i.e. there exists a real-valued $w_k(x)\in
C^2(\Omega_{supp(u^+)})$ such that
${{\tilde{L}}^*}_kw_k(x)={\lambda}_kw_k(x)$ and $w_k(x)>0$. Note
that $w_k$ are classical solutions.
Then the vector-function $w(x)=(w_1(x),...,w_N(x))$, composed of the
principal eigenfunctions $w_k(x)$, is suitable as a test-function in
(13).
Writing componentwise inequality (13) for $v=w$ we obtain
\ \\(14)\qquad
$0\geq
\left(u^{+}_{k},{{\tilde{L}}^*}_{k}w_k\right)+\left({\sum}_{j=1}^{N}m^{+}_{kj}u^{+}_{j},w_k\right)$
\ \\for $k=1,...N$.
The sum of inequalities (14) is
\ $0\geq {\sum}_{k=1}^{N}\left(
\left(u^{+}_{k},{{\tilde{L}}^*}_{k}w_k\right)
+\left({\sum}_{j=1}^{N}m^{+}_{kj}u^{+}_{j},w_k\right)\right)=$
\ \\
$={\sum}_{k=1}^{N}\left(u^{+}_{k},{\lambda}_{k}w_k\right)
+{\sum}_{k,j=1}^{N}\left(u^{+}_{j},m^{+}_{kj}w_k\right)=$
\ \\
$={\sum}_{j=1}^{N}\left(u^{+}_{j},{\sum}_{k=1}^{N}\left(
{{\delta}_{jk}\lambda}_{j}+m^{+}_{kj}\right)w_k\right)>0$
\ \\since $u^{+}> 0$, $w_k>0$, (11) and (12).
\ \\The above contradiction proves that $u^+\equiv 0$
and therefore the comparison principle holds for operator
$L_M^{+}$.$\Box$
\emph{Remark 3: It is obvious that conditions (7),(8), and
respectively, (11), (12), can be substituted by the sharper
condition ${\sum}_{k=1}^{N}\left(
{{\delta}_{jk}\lambda_k}+m_{kj}\right) w_k>0$ for every $x\in
\Omega$ and every $j=1...N$, which is useful only if the exact
values of the eigenfunctions $w_k$ can be computed.}
The main idea in Theorem 4 could be modified for systems with
triangular cooperative part, for instance with null elements above
the main diagonal. For instance predator-prey systems have
triangular cooperative part. Of course, if $m^-_{ij}(x)>0$ for every
$x\in\Omega$ and $i=1,...N$, $j<i$, then the system is in the scope
of Theorem 3. In Theorem 5 this condition is not necessary, i.e.
some of the species can extinguish in some subarea of $\Omega$.
{\bf Theorem 5}{\it : Assume (2) is satisfied and the cooperative
part $M^-$ is triangular for the system (1), i.e. $m^-_{ij}=0$ for
$i=1,...N$, $j>i$. Then the comparison principle holds for system
(1), if there is} $\varepsilon > 0$ \emph{such that}
\ \\(15)\qquad $
\left(\lambda_{j}-(1-\delta_{1j})\varepsilon+\sum_{k=1}^{N} m_{kj}^{+}(x_0)\right)>0$ for
$j=1...N$ \emph{for some} $x_0\in\Omega$
\ \\\emph{and}
\ \\(16)\qquad $
\lambda_{j}-(1-\delta_{1j})\varepsilon +m^+_{jj}(x)\geq 0$ for
every $x\in\Omega$ and $j=1...N$,
\ \\\emph{where $\lambda_{j}=\inf_{\Omega_0\subseteq\Omega}\{\lambda_{j\Omega_0}$ : $\lambda_{j\Omega_0}$ is the
principal eigenvalue of the operator $L_j+m^-_{jj}$ on
$\Omega_0$\}}.
Note that the condition for triangular cooperative part does not
exclude $m^-_{ij}(x_0)=0$ for some $x_0\in \Omega$, $i,j=1,...N$.
Proof: 1. The first equation in $L_{M^-}$ is not coupled, and there
are principal eigenvalue $\lambda_1$ and principal eigenfunction
$w_1>0$ of $L_1+m^-_{11}$ (See Theorem 2.1 in [2]). We put
$\widetilde{w}_1=w_1$.
2. The equation
$(L_2+m^-_{22})\widetilde{w}_2-\lambda\widetilde{w}_2 =
m_{21}\widetilde{w}_1$ with null boundary conditions has unique
solution for $\lambda<\lambda_2$, where $\lambda_2$ is the principal
eigenvalue of $L_2+m^-_{22}$. We put
$\lambda=\lambda_2-\varepsilon$. Since the right-hand side
$m_{21}\widetilde{w}_1$ is positive, the solution $\widetilde{w}_2$
is positive as well.
3. By induction we construct positive functions $\widetilde{w}_j$,
$j=3,...N$ as solutions of
$(L_j+m^-_{jj})\widetilde{w}_j-(\lambda_j-\varepsilon)\widetilde{w}_j
= \sum_{i=1}^{j-1}m_{ji}\widetilde{w}_i$ with null boundary
conditions. As usual $\lambda_j$ are the principal eigenvalues of
$L_j+m^-_{jj}$.
4. The rest of the proof follows the proof of Theorem 4 where
$\lambda_j$ is substituted with $\lambda_j-\varepsilon$ and $w_j$ is
substituted with $\widetilde{w}_j$.
For the simplest predator-prey system, $N=2$, $m_{11}=m_{22}=0$,
$m_{12}>0$ and $m_{21}<0$, conditions (15) and (16) are
$\lambda_1\geq0$, $\lambda_2>0$, where $\lambda_{j}$ is the
principal eigenvalue of the operator $L_j$, $j=1,2$.
Condition (12) in Theorem 2 is useful for construction of
counter-example for the non-validity of comparison principle in
general.
{\bf Theorem 6}{\it : Let (1) be a weakly coupled system with
reducible cooperative part $L_{M^-}$ and (2) be satisfied. Suppose
that (12) is not true, i.e there is some $j\in \{ 1...N\}$ such that
$ \left( \lambda_j + m_{jj}^{+}(x)\right)<0$ for any $x\in\Omega$,
and $m^+_{jl}=0$ for $l\neq j$, $l=1,...N$. Then comparison
principle does not hold for system (1)}.
Proof: Let us suppose for simplicity that $j=1$ and $m^-_{1j}=0$
for $j=2,...N$. We consider the vector-function $w(x)=(w_1(x),0,...,0)$,
where $w_1(x)$ is the principal eigenfunction of $L_1+m^-_{11}$.
Then for the first component ${(L_M)}_1$ of $L_M$ is valid
$(L_Mw)_1= \lambda_1 w_1(x) + m_{11}^{+}w_1(x)<0$ in $\Omega$, where
$\lambda_1$ is the principal eigenvalue of $L_1+m^-_{11}$, and $(L_Mw)_k=0$
for $k=2,...N$. Therefore, $L_Mw\leq 0$ but $w(x)\geq 0$ and
comparison principle fails. $\Box$
The simplest case to illustrate Theorems 4 and 6 is $N=2$. Let us
consider irreducible competitive system
\ \\(17)\qquad $L_ju_j+\sum_{k=1}^2m_{jk}u_k=f_j$, $j=1,2$,
\ \\where $m_{11}=m_{22}=0$, $m_{12}>0$, $m_{21}>0$.
Suppose $\lambda_ j$ is the principal eigenvalue of $L^*_j$,
$j=1,2$. If $\lambda_ j\geq 0$ and there is $x_0\in\Omega$ such
that $\lambda_1+m_{21}(x_0)>0$ and $\lambda_2+m_{12}(x_0)>0$, then
according to Theorem 4 the comparison principle holds for system
(1), i.e. if $f_1>0$, $f_2>0$, then $u_1>0$ and $u_2>0$, where
$u=\underline{u}-\overline{u}$ is defined in the proof of Theorem 3.
If $\lambda_2+m_{12}(x)<0$ for every $x\in\Omega$, then according to
Theorem 6 there is no comparison principle for system (1) in the
lexicographic order, used in this paper.
More detailed analysis of the validity of the comparison principle
for system (1) could be done if we consider order in the cone
$C_U=P_U\times (-P_U)$, i.e. $(u_1,u_2)\leq (v_1,v_2)$ if and only
if $u_1\leq v_1$ and $u_2\geq v_2$. Then Theorem 6.5 [14] states the
existence of a principal eigenvalue $\lambda$ of $L^*$ with positive
in $C_U$ principal eigenfunction $w_1(x)>0$, $w_2(x)<0$.
If $\lambda>0$, then according to Theorem 6.3 [14] the comparison
principle holds in the order in $C_U$, i.e. if $f_1>0$, $f_2<0$,
then $u_1>0$ and $u_2<0$.
If $\lambda<0$, then
$(L_1(-u_1)+m_{12}u_2,w_1)+(L_2u_2+m_{21}(-u_1),w_2)= (-u_1, \lambda
w_1+m_{21}w_2)+(u_2, m_{12}w_1+\lambda w_2)>0$. Hence $u_1<0$ and
$u_2>0$ for $f_1>0$, $f_2>0$.
A statement analogous to Theorem 6 is valid for irreducible systems
as well.
{\bf Theorem 7}{\it : Let (1) be a weakly coupled system with
irreducible cooperative part $L_{M^-}$ and (2) be satisfied. Suppose
that (7) is not true, i.e there is some $j\in \{ 1...N\}$ such that
$ \left( \lambda + m_{jj}^{+}(x)\right)<0$ for any $x\in\Omega$, and
$m^+_{jl}=0$ for $l\neq j$, $l=1,...N$. Then comparison principle
does not hold for system (1)}.
Note that in Theorem 6 and Theorem 7 we need the violation of
condition (12) and, respectively, condition (7) in all $\Omega$.
The proof of Theorem 7 follows the proof of Theorem 6 with obvious
adaptation.
\section{Comparison principle for quasi-linear elliptic systems}
Considering quasi-linear system (3), (4), we use the results of
the previous section to derive conditions for the validity of comparison principle.
Let $u(x)\in \left(W^{1,\infty}(\Omega)\cap
C(\overline{\Omega})\right)^N$ be a sub-solution and $v(x)\in
\left(W^{1,\infty}(\Omega)\cap C(\overline{\Omega})\right)^N$ be a
super-solution of (3), (4). Comparison principle holds for (3), (4),
if $Q(u)\leq Q(v)$ in $\Omega$, $u\leq v$ on $\partial \Omega$ imply
$u\leq v$ in $\Omega$. Last three inequalities are considered in the
weak sense.
Recall that the vector-function $u(x)$ is a weak sub-solution of (3), (4) if
$$\int_\Omega \left( a^{li}(x,u^l,Du^l)\eta _{x_i}^l+F^l(x,u^1,...u^N,Du^l)\eta ^l-f^l(x)\eta ^l\right) dx\leq 0$$
\ \\for $l=1,...N$ and for every nonnegative vector function
$\eta\in \left(\stackrel{\circ }{W}^{1,\infty}(\Omega)\cap
C(\overline{\Omega})\right)^N$ (i.e. $\eta=(\eta ^1,...\eta ^N)$,
$\eta ^l\geq 0$, $\eta ^l\in \left(W^{1,\infty}(\Omega)\cap
C(\overline{\Omega})\right)^N\cap C( \overline{\Omega})$ and $\eta
^l=0$ on $\partial \Omega$).
Analogously, $v(x)\in \left(W^{1,\infty}(\Omega)\cap
C(\overline{\Omega})\right)^N$ is a super-solution of (3), (4), if
$$\int_\Omega \left( a^{li}(x,v^l,Dv^l)\eta _{x_i}^l+F^l(x,v^1,...v^N,Dv^l)\eta ^l-f^l(x)\eta ^l\right) dx\geq 0$$
\ \\for $l=1,...N$ and for every nonnegative vector function
$\eta \in \left(\stackrel{\circ }{W}^{1,\infty}(\Omega)\cap
C(\overline{\Omega})\right)^N$.
Since $u(x)$ and $v(x)$ are sub-and super-solution respectively,
then $\tilde{w}(x)=u(x)-v(x)$ is a weak sub-solution of the
following problem
\ \\ $-\sum^{n}_{i,j=1} D_i \left( B_j^{li}D_j \tilde{w}^l
+B_0^{li}\tilde{w}^l\right) +{\sum}^{N}_{k=1}E_k^l\tilde{w}^k+{\sum}^{n}_{i=1}H_i^lD_i \tilde{w}^l=0$ in $
\Omega $
\ \\with non-positive boundary data on $\partial \Omega $. Here
\ \\$B_j^{li}=\int_0^1\frac{\partial a^{li}}{\partial p_j}
(x,P^l)ds$, $B_0^{li}=\int_0^1\frac{\partial a^{li}}{\partial
u^l}(x,P^l)ds$, $E_k^l=\int_0^1\frac{\partial F^l}{\partial
u^k}(x,S^l)ds$,
\ \\$H_i^l=\int_0^1\frac{\partial F^l}{\partial p_i}(x,S^l)ds$, $
P^l=\left( v^l+s(u^l-v^l),Dv^l+sD(u^l-v^l)\right) $,
\ \\ $S^l=\left( v+s(u-v),Dv^l+sD(u^l-v^l)\right)$.
Therefore, $\tilde{w}_{+}(x)=\max \left( \tilde{w}(x),0\right)$ is a
sub-solution of
\ \\(18)\qquad $-\sum^{n}_{i,j=1} D_i\left( B_j^{li}D_j{\tilde{w}_+}^l
+B_0^{li}{\tilde{w}_+}^l\right) +\sum^{N}_{k=1}E_k^l{\tilde{w}_+}^k+\sum^{n}_{i=1}H_i^lD_i {\tilde{w}_+}^l=0$ in $
\Omega $
\ \\with zero boundary data on $\partial \Omega$.
Equation (18) is equivalent in terms of matrix to
\ \\(19)\qquad $B_E\tilde{w}_+=(B+E)\tilde{w}_+=0$ in
$\Omega$,
\ \\where $B=diag(B_1,B_2,...B_N)$,
$B_l=\sum^{n}_{i,j=1} D_i\left( B_j^{li}D_j{\tilde{w}_+}^l
+B_0^{li}{\tilde{w}_+}^l\right) +\sum^{n}_{i=1}H_i^lD_i
{\tilde{w}_+}^l$ and $E=\{E_k^l\}_{l,k=1}^N$.
If we denote $B_i^{kj}$ by $a_k^{ij}$, $B_0^{ki}+H_i^k$ by $b_k^i$,
${\sum}_{i=1}^n D_iB_0^{ki}+E_k^k$ by $m_{kk}(x)$ for $i,j=1...n$,
$k=1...N$ and $E_k^l$ by $m_{lk}(x)$ for $k,l=1...N$, $k\neq l$,
system (18) looks like system (1). Hereafter we follow the notations
for system (1).
Suppose now that $\tilde{w}_{+}(x)$ is not identical equal to zero
in $\Omega $, i.e. comparison principle fails for (3), (4). Suppose
$L_{M^-}$ is irreducible. Then
\ \\$0\geq \left(L_M \tilde{w}_+,w \right)
=\left(\tilde{w}_{+},L^*_{M^-}w\right)+\left(M^+\tilde{w}_{+},w\right)=
\left(\tilde{w}_{+},\lambda
w\right)+\left(M^+\tilde{w}_{+},w\right)$
\ \\where $\lambda$ is the principal eigenvalue of $L^*_{M^-}$ and $w$ is the corresponding eigenfunction.
Suppose $a_k^{ij}$ and $m_{lk}(x)$ satisfy the conditions (2), (7)
and (8) in Theorem 3. Following the proof of Theorem 3, we obtain
that $\tilde{w}_{+}\equiv 0$ in $\Omega$, i.e. comparison principle
holds for the system (3), (4).
If $L_{M^-}$ is reducible, then
\ \\$0\geq \left(L_M\tilde{w}_{+},w \right)
=\left(\tilde{w}_{+},L^*w\right)+\left(M^+\tilde{w}_{+},w\right)=\left(\tilde{w}_{+},\tilde{\lambda}
w\right)+\left(M^+\tilde{w}_{+},w\right)$
\ \\where
$\tilde{\lambda}w=(\tilde{\lambda}_1w_1,\tilde{\lambda}_2w_2,...\tilde{\lambda}_Nw_N)$,
$\tilde{\lambda}_k$ is the principal eigenvalue of $L^*_{k}$ and
$w_k$ is the corresponding eigenfunction for $k=1,...N$.
Suppose $a_k^{ij}$ and $m_{lk}(x)$ satisfy the conditions (2), (11)
and (12) in Theorem 4. Following the proof of Theorem 4, we obtain
that $\tilde{w}_{+}\equiv 0$ in $\Omega$, i.e. comparison principle
holds for the system (3), (4).
We have sketched the proof of the following
{\bf Theorem 8}{\it : Suppose (3), (4) is a quasi-linear system and
the corresponding system $B_{E^-}$ in (19) is elliptic. Then the
comparison principle holds for system (3), (4) if}
\ \\(i)\qquad {\it $B_{E^-}$ in (19) is irreducible and for every $j=1...n$}
\ \\(ii)\qquad $
\lambda+\left(\sum_{k=1}^{N} \frac{\partial F^k}{\partial
p^j}(x,p,Dp^l)+{\sum}_{i=1}^N D_i\frac{\partial a^{ji}}{\partial
p^j}(x,p^j,Dp^j)\right)^+ >0$,
\ \\(iii)\qquad $
\lambda+ \left({\sum}_{i=1}^n D_i\frac{\partial a^{ji}}{\partial
p^j}(x,p^j,Dp^j)+{\frac{\partial F^j}{\partial
p^j}(x,p,Dp^j)}\right)^+\geq 0$
\ \\ {\it where $x\in \Omega$, $p\in R^n$ and $\lambda=\inf_{\Omega_0\subseteq \Omega}\{\lambda_{\Omega_0}$ : $\lambda_{\Omega_0}$ is the
principal eigenvalue of the operator $B_{E^-}$ on $\Omega_0$\};
\ \\{\it or
\ \\(i')\qquad $B_{E^-}$ in (19) is reducible and for every $j=1...n$}
\ \\(ii')\qquad $
{\lambda}_j+\left(\sum_{k=1}^{n} \frac{\partial F^k}{\partial
p^j}(x,p,Dp^j)+{\sum}_{i=1}^n D_i\frac{\partial a^{ji}}{\partial
p^j}(x,p^j,Dp^j)\right)^+ >0$,
\ \\(iii')\qquad $
{\lambda}_j+ \left({\sum}_{i=1}^n D_i\frac{\partial a^{ji}}{\partial
p^j}(x,p^j,Dp^j)+{\frac{\partial F^j}{\partial
p^j}(x,p,Dp^j)}\right)^+\geq 0$
\ \\{\it where $x\in \Omega$, $p\in R^n$ and $\lambda_{l}=\inf_{\Omega_0\subseteq\Omega}\{\lambda_{l\Omega_0}$ : $\lambda_{l\Omega_0}$ is the
principal eigenvalue of the operator $B_l$ on $\Omega_0$\}.}
\section{Final remarks}
The sufficient conditions in Theorems 3 and 4 are derived from the
spectral properties of the cooperative part of (1) - the operator
$L_{M^-}$, or, in other words, comparing the principal eigenvalue of
$L_{M^-}$ with the quantities in $M^{+}$. In fact the positive
matrix $M^+$ causes a migration of the principal eigenvalue of
$L_{M^-}$ to the left.
Theorems 3 and 4 provide a huge class of non-cooperative systems
such that the comparison principle is valid for. The idea of
migrating the spectrum of a positive operator to the right works in
this case, though the spectrum itself is not studied in this
article. The results for non-cooperative systems in this paper are
not sharp and the validity of the comparison principle is to be
determined more precisely in the future.
\section{Acknowledgment}
The author would like to acknowledge Professor Alexander Sobolev for
the very useful talks on the theory of positive operators, during
the author's stay at University of Sussex as Maria Curie fellow.
\section{REFERENCES}
[1] H.Amann, Maximum Principles and Principal Eigenvalues, 10
Mathematical Essays on Approximation in Analysis and Topology
(J.Ferrera, J.Lopez-Gomez and F.R.Ruiz del Portal Eds.), Elsevier,
Amsterdam (2005), 1-60.
[2] H.Berestycki, L.Nirenberg, S.R.S. Varadhan : The principal
eigenvalue and maximum principle for second-order elliptic operators
in general domains, Commun. Pure Appl. Math. 47, No.1, 47-92 (1994).
[3] G.Boyadzhiev, N.Kutev : Diffraction problems for quasilinear
reaction-diffusion systems, Nonlinear Analysis 55 (2003), 905-926.
[4] G.Caristi, E. Mitidieri : Further results on maximum principle
for non-cooperative elliptic systems. Nonl.Anal.T.M.A., 17 (1991),
547-228.
[5] C.Coosner, P.Schaefer : Sign-definite solutions in some linear
elliptic systems. Peoc.Roy.Soc.Edinb.,Sect.A 111, (1989), 347-358.
[6] D.di Figueredo, E.Mitidieri : Maximum principles for cooperative
elliptic systems. C.R.Acad.Sci. Paris, Ser. I, 310 (1990), 49-52.
[7] D.di Figueiredo, E.Mitidieri : A maximum principle for an
elliptic system and applications to semi-linear problems, SIAM
J.Math.Anal. 17 (1986), 836-849.
[8] Gilbarg, D and Trudinger, N. Elliptic partial differential
equations of second order. 2nd ed., Springer - Verlag, New York.
[9] M.Hirsch : Systems of differential equations which are
competitive or cooperative I. Limit sets, SIAM J. Math. Anal. 13
(1982), 167-179.
[10] P.Hess : On the Eigenvalue Problem for Weakly Coupled Elliptic
Systems, Arch. Ration. Mech. Anal. 81 (1983), 151-159.
[11] Ishii, Sh. Koike : Viscosity solutions for monotone systems of
second order elliptic PDEs. Commun. Part.Diff.Eq. 16 (1991), 1095 -
1128.
[12] Li Jun Hei, Juan Hua Wu : Existence and Stability of Positive
Solutions
for an Elliptic Cooperative System. Acta Math. Sinica Oct.2005, Vol.21,
No 5, pp 1113-1130.
[13] J.Lopez-Gomez, M. Molina-Meyer : The maximum principle for
cooperative weakly coupled elliptic systems and some applications.
Diff.Int.Eq. 7 (1994), 383-398.
[14] J.Lopez-Gomez, J.C.Sabina de Lis, Coexistence states and
global attractivity for some convective diffusive competing species
models, Trans.Amer.Math.So. 347, 10 (1995), 3797-3833.
[15] E.Mitidieri, G.Sweers : Weakly coupled elliptic systems and
positivity. Math.Nachr. 173 (1995), 259-286.
[16] M. Protter, H.Weinberger : Maximum Principle in Differential
Equations, Prentice Hall, 1976.
[17] M.Reed, B.Simon : Methods of modern mathematical Physics, v.IV:
Analysis of operators, Academic Press, New York, (1978).
[18] G.Sweers : Strong positivity in $C(\overline{\Omega })$ for
elliptic systems. Math.Z. 209 (1992), 251-271.
[19] G.Sweers : Positivity for a strongly coupled elliptic systems
by Green function estimates. J Geometric Analysis, 4, (1994),
121-142.
[20] G.Sweers : A strong maximum principle for a noncooperative
elliptic system. SIAM J. Math. Anal., 20 (1989), 367-371.
[21] W.Walter : The minimum principle for elliptic systems.
Appl.Anal.47 (1992), 1-6.
Author's address:
Institute of Mathematics and Informatics,
Bulgarian Academy of Sciences,
Acad.G.Bonchev st., bl.8,
Sofia, Bulgaria
\end{document} |
\begin{document}
\title{A randomized polynomial kernelization for Vertex Cover with a smaller parameter}
\begin{abstract}
In the \problem{Vertex Cover} problem we are given a graph $G=(V,E)$ and an integer $k$ and have to determine whether there is a set $X\subseteq V$ of size at most $k$ such that each edge in $E$ has at least one endpoint in $X$. The problem can be easily solved in time $\mathcal{O}^*(2^k)$, making it fixed-parameter tractable (FPT) with respect to $k$. While the fastest known algorithm takes only time $\mathcal{O}^*(1.2738^k)$, much stronger improvements have been obtained by studying \emph{parameters that are smaller than~$k$}. Apart from treewidth-related results, the arguably best algorithm for \problem{Vertex Cover} runs in time $\mathcal{O}^*(2.3146^p)$, where $p=k-LP(G)$ is only the excess of the solution size $k$ over the best fractional vertex cover (Lokshtanov et al.\ TALG 2014). Since $p\leq k$ but $k$ cannot be bounded in terms of $p$ alone, this strictly increases the range of tractable instances.
Recently, Garg and Philip (SODA 2016) greatly contributed to understanding the parameterized complexity of the \problem{Vertex Cover} problem. They prove that $2LP(G)-MM(G)$ is a lower bound for the vertex cover size of $G$, where $MM(G)$ is the size of a largest matching of $G$, and proceed to study parameter $\ell=k-(2LP(G)-MM(G))$. They give an algorithm of running time $\mathcal{O}^*(3^\ell)$, proving that \problem{Vertex Cover} is FPT in $\ell$. It can be easily observed that $\ell\leq p$ whereas $p$ cannot be bounded in terms of $\ell$ alone.
We complement the work of Garg and Philip by proving that \problem{Vertex Cover} admits a randomized polynomial kernelization in terms of $\ell$, i.e., an efficient preprocessing to size polynomial in $\ell$. This improves over parameter $p=k-LP(G)$ for which this was previously known (Kratsch and Wahlstr\"om FOCS 2012).
\end{abstract}
\section{Introduction}
A \emph{vertex cover} of a graph $G=(V,E)$ is a set $X\subseteq V$ such that each edge $e\in E$ has at least one endpoint in $X$. The \probname{Vertex Cover}\xspace problem of determining whether a given graph $G$ has a vertex cover of size at most $k$ has been an important benchmark problem in parameterized complexity for both \emph{fixed-parameter tractability} and \emph{(polynomial) kernelization},\footnote{Detailed definitions can be found in Section~\ref{section:preliminaries}. Note that we use $\ell$, rather than $k$, as the default symbol for parameters and use \probname{Vertex Cover}\xspace{}$(\ell)$ to refer to the \probname{Vertex Cover}\xspace problem with parameter $\ell$.} which are the two notions of tractability for parameterized problems. Kernelization, in particular, formalizes the widespread notion of efficient preprocessing, allowing a rigorous study (cf.~\cite{Kratsch14}).
We present a randomized polynomial kernelization for \probname{Vertex Cover}\xspace for the to-date smallest parameter, complementing a recent fixed-parameter tractability result~by~Garg~and~Philip~\cite{GargP16}.
Let us first recall what is known for the so-called \emph{standard parameterization} \probname{Vertex Cover}\xspacek, i.e., with parameter $\ell=k$: There is a folklore $\mathcal{O}^*(2^k)$ time\footnote{We use $\mathcal{O}^*$ notation, which suppresses polynomial factors.} algorithm for testing whether a graph $G$ has a vertex cover of size at most $k$, proving that \probname{Vertex Cover}\xspacek is fixed-parameter tractable (\ensuremath{\mathsf{FPT}}\xspace); this has been improved several times with the fastest known algorithm due to Chen et al.~\cite{ChenKX10} running in time $\mathcal{O}^*(1.2738^k)$. Under the Exponential Time Hypothesis of Impagliazzo et al.~\cite{ImpagliazzoPZ01} there is no algorithm with runtime $\mathcal{O}^*(2^{o(k)})$. The best known kernelization for \probname{Vertex Cover}\xspacek reduces any instance $(G,k)$ to an equivalent instance $(G',k')$ with $|V(G')|\leq 2k$; the total size is $\mathcal{O}(k^2)$~\cite{ChenKJ01}. Unless \ensuremath{\mathsf{NP \subseteq coNP/poly}}\xspace and the polynomial hierarchy collapses there is no kernelization to size $\mathcal{O}(k^{2-\varepsilon})$~\cite{DellM14}.
At first glance, the \ensuremath{\mathsf{FPT}}\xspace and kernelization results for \probname{Vertex Cover}\xspacek seem essentially best possible. This is true for parameter $\ell=k$, but there are \emph{smaller parameters} $\ell'$ for which both \ensuremath{\mathsf{FPT}}\xspace-algorithms and polynomial kernelizations are known. The motivation for this is that even when $\ell'=\mathcal{O}(1)$, the value $\ell=k$ may be as large as $\Omega(n)$, making both \ensuremath{\mathsf{FPT}}\xspace-algorithm and kernelization for parameter $k$ useless for such instances (time $2^{\Omega(n)}$ and size guarantee $\mathcal{O}(n)$). In contrast, for $\ell'=\mathcal{O}(1)$ an \ensuremath{\mathsf{FPT}}\xspace-algorithm with respect to $\ell'$ runs in polynomial time (with only leading constant depending on $\ell'$). Let us discuss the relevant type of smaller parameter, which relates to \emph{lower bounds on the optimum} and was introduced by Mahajan and Raman~\cite{MahajanR99}; two other types are discussed briefly under related work.
Two well-known lower bounds for the size of vertex covers for a graph $G=(V,E)$ are the maximum size of a matching of $G$ and the smallest size of fractional vertex covers for $G$; we (essentially) follow Garg and Philip~\cite{GargP16} in denoting these two values by $MM(G)$ and $LP(G)$. Note that the notation $LP(G)$ comes from the fact that fractional vertex covers come up naturally in the linear programming relaxation of the \probname{Vertex Cover}\xspace problem, where we must assign each vertex a fractional value such that each edge is incident with total value of at least $1$. In this regard, it is useful to observe that the LP relaxation of the \problem{Maximum Matching} problem is exactly the dual of this. Accordingly, we have $MM(G)\leq LP(G)$ since each integral matching is also a fractional matching, i.e., with each vertex incident to a total value of at most $1$. Similarly, using $VC(G)$ to denote the minimum size of vertex covers of $G$ we get $VC(G)\geq LP(G)$ and, hence, $VC(G)\geq LP(G)\geq MM(G)$.
A number of papers have studied vertex cover with respect to ``above lower bound'' parameters $\ell'=k-MM(G)$ or $\ell''=k-LP(G)$ \cite{RazgonO09,RamanRS11,CyganPPW13,NarayanaswamyRRS12,LokshtanovNRRS14}. Observe that
\[
k\geq k-MM(G) \geq k-LP(G).
\]
For the converse, note that $k$ can be unbounded in terms of $k-MM(G)$ and $k-LP(G)$, whereas $k-MM(G)\leq 2(k-LP(G))$ holds~\cite{KratschW12,Jansen_Thesis}. Thus, from the perspective of achieving fixed-parameter tractability (and avoiding large parameters) both parameters are equally useful for improving over parameter $k$. Razgon and O'Sullivan~\cite{RazgonO09} proved fixed-parameter tractability of \probname{Almost 2-SAT($k$)}\xspace, which implies that \probname{Vertex Cover}\xspaceamm is \ensuremath{\mathsf{FPT}}\xspace due to a reduction to \probname{Almost 2-SAT($k$)}\xspace by Mishra et al.~\cite{MishraRSSS11}. Using $k-MM(G)\leq 2(k-LP(G))$, this also entails fixed-parameter tractability of \probname{Vertex Cover}\xspacealp.
After several improvements~\cite{RamanRS11,CyganPPW13,NarayanaswamyRRS12,LokshtanovNRRS14} the fastest known algorithm, due to Lokshtanov et al.~\cite{LokshtanovNRRS14}, runs in time $\mathcal{O}^*(2.3146^{k-MM(G)})$. The algorithms of Narayanaswamy et al.~\cite{NarayanaswamyRRS12} and Lokshtanov et al.~\cite{LokshtanovNRRS14} achieve the same parameter dependency also for parameter $k-LP(G)$. The first (and to our knowledge only) kernelization result for these parameters is a randomized polynomial kernelization for \probname{Vertex Cover}\xspacealp by Kratsch and Wahlstr\"om~\cite{KratschW12}, which of course applies also to the larger parameter $k-MM(G)$.
Recently, Garg and Philip~\cite{GargP16} made an important contribution to understanding the parameterized complexity of the \probname{Vertex Cover}\xspace problem by proving it to be \ensuremath{\mathsf{FPT}}\xspace with respect to parameter $\ell=k-(2LP(G)-MM(G))$. Building on an observation of Lov\'asz and Plummer~\cite{LovaszP1986} they prove that $VC(G)\geq 2LP(G)-MM(G)$, i.e., that $2LP(G)-MM(G)$ is indeed a lower bound for the minimum vertex covers size of any graph $G$. They then design a branching algorithm with running time $\mathcal{O}^*(3^\ell)$ that builds on the well-known Gallai-Edmonds decomposition for maximum matchings to guide its branching choices.
\problembox{\probname{Vertex Cover}\xspaceanb}{A graph $G=(V,E)$ and an integer $k\in\mathbb{N}$.}{$\ell=k-(2LP(G)-MM(G))$ where $LP(G)$ is the minimum size of fractional vertex covers for $G$ and $MM(G)$ is the maximum cardinality of matchings of $G$.}{Does $G$ have a vertex cover of size at most $k$, i.e., a set $X\subseteq V$ of size at most $k$ such that each edge of $E$ has at least one endpoint in $X$?}
Since $LP(G)\geq MM(G)$, we clearly have $2LP(G)-MM(G)\geq LP(G)$ and hence $\ell= k-(2LP(G)-MM(G))$ is indeed at most as large as the previously best parameter $k-LP(G)$. We can easily observe that $k-LP(G)$ cannot be bounded in terms of $\ell$: For any odd cycle $C$ of length $2s+1$ we have $LP(C)=\frac12(2s+1)$, $VC(C)=s+1$, and $MM(C)=s$. Thus, a graph $G$ consisting of $t$ vertex-disjoint odd cycles of length $2s+1$ has $LP(G)=\frac12t(2s+1)$, $VC(G)=t(s+1)$, and $MM(G)=ts$. For $k=VC(G)=t(s+1)$ we get
\[
\ell=k - (2LP(G)-MM(G))=t(s+1) - t(2s+1) + ts=0
\]
whereas
\[
k-LP(G) = t(s+1) - \frac12t(2s+1) = \frac12t(2s+2) - \frac12t(2s+1) = \frac12t.
\]
Generally, it can be easily proved that $LP(G)$ and $2LP(G)-MM(G)$ differ by exactly $\frac12$ on any \emph{factor-critical} graph (cf.~Proposition~\ref{proposition:factorcritical:minvc:minfvc}).
As always in parameterized complexity, when presented with a new fixed-parameter tractability result, the next question is whether the problem also admits a polynomial kernelization. It is well known that decidable problems are fixed-parameter tractable if and only if they admit a (not necessarily polynomial) kernelization.\footnote{We sketch this folklore fact for \probname{Vertex Cover}\xspaceanb: If the input is larger than $3^\ell$, where $\ell=k-(2LP(G)-MM(G))$, then the algorithm of Garg and Philip~\cite{GargP16} runs in polynomial time and we can reduce to an equivalent small yes- or no-instance; else, the instance size is bounded by $3^\ell$; in both cases we get size at most $3^\ell$ in polynomial time. The converse holds since a kernelization followed by any brute-force algorithm on an instance of, say, size $g(\ell)$ gives an \ensuremath{\mathsf{FPT}}\xspace running time in terms of $\ell$.} Nevertheless, not all problems admit polynomial kernelizations and, in the present case, both an extension of the methods for parameter $k-LP(G)$ \cite{KratschW12} or a lower bound proof similar to Cygan et al.~\cite{CyganLPPS14} or Jansen~\cite[Section 5.3]{Jansen_Thesis} (see related work) are conceivable.
\subparagraph{Our result.}
We give a randomized polynomial kernelization for \probname{Vertex Cover}\xspaceanb. This improves upon parameter $k-LP(G)$ by giving a strictly smaller parameter for which a polynomial kernelization is known. At high level, the kernelization takes the form of a (randomized) polynomial parameter transformation from \probname{Vertex Cover}\xspaceanb to \probname{Vertex Cover}\xspaceamm, i.e., a polynomial-time many-one (Karp) reduction with \emph{output parameter polynomially bounded in the input parameter}. It is well known (cf.~Bodlaender et al.~\cite{BodlaenderTY11}) that this implies a polynomial kernelization for the source problem, i.e., for \probname{Vertex Cover}\xspaceanb in our case. Let us give some more details of this transformation.
Since the transformation is between different parameterizations of the same problem, it suffices to handle parts of any input graph $G$ where the input parameter $\ell=k-(2LP(G)-MM(G))$ is (much) smaller than the output parameter $k-MM(G)$. After the well-known LP-based preprocessing (cf.~\cite{GargP16}), the difference in parameter values is equal to the number of vertices that are exposed (unmatched) by any maximum matching $M$ of $G$.
Consider the Gallai-Edmonds decomposition $V=A\mathbin{\dot\cup} B\mathbin{\dot\cup} D$ of $G=(V,E)$, where $D$ contains the vertices that are exposed by at least one maximum matching, $A=N(D)$, and $B=V\setminus (A\cup D)$. Let $M$ be a maximum matching and let $t$ be the number of exposed vertices. There are $t$ components of $G[D]$ that have exactly one exposed vertex each. The value $2LP(G)-MM(G)$ is equal to $|M|+t$ when $LP(G)=\frac12|V|$, as implied by LP-based preprocessing.
To reduce the difference in parameter values we will remove all but $\mathcal{O}(\ell^4)$ components of $G[D]$ that have an exposed vertex; they are called \emph{unmatched components} for lack of a matching edge to $A$ and we can ensure that they are not singletons. It is known that any such component $C$ is factor-critical and hence has no vertex cover smaller than $\frac12(|C|+1)$; this exactly matches its contribution to $|M|+t$: It has $\frac12(|C|-1)$ edges of $M$ and one exposed vertex. Unless the instance is trivially \textbf{no}\xspace all but at most $\ell$ of these components $C$ have a vertex cover of size $\frac12(|C|+1)$, later called a \emph{tight vertex cover}. The only reason not to use a tight vertex cover for $C$ can be due to adjacent vertices in $A$ that are not selected; this happens at most $\ell$ times. A technical lemma proves that this can always be traced to at most three vertices of $C$ and hence at most three vertices in $A$ that are adjacent with $C$.
In contrast, there are (matched, non-singleton) components $C$ of $G[D]$ that together with a matched vertex $v\in A$ contribute $\frac12(|C|+1)$ to the lower bound due to containing this many matching edges. To cover them at this cost requires not selecting vertex $v$. This in turn propagates along $M$-alternating paths until the cover picks both vertices of an $M$-edge, which happens at most $\ell$ times, or until reaching an unmatched component, where it may help prevent a tight vertex cover. We translate this effect into a two-way separation problem in an auxiliary directed graph. Selecting both vertices of an $M$-edge is analogous to adding a vertex to the separator. Relative to a separator the question becomes which sets of at most three vertices of $A$ that can prevent tight vertex covers are still reachable by propagation. At this point we can apply representative set tools from Kratsch and Wahlstr\"om~\cite{KratschW12} to identify a small family of such triplets that works for all separators (and hence for all so-called \emph{dominant} vertex covers) and keep only the corresponding components.
\subparagraph{Related work.}
Let us mention some further kernelization results for \probname{Vertex Cover}\xspace with respect to nonstandard parameters. There are two further types of interesting parameters:
\begin{enumerate}
\item \emph{Width-parameters:} Parameters such as treewidth allow dynamic programming algorithms running in time, e.g., $\mathcal{O}^*(2^{\tw})$, independently of the size of the vertex cover. It is known that there are no polynomial kernels for \probname{Vertex Cover}\xspace (or most other \ensuremath{\mathsf{NP}}\xspace-hard problems) under such parameters~\cite{BodlaenderDFH09}. The treewidth of a graph is upper bounded by the smallest vertex cover, whereas graphs of bounded treewidth can have vertex cover size $\Omega(n)$.
\item \emph{``Distance to tractable case''-parameters:} \probname{Vertex Cover}\xspace can be efficiently solved on forests. By a simple enumeration argument it is fixed-parameter tractable when $\ell$ is the minimum number of vertices to delete such that $G$ becomes a forest. Jansen and Bodlaender~\cite{JansenB13} gave a polynomial kernelization to $\mathcal{O}(\ell^3)$ vertices. Note that the vertex cover size is an upper bound on $\ell$, whereas trees can have unbounded vertex cover size. The \ensuremath{\mathsf{FPT}}\xspace-result can be carried over to smaller parameters corresponding to distance from larger graph classes on which \probname{Vertex Cover}\xspace is polynomial-time solvable, however, Cygan et al.~\cite{CyganLPPS14} and Jansen~\cite[Section 5.3]{Jansen_Thesis} ruled out polynomial kernels for some of them. E.g., if $\ell$ is the deletion-distance to an outerplanar graph then there is no kernelization for \probname{Vertex Cover}\xspace{}$(\ell)$ to size polynomial in $\ell$ unless the polynomial hierarchy collapses~\cite{Jansen_Thesis}.
\end{enumerate}
\subparagraph{Organization.}
Section~\ref{section:preliminaries} gives some preliminaries. In Section~\ref{section:tightvertexcovers:factorcritical} we discuss vertex covers of factor-critical graphs and prove the claimed lemma about critical sets. Section~\ref{section:nicedecompositions} introduces a relaxation of the Gallai-Edmonds decomposition, called \emph{nice decomposition}, and Section~\ref{section:nicedecompositionsandvertxcovers} explores the relation between nice decompositions and vertex covers. The kernelization for \probname{Vertex Cover}\xspaceanb is given in Section~\ref{section:kernelization}. In Section~\ref{section:proofofmatroidresult} we provide for self-containment a result on representative sets that follows readily from~\cite{KratschW12}. We conclude in Section~\ref{section:conclusion}.
\section{Preliminaries}\label{section:preliminaries}
We use the shorthand~$[n]:=\{1,\ldots,n\}$. We use $A\mathbin{\dot\cup} B$ to denote the disjoint union of $A$ and $B$.
\subparagraph{Parameterized complexity.}
Let us recall that a \emph{parameterized problem} is a set $Q\subseteq\Sigma^*\times\mathbb{N}$ where $\Sigma$ is any finite alphabet, i.e., a language of pairs $(x,\ell)$ where the component $\ell\in\mathbb{N}$ is called the \emph{parameter}.
Recall also that a classical (unparameterized) problem is usually given as a set (language) $L\subseteq\Sigma^*$. For the classical problem \problem{Vertex Cover}, with instances $(G,k)$, asking whether $G$ has a vertex cover of size at most $k$, the canonical parameterized problem is \probname{Vertex Cover}\xspacek where the parameter value is simply $\ell=k$; this is the same procedure for any other decision problem obtained from an optimization problem by asking whether $\opt\leq k$ resp.\ $\opt\geq k$ and is called the \emph{standard parameterization}. We remark that this notation is usually abused by, e.g., using $(G,k)$ for an instance of \probname{Vertex Cover}\xspacek rather than the redundant $((G,k),k)$; we will use $(G,k)$ for $((G,k),k)$ and $(G,k,\ell)$ for $((G,k),\ell)$.
A parameterized problem $Q$ is \emph{fixed-parameter tractable} (\ensuremath{\mathsf{FPT}}\xspace) if there exists a function $f\colon\mathbb{N}\to\mathbb{N}$, a constant $c$, and an algorithm $A$ that correctly decides $(x,\ell)\in Q$ in time $f(\ell)\cdot |x|^c$ for all $(x,\ell)\in\Sigma^*\times\mathbb{N}$. A parameterized problem $Q$ has a \emph{kernelization} if there is a function $g\colon\mathbb{N}\to\mathbb{N}$ and a polynomial-time algorithm $K$ that on input $(x,\ell)$ returns an instance $(x',\ell')$ with $|x'|,\ell'\leq g(\ell)$ and with $(x,\ell)\in Q$ if and only if $(x',\ell')\in Q$. The function $g$ is called the \emph{size} of the kernelization $K$ and a polynomial kernelization requires that $g$ is polynomially bounded. A \emph{randomized (polynomial) kernelization} may err with some probability, in which case the returned instance is not equivalent to the input instance. Natural variants with one-side error respectively bounded error are defined completely analogous to randomized algorithms. For a more detailed introduction to parameterized complexity we recommend the recent books by Downey and Fellows~\cite{DowneyF13} and Cygan et al.~\cite{CyganFKLMPPS15}.
\subparagraph{Graphs.}
We require both directed and undirected graphs; all graphs are finite and simple, i.e., they have no parallel edges or loops. Accordingly, an undirected graph $G=(V,E)$ consists of a finite set $V$ of vertices and a set $E\subseteq\binom{V}{2}$ of edges; a directed graph $H=(V,E)$ consists of a finite set $V$ and a set $E\subseteq V^2\setminus\{(v,v)\mid v\in V\}$. For clarity, all undirected graphs are called $G$ and all directed graphs are called $H$ (possibly with indices etc.).
For a graph $G=(V,E)$ and vertex set $X\subseteq V$ we use $G-X$ to denote the graph induced by $V\setminus X$; we also use $G-v$ if $X=\{v\}$.
Analogous definitions are used for directed graphs $H$.
Let $H=(V,E)$ be a directed graph and let $S$ and $T$ be two not necessarily disjoint vertex sets in $H$. A set $X\subseteq V$ is an \emph{$S,T$-separator} if in $H-X$ there is no path from $S\setminus X$ to $T\setminus X$; note that $X$ may overlap both $S$ and $T$ and that $S\cap T\subseteq X$ is required. The set $T$ is \emph{closest to $S$} if there is no $S,T$-separator $X$ with $X\neq T$ and $|X|\leq|T|$, i.e., if $T$ is the unique minimum $S,T$-separator in $H$ (cf.~\cite{KratschW12}). Both separators and closeness have analogous definitions in undirected graphs but they are not required here.
\begin{proposition}[cf.~\cite{KratschW12}]\label{proposition:closest}
Let $H=(V,E)$ be a directed graph and let $S,T\subseteq V$ such that $T$ is closest to $S$. For any vertex $v\in V\setminus T$ that is reachable from $S$ in $H-T$ there exist $|T|+1$ (fully) vertex-disjoint paths from $S$ to $T\cup\{v\}$.
\end{proposition}
\begin{proof}
Assume for contradiction that such $|T|+1$ directed paths do not exist. By Menger's Theorem there must be an $S,T\cup\{v\}$-separator $X$ of size at most $|T|$. Observe that $X\neq T$ since $v$ is reachable from $S$ in $H-T$. Thus, $X$ is an $S,T$-separator of size at most $|T|$ that is different from $T$; this contradicts closeness of $T$.
\end{proof}
For an undirected graph $G=(V,E)$, a \emph{matching} is any set $M\subseteq E$ such that no two edges in $M$ have an endpoint in common. If $M$ is a matching in $G=(V,E)$ then we will say that a path is $M$-alternating if its edges are alternatingly from $M$ and from $\overline{M}:=E\setminus M$. An $M,M$-path is an $M$-alternating path whose first and last edge are from $M$; it must have odd length. Similarly, we define $\overline{M},M$-paths, $M,\overline{M}$-paths (both of even length), and $\overline{M},\overline{M}$-paths (of odd length). If $M$ is a matching of $G$ and $v$ is incident with an edge of $M$ then we use $M(v)$ to denote the other endpoint of that edge, i.e., the \emph{mate} or \emph{partner} of $v$. Say that a vertex $v$ is \emph{exposed by $M$} if it is not incident with an edge of $M$; we say that $v$ is \emph{exposable} if it is exposed by some maximum matching of $G$. A graph $G=(V,E)$ is \emph{factor-critical} if for each vertex $v\in V$ the graph $G-v$ has a perfect matching (a \emph{near-perfect matching of $G$}); observe that all factor-critical graphs must have an odd number of vertices.
A \emph{vertex cover} of a graph $G=(V,E)$ is a set $X\subseteq V$ such that each edge $e\in E$ has at least one endpoint in $X$. There is a well-known linear programming relaxation of the \probname{Vertex Cover}\xspace problem for a graph $G=(V,E)$:
\begin{align*}
\min \quad& \sum_{v\in V} x(v)\\
s.t. \quad& x(u)+x(v)\geq 1\\
&x(v)\geq 0
\end{align*}
The optimum value of this linear program can be computed in polynomial time and it is denoted $LP(G)$. The feasible solutions $x\colon V\to\mathbb{R}_{\geq 0}$ are called fractional vertex covers; the \emph{cost} of a solution/fractional vertex cover $x$ is $\sum_{v\in V} x(v)$. It is well-known that the extremal points $x$ of the linear program are half-integral, i.e., $x\in\{0,\frac12,1\}^V$. With this in mind, we will tacitly assume that all considered fractional vertex covers are half-integral. We will often use the simple fact that the size of any matching $M$ of $G$ lower bounds both the cardinality of vertex covers and the cost of fractional vertex covers of $G$.
\subparagraph{Gallai-Edmonds decomposition.} We will now introduce the Gallai-Edmonds decomposition following the well-known book of Lov\'asz and Plummer~\cite{LovaszP1986}.\footnote{We use $B$ instead of $C$ for $V\setminus (A\cup D)$ to leave the letter $C$ for cycles and connected components.}
\begin{definition}\label{definition:ged}
Let $G=(V,E)$ be a graph. The \emph{Gallai-Edmonds decomposition} of $G$ is a partition of $V$ into three sets $A$, $B$, and $D$ where
\begin{itemize}
\item $D$ consists of all vertices $v$ of $G$ such that there is a maximum matching $M$ of $G$ that contains no edge incident with $v$, i.e., that leaves $v$ exposed,
\item $A$ is the set of neighbors of $D$, i.e., $A:=N(D)$, and
\item $B$ contains all remaining vertices, i.e., $B:=V\setminus(A\cup D)$.
\end{itemize}
\end{definition}
It is known (and easy to verify) that the Gallai-Edmonds decomposition of any graph $G$ is unique and can be computed in polynomial time. The Gallai-Edmonds decomposition has a number of useful properties; the following theorem states some of them.
\begin{theorem}[cf.\ {\cite[Theorem~3.2.1]{LovaszP1986}}]\label{theorem:ged}
Let $G=(V,E)$ be a graph and let $V=A\mathbin{\dot\cup} B\mathbin{\dot\cup} D$ be its Gallai-Edmonds decomposition. The following properties hold:
\begin{enumerate}
\item The connected components of $G[D]$ are factor-critical.
\item The graph $G[B]$ has a perfect matching.
\item Every maximum matching $M$ of $G$ consists of a perfect matching of $G[B]$, a near-perfect matching of each component of $G[D]$, and a matching of $A$ into $D$.
\end{enumerate}
\end{theorem}
\section{Tight vertex covers of factor-critical graphs}\label{section:tightvertexcovers:factorcritical}
In this section we study vertex covers of factor-critical graphs, focusing on those that are of smallest possible size (later called tight vertex covers). We first recall the fact that any factor-critical graph with $n\geq 3$ vertices has no vertex cover of size less than $\frac12(n+1)$. By a similar argument such graphs have no fractional vertex cover of cost less than $\frac12n$.
\begin{proposition}[folklore]\label{proposition:factorcritical:minvc:minfvc}
Let $G=(V,E)$ be a factor-critical graph with at least three vertices. Every vertex cover $X$ of $G$ has cardinality at least $\frac12(|V|+1)$ and every fractional vertex cover $x\colon V\to \mathbb{R}_{\geq 0}$ of $G$ has cost at least $\frac12|V|$.
\end{proposition}
\begin{proof}
Let $X\subseteq V$ be a vertex cover of $G$. Since $G$ has at least three vertices and is factor-critical, it has a maximum matching $M$ of size $\frac12(|V|-1)\geq 1$. It follows that $X$ has size at least one. (This is not true for graphs consisting of a single vertex, which are also factor-critical. All other factor-critical graphs have at least three vertices.) Pick any vertex $v\in X$. Since $G$ is factor-critical, there is a maximum matching $M_v$ of $G-v$ of size $\frac12(|V|-1)$. It follows that $X$ must contain at least one vertex from each edge of $M_v$, and no vertex is contained in two of them. Together with $v$, which is not in any edge of $M_v$, this gives a lower bound of $1+\frac12(|V|-1)=\frac12(|V|+1)$, as claimed.
Let $x\colon V\to \mathbb{R}_{\geq 0}$ be a fractional vertex cover of $G$. We use again the matching $M$ of size at least one from the previous case; let $\{u,v\}\in M$. It follows that $x(u)+x(v)\geq 1$; w.l.o.g. we have $x(v)\geq \frac12$. Let $M_v$ be a maximum matching of $G-v$ of size $\frac12(|V|-1)$. For each edge $\{p,q\}\in M_v$ we have $x(p)+x(q)\geq 1$. Since the matching edges are disjoint we get a lower bound of $\sum_{p\in V\setminus\{v\}} x(p)\geq \frac12(|V|-1)$. Together with $x(v)\geq \frac12$ we get the claimed lower bound of $\frac12|V|$ for the cost of $x$.
\end{proof}
Note that Proposition~\ref{proposition:factorcritical:minvc:minfvc} is tight for example for all odd cycles of length at least three, all of which are factor-critical. We now define tight vertex covers and critical sets.
\begin{definition}[tight vertex covers, critical sets]
Let $G=(V,E)$ be a factor-critical graph with $|V|\geq 3$. A vertex cover $X$ of $G$ is \emph{tight} if $|X|=\frac12(|V|+1)$. Note that this is different from a minimum vertex cover, and a factor-critical graph need not have a tight vertex cover; e.g., odd cliques with at least five vertices are factor-critical but have no tight vertex cover.
A set $Z\subseteq V$ is called a \emph{bad set} of $G$ if there is no tight vertex cover of $G$ that contains $Z$. The set $Z$ is a \emph{critical set} if it is a minimal bad set, i.e., no tight vertex cover of $G$ contains $Z$ but for all proper subsets $Z'$ of $Z$ there is a tight vertex cover containing $Z'$.
\end{definition}
Observe that a factor-critical graph $G=(V,E)$ has no tight vertex cover if and only if $Z=\emptyset$ is a critical set of $G$. It may be interesting to note that a set $X\subseteq V$ of size $\frac12(|V|+1)$ is a vertex cover of $G$ if and only if it contains no critical set. (We will not use this fact and hence leave its two line proof to the reader.) The following lemma proves that all critical sets of a factor-critical graph have size at most three; this is of central importance for our kernelization. For the special case of odd cycles, the lemma has a much shorter proof and we point out that all critical sets of odd cycles have size exactly three.
\begin{lemma}\label{lemma:criticalsets:boundsize}
Let $G=(V,E)$ be a factor-critical graph with at least three vertices. All critical sets $Z$ of $G$ have size at most three.
\end{lemma}
\begin{proof}
Let $\ell\in\mathbb{N}$ with $\ell\geq1$ such that $|V|=2\ell+1$; recall that all factor-critical graphs have an odd number of vertices.
Assume for contradiction that there is a critical set $Z$ of $G$ of size at least four. Let $w,x,y,z\in Z$ be any four pairwise different vertices from $Z$. Let $M$ be a maximum matching of $G-w$. Since $G$ is factor-critical, we get that $M$ is a perfect matching of $G-w$ and has size $|M|=\ell$. Observe that any tight vertex cover of $G$ that contains $w$ must contain exactly one vertex from each edge of $M$, since its total size is $\frac12(|V|+1)=\ell+1$. We will first analyze $G$ and show that the presence of certain structures would imply that some proper subset $Z'$ of $Z$ is bad, contradicting the assumption that $Z$ is critical. Afterwards, we will use the absence of these structures to find a tight vertex cover that contains $Z$, contradicting the fact that it is a critical set.
If there is an $M,M$-path from $x$ to $y$ then $\{w,x,y\}$ is a bad set, i.e., no tight vertex cover of $G$ contains all three vertices $w$, $x$, and $y$, contradicting the choice of $Z$: Let $P=(v_1,v_2,\ldots,v_{p-1},v_p)$ denote an $M,M$-path from $v_1=x$ to $v_p=y$. Accordingly, we have $\{v_1,v_2\},\ldots,\{v_{p-1},v_p\}\in M$ and the path $P$ has odd length. Assume that $X$ is a tight vertex cover containing $w$, $x$, and $y$. It follows, since $w\in X$, that $X$ contains exactly one vertex per edge in $M$; in particular it contains exactly one vertex per matching edge on the path $P$. Since $v_1=x\in X$ we have $v_2\notin X$. Thus, as $\{v_2,v_3\}$ is an edge of $G$, we must have $v_3\in X$ to cover this edge; this in turn implies that $v_4\notin X$ since it already contains $v_3$ from the matching edge $\{v_3,v_4\}$. Continuing this argument along the path $P$ we conclude that $v_{p-1}\in X$ and $v_p\notin X$, contradicting the fact that $v_p=y\in X$. Thus, if there is an $M,M$-path from $x$ to $y$ then there is no tight vertex cover of $G$ that contains $w$, $x$, and $y$, making $\{w,x,y\}$ a bad set and contradicting the assumption that $Z$ is a critical set. It follows that there can be no $M,M$-path from $x$ to $y$. The same argument can be applied also to $x$ and $z$, and to $y$ and $z$, ruling out $M,M$-paths connecting them.
Similarly, if there is an edge $\{u,v\}\in M$ such that $z$ reaches both $u$ and $v$ by (different, not necessarily disjoint) $M,\overline{M}$-paths then no tight vertex cover of $G$ contains both $w$ and $z$, contradicting the choice of $Z$: Let $P=(v_1,v_2,\ldots,v_{p-1},v_p)$ denote an $M,\overline{M}$-path from $v_1=z$ to $v_p=u$ with $\{v_1,v_2\},\{v_3,v_4\},\ldots,\{v_{p-2},v_{p-1}\}\in M$. Let $X$ be a tight vertex cover of $G$ that contains $w$ and $z$. It follows (as above) that $v_1,v_3,\ldots,v_{p-2}\in X$ and $v_2,v_4,\ldots,v_{p-1}\notin X$, by considering the induced $M,M$-path from $z=v_1$ to $v_{p-1}$. The fact that $v_{p-1}\notin X$ directly implies that $v_p=u\in X$ in order to cover the edge $\{v_{p-1},v_p\}$. Repeating the same argument on an $M,\overline{M}$-path from $z$ to $v$ we get that $v\in X$. Thus, we conclude that $u$ and $v$ are both in $X$, contradicting the fact that $X$ must contain exactly one vertex of each edge in $M$. Hence, there is no tight vertex cover of $G$ that contains both $w$ and $z$. We conclude that $\{w,z\}$ is a bad set, contradicting the choice of $Z$. Hence, there is no edge $\{u,v\}\in M$ such that $z$ has $M,\overline{M}$-paths (not necessarily disjoint) to both $u$ and $v$.
Now we will complete the proof by using the established properties, i.e., the non-existence of certain $M$-alternating paths starting in $z$, to construct a tight vertex cover of $G$ that contains all of $Z$, giving the final contradiction. Using minimality of $Z$, let $X$ be a tight vertex cover of $G$ that contains $Z\setminus\{z\}$; by choice of $Z$ we have $z\notin X$. We construct the claimed vertex cover $X'\supseteq Z$ from $X'=X$ as follows:
\begin{enumerate}
\item Add vertex $z$ to $X'$ and remove $M(z)$, i.e., remove the vertex that $z$ is matched to.
\item Add all vertices $v$ to $X'$ that can be reached from $z$ by an $M,\overline{M}$-path.
\item Remove all vertices from $X'$ that can be reached from $z$ by an $M,M$-path of length at least three. (There is a single such path of length one from $z$ to $M(z)$ which, for clarity, was handled already above.)
\end{enumerate}
We need to check four things: (1) The procedure above is well-defined, i.e., no vertex can be reached by both $M,M$- and $M,\overline{M}$-paths from $z$. (2) The size of $X'$ is at most $|X|=\ell+1$. (3) $X'$ is a vertex cover. (4) The set $X'$ contains $w$, $x$, $y$, and $z$.
(1) Assume that there is a vertex $v$ such that $z$ reaches $v$ both by an $M,M$-path $P=(v_1,v_2,\ldots,v_p)$ with $v_1=z$ and $v_p=v$, and by an $M,\overline{M}$-path $P'$. Observe that $\{v_{p-1},v_p\}\in M$ since $P$ is an $M,M$-path and, hence, that $P''=(v_1,\ldots,v_{p-1})$ is an $M,\overline{M}$-path from $z$ to $v_{p-1}$. Together, $P'$ and $P''$ constitute two $M,\overline{M}$-paths from $z$ to both endpoints $v_{p-1}$ and $v_p$ of the matching edge $\{v_{p-1},v_p\}$; a contradiction (since we ruled out this case earlier).
(2) In the first step, we add $z$ and remove $M(z)$. Note that $z\notin X$ implies that $M(z)\in X$ (we start with $X'=X$). Thus the size of $X'$ does not change. Consider a vertex $v$ that is added in the second step, i.e., with $v\notin X$: There is an $M,\overline{M}$-path $P$ from $z$ to $v$. Since $w\in X$ we know that $v\neq w$. Thus, since $M$ is a perfect matching of $G-w$, there is a vertex $u$ with $u=M(v)$. The vertex $u:=M(v)$ must be in $X$ to cover the edge $\{v,u\}\in M$, as $v\notin X$. Moreover, $u$ cannot be on $P$ since that would make it incident with a second matching edge other than $\{u,v\}$. Thus, by extending $P$ with $\{v,u\}$ we get an $M,M$-path from $z$ to $u$, implying that $u$ is removed in the third step. Since $u\in X$ the total size change is zero. Observe that the vertex $u=M(v)$ used in this argument is not used for any other vertex $v'$ added in the second step since it is only matched to $v$. Similarly, due to (1), the vertex $u$ is not also added in the second step since it cannot simultaneously have an $M,\overline{M}$-path from $z$.
(3) Assume for contradiction that some edge $\{u,v\}$ is not covered by $X'$, i.e., that $u,v\notin X'$. Since $w\in X'$ and $w$ is the only unmatched vertex, it follows that both $u$ and $v$ are incident with some edge of $M$. We distinguish two cases, namely (a) $\{u,v\}\in M$ and (b) $\{u,v\}\notin M$.
(3.a) If $\{u,v\}\in M$ then without loss of generality assume $u\in X$ (as $X$ is a vertex cover). By our assumption we have $u\notin X'$, which implies that we have removed it on account of having an $M,M$-path $P$ from $z$ to $u$. Since $\{u,v\}\in M$ the path $P$ must visit $v$ as its penultimate vertex; there is no other way for an $M,M$-path to reach $u$. This, however, implies that there is an $M,\overline{M}$-path from $z$ to $v$, and that we have added $v$ in the second step; a contradiction.
(3.b) In this case we have $\{u,v\}\notin M$. Again, without loss of generality, assume that $u\in X$. Since $u\notin X'$ there must be an $M,M$-path $P$ from $z$ to $u$. If $P$ does not contain $v$ then extending $P$ by edge $\{u,v\}\notin M$ would give an $M,\overline{M}$-path from $z$ to $v$ and imply that $v\in X'$; a contradiction. In the remaining case, the vertex $v$ is contained in $P$; let $P'$ denote the induced path from $z$ to $v$ (not containing $u$ as it is the final vertex of $P$). Since $v\notin X'$ we know that $P'$ cannot be an $M,\overline{M}$-path, or else we would have $v\in X'$, and hence it must be an $M,M$-path. Now, however, extending $P'$ via $\{v,u\}\notin M$ yields an $M,\overline{M}$-path from $z$ to $u$, contradicting (1). Altogether, we conclude that $X'$ is indeed a vertex cover.
(4) Clearly, $z\in X'$ by construction. Similarly, $w\in X'$ since it is contained in $X$ and it cannot be removed since there is no incident $M$-edge (i.e., no $M,M$-paths from $z$ can end in $w$). Finally, regarding $x$ and $y$, we proved earlier that there are no $M,M$-paths from $z$ to $x$ or from $z$ to $y$. Thus, since both $x$ and $y$ are in $X$ they must also be contained in $X'$.
We have shown that under the assumption of minimality of $Z$ and using $|Z|\geq 4$ one can construct a vertex cover $X'$ of optimal size $\ell+1$ that contains $Z$ entirely. This contradicts the choice of $Z$ and completes the proof.
\end{proof}
\section{(Nice) relaxed Gallai-Edmonds decomposition}\label{section:nicedecompositions}
The Gallai-Edmonds decomposition of a graph has a number of strong properties and, amongst others, has played a vital role in the FPT-algorithm of Garg and Philip~\cite{GargP16}. It is thus not surprising that we find it rather useful for the claimed kernelization. Unfortunately, in the context of reduction rules, there is the drawback that the Gallai-Edmonds decomposition of a graph and that of the graph obtained from a reduction rule might be quite different. (E.g., even deleting entire components of $G[D]$ may ``move'' an arbitrary number of vertices from $A\cup D$ to $B$.) We cope with this problem by defining a relaxed variant of this decomposition. The relaxed form is no longer unique, but when applying certain reduction rules the created graph can effectively inherit the decomposition.
The definition mainly drops the requirement that $D$ is the set of exposable vertices and instead allows any set $D$ that gives the desired properties. Moreover, instead of a (strong) statement about all maximum matchings of $G$ (cf.\ Definition~\ref{definition:ged}) we simply require that a single maximum matching $M$ with appropriate properties be given along with $V=A\mathbin{\dot\cup} B\mathbin{\dot\cup} D$.
\begin{definition}[relaxed Gallai-Edmonds decomposition]\label{definition:relaxedged}
Let $G=(V,E)$ be a graph. A \emph{relaxed Gallai-Edmonds decomposition of $G$} is a tuple $(A,B,D,M)$ where $V=A\mathbin{\dot\cup} B\mathbin{\dot\cup} D$ and $M$ is a maximum matching of $G$ such that
\begin{enumerate}
\item $A=N(D)$,
\item each connected component of $G[D]$ is factor-critical,
\item $M$ restricted to $B$ is a perfect matching of $G[B]$,
\item $M$ restricted to any component $C$ of $G[D]$ is a near-perfect matching of $G[C]$, and
\item each vertex of $A$ is matched by $M$ to a vertex of $D$.
\end{enumerate}
\end{definition}
\begin{observation}
Let $G=(V,E)$ be a graph and let $(A,B,D,M)$ be a relaxed Gallai-Edmonds decomposition of $G$. For each connected component $C$ of $G[D]$ we have $N(C)\subseteq A$. (Note that this is purely a consequence of $N(D)=A$ and $C$ being a connected component of $G[D]$.)
\end{observation}
It will be of particular importance for us in what way the matching $M$ of a decomposition $(A,B,D,M)$ of $G$ matches vertices of $A$ to vertices of components of $G[D]$. We introduce appropriate definitions next. In particular, we define sets $\ensuremath{\mathcal{C}_1}\xspace$, $\ensuremath{\hat{\mathcal{C}}_1}\xspace$, $\ensuremath{\mathcal{C}_3}\xspace$, $\ensuremath{\hat{\mathcal{C}}_3}\xspace$, $A_1$, and $A_3$ that are derived from $(A,B,D,M)$ and $G$ in a well-defined way. Whenever we have a decomposition $(A,B,D,M)$ of $G$ we will use these sets without referring again to this definition. We will use, e.g., $\ensuremath{\mathcal{C}_1}\xspace'$ in case where we require these sets for two decomposed graphs $G$ and $G'$.
\begin{definition}[matched/unmatched connected components of {$G[D]$}]\label{definition:mumcomponents}
Let $G=(V,E)$ be a graph and let $(A,B,D,M)$ be a relaxed Gallai-Edmonds decomposition of $G$. We say that a connected component $C$ of $G[D]$ is \emph{matched} if there are vertices $v\in C$ and $u\in N(C)\subseteq A$ such that $\{u,v\}\in M$; we will also say that $u$ and $C$ are matched to one another. Otherwise, we say that $C$ is \emph{unmatched}. Note that edges of $M$ with both ends in $C$ have no influence on whether $C$ is matched or unmatched.
We use $\ensuremath{\mathcal{C}_1}\xspace$ and $\ensuremath{\hat{\mathcal{C}}_1}\xspace$ to denote the set of matched and unmatched singleton components in $G[D]$. We use $\ensuremath{\mathcal{C}_3}\xspace$ and $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ for matched and unmatched non-singleton components. By $A_1$ and $A_3$ we denote the set of vertices in $A$ that are matched to singleton respectively non-singleton components of $G[D]$; note that $A=A_1\mathbin{\dot\cup} A_3$. We remark that the names $\ensuremath{\mathcal{C}_3}\xspace$ and $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ refer to the fact that these components have at least three vertices each as they are factor-critical and non-singleton.
\end{definition}
\begin{observation}
Let $G=(V,E)$ be a graph and let $(A,B,D,M)$ be a relaxed Gallai-Edmonds decomposition of $G$. If a component $C$ of $G[D]$ is matched then there is a unique edge of $M$ that matches a vertex of $C$ with a vertex in $A$. This is a direct consequence of $M$ inducing a near-perfect matching on $G[C]$, i.e., that only a single vertex of $C$ is not matched to another vertex of $C$.
\end{observation}
We now define the notion of a nice relaxed Gallai-Edmonds decomposition (short: nice decomposition), which only requires in addition that there are no unmatched singleton components with respect to decomposition $(A,B,D,M)$ of $G$, i.e., that $\ensuremath{\hat{\mathcal{C}}_1}\xspace=\emptyset$. Not every graph has a nice decomposition, e.g., the independent sets (edgeless graphs) have none. For the moment, we will postpone the question of how to actually find a nice decomposition (and how to ensure that there is one for each considered graph).
\begin{definition}[nice decomposition]\label{definition:nicedecomposition}
Let $(A,B,D,M)$ be a relaxed Gallai-Edmonds decomposition of a graph $G$. We say that $(A,B,D,M)$ is a \emph{nice relaxed Gallai-Edmonds decomposition} (short a \emph{nice decomposition}) if there are no unmatched singleton components.
\end{definition}
In the following section we will derive several lemmas about how vertex covers of $G$ and a nice decomposition $(A,B,D,M)$ of $G$ interact. For the moment, we will only prove the desired property that certain operations for deriving a graph $G'$ from $G$ allow $G'$ to effectively inherit the nice decomposition of $G$ (and also keep most of the related sets $\ensuremath{\mathcal{C}_1}\xspace$ etc.\ the same).
\begin{lemma}\label{lemma:inheritance}
Let $G=(V,E)$ be a graph, let $(A,B,D,M)$ be a relaxed Gallai-Edmonds decomposition, and let $C\in\ensuremath{\hat{\mathcal{C}}_1}\xspace\mathbin{\dot\cup}\ensuremath{\hat{\mathcal{C}}_3}\xspace$ be an unmatched component of $G[D]$. Then $(A,B,D',M')$ is a relaxed Gallai-Edmonds decomposition of $G'=G-C$ where $M'$ is $M$ restricted to $V(G')=V\setminus C$ and where $D':=D\setminus C$. The corresponding sets $A_1$, $A_3$, $\ensuremath{\mathcal{C}_1}\xspace$, and $\ensuremath{\mathcal{C}_3}\xspace$ are the same as for $G$. The sets $\ensuremath{\hat{\mathcal{C}}_1}\xspace$ and $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ differ only by the removal of component $C$, i.e., $\ensuremath{\hat{\mathcal{C}}_1}\xspace'=\ensuremath{\hat{\mathcal{C}}_1}\xspace\setminus\{C\}$ and $\ensuremath{\hat{\mathcal{C}}_3}\xspace'=\ensuremath{\hat{\mathcal{C}}_3}\xspace\setminus\{C\}$. Moreover, if $(A,B,D,M)$ is a nice decomposition then so is $(A,B,D',M')$.
\end{lemma}
\begin{proof}
Clearly, $A\mathbin{\dot\cup} B\mathbin{\dot\cup} D'$ is a partition of the vertex set of $G'$. Let us first prove that $M'$ is a maximum matching of $G'$: To get $M'$ we delete the edges of $M$ in $C$ and we delete all vertices in $C$. Thus, any matching of $G'$ that is larger than $M'$ could be extended to a matching larger than $M$ for $G$ by adding the edges of $M$ on vertices of $C$. Now, we consider the connected components of $G'[D']$: We deleted the entire component $C$ of $G[D]$ to get $G'=G-C$. It follows that the connected components of $G'[D']$ are the same except for the absence of $C$, and they are factor-critical since that holds for all components of $G[D]$. Moreover, for any component $C'$ of $G'[D']$ the set $M'$ induces a near-perfect matching, as it is the restriction of $M$ to $G-C$. Similarly, since $B\cap C=\emptyset$ the set $M'$ induces a perfect matching on $G[B]$. In the same way, if $\{u,v\}\in M$ where $u\in A$ and $v\in C'$ where $C'$ is a connected component of $G[D]$ other than $C$ then $u$ is also matched to a component of $G'[D']$ in $G'$, namely to $C'$. It follows that $A\subseteq N_{G'}(D')$ using that $C$ is unmatched. The reverse inclusion follows since $N_G(D)=A$ and we did not make additional vertices adjacent to $D'$. Thus, $N_{G'}(D')=A$. Hence, $(A,B,D',M')$ is a relaxed Gallai-Edmonds decomposition.
Let us now check that the sets $A_1$, $A_3$, etc.\ are almost the same: We already saw that matching edges between vertices of $A$ and components of $G[D]$ persist in $G'$. It follows that $A'_1=A_1$ and $A'_3=A_3$, and that $\ensuremath{\mathcal{C}_3}\xspace'=\ensuremath{\mathcal{C}_3}\xspace$ and $\ensuremath{\mathcal{C}_1}\xspace'=\ensuremath{\mathcal{C}_1}\xspace$. If $C\in\ensuremath{\hat{\mathcal{C}}_1}\xspace$ then we get $\ensuremath{\hat{\mathcal{C}}_1}\xspace'=\ensuremath{\hat{\mathcal{C}}_1}\xspace\setminus\{C\}$; else we get $\ensuremath{\hat{\mathcal{C}}_1}\xspace'=\ensuremath{\hat{\mathcal{C}}_1}\xspace=\ensuremath{\hat{\mathcal{C}}_1}\xspace\setminus\{C\}$. Similarly, $\ensuremath{\hat{\mathcal{C}}_3}\xspace'=\ensuremath{\hat{\mathcal{C}}_3}\xspace\setminus\{C\}$. This completes the main statement of the lemma. The moreover part follows because $(A,B,D,M)$ being a nice decomposition of $G$ implies $\ensuremath{\hat{\mathcal{C}}_1}\xspace=\emptyset$, which yields $\ensuremath{\hat{\mathcal{C}}_1}\xspace'=\emptyset$ and, hence, that $(A,B,D',M')$ is a nice decomposition of $G'$.
\end{proof}
\section{Nice decompositions and vertex covers}\label{section:nicedecompositionsandvertxcovers}
In this section we study the relation of vertex covers $X$ of a graph $G$ and any nice decomposition $(A,B,D,M)$ of $G$. As a first step, we prove a lower bound of $|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|$ on the size of vertex covers of $G$; this bound holds also for relaxed Gallai-Edmonds decompositions. Additionally, we show that $|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|=2LP(G)-MM(G)$, if $(A,B,D,M)$ is a nice decomposition. Note that Garg and Philip~\cite{GargP16} proved that $2LP(G)-MM(G)$ is a lower bound for the vertex cover size for every graph $G$, but we require the bound of $|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|$ related to our decompositions, and the equality to $2LP(G)-MM(G)$ serves ``only'' to later relate to the parameter value $\ell=k-(2LP(G)-MM(G))$.
\begin{lemma}\label{lemma:nice:vclb}
Let $G=(V,E)$ be a graph and let $(A,B,D,M)$ be a nice decomposition of $G$. Each vertex cover of $G$ has size at least $|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|=2LP(G)-MM(G)$.
\end{lemma}
\begin{proof}
Let $X$ be any vertex cover of $G$. For each edge of $M$ that is not in a component of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ the set $X$ contains at least one of its endpoints. For each component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ the set $X$ contains at least $\frac12(|C|+1)$ vertices of $C$ by Proposition~\ref{proposition:factorcritical:minvc:minfvc}, since $C$ is also a component of $G[D]$ and all those components are factor-critical. Since $M$ contains a near-perfect matching of $C$, i.e., of cardinality $\frac12(|C|-1)$, the at least $\frac12(|C|+1)$ vertices of $C$ in $X$ can also be counted as one vertex per matching edge in $G[C]$ plus one additional vertex. Overall, the set $X$ contains at least $|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|$ vertices.
We now prove that $|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|=2LP(G)-MM(G)$; first we show that $LP(G)=\frac12|V|$. Let $x\colon V\to\{0,\frac12,1\}$ be a fractional vertex cover of $G$. For each edge $\{u,v\}\in M$ that is not in a component of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ we have $x(u)+x(v)\geq 1$, in other words, $\frac12$ per vertex of the matching edge. For each $C\in \ensuremath{\hat{\mathcal{C}}_3}\xspace$ we have $\sum_{v\in C} x(v)\geq\frac12|C|$ by Proposition~\ref{proposition:factorcritical:minvc:minfvc} since $G[C]$ is factor-critical; again this equals $\frac12$ per vertex (of $C$). Using that $(A,B,D,M)$ is a nice decomposition, we can show that these considerations yield a lower bound of $\frac12|V|$; it suffices to check that all vertices have been considered: Components in $\ensuremath{\mathcal{C}_1}\xspace\cup\ensuremath{\mathcal{C}_3}\xspace$ are fully matched, all vertices in $A$ are matched (to components in $\ensuremath{\mathcal{C}_1}\xspace\cup\ensuremath{\mathcal{C}_3}\xspace$), and $M$ restricts to a perfect matching of $G[B]$; all these vertices contribute $\frac12$ each since they are in an edge of $M$ that is not in a component of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$. All remaining vertices are in components of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ since $\ensuremath{\hat{\mathcal{C}}_1}\xspace=\emptyset$; these vertices contribute $\frac12$ per vertex by being in some component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ that contributes $\frac12|C|$. Overall we get that $x$ has cost at least $\frac12|V|$ and, hence, that $LP(G)\geq\frac12|V|$. Since $x(v)\equiv\frac12$ is a feasible fractional vertex cover for every graph, we conclude that $LP(G)=\frac12|V|$.
Now, let us consider $MM(G)$: Note that $MM(G)=|M|$ as $M$ is a maximum matching of $G$. Since $\ensuremath{\hat{\mathcal{C}}_1}\xspace=\emptyset$, we know that the only exposed vertices (w.r.t.~$M$) are in components of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$; exactly one vertex per component. Thus, $|V|=2|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|$, which implies
\[
2LP(G)-MM(G)=|V|-|M|=2|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|-|M|=|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|.
\]
This completes the proof.
\end{proof}
Intuitively, if the size of a vertex cover $X$ is close to the lower bound of $|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|$ then, apart from few exceptions (at most as many as the excess over the lower bound), it contains exactly one vertex per matching edge and exactly $\frac12(|C|+1)$ vertices per component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$, i.e., it induces a tight vertex cover on all but few components $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$.
Our analysis of vertex covers $X$ in relation to a fixed nice decomposition will focus on those parts of the graph where $X$ exceeds the number of one vertex per matching edge respectively $\frac12(|C|+1)$ vertices per (unmatched, non-singleton) component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$. To this end, we introduce the terms \emph{active component} and a set $\ensuremath{X_{\mathtt{op}}}\xspace\subseteq X$, which essentially capture the places where $X$ ``overpays'', i.e., where it locally exceeds the lower bound.
\begin{definition}[active component]\label{definition:activecomponent}
Let $G=(V,E)$ be a graph, let $(A,B,D,M)$ be a nice decomposition of $G$, and let $X$ be a vertex cover of $G$. A component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ is \emph{active (with respect to $X$)} if $X$ contains more than $\frac12(|C|+1)$ vertices of $C$, i.e., if $X\cap C$ is not a tight vertex cover of $G[C]$.
\end{definition}
\begin{definition}[set $\ensuremath{X_{\mathtt{op}}}\xspace$]\label{definition:setxop}
Let $G=(V,E)$ be a graph and let $(A,B,D,M)$ be a nice decomposition of $G$. For $X\subseteq V$ define \emph{$\ensuremath{X_{\mathtt{op}}}\xspace=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M,X)\subseteq A\cap X$} to contain all vertices $v$ that fulfill either of the following two conditions:
\begin{enumerate}
\item $v\in A_1$ and $X$ contains both $v$ and $M(v)$.
\item $v\in A_3$ and $X$ contains $v$.\label{definition:setxop:condition2}
\end{enumerate}
\end{definition}
Both conditions of Definition~\ref{definition:setxop} capture parts of the graph where $X$ contains more vertices than implied by the lower bound. To see this for the second condition, note that if $v\in A_3\cap X$ then $X$ still needs at least $\frac12(|C|+1)$ vertices of the component $C\in\ensuremath{\mathcal{C}_3}\xspace$ that $v$ is matched to; since there are $\frac12(|C|+1)$ matching edges that $M$ has between vertices of $C\cup\{v\}$ we find that $X$ (locally) exceeds the lower bound, as $|X\cap(C\cup\{v\})|\geq 1+\frac12(|C|+1)$. Conversely, if $X$ does match the lower bound on $C\cup\{v\}$ then it cannot contain $v$.
We now prove formally that a vertex cover $X$ of size close to the lower bound of Lemma~\ref{lemma:nice:vclb} has only few active components and only a small set $\ensuremath{X_{\mathtt{op}}}\xspace\subseteq X$.
\begin{lemma}\label{lemma:nice:boundxh:boundac}
Let $G=(V,E)$ be a graph, let $(A,B,D,M)$ be a nice decomposition of $G$, let $X$ be a vertex cover of $G$, and let $\ensuremath{X_{\mathtt{op}}}\xspace=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M,X)$. The set $\ensuremath{X_{\mathtt{op}}}\xspace$ has size at most $\ell$ and there are at most $\ell$ active components in $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ with respect to $X$ where $\ell=|X|-(|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|)=|X|-(2LP(G)-MM(G))$.
\end{lemma}
\begin{proof}
By Lemma~\ref{lemma:nice:vclb} we have that $X$ has size at least $|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|=2LP(G)-MM(G)$. Let $\ell=|X|-(|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|)$.
Assume first that $|\ensuremath{X_{\mathtt{op}}}\xspace|>\ell$. Let $\overline{M}\subseteq M$ denote the matching edges between a vertex of $A_1$ and the vertex of a (matched) singleton component from $\ensuremath{\mathcal{C}_1}\xspace$ that $X$ contains both endpoints of. Let $\overline{A}_3:=A_3\cap X$. By definition of $\ensuremath{X_{\mathtt{op}}}\xspace$ we get that $|\ensuremath{X_{\mathtt{op}}}\xspace|=|\overline{M}|+|\overline{A}_3|$. For $u\in \overline{A}_3$ consider the component $C_u\in\ensuremath{\mathcal{C}_3}\xspace$ with $\{u,v\}\in M$ and $v\in C_u$. Observe that $C_u$ is factor-critical and has at least three vertices, which implies that $X$ needs to contain at least $\frac12(|C_u|+1)$ vertices of $C_u$ (Proposition~\ref{proposition:factorcritical:minvc:minfvc}). Note that $M$ contains exactly $\frac12(|C_u|+1)$ matching edges between vertices of $C_u\cup\{u\}$, but $X$ contains at least $\frac12(|C_u|+1)+1$ vertices of $C_u\cup\{u\}$.
Observe that the arguments of Lemma~\ref{lemma:nice:vclb} still apply. That is, for each component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ the set $X$ contains at least $\frac12(|C|+1)$ of its vertices, and for all matching edges not in such a component we know that it contains at least one of its endpoints. Summing this up as in Lemma~\ref{lemma:nice:vclb} yields the lower bound of $|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|=2LP(G)-MM(G)$. Now, however, for each edge of $\overline{M}$ we get an extra $+1$ in the bound, and the same is true for each vertex $u\in \overline{A}_3$ since $X$ contains at least $\frac12(|C_u|+1)+1$ vertices of $C_u\cup\{u\}$, which is one more than the number of matching edges on these vertices. Thus, the size of $X$ is at least $2LP(G)-MM(G)+|\overline{M}|+|\overline{A}_3|>2LP(G)-MM(G)+\ell=|X|$; a contradiction.
Assume now that there are more than $\ell$ active components. We can apply the same accounting argument as before since $X$ needs to independently contain at least one vertex per matching edge and at least $\frac12(|C|+1)$ vertices per component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$. Having more than $\ell$ active components, i.e., more than $\ell$ components $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ where $X$ contains more than $\frac12(|C|+1)$ vertices, would then give a lower bound of $|X|> 2LP(G)-MM(G)+\ell=|X|$; a contradiction.
\end{proof}
The central question is of course how the different structures where $X$ exceeds the lower bound interact. We are only interested in aspects that are responsible for not allowing a tight vertex cover for any (unmatched, non-singleton) components $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$. This happens exactly due to vertices in $A$ that are adjacent to $C$ and that are not selected by $X$. Between components of $G[B]$ and non-singleton components of $G[D]$ there are $M$-alternating paths with vertices alternatingly from $A$ and from singleton components of $G[D]$ since vertices in $A$ are all matched to $D$ and singleton components in $G[D]$ have all their neighbors in $A$. Unless $X$ contains both vertices of a matching edge, it contains the $A$- or the $D$-vertices of such a path. Unmatched components of $G[D]$ and components of $G[B]$ have all neighbors in $A$. Matched components $C$ in $G[D]$ with matched neighbor $v\in A$ enforce not selecting $v$ for $X$ unless $X$ spends more than the lower bound; in this way, they lead to selection of $D$-vertices on $M$-alternating paths. Intuitively, this leads to two ``factions'' that favor either $A$- or $D$-vertices and that are effectively separated when $X$ selects both $A$- and $D$-endpoint of a matching edge. An optimal solution need not separate all neighbors in $A$ of any component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$, and $C$ may still have a tight vertex cover or paying for a larger cover of $C$ is overall beneficial.
The following auxiliary directed graph $H$ captures this situation and for certain vertex covers $X$ reachability of $v\in A$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace$ will be proved to be equivalent with $v\notin X$.
\begin{definition}[auxiliary directed graph $H$]\label{definition:graphh}
Let $G=(V,E)$ be a graph and let $(A,B,D,M)$ be a nice decomposition of $G$. Define a \emph{directed graph $H=H(G,A,B,D,M)$} on vertex set $A$ by letting $(u,v)$ be a directed edge of $H$, for $u,v\in A$, whenever there is a vertex $w\in D$ with $\{u,w\}\in E\setminus M$ and $\{w,v\}\in M$.
\end{definition}
The first relation between $G$, with decomposition~$(A,B,D,M)$, and the corresponding directed graph $H=H(G,A,B,D,M)$ is straightforward: It shows how inclusion and exclusion of vertices in a vertex cover work along an $M$-alternating path, when $X$ contains exactly one vertex per edge. We will later prove a natural complement of this lemma, but it involves significantly more work and does not hold for all vertex covers.
\begin{lemma}\label{lemma:reachable}
Let $G=(V,E)$ be a graph, let $(A,B,D,M)$ be a nice decomposition of $G$, and let $X$ be a vertex cover of $G$. Let $H=H(G,A,B,D,M)$ and $\ensuremath{X_{\mathtt{op}}}\xspace=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M,X)$. If $v\in A$ is reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace$ then $X$ does not contain $v$.
\end{lemma}
\begin{proof}
Let $P_H=(v_1,\ldots,v_p)$ be a directed path in $H-\ensuremath{X_{\mathtt{op}}}\xspace$ from some vertex $v_1\in A_3\subseteq A$ to $v_p=v\in A$, and with $v_1,\ldots,v_p\in A\setminus \ensuremath{X_{\mathtt{op}}}\xspace=V(H-\ensuremath{X_{\mathtt{op}}}\xspace)$. By construction of $H$, for each edge $(v_i,v_{i+1})$ with $i\in[p-1]$ there is a vertex $u_i\in D$ with $\{v_i,u_i\}\in E\setminus M$ and $\{u_i,v_{i+1}\}\in M$. Since $M$ is a matching, all vertices $u_i$ are pairwise different and none of them are in $P_H$ as $u_i\in D$ and $A\cap D=\emptyset$. It follows that there is a path
\[
P=(v_1,u_1,v_2,u_2,v_3,\ldots,v_{p-1},u_{p-1},v_p)
\]
in $G$ where $\{v_i,u_i\}\in E\setminus M$ and $\{u_i,v_{i+1}\}\in M$ for $i\in[p-1]$. In other words, $P$ is an $\overline{M},M$-path from $v_1\in A_3$ to $v_p=v\in A$.
Consider any edge $\{u_i,v_{i+1}\}\in M$ of $P$ and apply Definition~\ref{definition:setxop}: If $v_{i+1}\in A_3$ then $v_{i+1}\notin \ensuremath{X_{\mathtt{op}}}\xspace$ implies that $v_{i+1}\notin X$. If $v_{i+1}\in A_1$ then $v_{i+1}\notin \ensuremath{X_{\mathtt{op}}}\xspace$ implies that $X$ does not contain both $u_i$ and $v_{i+1}$. In both cases $X$ does not contain both vertices of the edge $\{u_i,v_{i+1}\}\in M$. Thus, $X$ contains exactly one vertex each from $\{u_1,v_2\},\ldots,\{u_{p-1},v_p\}$.
Let us check that this implies that $u_{p-1}\in X$ and $v_p\notin X$. Observe that $v_1\notin X$ since $v_1\in A_3$ and $v_1\in X$ would imply $v_1\in \ensuremath{X_{\mathtt{op}}}\xspace$. Clearly, $X$ must then contain $u_1$ to cover the edge $\{v_1,u_1\}$, but then it does not contain $v_2$, which would be a second vertex from $\{u_1,v_2\}$. Thus, to cover $\{v_2,u_2\}$ the set $X$ must contain $u_2$, implying that it does not also contain $v_3$ from $\{u_2,v_3\}\in M$. By iterating this argument we get that $u_{p-1}\in X$ and $v_p\notin X$. Since $v=v_p$, this completes the proof.
\end{proof}
We will now work towards a complement of Lemma~\ref{lemma:reachable}: We would like to show that, under the same setup as in Lemma~\ref{lemma:reachable}, if $v$ is not reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace$ then $X$ does contain $v$. In general, this does not hold. Nevertheless, one can show that there always \emph{exists} a vertex cover of at most the same size, and with same set $\ensuremath{X_{\mathtt{op}}}\xspace$, that does contain $v$. Equivalently, we may put further restrictions on $X$ under which the lemma holds; to this end, we define the notion of a \emph{dominant} vertex cover.
\begin{definition}[dominant vertex cover]
Let $G=(V,E)$ be a graph and let $(A,B,D,M)$ be a nice decomposition of $G$. A vertex cover $X\subseteq V$ of $G$ is \emph{dominant} if $G$ has no vertex cover of size less than $|X|$ and no vertex cover of size $|X|$ contains fewer vertices of $D$.
\end{definition}
We continue with a technical lemma that will be used to prove two lemmas about dominant vertex covers. The lemma statement is unfortunately somewhat opaque, but essentially it comes down to a fairly strong replacement routine that, e.g., can turn a given vertex cover into one that contains further vertices of $A$ and strictly fewer vertices of $D$.
\begin{lemma}\label{lemma:unify}
Let $G=(V,E)$ be a graph, let $(A,B,D,M)$ be a nice decomposition of $G$, and let $H=H(G,A,B,D,M)$. Let $X\subseteq V$ and $\ensuremath{X_{\mathtt{op}}}\xspace=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M,X)$. Suppose that there is a nonempty set $Z\subseteq A\setminus X$ such that
\begin{enumerate}
\item $X\cup Z$ is a vertex cover of $G$,
\item $X$ contains $M(z)$ for all $z\in Z$, and
\item $Z$ is not reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace$.
\end{enumerate}
Then there exists a vertex cover $\ensuremath{\overline{X}}\xspace$ of size at most $|X|$ that contains $Z$. Moreover, $\ensuremath{\overline{X}}\xspace\cap D\subsetneq X\cap D$ and $\ensuremath{\overline{X}}\xspace\cap A\supsetneq X\cap A$.
\end{lemma}
\begin{proof}
We give a proof by minimum counterexample. Assume that the lemma does not hold, and pick sets $X$ and $Z$ that fulfill the conditions of the lemma but for which the claimed set $\overline{X}$ does not exist, and with minimum value of $|X\cap D|$ among such pairs of sets. (It is no coincidence that the choice of $X$ is reminiscent of a dominant vertex cover, but note that $X$ is not necessarily a vertex cover.) We will derive sets $X'$ and $Z'$ such that either $Z'=\emptyset$ and we can choose $\ensuremath{\overline{X}}\xspace:=X'$, or $Z'\neq\emptyset$ but then $X'$ and $Z'$ fulfill the conditions of the lemma and we have $|X'\cap D|<|X\cap D|$. In the latter case, the lemma must hold for $X'$ and $Z'$ and we will see that $\ensuremath{\overline{X}}\xspace:=\ensuremath{\overline{X}}\xspace'$ fulfills the then-part of the lemma for $X$ and $Z$. Thus, both cases contradict the assumption that $X$ and $Z$ constitute a counterexample, proving correctness of the lemma.
First, let us find an appropriate set $X'$. To this end, let $U:=\{M(z) \mid z\in Z\}$. Since $Z\subseteq A$ we know that $U\subseteq D$ and that each vertex $z\in Z$ is matched to a private vertex $M(z)\in U$; hence $|U|=|Z|\geq 1$. We have $U\subseteq X$ since $X$ contains $M(z)$ for all $z\in Z$. Define $X':=(X\setminus U)\cup Z$. We have $|X'|=|X|-|U|+|Z|=|X|$ since $U\subseteq X$ and $|Z|=|U|$, and because $Z\subseteq A\setminus X$ entails that $X\cap Z=\emptyset$. Moreover, since $\emptyset\neq U\subseteq D$ and $Z\cap D=\emptyset$, we get that $X'\cap D\subsetneq X\cap D$; this also means that $|X'\cap D|<|X\cap D|$. Similarly, since $\emptyset\neq Z\subseteq A$, $X\cap Z=\emptyset$, and $U\cap A=\emptyset$, we get $X'\cap A\supsetneq X\cap A$. Finally, note that $X'\cup U$ is a vertex cover since $X'\cup U = X\cup Z$ is a vertex cover.
Second, we define $Z':=\{v\mid v\in N(u) \mbox{ for some } u\in U\}\setminus X'$, i.e., $Z'$ contains all vertices $v$ that are neighbors of some $u\in U$ and that are not in $X'$. (Note that this is not the same as $N(U)\setminus X'$ since a vertex $u\in U$ could have a neighbor $u'\in U$. Nevertheless, we show in a moment that $Z'\subseteq A$, ruling out this case as $U\subseteq D$ and $A\cap D=\emptyset$.) Clearly $X'\cap Z'=\emptyset$, and $Z\cap Z'=\emptyset$ since $Z\subseteq X'$. Observe that $X'\cup Z'$ is a vertex cover since $X'\cup U$ is a vertex cover: The only edges not covered by $X'\subsetneq X'\cup U$ have one endpoint in $U$ and the other one not in $X'$; these edges are covered by $Z'$ by definition.
Let us prove that $Z'\subseteq A\setminus X'$; it remains to prove $Z'\subseteq A$: Since $U\subseteq D$ and $N(D)=A$ we know that $N(u)\subseteq A\cup D$ for $u\in U$. Assume for contradiction that some $u\in U\subseteq D$ has a neighbor $v\in D$, and let $z\in Z$ with $u=M(z)$, using the definition of $U$. It follows that $u$ and $v$ are contained in the same non-singleton component $C$ of $G[D]$, as they are adjacent vertices of $D$. Moreover, $C$ is matched to $z$ since $u=M(z)$ implies $\{u,z\}\in M$. This in turn implies that $C$ is a matched non-singleton component, i.e., $C\in\ensuremath{\mathcal{C}_3}\xspace$, and, hence, $z\in A_3$. We also find that $z\notin\ensuremath{X_{\mathtt{op}}}\xspace$ since $Z\subseteq A\setminus X$ entails $z\notin X$ (cf.\ Definition~\ref{definition:setxop}). Together, however, this implies that $z$ is reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace$, namely from $z\in A_3$; a contradiction. Thus, no vertex $u\in U$ has a neighbor $v\in D$, implying that $Z'\subseteq A$. Together with $X'\cap Z'=\emptyset$ we get $Z'\subseteq A\setminus X'$.
We now prove that $M(z')\in X'$ for all $z'\in Z'$. Pick any $z'\in Z'$ and note that $z'\in A\setminus X'$. Thus, $z'$ is matched to some vertex $w\in D$, i.e., $w=M(z')$. The set $X'\cup U$ is a vertex cover, implying that it contains at least one vertex of the edge $\{z',w\}$. Since $z'\in A\setminus X'\subseteq A$, it is neither in $X'$ nor in $U$ (recall that $U\subseteq D$ and $A\cap D=\emptyset$). Thus, $w\in X'\cup U$. If $w\in U$ then there exists $z\in Z$ with $w=M(z)$ by definition of $U$. Clearly, as $M$ is a matching, we must have $z=z'$. This, however, violates our earlier observation that $Z\cap Z'=\emptyset$ since both sets would contain $z$. Thus, the only remaining possibility is that $w\in X'$. Hence, we get $M(z')=w\in X'$, as claimed.
Define $\ensuremath{X_{\mathtt{op}}}\xspace':=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M,X')$; to prove that no vertex of $Z'$ is reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace'$ it will be convenient to first prove $\ensuremath{X_{\mathtt{op}}}\xspace\subseteq\ensuremath{X_{\mathtt{op}}}\xspace'$: Let $v\in\ensuremath{X_{\mathtt{op}}}\xspace$ and recall that $\ensuremath{X_{\mathtt{op}}}\xspace\subseteq A$. If $v\in A_3$ then $v\in\ensuremath{X_{\mathtt{op}}}\xspace$ implies that $v \in X$. By definition of $X'$ we have $v\in X'$ as only vertices in $U\subseteq D$ are in $X$ but not in $X'$. From $v\in X'$, for $v\in A_3$, we directly conclude that $v\in \ensuremath{X_{\mathtt{op}}}\xspace'$. If $v\in A_1$ then $v\in\ensuremath{X_{\mathtt{op}}}\xspace$ implies that $v,M(v)\in X$. This implies $v\in X'$ as before but we still need to show that $M(v)\in X'$. Assume for contradiction that $M(v)\notin X'$. Observe that this implies $M(v)\in U$ by definition of $X'$, as $M(v)\in X$. Thus, by definition of $U$, we get that $M(v)$ is matched to some vertex $z\in Z$, i.e., $M(v)=M(z)$. Since $M$ is a matching and $M(v)$ is matched to $v$, we of course get $v=z$. This implies $v=z\in Z$, which contradicts $v\in X$ as $Z\subseteq A\setminus X$. Thus, we have both $v\in X'$ and $M(v)\in X'$, which, for $v\in A_1$, implies that $v\in\ensuremath{X_{\mathtt{op}}}\xspace'$. Both cases together imply that $\ensuremath{X_{\mathtt{op}}}\xspace\subseteq\ensuremath{X_{\mathtt{op}}}\xspace'$.
We will now prove that no vertex of $Z'$ is reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace'$, using $\ensuremath{X_{\mathtt{op}}}\xspace\subseteq\ensuremath{X_{\mathtt{op}}}\xspace'$. Let $P=(v_1,\ldots,v_p)$ be any directed path in $H$ with $v_1\in A_3$ and $v_p=z'\in Z'$. As $z'\in Z'$ there is $u\in U$ with $z'\in N(u)\setminus X'$. Similarly, since $u\in U$ there must be $z\in Z$ with $u=M(z)$; we have $z\neq z'$ since $Z\cap Z'=\emptyset$. Observe that this means that $\{z,u\}\in M$ and $\{u,z'\}\in E\setminus M$ as $u$ cannot be incident with two matching edges. This implies, by Definition~\ref{definition:graphh}, that $(z',z)$ is an edge in $H$. Thus, there is a directed walk $W$ from $v_1\in A_3$ to $z\in Z$ in $H$ by using path $P$ and appending the edge $(z',z)$. (With slightly more work one could see that this must be a path, but we do not need this fact.) Since no vertex of $Z$ is reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace$ we conclude that $W$ contains at least one vertex of $\ensuremath{X_{\mathtt{op}}}\xspace$. Note that $\ensuremath{X_{\mathtt{op}}}\xspace$ does not contain $z\in Z$ since we assumed $Z\subseteq A\setminus X$ and $\ensuremath{X_{\mathtt{op}}}\xspace\subseteq X$. Thus, $\ensuremath{X_{\mathtt{op}}}\xspace$ contains a vertex of $P$ (noting that $z$ is the only vertex of $W$ that may not be in $P$). Since $\ensuremath{X_{\mathtt{op}}}\xspace\subseteq\ensuremath{X_{\mathtt{op}}}\xspace'$ it follows that $\ensuremath{X_{\mathtt{op}}}\xspace'$ also contains a vertex of $P$; since $P$ was chosen arbitrarily it follows that no vertex of $Z'$ is reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace'$, as claimed.
Finally, we distinguish two cases: (1) $Z'=\emptyset$ and (2) $Z'\neq\emptyset$. In the former case, we show that $\ensuremath{\overline{X}}\xspace:=X'$ is feasible; in the latter case we use the lemma on $X'$ and $Z'$ to get $\ensuremath{\overline{X}}\xspace'$ and then show that $\ensuremath{\overline{X}}\xspace:=\ensuremath{\overline{X}}\xspace'$ fulfills the then-part of the lemma.
(1) $Z'=\emptyset$: We get that $X'=X'\cup Z'$ is a vertex cover of $G$. We showed that $|X'|=|X|$, and that $X'\cap D\subsetneq X\cap D$ and $X'\cap A\supsetneq X\cap A$. Finally, by construction we have that $Z\subseteq X'$. Thus, $\ensuremath{\overline{X}}\xspace:=X'$ fulfills the properties claimed in the lemma, contradicting the fact that $X$ and $Z$ constitute a counterexample.
(2) $Z'\neq\emptyset$: Together with $Z'\neq\emptyset$ the above considerations show that $X'$ and $Z'$ fulfill the conditions of the lemma: The set $Z'$ is a nonempty subset of $A\setminus X'$; the set $X'\cup Z'$ is a vertex cover of $G$; the set $X'$ contains $M(z')$ for all $z'\in Z'$; and $Z'$ is not reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace'$, where $\ensuremath{X_{\mathtt{op}}}\xspace'=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M,X')$. Moreover, we know that $|X'\cap D|<|X\cap D|$, which implies that the lemma must hold for this choice of sets, as $X$ and $Z$ was assumed to be a counterexample with minimum value of $|X\cap D|$. Let $\ensuremath{\overline{X}}\xspace'$ be the outcome of applying the lemma to $X'$ and $Z'$; let us check that $\ensuremath{\overline{X}}\xspace:=\ensuremath{\overline{X}}\xspace'$ is feasible:
\begin{itemize}
\item The lemma guarantees that $\ensuremath{\overline{X}}\xspace'$ is a vertex cover of $G$.
\item The lemma guarantees $|\ensuremath{\overline{X}}\xspace'|\leq |X'|$, and using $|X'|=|X|$ we conclude that $|\ensuremath{\overline{X}}\xspace'|\leq |X|$.
\item We know, as discussed in case (1), that $Z\subseteq X'$. The lemma guarantees that $\ensuremath{\overline{X}}\xspace'\cap A\supsetneq X'\cap A$ and $\ensuremath{\overline{X}}\xspace'\cap D\subsetneq X'\cap D$. The former, together with $Z\subseteq X'$ and $Z\subseteq A$, yields $Z\subseteq X'\cap A\subsetneq \ensuremath{\overline{X}}\xspace'\cap A$. Together with $X'\cap A\supsetneq X\cap A$ and $X'\cap D\subsetneq X\cap D$, we get $\ensuremath{\overline{X}}\xspace'\cap A\supsetneq X'\cap A \supsetneq X\cap A$ and $\ensuremath{\overline{X}}\xspace'\cap D\subsetneq X'\cap D\subsetneq X\cap D$.
\end{itemize}
Thus, $\ensuremath{\overline{X}}\xspace:=\ensuremath{\overline{X}}\xspace'$ is a feasible choice. Altogether, we find that in both cases there does in fact exist a valid set $\ensuremath{\overline{X}}\xspace$. This means that $X$ and $Z$ do not constitute a counterexample. Since there is no minimum counterexample, the lemma holds as claimed.
\end{proof}
Now, as a first application of Lemma~\ref{lemma:unify} we prove a complement to Lemma~\ref{lemma:reachable}. Note that this lemma only applies to dominant vertex covers, whereas Lemma~\ref{lemma:reachable} holds for any vertex cover of $G$. Fortunately, after the rather long proof of Lemma~\ref{lemma:unify}, the present lemma is now a rather straightforward conclusion.
\begin{lemma}\label{lemma:notreachable}
Let $G=(V,E)$ be a graph, let $(A,B,D,M)$ be a nice decomposition of $G$, and let $H=H(G,A,B,D,M)$. Let $X$ be a dominant vertex cover of $G$ and let $\ensuremath{X_{\mathtt{op}}}\xspace=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M,X)$. If $v\in A$ is not reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace$ then $X$ contains $v$.
\end{lemma}
\begin{proof}
First, let us note that if $v\in\ensuremath{X_{\mathtt{op}}}\xspace$ then, by Definition~\ref{definition:setxop}, we know that $v\in X$. It remains to consider the more interesting case that $v\in A\setminus \ensuremath{X_{\mathtt{op}}}\xspace$.
Assume for contradiction that $v\notin X$. We will apply Lemma~\ref{lemma:unify} to reach a contradiction. To this end, we will define a set $Z$ such that $X$ and $Z$ fulfill the conditions of Lemma~\ref{lemma:unify}. Let $Z:=\{v\}$. Clearly, we have $\emptyset\neq Z\subseteq A\setminus X$. Since $X$ is a vertex cover and $v\notin X$, the vertex $M(v)$ must be in $X$ in order to cover the edge $\{v,M(v)\}$. (Note that $v\in A$ implies that $M(v)\in D$ exists.) By assumption of the present lemma, $v$ is not reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace$, where $\ensuremath{X_{\mathtt{op}}}\xspace=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M,X)$. Thus, Lemma~\ref{lemma:unify} applies to $X$ and $Z$, and yields a set $\ensuremath{\overline{X}}\xspace$ that is a vertex cover of $G$ of size at most $|X|$ and with $|\ensuremath{\overline{X}}\xspace\cap D|<|X\cap D|$, contradicting the assumption that $X$ is a dominant vertex cover. Thus, the assumption that $v\notin X$ is wrong, and the lemma follows.
\end{proof}
As a second application of Lemma~\ref{lemma:unify} we prove that sets $\ensuremath{X_{\mathtt{op}}}\xspace$ corresponding to dominant vertex covers are always closest to $A_3$ in the auxiliary directed graph $H$. This is a requirement for applying the matroid tools from Kratsch and Wahlstr\"om~\cite{KratschW12} later since closest sets allow us to translate between reachability with respect to a closest cut and independence in an appropriate matroid. Unlike the previous lemma, there is still quite some work involved before applying Lemma~\ref{lemma:unify} in the proof.
\begin{lemma}\label{lemma:closest}
Let $G=(V,E)$ be a graph, let $(A,B,D,M)$ be a nice decomposition of $G$, and let $H=H(G,A,B,D,M)$. Let $X$ be a dominant vertex cover of $G$ and let $\ensuremath{X_{\mathtt{op}}}\xspace=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M,X)$. Then $\ensuremath{X_{\mathtt{op}}}\xspace$ is closest to $A_3$ in $H$.
\end{lemma}
\begin{proof}
Assume that $\ensuremath{X_{\mathtt{op}}}\xspace$ is not closest to $A_3$ in $H$ and, consequently, let $Y\subseteq V(H)=A$ be a minimum $A_3,\ensuremath{X_{\mathtt{op}}}\xspace$-separator in $H$ with $|Y|\leq|\ensuremath{X_{\mathtt{op}}}\xspace|$ and $Y\neq \ensuremath{X_{\mathtt{op}}}\xspace$. We will apply Lemma~\ref{lemma:unify} to appropriately chosen sets $X'$ and $Z$ (with $X'$ and $Z$ playing the roles of $X$ and $Z$ in the lemma).
Let $X':=(X\setminus (\ensuremath{X_{\mathtt{op}}}\xspace\setminus Y))\cup (Y\setminus \ensuremath{X_{\mathtt{op}}}\xspace)$. Note that
\begin{align*}
|\ensuremath{X_{\mathtt{op}}}\xspace\setminus Y|=|\ensuremath{X_{\mathtt{op}}}\xspace|-|\ensuremath{X_{\mathtt{op}}}\xspace\cap Y|\geq |Y|-|\ensuremath{X_{\mathtt{op}}}\xspace\cap Y|=|Y\setminus \ensuremath{X_{\mathtt{op}}}\xspace|.
\end{align*}
This implies that $|X'|\leq|X|$, using that $\ensuremath{X_{\mathtt{op}}}\xspace\setminus Y \subseteq \ensuremath{X_{\mathtt{op}}}\xspace \subseteq X$ (see Definition~\ref{definition:setxop}). We can also observe that $X'$ and $X$ contain the same vertices of $D$, and hence also the same number since $\ensuremath{X_{\mathtt{op}}}\xspace\setminus Y$ and $Y\setminus \ensuremath{X_{\mathtt{op}}}\xspace$ are both subsets of $A$. (Let us mention that these two properties are not needed to apply Lemma~\ref{lemma:unify} to $X'$ but they are needed for the outcome to have relevance for $X$.)
Let $\ensuremath{X_{\mathtt{op}}}\xspace'=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M,X')$ according to Definition~\ref{definition:setxop}. We show that $Y\subseteq \ensuremath{X_{\mathtt{op}}}\xspace'$ by proving that $y\in\ensuremath{X_{\mathtt{op}}}\xspace'$ for all $y\in Y$; we distinguish two cases depending on whether $y\in\ensuremath{X_{\mathtt{op}}}\xspace$.
Let $y\in Y\cap \ensuremath{X_{\mathtt{op}}}\xspace$. If $y\in A_1$ then $y\in\ensuremath{X_{\mathtt{op}}}\xspace$ implies $y,M(y)\in X$. By definition of $X'$ we also have $y,M(y)\in X'$: Only elements of $\ensuremath{X_{\mathtt{op}}}\xspace\setminus Y\subseteq A$ are in $X$ but not in $X'$; neither $y\in Y\cap\ensuremath{X_{\mathtt{op}}}\xspace$ nor $M(y)\in D$ are affected by this. Thus, if $y\in A_1$, then $y,M(y)\in X'$, which implies $y\in \ensuremath{X_{\mathtt{op}}}\xspace'$. If $y\in A_3$ then $y\in\ensuremath{X_{\mathtt{op}}}\xspace$ implies $y\in X$. As before, the definition of $X'$ implies $y\in X'$, which yields $y\in \ensuremath{X_{\mathtt{op}}}\xspace'$. Thus, all $y\in Y\cap\ensuremath{X_{\mathtt{op}}}\xspace$ are also contained in $\ensuremath{X_{\mathtt{op}}}\xspace'$.
Now, let $y\in Y\setminus\ensuremath{X_{\mathtt{op}}}\xspace$. Since $Y$ is a minimal $A_3,\ensuremath{X_{\mathtt{op}}}\xspace$-separator, there must be an $A_3,y$-path in $H-(Y\setminus\{y\})$ or else $Y\setminus\{y\}$ would also be an $A_3,\ensuremath{X_{\mathtt{op}}}\xspace$-separator. (This is a standard argument: if $Y\setminus\{y\}$ were not a separator then there would be an $A_3,\ensuremath{X_{\mathtt{op}}}\xspace$-path avoiding $Y\setminus\{y\}$. This path needs to contain $y$, as $Y$ is a separator, and can be shortened to a path from $A_3$ to $y$.) Let $P$ be a directed path from some vertex $v\in A_3$ to $y$ in $H-(Y\setminus\{y\})$, i.e., a path in $H$ containing no vertex of $Y\setminus\{y\}$. We find that there can be no vertex of $\ensuremath{X_{\mathtt{op}}}\xspace$ on $P$: We already know that the final vertex $y$ of $P$ is not in $\ensuremath{X_{\mathtt{op}}}\xspace$. If $u$ is any earlier vertex of $P$ that is in $\ensuremath{X_{\mathtt{op}}}\xspace$ then $P$ could be shortened to a path from $v\in A_3$ to $u\in \ensuremath{X_{\mathtt{op}}}\xspace$ that avoids all vertices of $Y$ (since $y$ was the only vertex of $Y$ on $P$ but it comes after $u$); thus $Y$ would not separate $A_3$ from $\ensuremath{X_{\mathtt{op}}}\xspace$ in $H$. Since $P$ contains no vertex of $\ensuremath{X_{\mathtt{op}}}\xspace$, we conclude that $y$ is reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace$. By Lemma~\ref{lemma:reachable} we conclude that $y\notin X$. Since $Y\subseteq A$, the vertex $y$ is matched to some vertex $u\in D$, and $X$ must contain $u$ to cover the edge $\{u,y\}$. Since $X$ and $X'$ contain the same vertices of $D$, as observed above, we have $u\in X'$. Additionally, by construction of $X'$, we have $Y\setminus\ensuremath{X_{\mathtt{op}}}\xspace\subseteq X'$, implying that $y\in X'$.
Thus, if $y\in A_1$ then we have $y\in X'$ and $M(y)=u\in X'$, which implies $y\in\ensuremath{X_{\mathtt{op}}}\xspace'$; if $y\in A_3$ then $y\in X'$ suffices to conclude $y\in\ensuremath{X_{\mathtt{op}}}\xspace'$. Together we get that $y\in Y\setminus\ensuremath{X_{\mathtt{op}}}\xspace$ implies $y\in\ensuremath{X_{\mathtt{op}}}\xspace'$; combined with the case $y\in Y\cap\ensuremath{X_{\mathtt{op}}}\xspace$ we get $Y\subseteq\ensuremath{X_{\mathtt{op}}}\xspace'$.
Let $Z:=\ensuremath{X_{\mathtt{op}}}\xspace\setminus Y$. By definition of $X'$ we have $X'\cap Z=\emptyset$; since $\ensuremath{X_{\mathtt{op}}}\xspace\subseteq A$ this entails $Z\subseteq A\setminus X'$. Since $|Y|\leq|\ensuremath{X_{\mathtt{op}}}\xspace|$ and $Y\neq\ensuremath{X_{\mathtt{op}}}\xspace$, we conclude that $Z=\ensuremath{X_{\mathtt{op}}}\xspace\setminus Y\neq\emptyset$. The set $X'\cup Z$ contains $X$ by definition of $X'$ and hence it is also a vertex cover of $G$. To get that $M(z)\in X'$ for $z\in Z$ we need to distinguish two cases: If $z\in A_1$ then $z\in\ensuremath{X_{\mathtt{op}}}\xspace$ implies $M(z)\in X$; note that $M(z)\in D$ as $z\in A$. Since $X'$ contains the same vertices of $D$ as $X$ we get $M(z)\in X'$. If $z\in A_3$ then we reach a contradiction: Recall that $Y$ is an $A_3,\ensuremath{X_{\mathtt{op}}}\xspace$-separator. This necessitates that $Y$ contains all vertices of $A_3\cap\ensuremath{X_{\mathtt{op}}}\xspace$, implying that $z\in Y$, contradicting $z\in\ensuremath{X_{\mathtt{op}}}\xspace\setminus Y$. Thus, if $z\in\ensuremath{X_{\mathtt{op}}}\xspace\setminus Y$ then $z\in A_1$ and we get $M(z)\in X'$ as claimed. Finally, let us check that no vertex of $Z$ is reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace'$. This follows immediately from $Z\subseteq \ensuremath{X_{\mathtt{op}}}\xspace$ and $Y\subseteq\ensuremath{X_{\mathtt{op}}}\xspace'$, and the fact that $Y$ is an $A_3,\ensuremath{X_{\mathtt{op}}}\xspace$-separator in $H$.
By the above considerations we may apply Lemma~\ref{lemma:unify} to $X'$ and $Z$ and obtain a vertex cover \ensuremath{\overline{X}}\xspace of $G$ of size at most $|X'|\leq|X|$ that contains fewer vertices of $D$ than $X'$. Since $X$ and $X'$ contain the same number of vertices of $D$, we get $|\ensuremath{\overline{X}}\xspace\cap D|<|X\cap D|$, contradicting the choice of $X$ as a dominant vertex cover.
\end{proof}
\section{Randomized polynomial kernelization}\label{section:kernelization}
In this section, we describe our randomized polynomial kernelization for \probname{Vertex Cover}\xspaceanb. For convenience, let us fix an input instance $(G,k,\ell)$, i.e., $G=(V,E)$ is a graph for which we want to know whether it has a vertex cover of size at most $k$; the parameter is $\ell=k-(2LP(G)-MM(G))$, where $LP(G)$ is the minimum cost of a fractional vertex cover of $G$ and $MM(G)$ is the size of a largest matching.
From previous work of Garg and Philip~\cite{GargP16} we know that the well-known linear program-based preprocessing for \probname{Vertex Cover}\xspace (cf.~\cite{CyganFKLMPPS15}) can also be applied to \probname{Vertex Cover}\xspaceanb; the crucial new aspect is that this operation does not increase the value $k-(2LP-MM)$. The LP-based preprocessing builds on the half-integrality of fractional vertex covers and a result of Nemhauser and Trotter~\cite{NemhauserT1975} stating that all vertices with value $1$ and $0$ in an optimal fractional vertex cover $x\colon V\to\{0,\frac12,1\}$ are included respectively excluded in at least one minimum (integral) vertex cover. Thus, only vertices with value $x(v)=\frac12$ remain and the best LP solution costs exactly $\frac12$ times the number of (remaining) vertices. For our kernelization we only require the fact that if $G$ is reduced under this reduction rule then $LP(G)=\frac12(|V(G)|)$; e.g., we do not require $x\colon V\to\{\frac12\}$ to be the unique optimal fractional vertex cover. Without loss of generality, we assume that our given graph $G=(V,E)$ already fulfills $LP(G)=\frac12|V|$.
\begin{observation}
If $LP(G)=\frac12|V|$ then $2LP(G)-MM(G)=|V|-MM(G)$. In other words, if $M$ is a maximum matching of $G$ then the lower bound $2LP(G)-MM(G)=|V|-MM(G)=|V|-|M|$ is equal to the cardinality of $M$ plus the number of isolated vertices.
\end{observation}
As a first step, let us compute the Gallai-Edmonds decomposition $V=A\mathbin{\dot\cup} B\mathbin{\dot\cup} D$ of $G$ according to Definition~\ref{definition:ged}; this can be done in polynomial time.\footnote{The main expenditure is finding the set $D$. A straightforward approach is to compute a maximum matching $M_v$ of $G-v$ for each $v\in V$. If $|M_v|=MM(G)$ then $v$ is in $D$ as $M_v$ is maximum and exposes $v$; otherwise $v\notin D$ as no maximum matching exposes $v$.} Using $LP(G)=\frac12|V|$ we can find a maximum matching $M$ of $G$ such that $(A,B,D,M)$ is a nice decomposition of $G$.
\begin{lemma}\label{lemma:kernel:nicedecomposition}
Given $G=(V,E)$ with $LP(G)=\frac12|V|$ and a Gallai-Edmonds decomposition $V=A\mathbin{\dot\cup} B\mathbin{\dot\cup} D$ of $G$ one can in polynomial time compute a maximum matching $M$ of $G$ such that $(A,B,D,M)$ is a nice decomposition of $G$.
\end{lemma}
\begin{proof}
Let $\ensuremath{\mathcal{C}_1}\xspace$ denote the set of singleton components of $G[D]$ and let $I=V(\ensuremath{\mathcal{C}_1}\xspace)\subseteq D$ contain all vertices that are in singleton components of $G[D]$. Clearly, $I$ is an independent set since $G[I]$ is the subgraph of $G[D]$ containing just the singleton components. Assume for contradiction that there is a set $I'\subseteq I$ with $|N_G(I')|<|I'|$. It follows directly that there would be a fractional vertex cover of $G$ of cost less than $\frac12|V|$, namely assign $0$ to vertices of $I'$, assign $1$ to vertices of $N(I')$, and assign $\frac12$ to all other vertices. The total cost is
\begin{align*}
0\cdot |I'| + 1\cdot |N(I')| + \frac12 |V\setminus(I'\cup N(I'))|<\frac12|I'|+\frac12 |N(I')|+\frac12|V\setminus(I'\cup N(I'))| = \frac12|V|.
\end{align*}
All edges incident with $I'$ have their other endpoint in $N(I')$, which has value $1$. All other edges have two endpoints with value at least $\frac12$. This contradicts the assumption that $LP(G)=\frac12|V|$.
Thus, each $I'\subseteq I$ has at least $|I'|$ neighbors in $G$. By Hall's Theorem there exists a matching of $I'$ into $N(I')$, and standard bipartite matching algorithms can find one in polynomial time; let $M_1$ be such a matching. Using any matching algorithm that finds a maximum matching by processing augmenting paths, we can compute from $M_1$ in polynomial time a maximum matching $M$ of $G$. The matching $M$ still contains edges incident with all vertices of $I$ since extending a matching along an augmenting path does not expose any previously matched vertices.
Using the maximum matching $M$, let us check briefly that $(A,B,D,M)$ is indeed a nice decomposition of $G$. We know already that there are no unmatched singleton components since $M$ contains matching edges incident with all vertices of $I=V(\ensuremath{\mathcal{C}_1}\xspace)$ and all these edges are also incident to a vertex in $A$. (Recall that the neighborhood of each component of $G[D]$ in $G$ lies in $A$.) Since $V=A\mathbin{\dot\cup} B\mathbin{\dot\cup} D$ is a Gallai-Edmonds decomposition of $G$ we get from Definition~\ref{definition:ged} and Theorem~\ref{theorem:ged} that $A=N(D)$, each component of $G[D]$ is factor-critical, and that $M$ (by being a maximum matching of $G$) must induce a perfect matching of $G[B]$, a near-perfect matching of each component $C$ of $G[D]$, and a matching of $A$ into $D$. This completes the proof.
\end{proof}
We fix a nice decomposition $(A,B,D,M)$ of $G$ obtained via Lemma~\ref{lemma:kernel:nicedecomposition}. We have already learned about the relation of dominant vertex covers $X$, their intersection with the set $A$, and separation of $A$ vertices from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace$, where $H=H(G,A,B,D,M)$. It is safe to assume that solutions are dominant vertex covers as among minimum vertex covers there is one with minimum intersection with $D$. We would now like to establish that most components of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ can be deleted (while reducing $k$ by the cost for corresponding tight vertex covers). Clearly, since any vertex cover pays at least for tight covers of these components, we cannot turn a yes- into a no-instance this way. However, if the instance is no then it might become yes.
In the following, we will try to motivate both the selection process for components of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ that are deleted as well as the high-level proof strategy for establishing correctness. We will tacitly ignore most technical details, like parameter values, getting appropriate nice decompositions, etc., and refer to the formal proof instead. Assume that we are holding a no-instance $(G,k,\ell)$. Consider for the moment, the effect of deleting all components $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ that have tight vertex covers and updating the budget accordingly; for simplicity, say they all have such vertex covers. Let $(G_0,k_0,\ell)$ be the obtained instance; if this instance is no as well, then deleting any subset of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ also preserves the correct answer (namely: no). Else, if $(G_0,k_0,\ell)$ is yes then pick any dominant vertex cover $X^0$ for it. We could attempt to construct a vertex cover of $G$ of size at most $k$ by adding back the components of $C$ and picking a tight vertex cover for each; crucially, these covers must also handle edges between $C$ and $A$. Since $(G,k,\ell)$ was assumed to be a no-instance, there must be too many components $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ for which this approach fails. For any such component, the adjacent vertices in $A\setminus X^0$ force a selection of their neighbors $Z_A=N(A)\cap C$ that cannot be completed to a tight vertex cover of $C$. To avoid turning the no-instance $(G,k,\ell)$ into a yes-instance $(G',k',\ell)$ we have to keep enough components of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ in order to falsify any suggested solution $X'$ of size at most $k'$ for $G$. The crux is that there may be an exponential number of such solutions and that we do not know any of them. 
This is where the auxiliary directed graph and related technical lemmas as well as the matroid-based tools of Kratsch and Wahlstr\"om~\cite{KratschW12} are essential.
Let us outline how we arrive at an application of the matroid-based tools. Crucially, if $C$ (as above) has no tight vertex cover containing $Z_A=N(A)\cap C$ then, by Lemma~\ref{lemma:criticalsets:boundsize}, there is a set $Z\subseteq Z_A$ of size at most three such that no tight vertex cover contains $Z$. Accordingly, there is a set $T\subseteq A\setminus X^0$ of size at most three whose neighborhood in $C$ contains $Z$. Thus, the fact that $X^0$ contains no vertex of $T$ is responsible for not allowing a tight vertex cover of $C$. This in turn, by Lemma~\ref{lemma:notreachable} means that all vertices in $T$ are reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace^0$. Recalling that a set $\ensuremath{X_{\mathtt{op}}}\xspace^0$ corresponding to a dominant vertex cover is also closest to $A_3$, we can apply a result from~\cite{KratschW12} that generates a sufficiently small representative set of sets $T$ corresponding to components of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$. If a dominant vertex cover has any reachable sets $T$ then the lemma below guarantees that at least one such set is in the output. For each set we select a corresponding component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ and then start over on the remaining components. After $\ell+1$ iterations we can prove that for any not selected component $C$, which we delete, and any proposed solution $X'$ for the resulting graph that does not allow a tight vertex cover for $C$, there are $\ell+1$ other selected components on which $X'$ cannot be tight. This is a contradiction as there are at most $\ell$ such active components by Lemma~\ref{lemma:nice:boundxh:boundac}.
Concretely, we will use the following lemma about representative sets of vertex sets of size at most three regarding reachability in a directed graph (modulo deleting a small set of vertices). Notation of the lemma is adapted to the present application. The original result is for pairs of vertices in a directed graph (see~\cite[Lemma 2]{KratschW11_arxiv}) but extends straightforwardly to sets of fixed size $q$ and to sets of size at most $q$; a proof is provided in Section~\ref{section:proofofmatroidresult} for completeness. Note that the lemma is purely about reachability of small sets in a directed graph (like the \problem{Digraph Pair Cut} problem studied in~\cite{KratschW11_arxiv,KratschW12}) and we require the structural lemmas proved so far to negotiate between this and \probname{Vertex Cover}\xspaceanb.
\begin{lemma}\label{lemma:repsetofcriticalsets}
Let $H=(V_H,E_H)$ be a directed graph, let $S_H\subseteq V_H$, let $\ell\in\mathbb{N}$, and let $\ensuremath{\mathcal{T}}\xspace$ be a family of nonempty vertex sets $T\subseteq V_H$ each of size at most three. In randomized polynomial time, with failure probability exponentially small in the input size, we can find a set $\ensuremath{\mathcal{T}}\xspace^*\subseteq\ensuremath{\mathcal{T}}\xspace$ of size $\mathcal{O}(\ell^3)$ such that for any set $X_H\subseteq V_H$ of size at most $\ell$ that is closest to $S_H$ the following holds: if there is a set $T\in\ensuremath{\mathcal{T}}\xspace$ such that all vertices $v\in T$ are reachable from $S_H$ in $H-X_H$, then there is a corresponding set $T^*\in\ensuremath{\mathcal{T}}\xspace^*$ satisfying the same properties.
\end{lemma}
Using the lemma we will be able to identify a small set $\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace$ of components of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ that contains for each dominant vertex cover $X$ of $G$ of size at most $k$ all active components with respect to $X$. Conversely, if there is no solution of size $k$, we will have retained enough components of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ to preserve this fact. Concretely, the set $\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace$ is computed as follows:
\begin{enumerate}
\item Let $\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^0$ contain all components $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ that have no vertex cover of size at most $\frac12(|C|+1)$. Clearly, these components are active for every vertex cover of $G$. We know from Lemma~\ref{lemma:nice:boundxh:boundac} that there are at most $\ell$ such components if the instance is \textbf{yes}\xspace. We can use the algorithm of Garg and Philip~\cite{GargP16} to test in polynomial time whether any $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ has a vertex cover of size at most $k_C:=\frac12(|C|+1)$: We have parameter value
\[
k_C-(2LP(G[C])-MM(G[C]))=\frac12(|C|+1)-(|C|-\frac12(|C|-1))=0.
\]
We could of course also use an algorithm for \probname{Vertex Cover}\xspace parameterized above maximum matching size, where we would have parameter value $1$. If there are more than $\ell$ components $C$ with no vertex cover of size $\frac12(|C|+1)$ then we can safely reject the instance. Else, as indicated above, let $\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^0$ contain all these components and continue.
\item Let $i=1$. We will repeat the following steps for $i\in\{1,\ldots,\ell+1\}$.
\item Let $\ensuremath{\mathcal{T}}\xspace^i$ contain all nonempty sets $T\subseteq A$ of size at most three such that there is a component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace\setminus(\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^0\cup\ldots\cup\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^{i-1})$ such that:\label{step:selectti}
\begin{enumerate}
\item There is a set $Z\subseteq N_G(T)\cap C$ of at most three neighbors of $T$ in $C$ such that no vertex cover of $G[C]$ of size $\frac12(|C|+1)$ contains $Z$. Note that $Z\neq\emptyset$ since $C\notin \ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^0$ implies that it has at least some vertex cover of size $\frac12(|C|+1)$.
\item For each $C$ and $Z\subseteq C$ of size at most three, existence of a vertex cover of $G[C]$ of size $k_C:=\frac12(|C|+1)$ containing $Z$ can be tested by the algorithm of Garg and Philip~\cite{GargP16} since the parameter value is constant. Concretely, run the algorithm on $G[C\setminus Z]$ and solution size $k_C-|Z|$ and observe that the parameter value is
\[
(k_C-|Z|)-(2LP(G[C\setminus Z])-MM(G[C\setminus Z])).
\]
Using that $LP(G[C\setminus Z])\geq LP(G[C])-|Z|$ and $MM(G[C\setminus Z])\leq MM(G[C])=\frac12(|C|-1)$ this value can be upper bounded by
\begin{align*}
& k_C-|Z|-2LP(G[C]) +2|Z| + MM(G[C])\\
={} & \frac12(|C|+1) - |Z| - |C| + 2 |Z| + \frac12(|C|-1)\\
={} & |Z|.
\end{align*}
Since $|Z|\leq 3$ the parameter value is at most three and the FPT-algorithm of Garg and Philip~\cite{GargP16} runs in polynomial time.
\end{enumerate}
Intuitively, the condition is that $C$ must always be active for vertex covers not containing $T$, but for the formal correctness proof that we give later the above description is more convenient.
\item Apply Lemma~\ref{lemma:repsetofcriticalsets} to graph $H=H(G,A,B,D,M)$ on vertex set $V_H=A$, set $S_H=A_3\subseteq A$, integer $\ell$, and family $\ensuremath{\mathcal{T}}\xspace^i$ of nonempty subsets of $A$ of size at most three to compute a subset $\ensuremath{\mathcal{T}}\xspace^{i*}$ of $\ensuremath{\mathcal{T}}\xspace^i$ in randomized polynomial time. The size $|\ensuremath{\mathcal{T}}\xspace^{i*}|$ is $\mathcal{O}(\ell^3)$.
\item Select a set $\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^i$ as follows: For each $T\in\ensuremath{\mathcal{T}}\xspace^{i*}$ add to $\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^i$ a component $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace\setminus(\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^0\cup\ldots\cup\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^{i-1})$ such that $C$ fulfills the condition for $T$ in Step~\ref{step:selectti}, i.e., such that: \label{step:selectci}
\begin{enumerate}
\item There is a set $Z\subseteq N_G(T)\cap C$ of at most three neighbors of $T$ in $C$ such that no vertex cover of $G[C]$ of size $\frac12(|C|+1)$ contains $Z$. (We know that $Z$ must be nonempty.)
\end{enumerate}
Clearly, the size $|\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^i|$ is $\mathcal{O}(\ell^3)$. Note that the same component $C$ can be chosen for multiple sets $T\in\ensuremath{\mathcal{T}}\xspace^{i*}$ but we only require an upper bound on $|\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^i|$.
\item If $i<\ell+1$ then increase $i$ by one and return to Step~\ref{step:selectti}. Else return the set
\[
\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace:=\bigcup_{i=0}^{\ell+1}\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^i.
\]
The size of $\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace$ is $\mathcal{O}(\ell^4)$ since it is the union of $\ell+2$ sets that are each of size $\mathcal{O}(\ell^3)$.\label{step:returnrelc}
\end{enumerate}
In particular, we will be interested in the components $C\in\ensuremath{\hat{\mathcal{C}}_3}\xspace$ that are not in $\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace$. We call these \emph{irrelevant components} and let $\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace:=\ensuremath{\hat{\mathcal{C}}_3}\xspace\setminus\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace$ denote the set of all irrelevant components. (Of course we still need to prove that they are true to their name.)
\begin{lemma}\label{lemma:removeirrelevantcomponents}
Let $G'$ be obtained by deleting from $G$ all vertices of irrelevant components, i.e., $G':=G-\bigcup_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}C$, and let $k'=k-\sum_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}\frac12(|C|+1)$, i.e., $k'$ is equal to $k$ minus the lower bounds for vertex covers of the irrelevant components. Then $G$ has a vertex cover of size at most $k$ if and only if $G'$ has a vertex cover of size at most $k'$.
Moreover, $k-(2LP(G)-MM(G))=k'-(2LP(G')-MM(G'))$, i.e., the instances $(G,k,\ell)$ and $(G',k',\ell')$ of \probname{Vertex Cover}\xspaceanb have the same parameter value $\ell=\ell'$.
\end{lemma}
\begin{proof}
Let us first discuss the easy direction: Assume that $G$ has a vertex cover $X$ of size at most $k$; prove that $G'$ has a vertex cover of size at most $k'$. Let $X'$ denote the restriction of $X$ to $G'$, i.e., $X'=X\cap V(G')=X\setminus U$ where $U=\bigcup_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace} C$. Clearly, $X'$ is a vertex cover of $G'$. Concerning the size of $X'$ let us observe the following: For each component $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ the set $X\cap C$ must be a vertex cover of $G[C]$ (this of course holds for any set of vertices in $G$). We know that each graph $G[C]$ for $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace\subseteq\ensuremath{\hat{\mathcal{C}}_3}\xspace$ is factor-critical and, hence, the size of $X\cap C$ is at least $\frac12(|C|+1)$. Summing over all $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ we find that $X'$ contains at least $\sum_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}\frac12(|C|+1)$ vertices less than $X$. This directly implies that $|X'|\leq k'$ and completes this part of the proof.
Now, assume that $G'$ has a vertex cover of size at most $k'$; let $V':=V(G')$ and $E':=E(G')$. This part requires most of the lemmas that we established in the previous sections. It is of particular importance, that from the nice decomposition $(A,B,D,M)$ of $G$ we can derive a very similar nice decomposition of $G'$. For convenience let $\ensuremath{V_{\mathtt{irr}}}\xspace:=\bigcup_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}C$. By Lemma~\ref{lemma:inheritance} we may repeatedly delete unmatched components, such as $\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace\subseteq \ensuremath{\hat{\mathcal{C}}_3}\xspace$, and always derive a nice decomposition of the resulting graph. Doing this for all components in $\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ we end up with graph $G'$ and the nice decomposition $(A,B,D',M')$ where $M'$ is the restriction of $M$ to $V(G')=V\setminus \ensuremath{V_{\mathtt{irr}}}\xspace$ and $D'=D\setminus \ensuremath{V_{\mathtt{irr}}}\xspace$.
Let us now fix an arbitrary dominant vertex cover $X'$ of $G'$ with respect to $(A,B,D',M')$, i.e., $X'$ is of minimum size and contains the fewest vertices of $D'$ among minimum vertex covers of $G'$; clearly $|X'|\leq k'$. Our strategy will be to construct a vertex cover of $G$ of size at most $k$ by adding a vertex cover of size $\frac12(|C|+1)$ for each component $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$. The crux with this idea lies in the edges between components $C$ and the set $A$. We will need to show that we can cover edges between $C$ and $A\setminus X'$ by the selection of vertices in $C$ without spending more than $\frac12(|C|+1)$. Define $H':=H(G',A,B,D',M')$ according to Definition~\ref{definition:graphh} and define $\ensuremath{X_{\mathtt{op}}}\xspace':=\ensuremath{X_{\mathtt{op}}}\xspace(A_1,A_3,M',X')$ according to Definition~\ref{definition:setxop}; by Lemma~\ref{lemma:closest} the set $\ensuremath{X_{\mathtt{op}}}\xspace'$ is closest to $A_3$ in $H'$ and by Lemma~\ref{lemma:nice:boundxh:boundac} we have $|\ensuremath{X_{\mathtt{op}}}\xspace'|\leq \ell$. We claim that $H'$ is in fact identical with $H=H(G,A,B,D,M)$; let us see why this holds: Both graphs are on the same vertex set $A$. There is a directed edge $(u,v)$ in $H$ if there is a vertex $w\in D$ with $\{u,w\}\in E\setminus M$ and $\{w,v\}\in M$. Note that this implies $w\notin \ensuremath{V_{\mathtt{irr}}}\xspace$ as all components in $\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace\subseteq\ensuremath{\hat{\mathcal{C}}_3}\xspace$ are unmatched, whereas $\{w,v\}\in M$ and $w\in D$ and $v\in A$. Thus, $w$ exists also in $G'$ and $\{w,v\}\in M'$ since it is not an edge between vertices of a component in $\ensuremath{\hat{\mathcal{C}}_3}\xspace$. Similarly, $\{u,w\}\in E'\setminus M'$ since $M'\subseteq M$ and $E'$ contains all edges of $E$ that have no endpoint in $\ensuremath{V_{\mathtt{irr}}}\xspace$. Thus, $(u,v)$ is also an edge of $H'$.
Conversely, if $(u,v)$ is a directed edge of $H'$ then there exists $w\in D'$ with $\{w,v\}\in M'$ and $\{u,w\}\in E'\setminus M'$. Clearly, $w\in D\supseteq D'$ and $\{w,v\}\in M\supseteq M'$. Since $M$ is a matching, it cannot contain both $\{u,w\}$ and $\{w,v\}$, hence $\{u,w\}\notin M$. Thus, using $E'\subseteq E$ we have $\{u,w\}\in E\setminus M$, implying that $(u,v)$ is also an edge of $H$. Thus, the two graphs $H$ and $H'$ are identical and, in particular, $\ensuremath{X_{\mathtt{op}}}\xspace'$ is also closest to $A_3$ in $H$.
Consider now the set $X'$ as a partial vertex cover of $G$. There are uncovered edges, i.e., with no endpoint in $X'$, inside components $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ and between such components and vertices in $A\setminus X'$. Since the remaining budget of $k-k'$ is exactly equal to smallest vertex covers for the components in $\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ we cannot add any vertices that are not in such a component (and not more than $\frac12(|C|+1)$ per component $C$). Thus, if $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ and $A\setminus X'$ has a set $Z$ of neighbors in $C$, then the question is whether there is a vertex cover of $G[C]$ of size $\frac12(|C|+1)$ that includes $Z$. We will prove that this is always the case.
\begin{claim}\label{claim:key}
Let $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace\subseteq\ensuremath{\hat{\mathcal{C}}_3}\xspace$ and let $Z_C:=N_G(A\setminus X')\cap C$. There is a vertex cover $X_C$ of $G[C]$ with $Z_C\subseteq X_C$ of size at most $\frac12(|C|+1)$.
\end{claim}
\begin{proof}
Assume for contradiction that there is no vertex cover of $G[C]$ that includes $Z_C$ and has size at most $\frac12(|C|+1)$. By Lemma~\ref{lemma:criticalsets:boundsize} there is a subset $Z\subseteq Z_C$ of size at most three such that no vertex cover of $G[C]$ of size at most $\frac12(|C|+1)$ contains $Z$: Let $Z$ be any minimal subset of $Z_C$ with this property; the lemma implies that $|Z|\leq 3$. (Note that $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace\subseteq \ensuremath{\hat{\mathcal{C}}_3}\xspace$ implies that $C$ has at least three vertices and that it is factor-critical as a component of $G[D]$.) Let $A_C$ be a minimal subset of $A\setminus X'$ such that its neighborhood in $C$ includes the set $Z$; since $Z$ has size at most three, the set $A_C$ also has size at most three. Since $A_C\cap X'=\emptyset$, by Lemma~\ref{lemma:notreachable}, each $v\in A_C$ is reachable from $A_3$ in $H'-\ensuremath{X_{\mathtt{op}}}\xspace'=H-\ensuremath{X_{\mathtt{op}}}\xspace'$.
We first prove that $C$ must have been considered in all $\ell+1$ iterations of computing $\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace$. If $Z=\emptyset$ then $C$ has no vertex cover of size $\frac12(|C|+1)$. This, however, would imply that $C\in\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^0\subseteq\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace$; a contradiction. For the remainder of the proof we have $Z\neq\emptyset$, i.e., $1\leq |Z|\leq 3$, and hence the set $A_C$ must be nonempty to ensure $Z\subseteq N_G(A_C)\cap C$ (and of size at most three). It follows that in each repetition of Step~\ref{step:selectti} the sets $T=A_C\subseteq A$, component $C$, and set $Z$ were considered. (Note that $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace=\ensuremath{\hat{\mathcal{C}}_3}\xspace\setminus \ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace=\ensuremath{\hat{\mathcal{C}}_3}\xspace\setminus (\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^0\cup\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^1\cup\ldots\cup\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^{\ell+1})$.) We have $T=A_C\subseteq A$ nonempty and of size at most three, $Z\subseteq N_G(T)\cap C$ of size at most three, and there is no vertex cover of $G[C]$ of size $\frac12(|C|+1)$ that contains $Z$. Thus, the set $A_C$ is contained in all sets $\ensuremath{\mathcal{T}}\xspace^1,\ldots,\ensuremath{\mathcal{T}}\xspace^{\ell+1}$.
Now, for each $i\in\{1,\ldots,\ell+1\}$, we need to consider why $C$ was not added to $\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^i$. Let us consider two cases, namely $A_C\in\ensuremath{\mathcal{T}}\xspace^{i*}$ and $A_C\notin\ensuremath{\mathcal{T}}\xspace^{i*}$: If $A_C\in\ensuremath{\mathcal{T}}\xspace^{i*}$ then in Step~\ref{step:selectci} we have selected a component $C^i\neq C$, with $C^i\in\ensuremath{\hat{\mathcal{C}}_3}\xspace\setminus(\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^0\cup\ldots\cup\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^{i-1})$, such that there is a nonempty set $Z^i\subseteq N_G(A_C)\cap C^i$ such that $G[C^i]$ has no vertex cover of size $\frac12(|C^i|+1)$ that contains $Z^i$. For later reference let us remember the triple $(C^i,Z^i,A^i)$ with $A^i:=A_C$. We know that $C^i\in\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^i\subseteq\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace$, we know that there is no vertex cover of $G[C^i]$ of size $\frac12(|C^i|+1)$ that contains $Z^i$, and $Z^i\subseteq N_G(A^i)\cap C^i$. Crucially, all vertices $v\in A^i$ are reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace'$. (There will be a second source of such triples in the case that $A_C\notin\ensuremath{\mathcal{T}}\xspace^{i*}$, but with $A^i\neq A_C$ and with slightly more work for proving these properties of the triples in question.)
In the second case we have $A_C\notin\ensuremath{\mathcal{T}}\xspace^{i*}$. By Lemma~\ref{lemma:repsetofcriticalsets}, since $A_C\in\ensuremath{\mathcal{T}}\xspace^i$, all vertices of $A_C$ are reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace'$, and $\ensuremath{X_{\mathtt{op}}}\xspace'\subseteq A$ of size at most $\ell$, it follows that $\ensuremath{\mathcal{T}}\xspace^{i*}$ contains a set $A^i$ such that all vertices of $A^i$ are reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace'$. Thus, in Step~\ref{step:selectci} we have selected a component $C^i$ such that there is a set $Z^i\subseteq N_G(A^i)\cap C^i$ such that $G[C^i]$ has no vertex cover of size $\frac12(|C^i|+1)$ that contains $Z^i$. We remember the triple $(C^i,Z^i,A^i)$.
We find that, independently of whether $A_C\in\ensuremath{\mathcal{T}}\xspace^{i*}$ in iteration $i\in\{1,\ldots,\ell+1\}$ we get a triple $(C^i,Z^i,A^i)$ such that $C^i\in\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^i$, with $Z^i\subseteq N_G(A^i)\cap C^i$, and such that all vertices $v\in A^i$ are reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace'$. We observe that the components $C^i$ are pairwise distinct: Say $1\leq i<j\leq\ell+1$. Then $C^i\in\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^i$ and $C^j\in \ensuremath{\hat{\mathcal{C}}_3}\xspace\setminus(\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^0\cup\ldots\cup\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^{j-1})$, implying that $C^j\notin \ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace^i$ as $i\leq j-1$, and hence that $C^j\neq C^i$. We use these components to prove that $X'$, as a vertex cover of $G'$, has at least $\ell+1$ active components, namely $C^1,\ldots,C^{\ell+1}$, which will be seen to contradict that it has size at most $k'$.
Let $i\in \{1,\ldots,\ell+1\}$. We have that all vertices of $A^i$ are reachable from $A_3$ in $H-\ensuremath{X_{\mathtt{op}}}\xspace'$; the same is true in $H'-\ensuremath{X_{\mathtt{op}}}\xspace'$ since $H=H'$. From Lemma~\ref{lemma:reachable} applied to graph $G'$, nice decomposition $(A,B,D',M')$ of $G'$, and vertex cover $X'$ we get that $X'$ contains no vertex of $A^i$.
Since $C^i\in\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace$ we know that $C^i$ is also an unmatched non-singleton component of $G[D']$ with respect to matching $M'$ (Lemma~\ref{lemma:inheritance}). As $X'\cap A^i=\emptyset$ it follows directly that $X'$ contains $N_{G'}(A^i)\cap C^i=N_G(A^i)\cap C^i\supseteq Z^i$ (regarding $N_{G'}(A^i)\cap C^i=N_G(A^i)\cap C^i$ note that $G'$ differs from $G$ only by removing vertices of some other components of $\ensuremath{\hat{\mathcal{C}}_3}\xspace$, none of which are in these sets). Since $G'[C^i]=G[C^i]$ has no vertex cover of size $\frac12(|C^i|+1)$ that contains $Z^i$, it follows that $X'$ contains more than $\frac12(|C^i|+1)$ vertices of $C^i$, making $C^i$ an active component with respect to $X'$.
We proved that the vertex cover $X'$ of $G'$ has at least $\ell+1$ active components. Thus, by Lemma~\ref{lemma:nice:boundxh:boundac} its size is at least $|M'|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace'|+\ell+1$ where $\ensuremath{\hat{\mathcal{C}}_3}\xspace'$ is the set of unmatched non-singleton components of $G'[D']$. On the other hand, we have $|X'|\leq k'= k-\sum_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}\frac12(|C|+1)$ and $\ell=k-(2LP(G)-MM(G))=k-(|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|)$. It remains to compare $|M'|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace'|$ with $|M|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace|$.
\begin{enumerate}
\item Let $\ensuremath{M_{\mathtt{irr}}}\xspace\subseteq M$ denote the set of edges in $M$ whose endpoints are in $\ensuremath{V_{\mathtt{irr}}}\xspace$; recall that $\ensuremath{V_{\mathtt{irr}}}\xspace$ denotes the set of all vertices of (irrelevant) components in $\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$.
\item Thus, when creating $G'$, we are deleting $|\ensuremath{V_{\mathtt{irr}}}\xspace|$ vertices and $|\ensuremath{M_{\mathtt{irr}}}\xspace|$ matching edges (recalling that there are no matching edges with exactly one endpoint in $\ensuremath{V_{\mathtt{irr}}}\xspace$). Each component $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ contributes $|C|$ vertices to $\ensuremath{V_{\mathtt{irr}}}\xspace$ and $\frac12(|C|-1)$ edges to $\ensuremath{M_{\mathtt{irr}}}\xspace$. Thus, $|\ensuremath{M_{\mathtt{irr}}}\xspace|=\sum_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}\frac12(|C|-1)=\sum_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}\frac12(|C|+1)-|\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace|$.
\item Using this, we get
\begin{align*}
k'&= k-\sum_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}\frac12(|C|+1)\\
&= |M| + |\ensuremath{\hat{\mathcal{C}}_3}\xspace| + \ell - (|\ensuremath{M_{\mathtt{irr}}}\xspace| + |\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace|)\\
&= |M'| + |\ensuremath{\hat{\mathcal{C}}_3}\xspace'| + \ell.
\end{align*}
\end{enumerate}
Since $|X'|\geq |M'|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace'|+\ell+1=k'+1$, this contradicts the assumption that $|X'|\leq k'$. Thus, the initial assumption in the claim proof must be wrong and we get that there does exist a vertex cover $X_C$ of $G[C]$ of size $\frac12(|C|+1)$ that contains $Z_C$.
\end{proof}
Using the claim we can now easily complete $X'$ to a vertex cover $X$ of $G$ of size at most $k$: As observed before, we need to add vertices such as to cover all edges inside components $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ and edges between such components and $A\setminus X'$. Begin with $X:=X'$. Consider any component $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ and let $Z_C:=N_G(A\setminus X')\cap C$. By Claim~\ref{claim:key}, we know that there exists a vertex cover $X_C$ of size $\frac12(|C|+1)$ that contains $Z_C$. Clearly, by adding $X_C$ to $X$ we cover all edges of $C$ and all edges between $C$ and neighbors of $C$ that were not covered by $X'$. (The endpoints of these edges in $C$ exactly constitute the set $Z_C\subseteq X_C$.) By performing this step for all components $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ we add exactly $\sum_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}\frac12(|C|+1)$ vertices, implying that
\[
|X|= |X'|+\sum_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}\frac12(|C|+1) \leq k' + \sum_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace}\frac12(|C|+1).
\]
Since $\ensuremath{V_{\mathtt{irr}}}\xspace=\bigcup_{C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace} C$ are the only vertices present in $G$ but not in $G'=G-\ensuremath{V_{\mathtt{irr}}}\xspace$ it follows that all edges with no endpoint in $\ensuremath{V_{\mathtt{irr}}}\xspace$ are already covered by $X'$. Thus, $X$ is indeed a vertex cover of $G$ of size at most $k$, as claimed.
It remains to prove that $k-(2LP(G)-MM(G))=k'-(2LP(G')-MM(G'))$. We already proved that $k'=|M'|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace'|+\ell$. By Lemma~\ref{lemma:nice:vclb}, since $(A,B,D',M')$ is a nice decomposition of $G'$, we have $|M'|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace'|=2LP(G')-MM(G')$. This directly implies that $k'-(2LP(G')-MM(G'))=k'-(|M'|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace'|)=\ell=k-(2LP(G)-MM(G))$.
\end{proof}
We can now complete our kernelization. According to Lemma~\ref{lemma:removeirrelevantcomponents} it is safe to delete all irrelevant components (and update $k$ accordingly). We obtain a graph $G'$ and integer $k'$ such that the following holds:
\begin{enumerate}
\item $G'$ has a vertex cover of size at most $k'$ if and only if $G$ has a vertex cover of size at most $k$, i.e., the instances $(G,k)$ and $(G',k')$ for \probname{Vertex Cover}\xspace are equivalent.
\item As a part of the proof of Lemma~\ref{lemma:removeirrelevantcomponents} we showed that
\[
k'=|M'|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace'|+\ell
\]
where $\ensuremath{\hat{\mathcal{C}}_3}\xspace'$ is the set of unmatched non-singleton components of $G'[D']$ with respect to $M'$.
\item From Lemma~\ref{lemma:inheritance} we know that $\ensuremath{\hat{\mathcal{C}}_3}\xspace'$ is equal to the set $\ensuremath{\hat{\mathcal{C}}_3}\xspace$ (of unmatched non-singleton components of $G[D]$ with respect to $M$) minus the components $C\in\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace$ that were removed to obtain $G'$. In other words, $\ensuremath{\hat{\mathcal{C}}_3}\xspace'=\ensuremath{\hat{\mathcal{C}}_3}\xspace\setminus\ensuremath{\mathcal{C}_{\mathtt{irr}}}\xspace=\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace$.
\item We know from Step~\ref{step:returnrelc} that $|\ensuremath{\mathcal{C}_{\mathtt{rel}}}\xspace|=\mathcal{O}(\ell^4)$. Hence, $|\ensuremath{\hat{\mathcal{C}}_3}\xspace'|=\mathcal{O}(\ell^4)$.
\item Let us consider $p:=k'-|M'|$, which is the parameter value of $(G',k')$ when considered as an instance of \probname{Vertex Cover}\xspace parameterized above the size of a maximum matching. Clearly,
\[
p=k'-|M'|=|M'|+|\ensuremath{\hat{\mathcal{C}}_3}\xspace'|+\ell-|M'|=\ell+\mathcal{O}(\ell^4)=\mathcal{O}(\ell^4).
\]
\item We can now apply any polynomial kernelization for \probname{Vertex Cover}\xspaceamm to get a polynomial kernelization for \probname{Vertex Cover}\xspaceanb. On input of $(G',k',p)$ it returns an equivalent instance $(G^*,k^*,p^*)$ of size $\mathcal{O}(p^c)$ for some constant $c$. We may assume that $k^*=\mathcal{O}(p^c)$ since else it would exceed the number of vertices in $G^*$ and we may as well return a \textbf{yes}\xspace-instance of constant size.
Let $\ell^*=k^*-(2LP(G^*)-MM(G^*))$, i.e., the parameter value of the instance $(G^*,k^*,\ell^*)$ of \probname{Vertex Cover}\xspaceanb. Clearly, $\ell^*\leq k^*=\mathcal{O}(p^c)$. Thus, $(G^*,k^*,\ell^*)$ has size and parameter value $\mathcal{O}(p^c)$.
\end{enumerate}
Kratsch and Wahlstr\"om~\cite{KratschW12} give a randomized polynomial kernelization for \probname{Vertex Cover}\xspaceamm. The size is not analyzed since it relies on equivalence of \probname{Almost 2-SAT($k$)}\xspace and \probname{Vertex Cover}\xspaceamm under polynomial parameter transformations~\cite{RamanRS11}; the reductions preserve the parameter value but may increase the size polynomially. The size obtained for \probname{Almost 2-SAT($k$)}\xspace is $\mathcal{O}(p^{12})$, following from a kernelization to $\mathcal{O}(p^6)$ variables. Even without a size increase by the transformation back to \probname{Vertex Cover}\xspaceamm, which seems doable, we only get a size of $\mathcal{O}(p^{12})=\mathcal{O}(\ell^{48})$. We note, however, that the kernelization for \probname{Almost 2-SAT($k$)}\xspace also relies, amongst others, on computing a representative set of reachable tuples in a directed graph. It is likely that a direct approach for kernelizing \probname{Vertex Cover}\xspaceanb could make do with only a single iteration of this strategy.
\begin{theorem}\label{theorem:main}
\probname{Vertex Cover}\xspaceanb has a randomized polynomial kernelization with error probability exponentially small in the input size.
\end{theorem}
\section{Proof of Lemma~\ref{lemma:repsetofcriticalsets}}\label{section:proofofmatroidresult}
In this section we provide a proof of Lemma~\ref{lemma:repsetofcriticalsets}, which is a generalization of \cite[Lemma 2]{KratschW11_arxiv}; in that work, it is already pointed out that a generalization to $q$-tuples is possible by the same approach. Accordingly, the proof in this section is provided only to make the present work self-contained.
We need to begin with some basics on matroids; for a detailed introduction to matroids see Oxley~\cite{OxleyBook}: A \emph{matroid} is a pair $M=(U,\ensuremath{\mathcal{I}}\xspace)$ where $U$ is the \emph{ground set} and $\ensuremath{\mathcal{I}}\xspace\subseteq 2^U$ is a family of \emph{independent sets} such that
\begin{enumerate}
\item $\emptyset\in\ensuremath{\mathcal{I}}\xspace$,
\item if $I\subseteq I'$ and $I'\in\ensuremath{\mathcal{I}}\xspace$ then $I\in\ensuremath{\mathcal{I}}\xspace$, and
\item if $I,I'\in\ensuremath{\mathcal{I}}\xspace$ with $|I|<|I'|$ then there exists $u\in I'\setminus I$ with $I\cup\{u\}\in\ensuremath{\mathcal{I}}\xspace$; this is called the augmentation axiom.
\end{enumerate}
A set $I\in\ensuremath{\mathcal{I}}\xspace$ is \emph{independent}; all other subsets of $U$ are \emph{dependent}. The maximal independent sets are called \emph{bases}; by the augmentation axiom they all have the same size. For $X\subseteq U$, the \emph{rank $r(X)$ of $X$} is the cardinality of the largest independent set $I\subseteq X$. The \emph{rank of $M$} is $r(M):=r(U)$.
Let $A$ be a matrix over a field $\ensuremath{\mathbb{F}}\xspace$, let $U$ be the set of columns of $A$, and let $\ensuremath{\mathcal{I}}\xspace$ contain those subsets of $U$ that are linearly independent over $\ensuremath{\mathbb{F}}\xspace$. Then $(U,\ensuremath{\mathcal{I}}\xspace)$ defines a matroid $M$ and we say that $A$ \emph{represents} $M$. A matroid $M$ is \emph{representable (over \ensuremath{\mathbb{F}}\xspace)} if there is a matrix $A$ (over \ensuremath{\mathbb{F}}\xspace) that represents it. A matroid representable over at least one field is called \emph{linear}.
Let $D=(V,E)$ be a directed graph and $S,T\subseteq V$. The set $T$ is \emph{linked to $S$} if there exist $|T|$ vertex-disjoint paths from $S$ to $T$; paths of length zero are permitted. For a directed graph $D=(V,E)$ and $S,T\subseteq V$ the pair $M=(T,\ensuremath{\mathcal{I}}\xspace)$ is a matroid, where $\ensuremath{\mathcal{I}}\xspace$ contains those subsets $T'\subseteq T$ that are linked to $S$ \cite{Perfect1968}. Matroids that can be defined in this way are called \emph{gammoids}; the special case with $T=V$ is called a \emph{strict gammoid}. Marx~\cite{Marx09} gave an efficient randomized algorithm for finding a representation of a strict gammoid given the underlying graph; the error probability can be made exponentially small in the runtime.
\begin{theorem}[\cite{Perfect1968,Marx09}]\label{theorem:gammoidrepresentation}
Let $D=(V,E)$ be a directed graph and let $S\subseteq V$. The subsets $T\subseteq V$ that are linked to $S$ form the independent sets of a matroid on ground set $V$. Furthermore, a representation of this matroid can be obtained in randomized polynomial time with one-sided error.
\end{theorem}
As in previous work~\cite{KratschW12} we use the notion of \emph{representative sets}. The definition was introduced by Marx~\cite{Marx09} inspired by earlier work of Lov\'asz~\cite{Lovasz1977}.
\begin{definition}[\cite{Marx09}]\label{definition:representativesets}
Let $M=(U,\ensuremath{\mathcal{I}}\xspace)$ be a matroid and let $\ensuremath{\mathcal{Y}}\xspace$ be a family of subsets of $U$. A subset $\ensuremath{\mathcal{Y}}\xspace^*\subseteq\ensuremath{\mathcal{Y}}\xspace$ is \emph{$r$-representative} for $\ensuremath{\mathcal{Y}}\xspace$ if the following holds: For every $X\subseteq U$ of size at most $r$, if there is a set $Y\in\ensuremath{\mathcal{Y}}\xspace$ such that $X\cap Y=\emptyset$ and $X\cup Y\in\ensuremath{\mathcal{I}}\xspace$ then there is a set $Y^*\in\ensuremath{\mathcal{Y}}\xspace^*$ such that $X\cap Y^*=\emptyset$ and $X\cup Y^*\in\ensuremath{\mathcal{I}}\xspace$.
\end{definition}
Note that in the definition we may as well require that $\ensuremath{\mathcal{Y}}\xspace$ is a family of independent sets of $M$; independence of $X\cup Y$ requires independence of $Y$.
Marx~\cite{Marx09} proved an upper bound on the required size of representative subsets of a family $\ensuremath{\mathcal{Y}}\xspace$ in terms of the rank of the underlying matroid and the size of the largest set in $\ensuremath{\mathcal{Y}}\xspace$. The upper bound proof is similar to \cite[Theorem 4.8]{Lovasz1977} by Lov\'asz.
\begin{lemma}[\cite{Marx09}]\label{lemma:representativeset}
Let $M$ be a linear matroid of rank $r+s$ and let $\ensuremath{\mathcal{Y}}\xspace=\{Y_1,\ldots,Y_m\}$ be a collection of independent sets, each of size $s$. If $|\ensuremath{\mathcal{Y}}\xspace|>\binom{r+s}{s}$ then there is a set $Y\in\ensuremath{\mathcal{Y}}\xspace$ such that $\ensuremath{\mathcal{Y}}\xspace\setminus \{Y\}$ is $r$-representative for $\ensuremath{\mathcal{Y}}\xspace$. Furthermore, given a representation $A$ of $M$, we can find such a set $Y$ in $f(r,s)\cdot(||A||m)^{\mathcal{O}(1)}$ time.
\end{lemma}
The factor of $f(r,s)$ in the runtime of Lemma~\ref{lemma:representativeset} is due to performing linear algebra operations on vectors of dimension $\binom{r+s}{s}$. Since our application of the lemma has $s=3$ and $r$ bounded by the number of vertices in the underlying graph, this factor of the runtime is polynomial in the input size. We also remark that we will tacitly use the lemma for directly computing an $r$-representative subset $\ensuremath{\mathcal{Y}}\xspace^*\subseteq\ensuremath{\mathcal{Y}}\xspace$ of size at most $\binom{r+s}{s}$ since the lemma can clearly be iterated to achieve this. We note that faster algorithms for computing representative sets were given by Fomin et al.~\cite{FominLS14}, which leads to significantly better runtimes, in particular for the case of uniform matroids, when $s$ is not constant.
Now we are ready to prove Lemma~\ref{lemma:repsetofcriticalsets}. The proof follows the strategy used for \cite[Lemma 2]{KratschW11_arxiv}. For convenience, let us recall the lemma statement.
\begin{lemma}[recalling Lemma~\ref{lemma:repsetofcriticalsets}]
Let $H=(V_H,E_H)$ be a directed graph, let $S_H\subseteq V_H$, let $\ell\in\mathbb{N}$, and let $\ensuremath{\mathcal{T}}\xspace$ be a family of nonempty vertex sets $T\subseteq V_H$ each of size at most three. In randomized polynomial time, with failure probability exponentially small in the input size, we can find a set $\ensuremath{\mathcal{T}}\xspace^*\subseteq\ensuremath{\mathcal{T}}\xspace$ of size $\mathcal{O}(\ell^3)$ such that the following holds for any set $X_H\subseteq V_H$ of size at most $\ell$ that is closest to $S_H$: if there is a set $T\in\ensuremath{\mathcal{T}}\xspace$ such that all vertices $v\in T$ are reachable from $S_H$ in $H-X_H$ then there is a corresponding set $T^*\in\ensuremath{\mathcal{T}}\xspace^*$ satisfying the same properties.
\end{lemma}
\begin{proof}
We begin with constructing a directed graph $D$ and vertex set $S$:
\begin{enumerate}
\item Create a graph $\overline{H}$ from $H$ by adding $\ell+1$ new vertices $s_1,\ldots,s_{\ell+1}$ and adding all edges $(s_i,s)$ for $i\in\{1,\ldots,\ell+1\}$ and $s\in S_H$. Define $\overline{S}:=\{s_1,\ldots,s_{\ell+1}\}$ and note that $V(\overline{H})=V_H\cup \overline{S}$.
\item Let $D$ consist of three vertex-disjoint copies of $\overline{H}$. The vertex set $V^j$ of copy $j$ is $V^j=\{v^j\mid v\in V(\overline{H})\}$; let $S^j:=\{s_i^j\mid s_i \in \overline{S}\}\subseteq V^j$.
\item Let $S:=S^1\cup S^2\cup S^3$. Note that $|S|=3(\ell+1)$.
\end{enumerate}
Let $M$ be the strict gammoid defined by graph $D$ and source set $S$. Compute in randomized polynomial time a matrix $A$ that represents $M$ using Theorem~\ref{theorem:gammoidrepresentation}; it suffices to prove that we arrive at the claimed set $\ensuremath{\mathcal{T}}\xspace^*$ if $A$ does indeed represent $M$, i.e., if no error occurred.
We now define a family $\ensuremath{\mathcal{Y}}\xspace$ of subsets of $V(D)$, each of size three; for convenience, let $<$ be an arbitrary linear ordering of the vertex set $V_H$ of $H$:
\begin{enumerate}
\item For $\{u,v,w\}\in\ensuremath{\mathcal{T}}\xspace$ with $u<v<w$ let $Y(\{u,v,w\}):=\{u^1,v^2,w^3\}$.
\item For $\{u,v\}\in\ensuremath{\mathcal{T}}\xspace$ with $u<v$ let $Y(\{u,v\}):=\{u^1,v^2,v^3\}$.
\item For $\{u\}\in\ensuremath{\mathcal{T}}\xspace$ let $Y(\{u\}):=\{u^1,u^2,u^3\}$.
\end{enumerate}
Let us remark that the particular assignment of vertices in $T\in\ensuremath{\mathcal{T}}\xspace$ to the three disjoint copies of $\overline{H}$ is immaterial so long as copies of all vertices are present. The following claim relates reachability of vertices in $T\in\ensuremath{\mathcal{T}}\xspace$ in $H-X_H$ to independence of $Y(T)$ in $M$.
\begin{claim}\label{claim:reachability:independence}
Let $X_H\subseteq V_H$ be a set of at most $\ell$ vertices that is closest to $S_H$ in $H$, and let $T\in\ensuremath{\mathcal{T}}\xspace$. The vertices in $T$ are all reachable from $S_H$ in $H-X_H$ if and only if $Y(T)\cup I$ is independent in $M$ and $Y(T)\cap I=\emptyset$, where $I:=\{x^1,x^2,x^3\mid x\in X_H\}$.
\end{claim}
\begin{proof}
Assume first that each vertex of $T$ is reachable from $S_H$ in $H-X_H$. Observe that this requires $T\cap X_H=\emptyset$. By Proposition~\ref{proposition:closest}, since $X_H$ is closest to $S_H$, we have that there exist $|X_H|+1$ vertex-disjoint paths from $S_H$ to $X_H\cup\{v\}$ for each vertex $v\in T$; in other words, $X_H\cup\{v\}$ is linked to $S_H$ in $H$. Since $|X_H\cup\{v\}|\leq \ell+1$, it follows directly that $X_H\cup\{v\}$ is linked to $\overline{S}$ in $\overline{H}$. Thus, for $v^j\in Y(T)$ with $j\in\{1,2,3\}$, it follows that $I^j\cup\{v^j\}$ is linked to $S^j$ in $D$, where $I^j:=\{x^j \mid x\in X_H\}$. Since the three copies of $\overline{H}$ in $D$ are vertex-disjoint, we conclude that $Y(T)\cup I^1\cup I^2\cup I^3=Y(T)\cup I$ is linked to $S=S^1\cup S^2\cup S^3$ in $D$. Thus, $Y(T)\cup I$ is independent in $M$, as claimed. To see that $Y(T)\cap I=\emptyset$ note that $v^j\in Y(T)\cap I$ would imply $v\in T$ and $v\in X_H$; a contradiction to $T\cap X_H=\emptyset$.
For the converse, assume that $Y(T)\cup I$ is independent in $M$ and that $Y(T)\cap I=\emptyset$. Let $v\in T$ and let $j\in\{1,2,3\}$ such that $v^j\in Y(T)$. Observe that $Y(T)\cap I=\emptyset$ implies $v\notin X_H$: Indeed, if $v\in X_H$ then we have $v^j\in Y(T)\cap I$; a contradiction. Now, independence of $Y(T)\cup I$ implies that $Y(T)\cup I$ is linked to $S$ in $D$. It follows, by vertex-disjointness of the three copies of $\overline{H}$ in $D$, that $\{v^j\}\cup I^j$ is linked to $S^j$ using only vertices $w^j$ with $w\in V(\overline{H})$. This implies that $\{v\}\cup X_H$ is linked to $\overline{S}$ in $\overline{H}$. Observe now that any path from $\overline{S}$ to $\{v\}\cup X_H$ must contain as its second vertex a vertex of $S_H$; here it is convenient that $X_H\subseteq V_H$ and $\overline{S}\cap V_H=\emptyset$, causing all paths to have length at least one and at least two vertices. Thus, we conclude that $\{v\}\cup X_H$ is linked to $S_H$ in $\overline{H}$, and hence also in $H$ since vertices of $\overline{S}$ cannot be internal vertices of paths (as they have only outgoing edges). Clearly, in a collection of $|X_H|+1$ paths from $S_H$ to $X_H\cup \{v\}$ the path from $S_H$ to $v$ cannot contain any vertex of $X_H$ as they are endpoints of the other paths. Thus, there exists a path from $S_H$ to $v$ that avoids $X_H$, implying that $v$ is reachable from $S_H$ in $H-X_H$. Since $v$ was chosen arbitrarily from $T$, the claim follows.
\end{proof}
Now, use Lemma~\ref{lemma:representativeset} on the gammoid $M$ defined by graph $D$ and source set $S$, represented by the matrix $A$. The rank of $M$ is obviously exactly $|S|=3\ell+3$ since no set larger than $S$ can be linked to $S$ and $S$ itself is an independent set (as it is linked to itself). For the lemma choose $r=|S|-3=3\ell$ and $s=3$ and note that all sets in $\ensuremath{\mathcal{Y}}\xspace$ have size exactly $s=3$ as required. We obtain a set $\ensuremath{\mathcal{Y}}\xspace^*$ of size at most $\binom{r+s}{s}=\mathcal{O}(|S|^3)=\mathcal{O}(\ell^3)$ that $r$-represents $\ensuremath{\mathcal{Y}}\xspace$. Define a set $\ensuremath{\mathcal{T}}\xspace^*\subseteq\ensuremath{\mathcal{T}}\xspace$ by letting $\ensuremath{\mathcal{T}}\xspace^*$ contain those sets $T\in\ensuremath{\mathcal{T}}\xspace$ with $Y(T)\in\ensuremath{\mathcal{Y}}\xspace^*$. The size of $\ensuremath{\mathcal{T}}\xspace^*$ is equal to $|\ensuremath{\mathcal{Y}}\xspace^*|=\mathcal{O}(\ell^3)$ since each $Y\in\ensuremath{\mathcal{Y}}\xspace^*$ has exactly one $T\in\ensuremath{\mathcal{T}}\xspace$ with $Y=Y(T)$. (To see this, note that dropping the superscripts in $Y$ yields exactly the members of the corresponding set $T$; some may be repeated.)
\begin{claim}\label{claim:tstar}
For any set $X_H\subseteq V_H$ of size at most $\ell$ that is closest to $S_H$ the following holds: if there is a set $T\in\ensuremath{\mathcal{T}}\xspace$ such that all vertices $v\in T$ are reachable from $S_H$ in $H-X_H$ then there is a corresponding set $T^*\in\ensuremath{\mathcal{T}}\xspace^*$ satisfying the same properties.
\end{claim}
\begin{proof}
Let $T\in\ensuremath{\mathcal{T}}\xspace$ such that all vertices $v\in T$ are reachable from $S_H$ in $H-X_H$. By Claim~\ref{claim:reachability:independence} the set $Y(T)\cup I$ is independent in $M$ and $Y(T)\cap I=\emptyset$, where $I=\{x^1,x^2,x^3\mid x\in X_H\}$. Note that $Y(T)\in\ensuremath{\mathcal{Y}}\xspace$ and that $|I|=3|X_H|\leq 3\ell=r$. Thus, by Lemma~\ref{lemma:representativeset} there must be a set $Y^*\in\ensuremath{\mathcal{Y}}\xspace^*$ such that $Y^*\cap I=\emptyset$ and $Y^*\cup I$ is an independent set of $M$. Let $T^*\in\ensuremath{\mathcal{T}}\xspace$ with $Y^*=Y(T^*)$; such a set $T^*$ exists by definition of $\ensuremath{\mathcal{Y}}\xspace$ and, as discussed above, it is uniquely defined. By Claim~\ref{claim:reachability:independence} it follows that all vertices of $T^*$ are reachable from $S_H$ in $H-X_H$. This completes the proof of Claim~\ref{claim:tstar}.
\end{proof}
We recall that for our case of $s=3$ the computation of $\ensuremath{\mathcal{Y}}\xspace^*$ can be seen to take time polynomial in the input size. The computed set $\ensuremath{\mathcal{T}}\xspace^*$ fulfills the lemma statement unless the gammoid representation computed by Theorem~\ref{theorem:gammoidrepresentation} is erroneous, which has exponentially small chance of occurring. Note that boosting the success chance of Theorem~\ref{theorem:gammoidrepresentation} works by increasing the range of the random integers used therein (respectively, the field size): An additional factor of $2^p$ in the range of integers decreases the error probability by a factor of $2^{-p}$, while increasing the encoding size of the integers only by $p$ bits. Thus, by only a polynomial increase in the running time, we can get exponentially small error. This completes the proof.
\end{proof}
\section{Conclusion}\label{section:conclusion}
We have presented a randomized polynomial kernelization for \probname{Vertex Cover}\xspaceanb by giving a (randomized) polynomial parameter transformation to \probname{Vertex Cover}\xspaceamm. This improves upon the smallest parameter, namely $k-LP(G)$, for which such a result was known~\cite{KratschW12}. The kernelization for \probname{Vertex Cover}\xspaceamm \cite{KratschW12} involves reductions to and from \probname{Almost 2-SAT($k$)}\xspace, which can be done without affecting the parameter value (cf.~\cite{RamanRS11}). We have not attempted to optimize the total size. Given an instance $(G,k,\ell)$ for \probname{Vertex Cover}\xspaceanb we get an equivalent instance of \probname{Almost 2-SAT($k$)}\xspace with $\mathcal{O}(k^{24})$ variables and size $\mathcal{O}(k^{48})$, which still needs to be reduced to a \probname{Vertex Cover}\xspace instance.
It seems likely that the kernelization can be improved if one avoids the blackbox use of the kernelization for \probname{Vertex Cover}\xspaceamm and the detour via \probname{Almost 2-SAT($k$)}\xspace. In particular, the underlying kernelization for \probname{Almost 2-SAT($k$)}\xspace applies, in part, the same representative set machinery to reduce the number of a certain type of clauses. Conceivably the two applications can be merged, thus avoiding the double blow-up in size. As a caveat, it appears to be likely that this would require a much more obscure translation into a directed separation problem. Moreover, the kernelization for \probname{Almost 2-SAT($k$)}\xspace requires an approximate solution, and it is likely that the same would be true for this approach. It would of course also be interesting whether a deterministic polynomial kernelization is possible, but this is, e.g., already not known for \probname{Almost 2-SAT($k$)}\xspace and \probname{Vertex Cover}\xspaceamm.
We find the appearance of a notion of critical sets of size at most three and the derived separation problem in the auxiliary directed graph quite curious. For the related problem of separating at least one vertex from each of a given set of triples from some source $s$ by deleting at most $\ell$ vertices (a variant of \problem{Digraph Paircut}~\cite{KratschW12}) there is a natural $\mathcal{O}^*(3^\ell)$ time algorithm that performs at most $\ell$ three-way branchings before finding a solution (if possible). It would be interesting whether a complete encoding of \probname{Vertex Cover}\xspaceanb into a similar form would be possible, since that would imply an algorithm that exactly matches the running time of the algorithm by Garg and Philip~\cite{GargP16}.
\end{document} |
\begin{document}
\newtheorem{theorem}{Theorem}
\newtheorem*{defn}{Definition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{prop}[theorem]{Proposition}
\newtheorem{cor}[theorem]{Corollary}
\begin{center}
{\Large
A Bijection on Dyck Paths and Its Cycle Structure
}
\vspace{10mm}
DAVID CALLAN \\
Department of Statistics \\
\vspace*{-1mm}
University of Wisconsin-Madison \\
\vspace*{-1mm}
1300 University Ave \\
\vspace*{-1mm}
Madison, WI \ 53706-1532 \\
{\bf callan@stat.wisc.edu} \\
\vspace{5mm}
November 21, 2006
\end{center}
\vspace{3mm}
\begin{abstract}
The known bijections on Dyck paths are either involutions or have
notoriously intractable cycle structure. Here we present a size-preserving
bijection on Dyck paths whose cycle structure is amenable to complete analysis.
In particular, each cycle has length a power of 2.
A new manifestation of the
Catalan numbers as labeled forests crops up enroute as does the Pascal matrix mod 2. We use the bijection
to show the equivalence of two known manifestations
of the Motzkin numbers.
Finally, we consider some statistics on the new Catalan manifestation.
\end{abstract}
\vspace{10mm}
{\Large \textbf{1 \ Introduction} }\quad
There are several bijections on Dyck paths in the literature \cite{twobij04,catfine,
invol1999,bij1998,ordered,simple2003,don80,acp44,lalanne92,lalanne93,vaille97}, usually
introduced to show the equidistribution of statistics: if a bijection
sends statistic A to statistic B, then clearly both have the same
distribution. Another aspect of such a bijection is its cycle structure considered as
a permutation on Dyck paths. Apart from involutions, this question is usually
intractable. For example, Donaghey \cite{don80} introduces a
bijection, gets some results on a restricted version, and notes its apparently chaotic behavior in general.
In similar vein, Knuth \cite{acp44} defines a conjugate ($R$)
and transpose ($T$), both involutions, on ordered forests, equivalently on
Dyck paths, and asks when they commute \cite[Ex.\,17,\,7.2.1.6]{acp44}, equivalently, what are the
fixed points of $(RT)^{2}$? This question is still open. (Donaghey's
bijection is equivalent to the composition $RT$.)
In this paper, after reviewing Dyck path terminology (\S2), we
recursively define a new bijection $F$ on Dyck paths (\S 3) and analyze its cycle structure (\S4,\:\S5).
\S 4 treats the restriction of $F$ to paths that avoid
the subpath $DUU$, and involves an encounter with the Pascal matrix mod 2.
\S 5 generalizes to arbitrary paths. This entails an explicit description of $F$ involving
a new manifestation of the Catalan numbers as certain colored
forests in which each vertex is labeled with an integer composition.
We show that each orbit has length a power of 2, find
generating functions for orbit size, and characterize paths with given orbit
size in terms of subpath avoidance. In particular, the
fixed points of $F$ are those Dyck paths that avoid $DUDD$
and $UUP^{+}DD$ where $P^{+}$ denotes a
nonempty Dyck path.
\S 6 uses the bijection $F$ to show the equivalence of two known manifestations
of the Motzkin numbers.
\S7 considers some statistics on the new Catalan manifestation.
\vspace{10mm}
{\Large \textbf{2 \ Dyck Path Terminology} }\quad
A Dyck path, as usual, is a lattice path of upsteps $U=(1,1)$ and
downsteps $D=(1,-1)$, the same number of each, that stays weakly
above the horizontal line joining its initial and terminal points (vertices). A peak is an occurrence of $UD$, a valley is a
$DU$.
\vspace*{-3mm}
\Einheit=0.6cm
\[
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by.30cmightarrow}(-5.2,3)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspace{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspaceparrow}(-2,1.9)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.30cm{ \textrm{{\footnotesize peak upstep}}}(-6.9,3.5)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspace{ \textrm{{\footnotesize valley}}}(-2,1)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspace{ \textrm{{\footnotesize vertex}}}(-2,0.4)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspace{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspaceparrow}(.4,1.5)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspace{ \textrm{{\footnotesize return}}}(.4,.8)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspace{ \textrm{{\footnotesize downstep}}}(.4,0.2)
\SPfad(-7,1),1111\ensuremath{\mathbf e}\xspacendSPfad
\SPfad(1,1),111111\ensuremath{\mathbf e}\xspacendSPfad
\SPfad(-1,1),1\ensuremath{\mathbf e}\xspacendSPfad
\Pfad(-7,1),33344344334344\ensuremath{\mathbf e}\xspacendPfad
\DuennPunkt(-7,1)
\DuennPunkt(-6,2)
\DuennPunkt(-5,3)
\DuennPunkt(-4,4)
\DuennPunkt(-3,3)
\DuennPunkt(-2,2)
\DuennPunkt(-1,3)
\DuennPunkt(0,2)
\DuennPunkt(1,1)
\DuennPunkt(2,2)
\DuennPunkt(3,3)
\DuennPunkt(4,2)
\DuennPunkt(5,3)
\DuennPunkt(6,2)
\DuennPunkt(7,1)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspace{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspaceparrow}(4,1)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspace{ \textrm{{\footnotesize ground level}}}(4,.2)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{ \textrm{\ensuremath{\mathbf S}\xspacemall A Dyck 7-path with 2 components, 2$DUD$s, and height 3}}(0,-2.5)
\]
\vertspace*{1mm}
The size (or semilength) of a Dyck path is its number of upsteps and a
Dyck path of size $n$ is a Dyck $n$-path. The empty Dyck path (of size
0) is denoted $\epsilon$. The number of Dyck $n$-paths is the Catalan
number $C_{n}$, sequence
\htmladdnormallink{A000108}{http://www.research.att.com:80/cgi-bin/access.cgi/as/njas/sequences/eisA.cgi?Anum=A000108}
in
\htmladdnormallink{OEIS}{http://www.research.att.com/~njas/sequences/Seis.html} .
The height of a vertex in a Dyck path is its vertical height above
ground level\ and the height of the path is the maximum height of its vertices.
A return downstep is one that returns the path to ground level. A \emph{primitive}
Dyck path is one with exactly one return (necessarily at the end).
Note that the empty Dyck path $\epsilon$ is not primitive. Its returns split
a nonempty Dyck path into one or more primitive Dyck paths, called its
\emph{components}. Upsteps and downsteps come in matching pairs: travel due east
from an upstep to the first downstep encountered. More precisely,
$D_{0}$ is the matching downstep for upstep $U_{0}$ if $D_{0}$
terminates the shortest Dyck subpath that starts with $U_{0}$.
We use \ensuremath{\mathcal P}\xspace to denote the set of primitive Dyck paths, $\ensuremath{\mathcal P}\xspace_{n}$ for
$n$-paths, $\ensuremath{\mathcal P}\xspace(DUU)$ for those that avoid $DUU$ as a subpath, and
$\ensuremath{\mathcal P}\xspace[DUU]$ for those that contain at least one $DUU$. A path
$UUUDUDDD$, for example, is abbreviated $U^{3}DUD^{3}$.
\vspace{10mm}
{\Large \textbf{3 \ The Bijection} }\quad
Define a size-preserving bijection $F$ on Dyck paths recursively as follows.
First, $F(\epsilon)=\epsilon$ and for a non-primitive Dyck path $P$ with components
$P_{1},P_{2},\ldots,P_{r}\ (r\ge 2)$, $F(P)=F(P_{1})F(P_{2})\ldots
F(P_{r})$ (concatenation). This reduces matters to primitive paths.
From a consideration of the last vertex at height 3 (if any), every primitive Dyck path $P$ has the
form $UQ(UD)^{i}D$ with $i\ge 0$ and $Q$ a Dyck path that
is either empty (in case no vertex is at height 3) or ends $DD$; define $F(P)$ by
\[
F(P)=
\begin{cases}
U^{i}F(R)UDD^{i} & \textrm{if $Q$ is primitive, say $Q=URD$, and} \\
U^{i+1}F(Q)D^{i+1} & \textrm{if $Q$ is not primitive.}
\end{cases}
\]
Schematically,
\vspace*{-5mm}
\Einheit=0.5cm
\[
\Pfad(-15,5),33\ensuremath{\mathbf e}\xspacendPfad
\Pfad(-11,7),434\ensuremath{\mathbf e}\xspacendPfad
\Pfad(-6,6),344\ensuremath{\mathbf e}\xspacendPfad
\Pfad(0,5),3\ensuremath{\mathbf e}\xspacendPfad
\Pfad(5,7),34\ensuremath{\mathbf e}\xspacendPfad
\Pfad(8,6),4\ensuremath{\mathbf e}\xspacendPfad
\SPfad(-8,6),34\ensuremath{\mathbf e}\xspacendSPfad
\SPfad(1,6),3\ensuremath{\mathbf e}\xspacendSPfad
\SPfad(7,7),4\ensuremath{\mathbf e}\xspacendSPfad
\DuennPunkt(-15,5)
\DuennPunkt(-14,6)
\DuennPunkt(-13,7)
\DuennPunkt(-11,7)
\DuennPunkt(-10,6)
\DuennPunkt(-9,7)
\DuennPunkt(-8,6)
\DuennPunkt(-7,7)
\DuennPunkt(-6,6)
\DuennPunkt(-5,7)
\DuennPunkt(-4,6)
\DuennPunkt(-3,5)
\DuennPunkt(0,5)
\DuennPunkt(1,6)
\DuennPunkt(2,7)
\DuennPunkt(5,7)
\DuennPunkt(6,8)
\DuennPunkt(7,7)
\DuennPunkt(8,6)
\DuennPunkt(9,5)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngrightarrow}(-1.5,5.5)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngrightarrow}(-1.5,0.5)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $R$}}}(-12,6.8)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $F(R)$}}}(3.5,6.8)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $Q$ non-primitive;}}}(12,1.5)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $Q=\ensuremath{\mathbf e}\xspacepsilon$ or ends $DD$}}}(12,0.5)
\Pfad(-14,0),3\ensuremath{\mathbf e}\xspacendPfad
\Pfad(-11,1),34\ensuremath{\mathbf e}\xspacendPfad
\Pfad(-7,1),344\ensuremath{\mathbf e}\xspacendPfad
\Pfad(1,0),3\ensuremath{\mathbf e}\xspacendPfad
\Pfad(7,1),4\ensuremath{\mathbf e}\xspacendPfad
\SPfad(-9,1),34\ensuremath{\mathbf e}\xspacendSPfad
\SPfad(2,1),3\ensuremath{\mathbf e}\xspacendSPfad
\SPfad(6,2),4\ensuremath{\mathbf e}\xspacendSPfad
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\nearrow}(1.2,6.2)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf S}\xspacewarrow}(1.2,6.2)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf S}\xspaceearrow}(7.9,6.2)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\nwarrow}(7.9,6.2)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $i$}}}(0.9,6.7)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $i$}}}(8.2,6.7)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.30cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngleftarrow}(-8.7,5.4)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.30cm{\textrm{{\ensuremath{\mathbf S}\xspacemall ---}}}(-7.7,5.4)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by.30cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngrightarrow}(-5.4,5.4)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.30cm{\textrm{{\ensuremath{\mathbf S}\xspacemall ---}}}(-5.1,5.4)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $i$}}}(-7,5.0)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $Q$}}}(-12,0.8)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $F(Q)$}}}(4.5,1.8)
\DuennPunkt(-14,0)
\DuennPunkt(-13,1)
\DuennPunkt(-11,1)
\DuennPunkt(-10,2)
\DuennPunkt(-9,1)
\DuennPunkt(-8,2)
\DuennPunkt(-7,1)
\DuennPunkt(-6,2)
\DuennPunkt(-5,1)
\DuennPunkt(-4,0)
\DuennPunkt(1,0)
\DuennPunkt(2,1)
\DuennPunkt(3,2)
\DuennPunkt(6,2)
\DuennPunkt(7,1)
\DuennPunkt(8,0)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\nearrow}(2.2,1.2)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf S}\xspacewarrow}(2.2,1.2)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf S}\xspaceearrow}(6.9,1.2)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\nwarrow}(6.9,1.2)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $i$}}}(1.9,1.7)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $i$}}}(7.2,1.7)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.30cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngleftarrow}(-9.7,.4)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.30cm{\textrm{{\ensuremath{\mathbf S}\xspacemall ---}}}(-8.7,.4)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by.30cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngrightarrow}(-6.4,.4)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.30cm{\textrm{{\ensuremath{\mathbf S}\xspacemall ---}}}(-6.1,.4)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\textrm{{\ensuremath{\mathbf S}\xspacemall $i$}}}(-8,0)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{ \textrm{\ensuremath{\mathbf S}\xspacemall definition of $F$ on primitive Dyck
paths}}(0,-2.5)
\]
\vspace*{3mm}
Note that $R=\epsilon$ in the top left path duplicates a case of the
bottom left path but no matter: both formulas give the same result.
The map $G$, defined as follows, serves as an inverse of $F$ and
hence $F$ is indeed a bijection.
Again, $G(\epsilon)=\epsilon$ and for a non-primitive Dyck path $P$ with components
$P_{1},P_{2},\ldots,P_{r}\ (r\ge 2)$, $G(P)=G(P_{1})G(P_{2})\ldots
G(P_{r})$. By considering the lowest valley vertex, every primitive
Dyck path has the form $U^{i+1}QD^{i+1}$ with $i\ge 0$ and $Q$ a
non-primitive Dyck path ($Q=\epsilon$ in case valley vertices are absent);
define $G(P)$ by
\[
G(P)=
\begin{cases}
U UG(R)D (UD)^{i}D & \textrm{if $Q$ ends $UD$, say $Q=RUD$, and} \\
UG(Q)(UD)^{i}D & \textrm{otherwise.}
\end{cases}
\]
The bijection $F$ is the identity on Dyck paths of size $\le 3$,
except that it interchanges $U^{3}D^{3}$ and $U^{2}DUD^{2}$. Its
action on primitive Dyck 4-paths is given in the Figure below.
\Einheit=0.4cm
\[
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngrightarrow}(0,21)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngrightarrow}(0,16)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngrightarrow}(0,11)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngrightarrow}(0,6)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngrightarrow}(0,1)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\xdim by-.5 \Einheit \ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.5 \Einheitngrightarrow}(0,26)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{ \textrm{{\ensuremath{\mathbf S}\xspacemall Dyck path $P$}}}(-5,26)
\Label\ensuremath{\mathbf e}\xspacensuremath{\mathbf p}\xspacedvance\ydim by.25cm{ \textrm{{\ensuremath{\mathbf S}\xspacemall image $F(P)$}}}(5,26)
\SPfad(-9,0),11111111\endSPfad
\SPfad(1,0),11111111\endSPfad
\SPfad(-9,5),11111111\endSPfad
\SPfad(1,5),11111111\endSPfad
\SPfad(-9,10),11111111\endSPfad
\SPfad(1,10),11111111\endSPfad
\SPfad(-9,15),11111111\endSPfad
\SPfad(1,15),11111111\endSPfad
\SPfad(-9,20),11111111\endSPfad
\SPfad(1,20),11111111\endSPfad
\Pfad(-9,0),33434344\endPfad
\Pfad(1,0),33334444\endPfad
\Pfad(-9,5),33433444\endPfad
\Pfad(1,5),33433444\endPfad
\Pfad(-9,10),33344344\endPfad
\Pfad(1,10),33343444\endPfad
\Pfad(-9,15),33343444\endPfad
\Pfad(1,15),33434344\endPfad
\Pfad(-9,20),33334444\endPfad
\Pfad(1,20),33344344\endPfad
\DuennPunkt(-9,0)
\DuennPunkt(-8,1)
\DuennPunkt(-7,2)
\DuennPunkt(-6,1)
\DuennPunkt(-5,2)
\DuennPunkt(-4,1)
\DuennPunkt(-3,2)
\DuennPunkt(-2,1)
\DuennPunkt(-1,0)
\DuennPunkt(1,0)
\DuennPunkt(2,1)
\DuennPunkt(3,2)
\DuennPunkt(4,3)
\DuennPunkt(5,4)
\DuennPunkt(6,3)
\DuennPunkt(7,2)
\DuennPunkt(8,1)
\DuennPunkt(9,0)
\DuennPunkt(-9,5)
\DuennPunkt(-8,6)
\DuennPunkt(-7,7)
\DuennPunkt(-6,6)
\DuennPunkt(-5,7)
\DuennPunkt(-4,8)
\DuennPunkt(-3,7)
\DuennPunkt(-2,6)
\DuennPunkt(-1,5)
\DuennPunkt(1,5)
\DuennPunkt(2,6)
\DuennPunkt(3,7)
\DuennPunkt(4,6)
\DuennPunkt(5,7)
\DuennPunkt(6,8)
\DuennPunkt(7,7)
\DuennPunkt(8,6)
\DuennPunkt(9,5)
\DuennPunkt(-9,10)
\DuennPunkt(-8,11)
\DuennPunkt(-7,12)
\DuennPunkt(-6,13)
\DuennPunkt(-5,12)
\DuennPunkt(-4,11)
\DuennPunkt(-3,12)
\DuennPunkt(-2,11)
\DuennPunkt(-1,10)
\DuennPunkt(1,10)
\DuennPunkt(2,11)
\DuennPunkt(3,12)
\DuennPunkt(4,13)
\DuennPunkt(5,12)
\DuennPunkt(6,13)
\DuennPunkt(7,12)
\DuennPunkt(8,11)
\DuennPunkt(9,10)
\DuennPunkt(-9,15)
\DuennPunkt(-8,16)
\DuennPunkt(-7,17)
\DuennPunkt(-6,18)
\DuennPunkt(-5,17)
\DuennPunkt(-4,18)
\DuennPunkt(-3,17)
\DuennPunkt(-2,16)
\DuennPunkt(-1,15)
\DuennPunkt(1,15)
\DuennPunkt(2,16)
\DuennPunkt(3,17)
\DuennPunkt(4,16)
\DuennPunkt(5,17)
\DuennPunkt(6,16)
\DuennPunkt(7,17)
\DuennPunkt(8,16)
\DuennPunkt(9,15)
\DuennPunkt(-9,20)
\DuennPunkt(-8,21)
\DuennPunkt(-7,22)
\DuennPunkt(-6,23)
\DuennPunkt(-5,24)
\DuennPunkt(-4,23)
\DuennPunkt(-3,22)
\DuennPunkt(-2,21)
\DuennPunkt(-1,20)
\DuennPunkt(1,20)
\DuennPunkt(2,21)
\DuennPunkt(3,22)
\DuennPunkt(4,23)
\DuennPunkt(5,22)
\DuennPunkt(6,21)
\DuennPunkt(7,22)
\DuennPunkt(8,21)
\DuennPunkt(9,20)
\Label\o{ \textrm{\small action of $F$ on primitive Dyck 4-paths}}(0,-4)
\]
\vspace*{10mm}
{\Large \textbf{4 \ Restriction to \emph{DUU}-avoiding Paths} }\quad
To analyze the structure of $F$ a key property, clear by induction,
is that it preserves $\#\,DUU$s, in particular, it
preserves the property ``path avoids $DUU$''. A
$DUU$-avoiding Dyck $n$-path corresponds to a composition
$\mathbf{c}=(c_{1},c_{2},\ldots,c_{h})$ of $n$ via $c_{i}=$ number of $D$s
ending at height $h-i,\ i=1,2,\ldots,h$ where $h$ is the height of
the path:
\Einheit=0.6cm
\[
\SPfad(-10,0),111111111111111111\endSPfad
\SPfad(0,3),11111111111\endSPfad
\SPfad(5,2),111111\endSPfad
\SPfad(8,1),111\endSPfad
\Pfad(-10,0),333343434443434434\endPfad
\DuennPunkt(-10,0)
\DuennPunkt(-9,1)
\DuennPunkt(-8,2)
\DuennPunkt(-7,3)
\DuennPunkt(-6,4)
\DuennPunkt(-5,3)
\DuennPunkt(-4,4)
\DuennPunkt(-3,3)
\DuennPunkt(-2,4)
\DuennPunkt(-1,3)
\DuennPunkt(0,2)
\DuennPunkt(1,1)
\DuennPunkt(2,2)
\DuennPunkt(3,1)
\DuennPunkt(4,2)
\DuennPunkt(5,1)
\DuennPunkt(6,0)
\DuennPunkt(7,1)
\DuennPunkt(8,0)
\Label\o{ \textrm{\small $DUU$-avoiding path $P$}}(-1,5)
\Label\o{ \textrm{\small \# $D$s at each level}}(10,5)
\Label\o{ \textrm{\small 3}}(10,3)
\Label\o{ \textrm{\small 1}}(10,2)
\Label\o{ \textrm{\small 3}}(10,1)
\Label\o{ \textrm{\small 2}}(10,0)
\Label\o{ \textrm{\small $DUU$-avoiding path $P\quad \leftrightarrow\quad$
composition $(3,1,3,2)$}}(2,-2)
\]
\vspace*{2mm}
Under this correspondence, $F$ acts on compositions of $n$\,: $F$ is the
identity on compositions of length 1, and for $\mathbf{c}=(c_{i})_{i=1}^{r}$
with $r\ge 2,\ F(\mathbf{c})$ is the concatenation of
$IncrementLast\big(F(c_{1},\ldots,c_{r-2})\big),\,1^{c_{r-1}-1},\,c_{r}$
where $IncrementLast$ means ``add 1 to the last entry'' and the
superscript refers to repetition. In fact, $F$ can be described
explicitly on compositions of $n$:
\begin{prop}
For a composition $\mathbf{c}$ of $n$, $F(\mathbf{c})$ is given by the following
algorithm. For each entry $c$ in even position measured from
the end $($so the last entry is in position $1)$, replace it by
$c-1\ 1$s and increment its left neighbor.
\label{X}
\end{prop}
For example,
$4\,2\,1\,5\,2\,3 = \overset{6}{4}\,\overset{5}{2}\,\overset{4}{1}\,\overset{3}{5}\,
\overset{2}{2}\,\overset{1}{3} \rightarrow 1\ 1^{3}\ 3 \ 1^{0}\ 6 \
1^{1}\ 3 = 1^{4}\,3\,6\,1\,3$. \qed
Primitive $DUU$-avoiding Dyck $n$-paths correspond to compositions of $n$ that end
with a 1. Let $\mathcal{C}_{n}$ denote the set of such compositions. Thus
$\vert \mathcal{C}_{1} \vert = 1$ and for $n\ge 2$, \
$\vert \mathcal{C}_{n} \vert = 2^{n-2}$ since there are $2^{n-2}$ compositions of
$n-1$.
Denote the length of a composition $\mathbf{c}$ by $\#\mathbf{c}$. The \emph{size} of $\mathbf{c}$ is
the sum of its entries. The \emph{parity} of $\mathbf{c}$ is the parity (even/odd)
of $\#\mathbf{c}$. There are two operations on nonempty compositions
that increment (that is, increase by 1) the size: $P=$ prepend 1,
and $I=$ increment first entry. For example, for $\mathbf{c}=(4,1,1)$ we
have size$(\mathbf{c})$ = 6, $\ \#\mathbf{c}=3,$ the parity of $\mathbf{c}$ is odd, $P(\mathbf{c})=(1,4,1,1),\ I(\mathbf{c})=(5,1,1)$.
\begin{lemma}
\label{A}
$P$ changes the parity of a composition while $I$ preserves it. \qed
\end{lemma}
We'll call $P$ and $I$ \emph{augmentation operators} on $\mathcal{C}_{n}$ and for
$A$ an augmentation operator, $A'$ denotes the other one.
\begin{lemma}
Let $A$ be an augmentation
operator. On a composition $\mathbf{c}$ with $\#\mathbf{c} \ge 2$,
$A \circ F = F \circ A$ if $\,\#\mathbf{c}$ is odd and $A \circ F =
F \circ A'$ if $\,\#\mathbf{c}$ is even.
\label{B}
\end{lemma}
This follows from Proposition \ref{X}. \qed
Using Lemma \ref{B}, an $F$-orbit $(\mathbf{c}_{1},\ldots,\mathbf{c}_{m})$ in
$\mathcal{C}_{n}$ together with an augmentation operator $A_{1} \in\{P,I\}$
yields part of an $F$-orbit in $\mathcal{C}_{n+1}$ via a ``commutative diagram''
as shown:
\[
\begin{CD}
\mathbf{c}_{1} @>F>> \mathbf{c}_{2} @>F>> \ldots @>F>> \mathbf{c}_{i} @>F>> \mathbf{c}_{i+1} @>F>>
\ldots @>F>>\mathbf{c}_{m} @>F>> \mathbf{c}_{1} \\
@VVA_{1}V @VVA_{2}V @. @VVA_{i}V @VVA_{i+1}V @.
@VVA_{m}V @VVA_{m+1}V \\
\mathbf{d}_{1} @>F>> \mathbf{d}_{2} @>F>> \ldots @>F>> \mathbf{d}_{i} @>F>> \mathbf{d}_{i+1} @>F>>
\ldots @>F>>\mathbf{d}_{m} @>F>> \mathbf{d}_{m+1}
\end{CD}
\]
Let $B(\mathbf{c}_{1},A_{1})$ denote the sequence of compositions $(\mathbf{d}_{1},\ldots,\mathbf{d}_{m})$
thus produced. By Lemma \ref{B}, $A_{i+1}= A_{i}$ or $A_{i}'$
according as $\,\#\mathbf{c}_{i}$ is odd or even ($1\le i \le m$). Hence, if the orbit of
$\mathbf{c}_{1}$ contains an even number of compositions of even parity,
then $A_{m+1}=A_{1}$ and so $\mathbf{d}_{m+1}=\mathbf{d}_{1}$ and $B(\mathbf{c}_{1},A_{1})$
is a complete $F$-orbit in $\mathcal{C}_{n+1}$ for each of $A_{1}=P$ and
$A_{1}=I$. On the other hand, if the orbit of
$\mathbf{c}_{1}$ contains an odd number of compositions of even parity,
then $A_{m+1}=A_{1}'$ and the commutative diagram will extend for
another $m$ squares before completing an orbit in $\mathcal{C}_{n+1}$, consisting of the
concatenation of $B(\mathbf{c}_{1},P)$ and $B(\mathbf{c}_{1},I)$, denoted
$B(\mathbf{c}_{1},P,I)$. In the former case orbit size is preserved; in the
latter it is doubled.
Our goal here is to generate $F$-orbits recursively and to get
induction going, we now need to investigate the parities of the compositions
comprising these ``bumped-up'' orbits $B(\mathbf{c},A)$ and
$B(\mathbf{c},P,I)$.
A bit sequence is a sequence of 0s and 1s. \textbf{In the sequel all
operations on bit sequences are modulo 2}. Let $\mathbf{S}$ denote the partial
sum operator on bit sequences: $\mathbf{S}\big( (\epsilon_{1},\epsilon_{2},\ldots,\epsilon_{m})
\big) =(\epsilon_{1},\epsilon_{1}+\epsilon_{2},\ldots,\epsilon_{1}+\epsilon_{2}+\ldots+\epsilon_{m})$.
Let $\mathbf{e}_{m}$ denote the all 1s bit sequence of length $m$ and let
$\mathbf{e}$ denote the infinite sequence of 1s.
Thus $\mathbf{S}\mathbf{e}=(1,0,1,0,1,\ldots)$.
Let $P$ denote the infinite matrix whose $i$th row ($i\ge 0$) is
$\mathbf{S}^{i}\mathbf{e}$ ($\mathbf{S}^{i}$ denotes the $i$-fold composition of $\mathbf{S}$). The $(i,j)$ entry $p_{ij}$ of $P$ satisfies $p_{ij}=p_{i-1,j}+p_{i,j-1}$
and hence $P$ is the symmetric Pascal matrix mod 2 with $(i,j)$
entry =$\:\binom{i+j}{i}$ mod 2. The following lemma will be crucial.
\begin{lemma}
Fix $k\ge 1$ and let $P_{k}$ denote the $2^{k}\times 2^{k}$ upper
left submatrix of $P$. Then the sum modulo $2$ of row $i$ in $P_{k}$ is $0$
for $0\le i < 2^{k}-1$ and is $1$ for $i=2^{k}-1$.
\label{P}
\end{lemma}
\textbf{Proof} \quad The sum of row $i$ in $P_{k}$ is, modulo 2,
\[
\sum_{j=0}^{2^{k}-1} p_{ij} =
\sum_{j=0}^{2^{k}-1}\binom{i+j}{i}=\binom{i+2^{k}}{i+1}=\binom{i+2^{k}}{i+1,2^{k}-1}
\]
and for $i<2^{k}-1$ there is clearly at least one carry in the
addition of $i+1$ and $2^{k}-1$ in base 2 so that, by Kummer's well
known criterion, $2\,\vert\,\binom{i+2^{k}}{i+1,2^{k}-1}$ and the sum of row $i$
is 0 (mod 2). On the other hand, for $i=2^{k}-1$ there are no
carries, so $2\nmid \binom{i+2^{k}}{i+1,2^{k}-1}$ and the sum of row $i$ is 1 (mod 2). \qed
Now let $p(\mathbf{c})$ denote the mod-2 parity of a composition $\mathbf{c}:\ p(\mathbf{c})=1$
if $\,\#\mathbf{c}$ is odd, $=0$ if $\,\#\mathbf{c}$ is even. For purposes of addition
mod 2, represent the augmentation operators $P$ and $I$ by 0 and 1
respectively so that, for example, $p(A(\mathbf{c}))=p(\mathbf{c})+A+1$ for $A=P$ or
$I$ by Lemma \ref{A}. Then
the parity of $\mathbf{d}_{i+1}$ above can be obtained from the following
commutative diagram (all addition modulo 2)
\[
\begin{CD}
\qquad p(\mathbf{c}_{i})\qquad @>>> p(\mathbf{c}_{i+1}) \\
@VVAV @VVp(\mathbf{c}_{i})+A+1V \\
\qquad \ldots\qquad @>>> p(\mathbf{c}_{i+1})+p(\mathbf{c}_{i})+A
\end{CD}
\]
This leads to
\begin{lemma}
Let $p_{i}$ denote the parity of $\mathbf{c}_{i}$ so that
$\mathbf{p}=(p_{i})_{i=1}^{m}$ is the parity vector for the $F$-orbit
$(\mathbf{c}_{i})_{i=1}^{m}$ of the composition $\mathbf{c}_{1}$. Then the parity
vector for $B(\mathbf{c},A)$ is
\[
\mathbf{S}\mathbf{p}+\mathbf{S}\mathbf{e}_{m}+(A+1)\mathbf{e}_{m}.
\]\qed
\end{lemma}
Now we are ready to prove the main result of this section concerning
the orbits of $F$ on primitive $DUU$-avoiding Dyck $n$-paths identified with
the set $\mathcal{C}_{n}$ of compositions of $n$ that end with a 1. The parity
of an orbit is the sum mod 2 of the parities of the compositions comprising
the orbit, in other words, the parity of the total number of entries
in all the compositions.
\begin{theorem}
For each $n\ge 1$,
\begin{itemize}
\item[$($i\,$)$] all $F$-orbits on $\mathcal{C}_{n}$ have the same
length and this length is a power of $2$.
\item[$($ii\,$)$] all $F$-orbits on $\mathcal{C}_{n}$ have the same parity.
\item[$($iii\,$)$] the powers in $($i\,$)$ and the parities
in $($ii\,$)$ are given as
follows:
For $n=1$, the power $($i.e.\ the exponent$)$ is $0$ and the parity
is $1$.
For $n=2$, the power and parity are both $0$.
As $n$ increases from $2$, the powers remain unchanged and the parity
stays $0$ except that when $n$ hits a number of the form $2^{k}+1$, the
parity becomes $1$, and at the next number, $2^{k}+2$, the power
increases by $1$ and the parity reverts to $0$.
\end{itemize}
\end{theorem}
\textbf{Proof}\quad We consider orbits generated by the augmentation
operators $P$ and $I$. No orbits are missed because all compositions,
in particular those
ending 1, can be generated from the unique composition of 1 by
successive application of $P$ and $I$. The base cases $n=1,2,3$ are
clear from the orbits $(1)\to (1),\ (1,1)\to (1,1),\ (2,1) \to
(1,1,1)\to (2,1)$. To establish the induction step, suppose given an
orbit, orb$(\mathbf{c})$, in $\mathcal{C}_{2^{k}+1}\ (k\ge 1)$ with parity vector
$\mathbf{p}=(a_{i})_{i=1}^{2^{k}}$ and (total) parity 1. Then the next orbit
$B(\mathbf{c},P,I)$ has parity vector
\[
\mathbf{p}_{1}=(\mathbf{S}\, \mathbf{p},\mathbf{S}\, \mathbf{p}+\mathbf{e}_{2^{k}})+\mathbf{S}\,\mathbf{e}_{2^{k+1}}
\]
with parity ($\mathbf{S}\,\mathbf{p}$'s cancel out) $\underbrace{1+1+\ldots+1}_{2^{k}}+\underbrace{1+0+1+0+\ldots+1+0}_{2^{k+1}}=0$ for $k\ge 1$.
Successively ``bump up'' this orbit using $A=\epsilon_{1},\epsilon_{2},\ldots,$
in turn until the parity hits 1 again.
With Sum$(\mathbf{v})$ denoting the sum
of the entries in $\mathbf{v}$, the successive parity
vectors $\mathbf{p}_{1},\mathbf{p}_{2},\ldots$ are given by
\begin{multline*}
\mathbf{p}_{i}=\big(\mathbf{S}^{i}\mathbf{p},\mathbf{S}^{i}\mathbf{p}+\sum_{j=1}^{i-2}\textrm{Sum}(\mathbf{S}^{j}\mathbf{p})\mathbf{S}^{i-1-j}\mathbf{e}_{2^{k}} + \mathbf{S}^{i-1}\mathbf{e}_{2^{k}}\big) + \\
\mathbf{S}^{i}\mathbf{e}_{2^{k+1}} + \mathbf{S}^{i-1}\mathbf{e}_{2^{k+1}}
+\sum_{j=1}^{i-2}\epsilon_{j}\mathbf{S}^{i-1-j}\mathbf{e}_{2^{k+1}} + (\epsilon_{i-1}+1)\mathbf{e}_{2^{k+1}}.
\end{multline*}
Applying Lemma \ref{P} we see that, independent of the $\epsilon_{i}$'s, $\mathbf{p}_{i}$ has sum 0 for
$i<2^{k}-1$ and sum 1 for $i=2^{k}-1$. This establishes the
induction step in the theorem. \qed
\begin{cor}
For $n\ge 2$, the length of each $F$-orbit in $\mathcal{P}_{n}(DUU)$ is $2^{k}$ where $k$
is the number of bits in the base-$2$ expansion of $n-2$.
\label{base2}
\end{cor}
\textbf{Proof}\quad This is just a restatement of part of the
preceding Theorem. \qed
\vspace*{10mm}
{\Large \textbf{5 \ The Orbits of $\mathbf{F}$} }\quad
The preceding section analyzed $F$ on $\mathcal{P}(DUU)$, paths avoiding
$DUU$. Now we consider $F$ on $\mathcal{P}[DUU]$, the primitive Dyck paths containing a
$DUU$. Every $P \in \mathcal{P}[DUU]$ has the form $AQB$ where
\begin{itemize}
\item[(i)] $A$ consists of one or more $U$s
\item[(ii)] $C:=AB \in \mathcal{P}(DUU)$
\item[(iii)] $Q \notin \mathcal{P} $ and $Q$ ends $DD$ (and hence $Q$ contains
a $DUU$ at ground level).
\end{itemize}
To see this, locate the rightmost of the lowest $DUU$s in $P$, say at
height $h$. Then $A=U^{h},\ Q$ starts at step number $h+1$ and
extends through the matching downstep of the middle $U$ in this
rightmost lowest $DUU$, and $B$ consists of the rest of the path.
\Einheit=0.4cm
\[
\Pfad(-15,0),3\endPfad
\Pfad(-13,2),33\endPfad
\Pfad(-8,4),433\endPfad
\Pfad(-2,5),44\endPfad
\Pfad(6,3),4\endPfad
\Pfad(14,1),4\endPfad
\SPfad(-14,1),3\endSPfad
\SPfad(-11,4),413\endSPfad
\SPfad(-5,5),413\endSPfad
\SPfad(13,2),4\endSPfad
\DuennPunkt(-15,0)
\DuennPunkt(-14,1)
\DuennPunkt(-13,2)
\DuennPunkt(-12,3)
\DuennPunkt(-11,4)
\DuennPunkt(-8,4)
\DuennPunkt(-7,3)
\DuennPunkt(-6,4)
\DuennPunkt(-5,5)
\DuennPunkt(-2,5)
\DuennPunkt(-1,4)
\DuennPunkt(0,3)
\DuennPunkt(6,3)
\DuennPunkt(7,2)
\DuennPunkt(13,2)
\DuennPunkt(14,1)
\DuennPunkt(15,0)
\textcolor{red} {
\DuennPunkt(1,4)
\DuennPunkt(2,3)
\DuennPunkt(4,3)
\DuennPunkt(5,4)
\DuennPunkt(8,3)
\DuennPunkt(9,2)
\DuennPunkt(11,2)
\DuennPunkt(12,3)
\Pfad(0,3),34\endPfad
\Pfad(4,3),34\endPfad
\Pfad(7,2),34\endPfad
\Pfad(11,2),34\endPfad
\SPfad(2,3),34\endSPfad
\SPfad(9,2),34\endSPfad
}
\textcolor{blue} {
\Pfad(-15,0),111111111111111111111111111111\endPfad
\Pfad(-15,0),2222222\endPfad
\Pfad(-12,0),2222222\endPfad
\Pfad(0,0),2222222\endPfad
\Pfad(15,0),2222222\endPfad
}
\Label\o{\uparrow}(-7,1.7)
\Label\o{\textrm{{\footnotesize $h$}}}(-7,0.8)
\Label\u{\downarrow}(-7,1.2)
\Label\o{\textrm{{\footnotesize $A$}}}(-13.5,6)
\Label\o{\textrm{{\footnotesize $\leftarrow$ matching
$\rightarrow$}}}(-3.5,2.7)
\Label\o{\textrm{{\footnotesize $Q$}}}(-6,6)
\Label\o{\textrm{{\footnotesize $B$}}}(7,6)
\Label\o{\textrm{{\footnotesize red $UD$s may be absent}}}(7,4.5)
\Label\o{\textrm{{\small The $AQB$ decomposition of a path containing a $DUU$}}}(0,-3)
\]
\vspace*{2mm}
Call the path $AB$ the ($DUU$-avoiding) \emph{skeleton} of $P$ and
$Q$ the ($DUU$-containing) \emph{body} of $P$. In case $P\in\mathcal{P}(DUU)$, its skeleton
is itself and its body is empty. If the skeleton of $P$ is $UD$,
then $P$ is uniquely determined by its skeleton and body. On the
other hand, a
skeleton of size $\ge 2$ and a nonempty body determine precisely two
paths $P$ in $\mathcal{P}[DUU]$, obtained by inserting the body at either the
top or the bottom of the first peak upstep in the skeleton, as
illustrated.
\Einheit=0.4cm
\[
\Pfad(-17,0),3334344344\endPfad
\Pfad(-5,0),33\endPfad
\Pfad(-2,2),34344344\endPfad
\Pfad(7,0),333\endPfad
\Pfad(11,3),4344344\endPfad
\SPfad(-17,0),1111111111\endSPfad
\SPfad(-5,0),11111111111\endSPfad
\SPfad(7,0),11111111111\endSPfad
\DuennPunkt(-17,0)
\DuennPunkt(-16,1)
\DuennPunkt(-15,2)
\DuennPunkt(-14,3)
\DuennPunkt(-13,2)
\DuennPunkt(-12,3)
\DuennPunkt(-11,2)
\DuennPunkt(-10,1)
\DuennPunkt(-9,2)
\DuennPunkt(-8,1)
\DuennPunkt(-7,0)
\DuennPunkt(-5,0)
\DuennPunkt(-4,1)
\DuennPunkt(-3,2)
\DuennPunkt(-2,2)
\DuennPunkt(-1,3)
\DuennPunkt(0,2)
\DuennPunkt(1,3)
\DuennPunkt(2,2)
\DuennPunkt(3,1)
\DuennPunkt(4,2)
\DuennPunkt(5,1)
\DuennPunkt(6,0)
\DuennPunkt(7,0)
\DuennPunkt(8,1)
\DuennPunkt(9,2)
\DuennPunkt(10,3)
\DuennPunkt(11,3)
\DuennPunkt(12,2)
\DuennPunkt(13,3)
\DuennPunkt(14,2)
\DuennPunkt(15,1)
\DuennPunkt(16,2)
\DuennPunkt(17,1)
\DuennPunkt(18,0)
\Label\o{\textrm{{\footnotesize $S$}}}(-12,-2)
\Label\o{\textrm{{\footnotesize two possible $P$s}}}(6.5,-2)
\Label\o{\textrm{{\footnotesize $B$}}}(-2.5,1.8)
\Label\o{\textrm{{\footnotesize $B$}}}(10.5,2.8)
\Label\o{\textrm{{\small Recapturing a path $P\in \mathcal{P}[DUU]$ from a skeleton
$S$ and body $B$}}}(0,-4)
\]
\vspace*{2mm}
Thus paths in $\mathcal{P}[DUU]$ correspond bijectively to triples $(S,B,pos)$
where $S\in\mathcal{P}(DUU)$ is the skeleton, $B\ne \epsilon$ is the body, and $pos
=top$ or $bot$ according as $B$ is positioned at the top or bottom
of the first peak upstep in $S$, with the proviso that $pos=top$ if
$S=UD$.
In these terms, $F$ can be specified on $\mathcal{P}[DUU]$ as follows.
\begin{prop}
\[
F\big( (S,B,pos)\big)=
\begin{cases}
(F(S),F(B),\:pos\,) \textrm{ if height$(S)$ is odd, and} \\
(F(S),F(B),\:pos'\,) \textrm{ if height$(S)$ is even.}
\end{cases}
\]
\end{prop}
\textbf{Proof}\quad Let $h(P)$ denote the height of the terminal point
of the lowest $DUU$ in $P\in \ensuremath{\mathcal P}\xspace[DUU]$. The result clearly holds for
$h(P)=1$. If $h(P)\ge 2$, then $P$ has the form
$U^{2}Q(UD)^{a}D(UD)^{b}D$ with $a,b\ge 0$ and $Q$ a Dyck path that
ends $DD$. So $F(P)=U^{b+1}F(Q)(UD)^{a+1}D^{b+1}$ and $h(Q)=h(P)-2$.
These two facts are the basis for a proof by induction that begins as
follows. If $h(Q)=0$, then the body of $F(P)$ has position = bottom,
while the body of $P$ has position bottom or top according as $a\ge
1$ or $a=0$. In the former case, the skeleton of $P$ has height 3 and
position has been preserved, in the latter height 2 and position has
been reversed. \qed
Iterating the skeleton-body-position decomposition on each component,
a Dyck path has a forest representation as illustrated below. Each vertex
represents a skeleton and is labeled with the corresponding composition.
When needed, a color ($top$ or $bot$) is also applied to a vertex to
capture the position of that skeleton's body.
\Einheit=0.4cm
\[
\Pfad(-16,0),33344334443433343433444334344344\endPfad
\SPfad(-16,0),11111111111111111111111111111111\endSPfad
\DuennPunkt(-16,0)
\DuennPunkt(-15,1)
\DuennPunkt(-14,2)
\DuennPunkt(-13,3)
\DuennPunkt(-12,2)
\DuennPunkt(-11,1)
\DuennPunkt(-10,2)
\DuennPunkt(-9,3)
\DuennPunkt(-8,2)
\DuennPunkt(-7,1)
\DuennPunkt(-6,0)
\DuennPunkt(-5,1)
\DuennPunkt(-4,0)
\DuennPunkt(-3,1)
\DuennPunkt(-2,2)
\DuennPunkt(-1,3)
\DuennPunkt(0,2)
\DuennPunkt(1,3)
\DuennPunkt(2,2)
\DuennPunkt(3,3)
\DuennPunkt(4,4)
\DuennPunkt(5,3)
\DuennPunkt(6,2)
\DuennPunkt(7,1)
\DuennPunkt(8,2)
\DuennPunkt(9,3)
\DuennPunkt(10,2)
\DuennPunkt(11,3)
\DuennPunkt(12,2)
\DuennPunkt(13,1)
\DuennPunkt(14,2)
\DuennPunkt(15,1)
\DuennPunkt(16,0)
\]
\begin{center}
\begin{pspicture}(-6,-1.4)(6,3)
\psline(-4,1)(-3,0)(-2,1)
\psline(1,2)(2,1)(3,2)
\psline(2,2)(2,1)(3,0)(4,1)
\rput(-3,0){$\bullet$}
\rput(0,0){$\bullet$}
\rput(3,0){$\bullet$}
\psdots(-4,1)(-2,1)(2,1)(4,1)(1,2)(2,2)(3,2)
\rput(-4.2,1.2){\textrm{{\footnotesize 11}}}
\rput(-1.8,1.2){\textrm{{\footnotesize 11}}}
\rput(2.5,1){\textrm{{\footnotesize 1}}}
\rput(0.9,2.2){\textrm{{\footnotesize 1}}}
\rput(2,2.3){\textrm{{\footnotesize 1}}}
\rput(3.1,2.2){\textrm{{\footnotesize 11}}}
\rput(4.1,1.2){\textrm{{\footnotesize 21}}}
\rput(0,-.3){\textrm{{\footnotesize 1}}}
\rput(-3,-.3){\textrm{{\footnotesize 1}}}
\rput(3,-.3){\textrm{{\footnotesize \quad 11, bot}}}
\rput(0,-1.3){\textrm{{\small A Dyck path and corresponding LCO forest}}}
\end{pspicture}
\end{center}
The 3 trees in the forest correspond to the 3 components of the Dyck
path. The skeleton of the first component is $UD$ and its body has 2
identical components, each consisting of a skeleton alone, yielding
the leftmost tree. The skeleton of the third component is $UUDD$ and
its body is positioned at the bottom of its first peak upstep, and so
on.
Call this forest the LCO (labeled, colored, ordered) forest
corresponding to the Dyck path. Here is the precise definition.
\begin{defn}
An LCO forest is a labeled, colored, ordered forest such that
\begin{itemize}
\item the underlying forest consists of a list of ordered trees
(a tree may consist of a root only)
\item no vertex has outdegree $1$ $($i.e., exactly one child\,$)$
\item each vertex is labeled with a composition that ends $1$
\item each vertex possessing children and labeled with a composition
of size $\ge 2$ is also colored $top$ or $bot$
\item for each leaf $($i.e.\ vertex with a parent but no child\,$)$ that
is the rightmost child of its parent, its label composition has
size $\ge 2$.
\end{itemize}
\end{defn}
The \emph{size} of an LCO forest is the sum of the sizes of its label compositions.
The correspondence Dyck path $\leftrightarrow$ LCO forest
preserves size, and primitive Dyck paths correspond to one-tree forests.
Thus we have
\begin{prop}
The number of LCO forests of size $n$ is the Catalan number
$C_{n}$, as is the number of one-tree LCO forests of size $n+1$. \qed
\end{prop}
The $C_{4}=14$ one-tree LCO forests corresponding to primitive
Dyck 5-paths are shown, partitioned into $F$-orbits.
\Einheit=0.5cm
\[
\Label\o{\rightarrow}(-13,5)
\Label\o{\rightarrow}(-9,5)
\Label\o{\rightarrow}(-5,5)
\Label\u{ \textrm{{\footnotesize $1^{5}$}}}(-15,5)
\Label\u{\textrm{{\footnotesize 221}}}(-11,5)
\Label\u{\textrm{{\footnotesize 311}}}(-7,5)
\Label\u{\textrm{{\footnotesize 41}}}(-3,5)
\NormalPunkt(-15,5)
\NormalPunkt(-11,5)
\NormalPunkt(-7,5)
\NormalPunkt(-3,5)
\Label\o{\rightarrow}(13,5)
\Label\o{\rightarrow}(9,5)
\Label\o{\rightarrow}(5,5)
\Label\u{\textrm{{\footnotesize 1211}} }(15,5)
\Label\u{\textrm{{\footnotesize 1121}} }(11,5)
\Label\u{\textrm{{\footnotesize 2111}} }(7,5)
\Label\u{\textrm{{\footnotesize 131}} }(3,5)
\NormalPunkt(15,5)
\NormalPunkt(11,5)
\NormalPunkt(7,5)
\NormalPunkt(3,5)
\Label\o{\rightarrow}(-12,0)
\Label\o{\rightarrow}(-0,0)
\Label\u{ \textrm{{\footnotesize 11,\ bot}}}(-15,0)
\Label\u{ \textrm{{\footnotesize 11,\ top}}}(-9,0)
\Label\o{\textrm{{\footnotesize 1}} }(-16,1)
\Label\o{\textrm{{\footnotesize 11}} }(-14,1)
\Label\o{\textrm{{\footnotesize 1}} }(-10,1)
\Label\o{\textrm{{\footnotesize 11}} }(-8,1)
\Label\o{\textrm{{\footnotesize 1}} }(-4,1)
\Label\o{\textrm{{\footnotesize 21}} }(-2,1)
\Label\o{\textrm{{\footnotesize 1}} }(2,1)
\Label\o{\textrm{{\footnotesize 111}} }(4,1)
\Label\o{\textrm{{\footnotesize 1}} }(8,1)
\Label\o{\textrm{{\footnotesize 1}} }(9,1)
\Label\o{\textrm{{\footnotesize 11}} }(10,1)
\Label\o{\textrm{{\footnotesize 11}} }(16,1)
\Label\o{\textrm{{\footnotesize 11}} }(14,1)
\Label\u{\textrm{{\footnotesize 1}}}(-3,0)
\Label\u{\textrm{{\footnotesize 1}}}(3,0)
\Label\u{\textrm{{\footnotesize 1}}}(9,0)
\Label\u{\textrm{{\footnotesize 1}}}(15,0)
\red{\Pfad(0,3),2222\endPfad
\Pfad(-6,-1),2222\endPfad
\Pfad(6,-1),2222\endPfad
\Pfad(12,-1),2222\endPfad}
\Pfad(-16,1),4\endPfad
\Pfad(-15,0),3\endPfad
\Pfad(-10,1),4\endPfad
\Pfad(-9,0),3\endPfad
\Pfad(-4,1),4\endPfad
\Pfad(-3,0),3\endPfad
\Pfad(2,1),4\endPfad
\Pfad(3,0),3\endPfad
\Pfad(8,1),4\endPfad
\Pfad(9,0),3\endPfad
\Pfad(9,0),2\endPfad
\Pfad(14,1),4\endPfad
\Pfad(15,0),3\endPfad
\DuennPunkt(-16,1)
\NormalPunkt(-15,0)
\DuennPunkt(-14,1)
\DuennPunkt(-10,1)
\DuennPunkt(-9,0)
\DuennPunkt(-8,1)
\DuennPunkt(-4,1)
\NormalPunkt(-3,0)
\DuennPunkt(-2,1)
\DuennPunkt(2,1)
\NormalPunkt(3,0)
\DuennPunkt(4,1)
\DuennPunkt(8,1)
\DuennPunkt(9,1)
\NormalPunkt(9,0)
\DuennPunkt(10,1)
\DuennPunkt(14,1)
\DuennPunkt(16,1)
\NormalPunkt(15,0)
\Label\u{\textrm{{\small The LCO one-tree forests of size 5, partitioned into
$F$-orbits}} }(0,-2)
\]
\vspace*{1mm}
We can now give an explicit description of $F$ on Dyck paths
identified with LCO forests. On an LCO forest, $F$ acts as follows:
\begin{itemize}
\item the underlying list of ordered trees is preserved
\item each label $\mathbf{c}$ becomes $F(\mathbf{c})$ as defined in Prop.~\ref{X}
\item each color ($top$/$bot$) is preserved or switched according
as the associated label $\mathbf{c}$ has odd or even length.
\end{itemize}
From this description and Cor.~\ref{base2}, the size of the $F$-orbit of a Dyck path $P$
can be determined as follows. In the LCO forest for $P$, let $\ell$
denote the maximum size of a leaf label and $i$ the maximum size of an
internal (i.e., non-leaf) label (note that an isolated root is an
internal vertex). Let $k$ denote the number of bits in the base-2
expansion of $\max\{\ell-2,i-1\}$. Then the $F$-orbit of $P$ has size
$2^{k}$.
It is also possible to specify orbit sizes in terms of subpath
avoidance. For Dyck paths $Q$ and $R$, let $Q$ \emph{top} $R$ (resp.\ $Q$ \emph{bot}
$R$) denote the Dyck path obtained by inserting $R$ at the top (resp.\
bottom) of the first peak upstep in $Q$. Then the $F$-orbit of a Dyck path $P$ has
size $\le 2^{k}$ iff $P$ avoids subpaths in the set $\{Q\ top\ R,\ Q \
bot\ R\, :\, R\ne \epsilon,\ Q\in\ensuremath{\mathcal P}\xspace_{i}(DUU),\ 2^{k-1}+1 < i \le
2^{k}+1\}$. For $k\ge 1,$ listing these $Q$s explicitly would give
$2^{2^{k}}-2^{2^{k-1}}$ proscribed patterns of the form $Q\ top\ R,\
R\ne \epsilon$ (and the same number of the form $Q\ bot\ R$). For $k=0$,
that is, for fixed points of $F$, the proscribed patterns are
$UP^{+}UDD$ and $UUP^{+}DD$ with $P^{+}$ a nonempty Dyck path,
and avoiding the first of these amounts to avoiding the subpath $DUDD$.
The generating function\ for the number of $F$-orbits of size $\le 2^{k}$ can be found
using the ``symbolic'' method \cite{flaj}. With
$F_{k}(x),\: G_{k}(x),\: H_{k}(x)$ denoting the respective generating functions for
general Dyck paths, primitive Dyck paths, and primitive Dyck paths
that end $DD$ ($x$ always marking size), we find
\begin{eqnarray*}
F_{k}(x) & = & 1+G_{k}(x)F_{k}(x) \\
G_{k}(x) & = & x
+\frac{x\big(1-(2x)^{2^{k}}\big)}{1-2x}\big(x+(F_{k}(x)-1)H_{k}(x)\big) \\
H_{k}(x) & = & G_{k}(x)-x
\end{eqnarray*}
leading to
\[
F_{k}(x)=\frac{1-a_{k}-\sqrt{1-4x-\frac{\textrm{{\small $a_{k}(2-a_{k})x $}}}{\textrm{{\small $1-x$}}}}}{2x-a_{k}},
\]
where $a_{k}=(2x)^{2^{k}+1}$. In this formulation it is clear, as
expected, that $\lim_{k \to
\infty}F_{k}(x)=\frac{1-\sqrt{1-4x}}{2x}$, the generating function\ for the Catalan
numbers. The counting sequence for fixed points of $F$, with generating function\
$F_{0}(x)$, is sequence
\htmladdnormallink{A086625}{http://www.research.att.com:80/cgi-bin/access.cgi/as/njas/sequences/eisA.cgi?Anum=A086625}
in
\htmladdnormallink{OEIS}{http://www.research.att.com/~njas/sequences/Seis.html} .
\vspace*{10mm}
{\Large \textbf{6 \ An Application} }\quad
Ordered trees and binary trees are manifestations of the Catalan
numbers
\htmladdnormallink{A000108}{http://www.research.att.com:80/cgi-bin/access.cgi/as/njas/sequences/eisA.cgi?Anum=A000108} .
Donaghey \cite{motz77,restricted77} lists several types of restricted trees
counted by the Motzkin numbers
\htmladdnormallink{A001006}{http://www.research.att.com:80/cgi-bin/access.cgi/as/njas/sequences/eisA.cgi?Anum=A001006} .
In particular, the following result
is implicit in item III\,C of \cite{restricted77}.
\begin{prop}
The Motzkin number $M_{n}$ counts right-planted binary trees on
$n+1$ edges with no erasable vertices.
\label{erasable}
\end{prop}
Here, planted means the root has only one child, and erasable
refers to a vertex incident with precisely 2 edges \emph{both of the same
slope}---the vertex could then be erased, preserving the slope, to produce a smaller binary
tree. The $M_{3}=4$ such trees on 4 edges are shown.
\Einheit=0.6cm
\[
\Pfad(-7,2),3\endPfad
\Pfad(-7,2),43\endPfad
\Pfad(-7,0),3\endPfad
\Pfad(-3,0),33\endPfad
\Pfad(-3,2),4\endPfad
\Pfad(-2,3),4\endPfad
\Pfad(1,3),43\endPfad
\Pfad(2,2),4\endPfad
\Pfad(2,0),3\endPfad
\Pfad(5,0),3\endPfad
\Pfad(5,2),4\endPfad
\Pfad(5,2),3\endPfad
\Pfad(5,4),4\endPfad
\DuennPunkt(-7,0)
\DuennPunkt(-7,2)
\DuennPunkt(-6,1)
\DuennPunkt(-6,3)
\DuennPunkt(-5,2)
\DuennPunkt(-3,0)
\DuennPunkt(-3,2)
\DuennPunkt(-2,1)
\DuennPunkt(-2,3)
\DuennPunkt(-1,2)
\DuennPunkt(1,3)
\DuennPunkt(2,0)
\DuennPunkt(2,2)
\DuennPunkt(3,1)
\DuennPunkt(3,3)
\DuennPunkt(5,0)
\DuennPunkt(5,4)
\DuennPunkt(5,2)
\DuennPunkt(6,1)
\DuennPunkt(6,3)
\Label\o{ \textrm{\small The right-planted binary 4-trees with no erasable
vertices}}(0,-2)
\]
\vspace*{2mm}
Translated to Dyck paths, Prop.~\ref{erasable} is equivalent to
\begin{prop}
$M_{n}$ counts Dyck $(n+1)$-paths that end $DD$ and avoid
subpaths $DUDU$ and $UUP^{+}DD$ with $P^{+}$ denoting a nonempty Dyck subpath.
\label{UUXDD}
\end{prop}
We will use $F$ to give a bijective proof of Prop.~\ref{UUXDD} based
on the fact \cite{udu} that $M_{n}$ also counts $DUD$-avoiding Dyck
$(n+1)$-paths. (Of course, path reversal shows that $\#\,UDU$s and
$\#\,DUD$s are equidistributed on Dyck paths.) Define statistics $X$ and $Y$ on Dyck paths by
$X=\#\:DUD$s and $Y=\#\:DUDU$s $ +\ \#\:UUP^{+}DD$s + [path ends with
$UD$] (Iverson notation) so that the paths in Prop.~\ref{UUXDD} are
those with $Y=0$. Prop.~\ref{UUXDD} then follows from
\begin{prop}
On Dyck $n$-paths with $n\ge 2$, $F$ sends the statistic $X$ to
the statistic $Y$.
\end{prop}
\textbf{Proof}\quad Routine by induction from the recursive definition
of $F$. However, using the explicit form of $F$, it is also possible to
specify precisely which $DUD$s correspond to each of the three summands
in $Y$. For this purpose, given a $DUD$ in a Dyck path $P$, say $D_{1}U_{2}D_{3}$
(subscripts used simply to
identify the individual steps), let
$\ensuremath{S}\xspace(D_{1}U_{2}D_{3})$
denote the longest Dyck subpath of $P$ containing $D_{1}U_{2}D_{3}$ in its
skeleton and let $h$ denote the height at which $D_{1}U_{2}D_{3}$ terminates
in $\ensuremath{S}\xspace(D_{1}U_{2}D_{3})$. If $h$ is odd, $D_{1}U_{2}D_{3}$ is
immediately followed in $P$ by $D_{4}$ or by $UD_{4}$ (it cannot be followed by
$UU$). In either case, let $U_{4}$ be the matching upstep for
$D_{4}$. Then the steps $D_{1},U_{2},D_{3},U_{4}$ show up in $F(P)$ as
part of
a subpath $U_{4}U_{2}P^{+}D_{3}D_{4}$ with $P^{+}$ a Dyck path that
ends $D_{1}$. On the other hand, if $h$ is even, $D_{1}U_{2}D_{3}$
either (i) ends the path (here $\ensuremath{S}\xspace(D_{1}U_{2}D_{3})=P$ and $h=0$) or is
immediately followed by (ii) $U_{4}$ or (iii) $D$. In case (iii),
let $U_{4}$ be the matching upstep. Then $D_{1},U_{2},D_{3},U_{4}$ show
up in $F(P)$ as a subpath in that order (cases (ii) and (iii)) or
$F(P)$ ends $U_{2}D_{3}$ (case (i)). The details are left to the reader. \qed
\vspace*{10mm}
{\Large \textbf{7 \ Statistics Suggested by LCO Forests} }\quad
There are various natural statistics on LCO forests, some of which
give interesting counting results. Here we present two such. First
let us count one-tree LCO forests by size of root label. This is equivalent
to counting primitive Dyck paths by skeleton size. Recall that
the generalized Catalan number sequence $\big(C^{(j)}_{n}\big)_{n\ge
0}$ with $C^{(j)}_{n}:=\frac{j}{2n+j}\binom{2n+j}{n}$ is the $j$-fold
convolution of the ordinary Catalan number sequence
\htmladdnormallink{A000108}{http://www.research.att.com:80/cgi-bin/access.cgi/as/njas/sequences/eisA.cgi?Anum=A000108}.
(See \cite{woan} for a nice bijective proof.)
And, as noted above, in the skeleton-body-position decomposition of a
primitive Dyck path, if the body is nonempty it contains a $DUU$ at
(its own)
ground level and ends $DD$.
\begin{lemma}
The number of Dyck $n$-paths that contain a $DUU$ at
ground level and end $DD$ is $C^{(4)}_{n-3}$.
\end{lemma}
\textbf{Proof}\quad In such a path, let $U_{0}$ denote the middle $U$
of the \emph{last} $DUU$ at ground level. The path then has the form $AU_{0}BD$
where $A$ and $B$ are arbitrary \emph{nonempty} Dyck paths, counted
by $C^{(2)}_{n-1}$. So the desired counting sequence is the convolution
of $\big(C^{(2)}_{n-1}\big)$ with itself and, taking the $U_{0}D$ into
account, the lemma follows. \qed
The number of primitive $DUU$-avoiding Dyck $k$-paths is 1 if $k=1$,
and $2^{k-2}$ if $k\ge 2$. But if $k\ge 2$, there are two choices
(top/bottom) to insert the body. So the number of primitive Dyck $(n+1)$-paths
with skeleton size $k$ is $2^{k-1}C^{(4)}_{n-k-2}$ for $1\le k \le
n-2$ and is $2^{n-1}$ for $k=n+1$. Since there are $C_{n}$ primitive Dyck
$(n+1)$-paths altogether, we have established the following identity.
\begin{prop}
\[
C_{n} = 2^{n-1} + \sum_{k=1}^{n-2}\frac{2^{k}}{n-k}\binom{2n-2k}{n-2-k}.
\]
\end{prop} \qed
Lastly, turn an LCO forest into an LCO tree by joining all roots to a new root.
The purpose of doing this is so that isolated roots in the forest will
qualify as leaves in the tree. The symbolic method then yields
\begin{prop}
The generating function\ for LCO trees by number of leaves $(x$ marks size, $y$
marks number of leaves\,$)$ is
\[
\frac{1-\sqrt{1-4x\:\frac{\textrm{{\small $1-x$}}}{\textrm{{\small $1-xy$}}}}}{2x}.
\]
\end{prop}
The first few values are given in the following table.
\[
\begin{array}{c|cccccccc}
n^{\textstyle{\,\backslash \,k}} & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 \\
\hline
1& 1 & & & & & & & \\
2& 1 & 1 & & & & & & \\
3& 2 & 2 & 1 & & & & & \\
4& 4 & 6 & 3 & 1 & & & & \\
5& 8 & 17 & 12 & 4 & 1 & & & \\
6& 16 & 46 & 44 & 20 & 5 & 1 & & \\
7& 32 & 120 & 150 & 90 & 30 & 6 & 1 & \\
8& 64 & 304 & 482 & 370 & 160 & 42 & 7 & 1 \\
\end{array}
\]
\centerline{{\small number of LCO trees of size $n$ with $k$ leaves}}
\begin{thebibliography}{99}
\bibitem{twobij04} David Callan, Two bijections for Dyck path parameters,
\htmladdnormallink{math.CO/0406381}{http://front.math.ucdavis.edu/math.CO/0406381}, 2004, 4pp.
\bibitem{catfine} David Callan, Some bijections and identities for the Catalan and Fine numbers,
\htmladdnormallink{S\'{e}m. Lothar. Combin.}{http://www.math.ethz.ch/EMIS/journals/SLC/index.html}
\textbf{53} (2004/06), Art. B53e, 16 pp.
\bibitem{invol1999} Emeric Deutsch, An involution on Dyck paths and its consequences. \emph{Discrete Math.} \textbf{204} (1999), no. 1-3, 163--166.
\bibitem{bij1998} Emeric Deutsch, A bijection on Dyck paths and its consequences,
\emph{Discrete Math.} \textbf{179} (1998), no. 1-3, 253--256.
\bibitem{ordered} Emeric Deutsch, A bijection on ordered trees
and its consequences, \emph{J. Combin. Theory Ser. A} \textbf{90} (2000), no. 1, 210--215.
\bibitem{simple2003} Emeric Deutsch and Sergi Elizalde, A simple and unusual bijection for Dyck paths and its consequences,
\emph{Ann. Comb.} \textbf{7} (2003), no. 3, 281--297.
\bibitem{don80} Robert Donaghey, Automorphisms on Catalan trees and bracketings. \emph{J. Combinatorial Theory Ser. B}
\textbf{29} (1980), no. 1, 75--90. MR0584162
\bibitem{acp44}
Donald Knuth, \emph{Art of Computer Programming, Vol.~4, Fascicle 4: Generating all Trees -- History
of Combinatorial Generation}, Addison-Wesley, 2006, vi+120pp, draft available from
\htmladdnormallink{http://www-cs-faculty.stanford.edu/$\,\widetilde{\ }\,$knuth/fasc4a.ps.gz}{http://www-cs-faculty.stanford.edu/~knuth/fasc4a.ps.gz}
\bibitem{lalanne92} J.-C. Lalanne, Une involution sur les chemins de
Dyck, \emph{European J. Combin.} \textbf{13} (1992), no. 6, 477--487.
\bibitem{lalanne93} J.-C. Lalanne, Sur une involution sur les chemins de Dyck, Conference on Formal Power Series and Algebraic
Combinatorics, \emph{Theoret. Comput. Sci.} \textbf{117} (1993), no. 1-2, 203--215.
\bibitem{vaille97} J. Vaill\'{e}, Une bijection explicative de plusieurs
propri\'{e}t\'{e}s remarquables des ponts,
\emph{European J. Combin.} \textbf{18} (1997), no. 1, 117--124.
\bibitem{motz77} Robert Donaghey and Louis Shapiro, Motzkin numbers,
\emph{J. Combinatorial Theory Ser. A} \textbf{23}, 291--301, 1977. MR0505544
\bibitem{restricted77} Robert Donaghey, Restricted plane tree representations
of four Motzkin-Catalan equations, \emph{J. Combinatorial Theory
Ser. B} \textbf{22}, (1977), no. 2, 114--121, 1977. MR0432532
\bibitem{udu} Y. Sun, The statistic ``number of udu's'' in Dyck paths, \emph{Disc. Math.},
\textbf{287} (2004), Issue 1-3 (October 2004), 177--186.
\bibitem{flaj} Robert Sedgewick and Philippe Flajolet,
An Introduction to the Analysis of Algorithms,
Addison-Wesley, 1996.
\bibitem{woan} Wen-jin Woan,
Uniform partitions of lattice paths and Chung-Feller generalizations.
\emph{Amer. Math. Monthly} 108 (2001), no. 6, 556--559.
\end{thebibliography}
\end{document}
\begin{document}
\title{Irreducibility of $x^n-a$}
\begin{abstract}
A. Capelli gave a necessary and sufficient condition for the reducibility of $x^n-a$ over $\Q$. In this article, we are providing an alternate elementary proof for the same.
\end{abstract}
\textbf{Key Words}: Irreducible polynomials, cyclotomic polynomials.\\
\textbf{AMS(2010)}: 11R09, 12D05.\\
In this article, we present an elementary proof of a theorem about the irreducibility of $x^n-a$ over $\Q$. Vahlen~\cite{vahlen} was the first mathematician to characterize the irreducibility conditions of $x^n-a$ over $\Q$. A. Capelli~\cite{capelli} extended this result to all fields of characteristic zero. Later L. R\'edei~\cite{redei} proved this result for all fields of positive characteristic. Nevertheless, this theorem is usually referred to as Capelli's theorem.
\begin{theorem}[\cite{capelli}, \cite{vahlen}, \cite{redei}] \label{thm:vahlen}
Let $n\ge 2.$ A polynomial $x^n-a\in \Q[x]$ is reducible over $\Q$ if and only if
either $a=b^t$ for some $t|n, t>1$, or $4|n$ and $a=-4b^4$, for some $b\in \Q.$
\end{theorem}
Since Theorem \ref{thm:vahlen} is true for arbitrary fields, all of the known proofs rely on field extensions except the proof given by Vahlen \cite{vahlen}. Vahlen assumes that the binomial $x^n-a$ is reducible and proves Theorem \ref{thm:vahlen} by using the properties of $n^{\text{th}}$ roots of unity and by comparing the coefficients on both sides of the following
equation
\[x^n-a=(x^m+a_{m-1}x^{m-1}+\cdots+a_0)(x^{n-m}+b_{n-m-1}x^{n-m-1}+\cdots+b_0)\] for some $m$, $0<m<n$. The reader can consult (\cite{karpilovsky}, p.~425) for a proof using field theory. We give a proof particularly over $\Q$ by using very little machinery.
Let $f(x)=x^n-a$, $a=\frac{b}{c}\in \Q$ and $(b, c)=1$. Then $c^nf(x)=(cx)^n-c^{n-1}b\in \Z[x].$ Hence $x^n-a$ is reducible over $\Q$ if and only if $y^n-c^{n-1}b$ is reducible over $\Z$. It is, therefore, sufficient to consider $a\in \Z$ and throughout the article, by reducibility, we will mean reducible over $\Z$.
\begin{theorem}\label{thm:capelli}
Let $n\ge 2.$ A polynomial $x^n-a\in \Z[x]$ is reducible over $\Z$ if and only if
either $a=b^t$ for some $t|n, t>1$, or $4|n$ and $a=-4b^4$, for some $b\in \Z.$
\end{theorem}
The polynomial $x^n-1$ is a product of cyclotomic polynomials and if $n=2^{n_1}u$ with $2\nmid u$, then
\begin{equation*}
x^n+1=\prod\limits_{d|u}\Phi_{2^{n_1+1}d}(x).
\end{equation*}
Therefore, from now onwards we assume that $a>1,$ if not specified, and
check the reducibility of the polynomial $x^n\pm a$ for $n\ge 2$. If there exists a prime $p$ such that $p|a$ but $p^2\nmid a,$ then $x^n\pm a$ is irreducible by Eisenstein's criterion. In other words, if $a=p_1^{a_1}p_2^{a_2}\cdots p_k^{a_k}$ is the prime factorization of $a$ and $x^n\pm a$ is reducible, then
$a_i\ge 2$ for every $i\in \{1,2,\ldots,k\}.$ More generally,
\begin{lemma}\label{lem:gcdge2}
Let $n\ge 2$, $a=p_1^{a_1}p_2^{a_2}\cdots p_k^{a_k}$ be the prime factorization of $a$ and let $x^n\pm a$ be reducible. Then $\gcd(a_1,a_2,\ldots,a_k)\ge 2$ and $\gcd(\gcd(a_1,a_2,\ldots,a_k),n)>1.$
\end{lemma}
\begin{proof}
We prove the result by induction on $k=\omega(a)$, the number of distinct prime divisors of $a.$ The roots of $x^n\pm a$ are of the form $a^{1/n}(\mp \zeta_n)^e$, where $e\in \Z$ and $\zeta_n$ is a primitive $n^{\text{th}}$ root of unity. Since the proof barely depends upon the sign of roots, we restrict to the case $x^n-a$. Let $f(x)$ be a proper factor of $x^n-a,$ where $\deg(f)=s<n$. If $f(0)=\pm d$, then $\pm d=a^{s/n}\zeta_n^w$ for some $w\in \Z$.
Let $k=1$ and $a=p_1^{a_1}$. From Eisenstein's criterion, $a_1\ge 2$. If $d=p_1^{\alpha},$ then $d^n=a^s$ gives, $\alpha n=a_1s$. Since $a_1\ge 2$ and $s<n$, we deduce that $(a_1, n)>1$.
Let $k=2$ and $a=p_1^{a_1}p_2^{a_2}$. From $d^n=a^s,$ let $d=p_1^{d_1}p_2^{d_2}$ be the prime factorization of $d$. Then $nd_1=a_1s, nd_2=a_2s$ would give $d_1a_2=d_2a_1$. If $(a_1, a_2)=1$, then $d_1=a_1c$ for some $c|d_2$ and $nc=s<n$ is a contradiction. Thus, $m=(a_1, a_2)\ge 2$. Next we need to show that $(n,m)>1$. Suppose $a_1=mb_1, a_2=mb_2$ with $(b_1,b_2)=1.$ From $d_1a_2=d_2a_1$, we deduce that $d_1b_2=d_2 b_1.$ Then $d_1=b_1 r$ for some $r|d_2.$ If $(n,m)=1$, then $nd_1=a_1s=mb_1s$ will give $n|s$, a contradiction. Hence, $(n,m)>1$.
Suppose the result is true for some $k\ge 2$. Thus, if $a=p_1^{a_1}\cdots p_k^{a_k}$, then $(a_1, \ldots, a_k)=u>1$ and $(u,n)>1$. We now show that the result is true for $k+1$. Let $a=p_1^{a_1}\cdots p_k^{a_k}p_{k+1}^{a_{k+1}}=a_1^up_{k+1}^{a_{k+1}}$, where $a_1=p_1^{w_1}\cdots p_k^{w_k}$ and $(w_1, \ldots, w_k)=1$. From $d^n=a^s$, we can write $d$ as $d=b_1^vp_{k+1}^{d_{k+1}},$ where $b_1=p_1^{v_1}\cdots p_k^{v_k},$ $(v_1, \ldots, v_k)=1$. From the fundamental theorem of arithmetic, $d_{k+1}n=a_{k+1}s$ and $b_1=a_1,$ $vn=us$. That is $d_{k+1}u=a_{k+1}v$. If $(u,a_{k+1})=1,$ then $d_{k+1}=a_{k+1}h$ for some $h|v,$ and $na_{k+1}h=a_{k+1}s$ implies $n|s$. This contradicts the fact that $s<n$. Thus, $(u,a_{k+1})=m>1$.
We now show that $(n,m)>1$. Let $u=mu_1, a_{k+1}=ma_{k+1}',$ where $(u_1, a_{k+1}')=1$. From $d_{k+1}u=a_{k+1}v$, we get $u_1d_{k+1}=va_{k+1}'$. Since $(u_1,a_{k+1}')=1$, $d_{k+1}=a_{k+1}'t$ for some $t|v$. On the other hand, $nd_1=a_1s, nd_{k+1}=a_{k+1}s$ would imply $a_1d_{k+1}=d_1a_{k+1}$. If $a_1=ua_1'=mu_1a_1',$ then $a_1d_{k+1}=d_1a_{k+1}$ implies $d_1=u_1a_1't$. Using this in $nd_1=a_1s$, we have $nt=ms$. If $(n,m)=1$, then $n|s$ is a contradiction. Thus, $(n,m)>1$.
By the principle of induction, the result is true for every $k\ge 1.$
\end{proof}
In other words, if $x^n\pm a$ is reducible, then $a$ has to be of the form $b^m,$ where $(n,m)>1$ and $m\ge 2$. With a rearrangement in powers, we can say
\begin{cor}\label{cor:restrictedb}
Let $n\ge 2$ and $x^n\pm a $ be reducible over $\Z$. Then $ a= b^m$ for some $m\ge 2, m|n$, and $b$ is either a prime number or $b=(p_1^{b_1}p_2^{b_2}\cdots p_k^{b_k})^d,$ where $k\ge 2, (b_1, b_2, \ldots, b_k)=1$ and $(d, n)=1.$
\end{cor}
Suppose $f(x)=x^{25}\pm 6^8$. Then $b=6,$ $m=8$, and $(8,25)=1$ implies that the polynomial $x^{25}\pm 6^8$ is irreducible by Corollary \ref{cor:restrictedb}. Let $g(x)=x^{25}\pm (243)^2$. If we were to take $b=243$, then $(2,25)=1$ would suggest that $x^{25}\pm (243)^2$ is irreducible. But $x^5\pm 9$ divides $g(x)$. The reason is that $b=243$ is not of the form described in Corollary \ref{cor:restrictedb}: since $243=3^5,$ we must take $b=3^2$ and $m=5$, so that $m|n$. For this reason, we make the following definition.
We say that a positive integer $b$ \emph{has the property $\P$} if $b$ is of the form given in Corollary \ref{cor:restrictedb}.
\begin{lemma}\label{lem:m|n}
Let $m\ge 2$, $m|n,$ and let $b$ have the property $\P$. Then $x^n\pm b^m$ is reducible except possibly for $x^n+b^{2^r}, r\ge 1$.
\end{lemma}
\begin{proof} If $m|n$, then
\begin{equation*}
x^n-b^m=\left(x^{n/m}-b\right)\left(x^{n(m-1)/m}+x^{n(m-2)/m}b+\cdots+x^{n/m}b^{m-2}+b^{m-1}\right).
\end{equation*}
Let $m=2^{r}m_1$, where $2\nmid m_1$ and $r\ge 0$. Then
\begin{align}
x^n+b^m&= \prod\limits_{d|m_1}b^{2^r\varphi(d)}\Phi_{2^{r+1}d}\left(\frac{x^{n/m}}{b}\right), \notag
\end{align}
where $b^{2^r\varphi(d)}\Phi_{2^{r+1}d}\left(\frac{x^{n/m}}{b}\right)\in \Z[x]$ and $\varphi$ is the Euler totient function.
\end{proof}
Lemma \ref{lem:m|n} is true even if $b$ does not have the property $\P$. If $m=2^r\ge 2, m|n,$ and $b$ has the property $\P$, then the reducibility condition of $x^n+b^m$ completes the proof of Theorem \ref{thm:capelli}.
Selmer~(\cite{selmer}, p.~298) made the following observation. Let $g(x)\in \Z[x]$ be an arbitrary irreducible polynomial of degree $n$. If $g(x^2)$ is reducible, then, using the fact that $\Z[x]$ is a unique factorization domain, we get $$g(x^2)=(-1)^ng_1(x)g_1(-x),$$ where $g_1(x)$ is an irreducible polynomial in $\Z[x]$. Thus, if $g_1(x)=a_nx^n+a_{n-1}x^{n-1}+\cdots+a_1x+a_0$, then
\begin{equation*}
g(x^2)=(a_nx^n+a_{n-2}x^{n-2}+\cdots+a_0)^2-(a_{n-1}x^{n-1}+\cdots+a_1x)^2.
\end{equation*}
Let $k$ be an odd integer. Then $g(k^2)\equiv g(1)\pmod{4}$. Since the right hand side of the last equation is the difference between the two squares, $g(k^2)\equiv 0, \pm 1\pmod{4}$. Combining all of these, one can conclude that
\begin{lemma}\label{general}
Let $g(x)\in \Z[x]$ be an irreducible polynomial.
\begin{enumerate}[label=(\alph*)]
\item\label{gpart1} If $g(k^2)\equiv 2\pmod{4}$ for an odd integer $k$, then $g(x^2)$ is irreducible over $\Z$.
\item\label{gpart2} If $g(x^2)$ is reducible, then there are unique (up to sign) polynomials $f_1(x)$ and $f_2(x)$ such that $g(x^2)=f_1(x)^2-f_2(x)^2$. Furthermore, in this case, we can write $f_1(x)=h_1(x^2)$ and $f_2(x)=xh_2(x^2),$ where $h_1(x),h_2(x)\in \Z[x]$.
\end{enumerate}
\end{lemma}
The proof of part~\ref{gpart2} follows from the fact that $\Z[x]$ is a unique factorization domain.
\begin{lemma}\label{capelli:lem3}
Let $m=2^r\ge 2$ and $n$ be an odd positive integer. If $b$ has the property $\P$, then $x^{2^in}+b^m$ is irreducible for every $i$, $0\le i\le r$.
\end{lemma}
\begin{proof}
We proceed by induction on $i$. If $i=0$ and $b$ has the property $\P$, then $f(x)=x^n+b^m$ is irreducible by Lemma \ref{lem:gcdge2}. For $i=1$, since $f(x)=x^n+b^m$ is irreducible, if $f(x^2)=x^{2n}+b^m=(x^n+b^{m/2})^2-2b^{m/2}x^n$ were reducible, then from Lemma \ref{general}, $2b^{m/2}x^n$ would have to be of the form $x^2h(x^2)^2$ for some $h(x)\in \Z[x]$. Since $n$ is odd, this is not possible and hence $f(x^2)$ is irreducible.
Suppose the result is true for some $i$, $0\le i\le r$ and we will show that it is true for $i+1\le r$. So, $f(x)=x^{2^in}+b^m$ is irreducible for some $i\le r$. From Lemma \ref{general}, if
\begin{equation*}
f(x^2)=x^{2^{i+1}n}+b^m=(x^{2^in}+b^{m/2})^2-2b^{m/2}x^{2^in}
\end{equation*}
is reducible, then $2b^{m/2}x^{2^in}$ has to be of the form $(xg(x^2))^2$ for some $g\in \Z[x]$. This is possible only when $m=2$ and $b=2^{\alpha}b_1^2,$ where $\alpha, b_1$ are odd positive integers. That is, $r=1$ and hence $i=0$. We have already seen that $f(x^2)$ is irreducible in this case. Therefore, by the induction principle, $x^{2^in}+b^{2^r}$ is irreducible for every $i\le r$.
\end{proof}
\begin{lemma}
Let $m=2^r\ge 2$ and let $b$ be an odd integer which has the property $\P$. If $m|n,$ then $x^n+b^m$ is irreducible.
\end{lemma}
\begin{proof}
Let $n=mt$. If $t$ is odd, then by Lemma~\ref{capelli:lem3}, $x^n+b^m$ is irreducible. Otherwise, let $t=2^{t_1}u$, where $t_1\ge 1$ and $u$ is odd. From Lemma \ref{capelli:lem3}, $g(x)=x^{mu}+b^m$ is irreducible. Since $b$ is odd, $g(k^2)\equiv 2\pmod{4}$ for any odd integer $k$. Applying Lemma \ref{general} repeatedly to $g(x)$, the result follows.
\end{proof}
\begin{cor}\label{cor:tbeven}
Let $m=2^r\ge 2,$ $m|n,$ and suppose $b$ has the property $\P$. If $x^n+b^m$ is reducible, then both $b$ and $\frac{n}{m}$ are even integers.
\end{cor}
\begin{lemma}\label{lastlem}
Let $t,r\in \N$ and let $b$ have the property $\P$. Then $x^{2^rt}+b^{2^r}$ is reducible if and only if $t$ is even, $r=1$, and $b=2d^2$ for some $d\in \N$.
\end{lemma}
\begin{proof}
If $t$ is even, $r=1$, and $b=2d^2,$ then \[x^{2t}+4d^4=(x^t+2d^2)^2-4d^2x^t=(x^{t}-2dx^{t/2}+2d^2)(x^{t}+2dx^{t/2}+2d^2).\]
Conversely, suppose $g(x)=x^{2^rt}+b^{2^r}$ is reducible. By Corollary \ref{cor:tbeven}, both $t$ and $b$ are even integers. Let $t=2^{t_1}u, b=2^{b_1}v,$ where $u,v$ are odd integers and $t_1, b_1\ge 1$. By Lemma \ref{capelli:lem3}, the polynomial $h(x)=x^{2^ru}+b^{2^r}$ is irreducible. Since $g(x)=h(x^{2^{t_1}})$ is reducible, there is some $i$, $1\le i \le t_1$ such that $h(x^{2^{i-1}})$ is irreducible and
\begin{equation*}
h(x^{2^i})=x^{2^{r+i}u}+2^{2^rb_1}v^{2^r}=(x^{2^{r+i-1}u}+2^{2^{r-1}b_1}v^{2^{r-1}})^2-2^{2^{r-1}b_1+1}v^{2^{r-1}}x^{2^{r+i-1}u}
\end{equation*}
is reducible. From uniqueness property of Lemma \ref{general}, $2^{2^{r-1}b_1+1}v^{2^{r-1}}x^{2^{r+i-1}u}$ has to be of the form $(xl(x^2))^2$ for some $l(x)\in \Z[x]$. This is possible only when $r=1$ and $2^{2^{r-1}b_1+1}v^{2^{r-1}}$ is a perfect square. Hence, $2^{b_1+1}v=c^2$ would imply $b=2\left(\frac{c}{2}\right)^2$ with $\frac{c}{2}\in \N$.
\end{proof}
The proof of Theorem~\ref{thm:capelli}, and hence of Theorem~\ref{thm:vahlen}, follows from Lemmas~\ref{lem:gcdge2}, \ref{lem:m|n}, and~\ref{lastlem}.

{\bf Acknowledgement.} We would like to thank the referee for valuable comments.
\begin{thebibliography}{99}
\bibitem{capelli}
A. Capelli, {\em Sulla riduttibilit\`a delle equazioni algebriche}, Nota prima, Rend. Accad. Fis. Mat. Soc. Napoli (3), 3 (1897), 243--252.
\bibitem{selmer}
E. S. Selmer, {\em On the irreducibility of certain trinomials}, Math. Scand.,
4 (1956), 287--302.
\bibitem{karpilovsky}
G. Karpilovsky, {\em Topics in field theory}, ISBN 0444872973, North-Holland, 1989.
\bibitem{vahlen}
K. Th. Vahlen, {\em \"Uber reductible Binome}, Acta Math., 19(1)(1895), 195--198.
\bibitem{redei} L. R\'edei, {\em Algebra}, Erster Teil, Akademische Verlaggesellschaft, Leipzig, 1959.
\end{thebibliography}
\end{document}
\begin{document}
\title{The Visibility Center of a Simple Polygon}
\begin{abstract}
We introduce
the \emph{visibility center} of a set of points inside a
polygon---a point
$c_V$ such that the maximum geodesic distance from $c_V$ to see any point in the set is minimized.
For a simple polygon of $n$ vertices and a set of $m$ points inside it, we give an $O((n+m) \log {(n+m)})$ time algorithm to find the visibility center. We find the visibility center of \emph{all} points in a simple polygon in $O(n \log n)$ time.
Our algorithm reduces the visibility center problem to the problem of finding the geodesic center of a set of half-polygons inside a polygon, which is of independent interest. We give an $O((n+k) \log (n+k))$ time algorithm for this problem, where $k$ is the number of half-polygons.
\end{abstract}
\pagenumbering{arabic}
\section{Introduction}
Suppose you want to guard a polygon and you have many sensors but only one guard to check on the sensors.
The guard must be positioned at a point $c_V$ in the polygon such that when a sensor at any query point $u$ sends an alarm, the guard travels from $c_V$ on a shortest path inside the polygon to \emph{see} point $u$;
the goal is to minimize the maximum distance the guard must travel.
More precisely, we must choose $c_V$ to minimize the maximum, over points $u$, of the geodesic distance from $c_V$ to a point that sees $u$.
The optimum guard position $c_V$ is called the \defn{visibility center} of the set $U$ of possible query points.
See Figure~\ref{fig:figure_with_visibility_center_paths}.
We give an $O((n+m) \log {(n+m)})$ time algorithm to find the visibility center of a set $U$ of size $m$ in an $n$-vertex simple polygon.
To find the visibility center of \textit{all} points inside a simple polygon,
we can restrict our attention to the
vertices of the polygon, which
yields an $O(n \log n)$ time algorithm.
\begin{figure}
\caption{
(left) Point $c_V$ is the \defn{visibility center}
\label{fig:figure_with_visibility_center_paths}
\end{figure}
To the best of our knowledge, the idea of visibility centers is new,
though it is a very natural concept
that combines
two significant branches of computational geometry:
visibility problems~\cite{ghosh2007visibility};
and center problems and farthest Voronoi diagrams~\cite{aurenhammer2013voronoi}.
There is a long history of finding ``center points'', for various definitions of ``center''.
The most famous of these is Megiddo's linear time algorithm~\cite{megiddo_linear} to find the
center of a set of points in the plane (Sylvester's
``smallest circle'' problem).
Inside a polygon the relevant distance measure is not the Euclidean distance but rather the shortest path, or
\emph{geodesic}, distance.
The \defn{geodesic center} of a simple polygon is a point $p$ that minimizes the maximum geodesic distance from $p$ to any point $q$ of the polygon, or equivalently, the maximum geodesic distance from $p$ to any vertex of the polygon.
Pollack, Sharir, and Rote~\cite{pollack_sharir} gave an $O(n \log n)$ time divide-and-conquer algorithm to find the geodesic center of a polygon.
Our algorithm builds on theirs.
A more recent algorithm by Ahn et al.~\cite{linear_time_geodesic} finds the geodesic center of a polygon in linear time.
Another notion of the center of a polygon is the link center, which can be found in $O(n \log n)$ time~\cite{djidjev1992ano}.
Center problems are closely related to farthest Voronoi diagrams, since the center is
a vertex of the corresponding farthest Voronoi diagram,
\changed{or a point on an edge of the Voronoi diagram in case the center has only two farthest sites}. Finding the farthest Voronoi diagram of points in the plane takes $\Theta(n \log n)$ time---thus it is strictly harder to find the farthest Voronoi diagram than to find the center.
However, working inside a simple polygon helps for farthest Voronoi diagrams:
the farthest geodesic Voronoi diagram of the vertices of a polygon can be found in time $O(n \log \log n)$~\cite{oh2020geodesic}.
\newcomment{Generalizing the two scenarios (points in the plane, and polygon vertices), yields the}
problem of finding the farthest Voronoi diagram of $m$ points in a polygon, which was first solved by Aronov et al.~\cite{aronov1993furthest} with run-time $O((n+m)\log (n+m))$, and improved in a sequence of papers~\cite{oh2020geodesic,barba2019optimal,oh2020voronoi}, with the current best run-time of
$O(n + m \log m)$~\cite{wang2021}.
Turning to visibility problems in a polygon,
there are algorithms for the
``quickest visibility problem''---to find the shortest path from point $s$ to see point $q$, and to solve the query version where $s$ is fixed and $q$ is a query point~\cite{arkin2016shortest,wang2019quickest}. For a simple polygon~\cite{arkin2016shortest}, the preprocessing time and space are $O(n)$ and
the query time is $O(\log n)$.
\changed{We do not use these results}
in our algorithm to find the visibility center $c_V$, but they are useful afterwards to find the actual shortest path from $c_V$ to see a query point.
A more basic version of our problem is to find, if there is one, a point that sees all points in $U$. The set of such points is the \defn{kernel} of $U$.
When $U$ is the set of vertices, the kernel can be found in linear time~\cite{lee1979optimal}. For a general set $U$, Ke and O'Rourke~\cite{ke1989computing} gave an $O(n + m\log (n + m))$ time algorithm to compute the kernel, and we use some of their results in our algorithm.
Another problem somewhat similar to the visibility center problem is
the watchman problem~\cite{chin1991shortest,touring}---to find a minimum length tour from which a single guard can see the whole polygon. Our first step is similar in flavour to the first step for the watchman problem, namely, to replace the condition of ``seeing'' everything by a condition of visiting certain ``essential chords''.
\paragraph*{Our Results}
The \defn{distance to visibility} from a point $x$ to point $u$ in $P$, denoted $d_V(x,u)$ is the minimum distance in $P$ from $x$ to a point $y$ such that $y$ sees $u$.
For a set of points $U$ in $P$,
the \defn{visibility radius} of $x$ with respect to $U$ is $r_V(x,U) := \max \{d_V(x,u) : u \in U\}$.
The \defn{visibility center} $c_V$ of $U$ is a point $x$ that minimizes $r_V(x,U)$.
Our main result is:
\begin{theorem}
\label{thm:vis-center}
There is an algorithm to find the visibility center of a point set $U$ of size $m$ in a simple $n$-vertex polygon $P$ with run-time $O((n+m)\log (n+m))$.
\end{theorem}
The key to our algorithm is to reformulate the visibility center problem in terms of distances to certain \emph{half-polygons} inside the polygon.
We illustrate the idea by means of the example in Figure~\ref{fig:figure_with_visibility_center_paths} where the visibility center of the 6-element point set $U$ is the
\emph{geodesic center} of a set of five half-polygons.
More generally, we will reduce the problem of finding the visibility center to the problem of finding a geodesic center of a linear number of half-polygons.
The input to this problem is a set $\cal H$ of $k$ half-polygons (see Section~\ref{sec:preliminaries} for precise definitions) and the goal is to find a \emph{geodesic center} $c$ that minimizes the maximum distance from $c$ to a half-polygon.
More precisely,
the \defn{geodesic radius} from a point $x$ to $\cal H$ is $r(x,{\cal H}) := \max \{d(x,H) : H \in {\cal H}\}$, and
the \defn{geodesic center} $c$ of $\cal H$ is a point $x$ that minimizes $r(x,{\cal H})$.
Our second main result is:
\begin{theorem}
\label{thm:geo-center}
There is an algorithm to find the geodesic center of a set $\cal H$ of $k$ half-polygons in a simple $n$-vertex polygon $P$ with run-time $O((n+k)\log (n+k))$.
\end{theorem}
Our algorithm extends the divide-and-conquer approach
that Pollack et al.~\cite{pollack_sharir} used to compute the geodesic center of the vertices of a simple polygon.
Our main motivation for finding the geodesic center of half-polygons is to find the visibility center, but the geodesic center of half-polygons is of independent interest.
Euclidean problems of a similar flavour are to find the center (or the farthest Voronoi diagram) of line segments or convex polygons in the plane~\cite{bhattacharya1994optimal, jadhav1996optimal}. These problems are less well-studied than the case of point sites (e.g., see~\cite{aurenhammer2006farthest} for remarks on this). \newcomment{The literature for geodesic centers is even more sparse, focusing almost exclusively on geodesic centers of points in a polygon.}
It is thus
interesting that the center of half-polygons inside a polygon can be found efficiently.
As a special case, we can find the geodesic center of the edges of a simple polygon
in $O(n \log n)$ time.
The reduction from the visibility center problem to the geodesic center of half-polygons is in
Section~\ref{section:essential-half-polygons}. The run time is $O((n+m) \log (n+m))$.
The algorithm that proves Theorem~\ref{thm:geo-center} \changed{(finding the geodesic center of half-polygons)} is in Section~\ref{section:half-polygon-center}. Together these prove Theorem~\ref{thm:vis-center} \changed{(finding the visibility center)}.
\section{Preliminaries}
\label{sec:preliminaries}
We add a few more basic definitions to augment the main definitions given above.
We work with a simple polygon $P$ of $n$ vertices whose boundary $\partial P$ is directed clockwise. A \defn{chord} of $P$ is a line segment inside $P$
\changed{with endpoints on $\partial P$.}
Any chord divides $P$ into two \changed{weakly simple} \defn{half-polygons}.
A half-polygon is specified by its \changed{defining} chord $(p,q)$ with the convention that the half-polygon contains the path clockwise from $p$ to $q$.
The \defn{geodesic distance $d(x,y)$} (or simply, \defn{distance}) between two points $x$ and $y$ in $P$ is the length of the \defn{shortest path $\pi(x,y)$} in $P$ from $x$ to $y$. For half-polygon $H$,
the \defn{geodesic distance $d(x,H)$} is the minimum distance from $x$ to a point in $H$.
Points $x$ and $y$ in $P$ are \defn{visible} ($x$ ``sees'' $y$) if the segment $xy$ lies inside $P$. The \defn{distance to visibility} from $x$ to $u$, denoted $d_V(x,u)$ is the minimum distance from $x$ to a point $y$ such that $y$ sees $u$. If $x$ sees $u$, then this distance is 0, and otherwise it is the distance from $x$ to the half-polygon defined as follows.
Let $r$ be the last reflex vertex on the shortest path from $x$ to $u$. Extend the ray $\overrightarrow{ur}$ from $r$ until it hits the polygon boundary $\partial P$ at a point $p$ to obtain a chord $rp$
(which is an edge of the \defn{visibility polygon} of $u$).
Of the two half-polygons defined by $rp$,
let $H(u,r)$ be the one that contains $u$. See Figure~\ref{fig:figure_with_visibility_center_paths}.
\begin{observation}
\label{obs:vis-to-half-polygon}
$d_V(x,u) = d(x,H(u,r))$.
\end{observation}
\changed{In the remainder of this section we establish the basic result that
the visibility center of a set of points $U$ and the geodesic center of a set of half-polygons $\cal H$ are unique except in very special cases, and that two or three tight constraints suffice to determine the centers.}
We explain this for the geodesic center of half-polygons, but the same argument works for the visibility center (or, alternatively, one can use the reduction from the visibility center to the geodesic center in Section~\ref{section:essential-half-polygons}).
\changed{Note that
if the geodesic radius is 0, then any point in the intersection of the half-polygons is a geodesic center.}
\begin{claim}
\label{claim:2-or-3-vectors}
\changed{Suppose that the geodesic radius $r$ satisfies $r > 0$.}
There is a set ${\cal H}' \subseteq {\cal H}$ of two or three half-polygons such that the set of geodesic centers of ${\cal H}$ is equal to the set of geodesic centers of ${\cal H}'$ and furthermore
\begin{enumerate}
\item if ${\cal H}'$ has size 3 then the geodesic center is unique
(e.g., see Figure~\ref{fig:figure_with_visibility_center_paths})
\item if ${\cal H}'$ has size 2 then either the geodesic center is unique
or
the two half-polygons of ${\cal H}'$ have chords that are parallel and the geodesic center consists of a line segment parallel to them and midway between them.
\end{enumerate}
\end{claim}
\changed{The proof of this claim depends on a basic convexity property of the geodesic distance function that was proved for the case of distance to a vertex by
Pollack et al.~\cite[Lemma 1]{pollack_sharir} and that we extend to a half-polygon.
}
\noindent
\begin{figure}
\caption{
Proving that
\revised{$d(x,H)$}
\label{fig:convexity-proof}
\end{figure}
\newestchanged{A subset $Q$ of $P$ is \defn{geodesically convex} if for any two points $a$ and $b$ in $Q$, the shortest (or ``geodesic'') path $\pi(a,b)$ in $P$ is contained in $Q$.
A function $f$ defined on $P$ is \defn{geodesically convex} if $f$ is convex on every geodesic path $\pi(a,b)$ in $P$, i.e., for points $x \in \pi(a,b)$, $f(x)$ is a convex function of $d(a,x)$.
}
\begin{lemma}
\label{lemma:distance_to_half_polygon_is_convex}
\newestchanged{For any half-polygon $H$, the distance function $d(x,H)$ is geodesically convex.
Furthermore, on any geodesic path $\pi(a,b)$ with $a$ and $b$ outside $H$, the minimum of $d(x,H)$ occurs at a point or along a line segment parallel to $h$, the defining chord of $H$.}
\end{lemma}
\begin{proof}
Pollack et al.~\cite{pollack_sharir} proved the version of this where $H$ is replaced by a point\changed{---in particular, they proved that the distance function is strictly convex which implies that the minimum occurs at a point}.
If $a$ and $b$ are inside $H$, then so is $\pi(a,b)$ and the distance function is constantly 0. So suppose $a \notin H$. If the path $\pi(a,b)$ intersects $H$, then it does so at only one point $b^* \in h$ (otherwise $h$ provides a shortcut for part of the path).
\changed{Then $\pi(b^*,b)$ lies inside $H$ and it suffices to prove convexity
for the path $\pi(a,b^*)$.} In other words, we may assume that $b$ is not in the interior of $H$.
The shortest paths $\pi(x,H)$ for $x \in \pi(a,b)$ reach a subinterval $[h_1,h_2]$ of $h$.
See Figure~\ref{fig:convexity-proof}. In case this subinterval is a single point, i.e., $h_1=h_2$, the convexity result of Pollack et al.~proves the lemma.
Otherwise, since shortest paths do not cross, there are points $c$ and $d$ on $\pi(a,b)$ such that for $x \in \pi(a,c)$ the path $\pi(x,H)$ arrives at $h_1$, for $x \in \pi(c,d)$ the path $\pi(x,H)$ is a straight line segment reaching $h$ at a right angle, and for $x \in \pi(d,b)$ the path $\pi(x,H)$ arrives at $h_2$.
The convexity result of Pollack et al.~applies to the paths arriving at the points $h_1$ and $h_2$.
It remains to consider $x \in \pi(c,d)$.
As $x$ moves along $\pi(c,d)$, the endpoint of $\pi(x,H)$ moves continuously with a one-to-one mapping along the segment $[h_1,h_2]$. Since the curve $\pi(c,d)$ is convex, this implies that $d(x,H)$ is a convex function of $d(a,x)$ for $x \in \pi(c,d)$.
\changed{Furthermore, the minimum is unique unless a segment of the geodesic is parallel to $h$.
}
Finally, one can verify that convexity holds at the points $c$ and $d$, i.e., that the three convex functions join to form a single convex function.
\end{proof}
\newestchanged{
Because the intersection of geodesically convex sets is geodesically convex, and the max of geodesically convex functions is geodesically convex, we get the following consequences.
\begin{corollary}
\label{cor:geodesic-convexity}
The geodesic radius function $r(x,{\cal H})$ is geodesically convex.
The geodesic ball $B(t,H) := \{x \in P : d(x,H) \le t \}$ for any half-polygon $H$, and the geodesic ball $B(t) :=\{x \in P : r(x,{\cal H}) \le t \}$ are geodesically convex.
\end{corollary}
}
\begin{proof}[Proof of Claim~\ref{claim:2-or-3-vectors}]
\changed{
The set of geodesic centers is $C := \{x \in P : d(x,H) \le r \text{ for all } H \in {\cal H} \}$.
\newestchanged{By Corollary~\ref{cor:geodesic-convexity}, $C$ is geodesically convex.}
If $C$ contains two distinct points $x_1$ and $x_2$ then it contains the geodesic path $\gamma = \pi(x_1,x_2)$.
By Lemma~\ref{lemma:distance_to_half_polygon_is_convex}, along $\gamma$, for each $H \in {\cal H}$, the minimum of the distance function $d(x,H)$ occurs at a point or along a line segment parallel to $h$. This implies that $\gamma$ can only be a single line segment parallel to two of the half-polygons of $\cal H$, which is Case 2 of the Claim.
For Case 1, let us now suppose that
$C$ consists of a single point.
Because the boundary of each
geodesic ball $\{x \in P : d(x,H) \le r \}$ consists of circular arcs and line segments, the single point $C$
is uniquely determined as the intersection of some circular arcs and line segments, and three of those suffice to determine the point.
}
\end{proof}
\section{Reducing the Visibility Center to the Center of Half-Polygons}
\label{section:essential-half-polygons}
In this section we reduce the problem of finding the visibility center of a set of points $U$ in a polygon $P$ to the problem of finding the geodesic center of a linear number of ``essential'' half-polygons $\cal H$, which is solved in Section~\ref{section:half-polygon-center}.
By Observation~\ref{obs:vis-to-half-polygon} (and see Figure~\ref{fig:figure_with_visibility_center_paths})
the visibility center of $U$
is the geodesic center of
the set of $O(mn)$ half-polygons $H(u,r)$ where $u \in U$, $r$ is a reflex vertex of $P$ that sees $u$, and $H(u,r)$ is the half-polygon containing $u$ and bounded by the chord that extends
$\overrightarrow{ur}$ from $r$ until it hits $\partial P$ at a point $t$.
Note that finding $t$ is a ray shooting problem and costs $O(\log n)$ time after an $O(n)$ time preprocessing step~\cite{hershberger1995pedestrian}.
However, this set of half-polygons is too large.
We will find a set $\cal H$ of $O(n)$ ``essential'' half-polygons that suffice, i.e., such that
the visibility center of $U$ is the geodesic center of the half polygons of $\cal H$.
In fact, we give two possible sets of essential half-polygons, ${\cal H}_{\rm reflex}$ and ${\cal H}_{\rm hull}$, where the latter set can be found more efficiently. \changed{Although the bottleneck is still the algorithm for geodesic center of half-polygons, it seems worthwhile to optimize the reduction.}
We first observe that any half-polygon that contains another one is redundant.
For example, in Figure~\ref{fig:figure_with_visibility_center_paths},
$H(u_4,r_4)$ is redundant because it contains $H(u_5,r_4)$.
At each reflex vertex $r$ of $P$, there are at most two minimal half-polygons $H(u,r)$.
Define ${\cal H}_{\rm reflex}$ to be this set of minimal half-polygons. Note that ${\cal H}_{\rm reflex}$ has size $O(n_r)$ where $n_r$ is the number of reflex vertices of $P$.
Observe that for the case of finding the visibility center of
\emph{all} points of $P$, ${\cal H}_{\rm reflex}$ consists of the half-polygons $H(v,r)$ where $(v,r)$ is an edge of $P$, so ${\cal H}_{\rm reflex}$ can be found in time $O(n + n_r\log n)$.
For a point set $U$,
the set ${\cal H}_{\rm reflex}$ was also used by
Ke and O'Rourke~\cite{ke1989computing} in their algorithm
to compute the kernel of point set $U$ in polygon $P$. (Recall from the Introduction that the kernel of $U$ is the set of points in $P$ that see all points of $U$.)
They gave a sweep line algorithm
(``Algorithm 2'') to find
${\cal H}_{\rm reflex}$ in time $O((n+m) \log (n+m))$.
To summarize:
\begin{proposition}
The geodesic center of ${\cal H}_{\rm reflex}$ is the visibility center of $U$.
Furthermore, ${\cal H}_{\rm reflex}$ can be found in time $O((n+m) \log (n+m))$.
\end{proposition}
In the remainder of this section
we present a second approach using ${\cal H}_{\rm hull}$ that eliminates the $O(n \log n)$ term.
\newcomment{This does not change the runtime to find the visibility center, but it means that improving the algorithm to find the geodesic center of half-polygons will automatically improve the visibility center algorithm.}
The idea is that ${\cal H}_{\rm reflex}$ is wasteful in that a single point $u \in U$ can give rise to $n_r$ half-polygons.
Note that we really only need three half-polygons in an essential set, though the trouble is to find them!
\newcomment{We first eliminate the case where the kernel of $U$ is non-empty (i.e., $r_V=0$) by running the $O(n + m \log (n+m))$ time kernel-finding algorithm of Ke and O'Rourke~\cite{ke1989computing}.}
Next we find ${\cal H}_{\rm hull}$ in two steps. First make a subset ${\cal H}_0$ as follows.
Construct $R$, the geodesic convex hull of $U$ in $P$ in time $O(n + m \log(m+n))$~\cite{guibas1989optimal,toussaint1989computing}.
For each edge $(u,r)$ of $R$ where $u \in U$ and $r$ is a reflex vertex of $P$, put $H(u,r)$ into ${\cal H}_0$.
Note that ${\cal H}_0$ has size $O(\min\{n_r,m\})$ so ray shooting to find the endpoints of the chords $H(u,r)$ takes time $O(n + \min\{n_r,m\} \log n)$.
Unfortunately, as shown in Figure~\ref{fig:first_phase_relCH}, ${\cal H}_0$
can miss an essential half-polygon.
Next, construct a geodesic center $c_0$ of ${\cal H}_0$ using the algorithm of Section~\ref{section:half-polygon-center}.
\newcomment{(Note that the geodesic center can be non-unique and in such cases $c_0$ denotes any one point from the set of geodesic centers.)}
Then repeat the above step for $U \cup \{c_0\}$,
\newcomment{more precisely,}
construct $R'$, the geodesic convex hull of $U \cup \{c_0\}$ in $P$ and
for each edge $(u,r)$ of $R'$ where $u \in U$ and $r$ is a reflex vertex of $P$, add ${H}(u,r)$ to ${\cal H}_0$.
This defines ${\cal H}_{\rm hull}$.
Again, ${\cal H}_{\rm hull}$ has size
$O(\min\{n_r,m\})$ and ray shooting costs $O(n + \min\{n_r,m\} \log n)$.
\begin{figure}
\caption{
The geodesic convex hull of $U=\{u_1, \ldots, u_5\}
\label{fig:first_phase_relCH}
\end{figure}
\begin{theorem}
\label{thm:essential-half-polygons}
\newcomment{Suppose the kernel of $U$ is empty. Then the}
geodesic center of ${\cal H}_{\rm hull}$ is the visibility center
of $U$.
Furthermore ${\cal H}_{\rm hull}$ can be found in time $O(n + m \log (n+m))$
plus the time to find the geodesic center of $O(\min\{n_r,m\})$ half-polygons.
\end{theorem}
\begin{proof}The run-time was analyzed above.
Consider the visibility center $c_V$.
\newcomment{By assumption,
$r_V > 0$.}
We consider the half-polygons $H(u,r) \in {\cal H}_{\rm reflex}$ such that
$r_V = d(c_V, H(u,r))$.
By Claim~\ref{claim:2-or-3-vectors} either there
are three of these half-polygons, \newcomment{$H_1$, $H_2$ and $H_3$,} that uniquely determine $c_V$, or there are two, \newcomment{$H_1$ and $H_2$,} that determine $c_V$.
Then $c_V$ is the geodesic center of $H_i$
$i=1,2,3$ or $i=1,2$ depending on which case we are in.
\newcomment{Let $H_i = H(u_i, r_i)$.}
If all the $H_i$'s are in ${\cal H}_0$, we are done.
We will show that at least two are in ${\cal H}_0$ and the third one (if it exists) is ``caught'' by $c_0$. See Figure~\ref{fig:first_phase_relCH}.
Let $h_i$ be the chord defining $H_i$ and let $\overline{H}_i$ be the other half-polygon determined by $h_i$.
\begin{claim}
\label{claim:H0-condition}
If $U$ contains a point in $\overline{H}_i$ then $(u_i,r_i)$ is an edge of $R$ so $H_i \in {\cal H}_0$.
\end{claim}
\begin{proof}
Let $u$ be a point in $\overline{H}_i$. Observe that
$\pi(u_i,u)$ contains the segment $u_i r_i$.
Thus $r_i$ is a vertex of $R$.
Furthermore $u_i r_i$ is an edge of $R$. (Note that $H_i$ is extreme at $r_i$ since we picked it from ${\cal H}_{\rm reflex}$.)
Thus $H_i$ is in ${\cal H}_0$.
\end{proof}
\begin{claim}
\label{claim:at_least_two}
At least two of the ${H}_i$'s lie in ${\cal H}_0$.
\end{claim}
\begin{proof}
\newcomment{
First observe that if two of the half-polygons are disjoint, say $H_i$ and $H_j$, then they lie in ${\cal H}_0$, because
$u_i \in H_i$ implies $u_i \in \overline{H}_j$ so by Claim~\ref{claim:H0-condition}, $H_i \in {\cal H}_0$, and
symmetrically, $H_j \in {\cal H}_0$.}
\newcomment{We separate the proof into cases depending on the number of $H_i$'s.
If there are two then they must be
disjoint otherwise a point in their intersection would be a visibility center with visibility radius $r_V=0$.
Then by the above observation, they are both in ${\cal H}_0$}.
It remains to consider the case of three half-polygons.
\newcomment{If two are disjoint, we are done,
so suppose each pair $H_i, H_j$ intersects.} Then the three
chords $h_i$
form a triangle. Furthermore, since $\bigcap {\overline{H}_i}$ is non-empty (it contains $c_V$), the inside of the triangle is $ \bigcap {\overline{H}_i}$.
Now suppose $H_1 \notin {\cal H}_0$.
Then by Claim~\ref{claim:H0-condition}, $u_2, u_3 \in H_1$.
This implies (see Figure~\ref{fig:first_phase_relCH}) that $u_2 \in \overline{H}_3$ and $u_3 \in \overline{H}_2$, so by Claim~\ref{claim:H0-condition}, $H_2$ and $H_3$ are in ${\cal H}_0$.
\end{proof}
\newcomment{We now complete the proof of the theorem.
We only need to consider the case of three $H_i$'s, where one of them, say $H_1$, is not in ${\cal H}_0$.
}
Our goal is to show that
$c_0$, the geodesic center of ${\cal H}_0$, lies in $\overline{H}_1$ and thus $H_1$ is in ${\cal H}_{\rm hull}$.
Let
$X = \{x \in P : d(x,{H}_2) \le r_V {\rm \ and\ } d(x,{H}_3) \le r_V \}$. Observe that $c_0 \in X$ (because the radius is non-increasing as we eliminate half-polygons). Now, $c_V$ is the unique point within distance $r_V$ of the half-polygons ${H}_1$, ${H}_2$ and ${H}_3$. If $c_0 \in H_1$, then $c_0$'s distance to ${H}_1$ would be 0,
\newcomment{which contradicts the uniqueness property of $c_V$.}
Thus $c_0 \in \overline{H}_1$.
By the same reasoning as in Claim~\ref{claim:H0-condition}, this implies that $u_1 r_1$ is an edge of $R'$, the geodesic convex hull of $U \cup \{c_0\}$. Thus
$H_1$ is in ${\cal H}_{\rm hull}$ by definition of ${\cal H}_{\rm hull}$.
\end{proof}
\section{The Geodesic Center of Half-Polygons}\label{section:half-polygon-center}
In this section, we
give an algorithm to find
the geodesic center of a set $\mathcal{H}$ of $k$ half-polygons inside an $n$-vertex polygon $P$
\changed{with run time $O((n+k) \log (n+k))$}.
\fchanged{(Note that although we say ``the'' geodesic center,
it need not be unique, see Claim~\ref{claim:2-or-3-vectors}.)}
We preprocess by sorting the half-polygons in cyclic order of their first endpoints around $\partial P$ in time $O(k \log k)$.
\changed{We assume that no half-polygon contains another---such irrelevant non-minimal half-polygons can be detected from the sorted order and discarded.}
\fchanged{We also make the general position assumption that no point in $P$
has equal non-zero distances
to more than
a constant number of
half-polygons of $\cal H$.}
We follow the approach that Pollack et al.~\cite{pollack_sharir} used to find the geodesic center of the vertices of a polygon. Many steps of their algorithm rely, in turn, on search algorithms of Megiddo's~\cite{megiddo_linear}.
The main ingredient of the algorithm
is a linear time \defn{chord oracle} that, given a chord $K=ab$ of the polygon,
finds the \defn{relative geodesic center},
$c_K$ (the optimum center point restricted to points on the chord),
and tells us which side of the chord contains the center.
We must completely redo the chord oracle
in order to handle paths to half-polygons instead of vertices, but the main steps are the same.
Our chord oracle runs in time $O(n+k)$.
The chord oracle of Pollack et al.~was
used as a black box in subsequent faster algorithms~\cite{linear_time_geodesic}, so we imagine that our version will be an ingredient in any faster algorithm for the geodesic center of half-polygons.
Using the chord oracle, we again follow the approach of Pollack et al.~to find the geodesic center.
The total run time is $O((n+k) \log (n+k))$.
We
give a road-map
for the remainder of this section, listing the main steps, \newcomment{which are the same as those of Pollack et al.,}
and
highlighting the parts that
\newcomment{we must rework.}
\noindent{\bf \S~\ref{section:chord_oracle} A Linear Time Chord Oracle}
\begin{enumerate}
\item
Test a candidate center point. \changed{Given the relative geodesic center $c_K$ on chord $K = ab$, is the geodesic center to the left or right of chord $K$?
This test reduces the chord oracle to finding the relative geodesic center, which is done via the following steps.}
\item Find shortest paths from $a$ and from $b$ to all half-polygons.
The details of this step are novel, because we need shortest paths to half-polygons rather than vertices.
\item Find a linear number of simple functions defined on $K$ whose upper envelope is the geodesic radius function.
We must redo this from the ground up.
\item Find the relative center on $K$ (the point that minimizes the geodesic radius function) using Megiddo's technique.
\end{enumerate}
\noindent
{\bf \S~\ref{section:center_using_oracle} Finding the Geodesic Center of Half-Polygons}
\begin{enumerate}
\item Use the chord oracle to find
a region of $P$ that contains the center and such that for any half-polygon $H \in \cal H$, all geodesic paths from the region to $H$ are combinatorially the same.
We give a more modern version of this step using epsilon nets.
\item Solve the resulting Euclidean
\changed{problem of finding}
a smallest disk that contains given disks and
\newestchanged{intersects given half-planes.}
This is new because of the condition about intersecting
\newestchanged{half-planes}.
\end{enumerate}
\subsection{A Linear Time Chord Oracle}
\label{section:chord_oracle}
In this section we give a linear time chord oracle.
Given a chord $K=ab$ the chord oracle tells us whether the geodesic center of $\cal H$
lies to the left, to the right, or on the chord $K$.
It does this by first finding the relative geodesic center
$c_K = \argmin \{r(x,{\cal H}): x \in K\}$, together with the half-polygons
that are farthest from $c_K$
\changed{and the first segments of the shortest paths from $c_K$ to those farthest half-polygons.}
From this information,
we can identify which side of $K$ contains the geodesic center $c$ in the same way as Pollack
et al.~by testing the vectors of the first segments of the shortest paths from $c_K$ to its furthest half-polygons. This test is described in Subsection~\ref{sec:testing_center}.
The chord oracle thus reduces to the problem of finding the relative geodesic center and its farthest half-polygons.
The main idea here is to capture the geodesic radius function along the chord (i.e., the function $r(x,{\cal H})$ for $x \in K$) as the upper envelope of a linear number of \changed{easy-to-compute convex} functions defined on overlapping subintervals of $K$.
In order to find the
functions (Section~\ref{section:functions_to_capture}) we first
compute shortest paths from $a$ and from $b$ to all the half-polygons (Section~\ref{section:shortest_path_tree_half_polygons}). Finally we apply Megiddo's techniques (Section~\ref{sec:find-rel-center}) to find the point $c_K$ on $K$ that minimizes the geodesic radius function.
\subsubsection{Testing a Candidate Center Point}
\label{sec:testing_center}
\changed{In this section we show how to test in constant time
whether a candidate point is a geodesic center, or relative center, and if not,
in which direction the center lies.
The basic idea is that a local optimum is a global optimum, so a local test suffices.
In more detail, the input is a point $x$ on a chord $K=ab$ together with
\fchanged{its geodesic radius $r(x, {\cal H})$ and}
the first segments of the shortest paths from $x$ to its farthest half-polygons. The goal is to test in constant time:
(1) whether $x$ is a relative geodesic center of $K$, and if not, which direction to go on $K$ to reach a relative center; and (2) if $x$ is a relative geodesic center,
whether $x$ is a geodesic center of $P$, and if not, which side of $K$ contains a geodesic center of $P$.
These tests are illustrated in Figure~\ref{fig:master_center_conditions}.}
\fchanged{Note that if $r(x, {\cal H})$ is zero, then
$x$ is a geodesic center and no further work is required.}
\begin{figure}
\caption{
Points $x_i$ on chord $K=ab$ with directions of paths to farthest half-polygons in dashed blue,
wedge $W_{\alpha}$.}
\label{fig:master_center_conditions}
\end{figure}
\changed{
The tests are accomplished via the following lemma,
which is analogous to Lemmas 2 and 3 of Pollack et al.~\cite{pollack_sharir}.
}
\begin{lemma}
\label{lemma:test_for_global_center}
Let $x$ be a point on chord $K$, and
let $V$ be the vectors of the first segments of the shortest paths from $x$ to its farthest half-polygons
\newestchanged{$\cal F$}.
\revised{Let $\alpha$ be the smallest angle of a wedge $W_\alpha$ with apex $x$ that contains all the vectors of $V$ and such that $W_\alpha$, restricted to a small neighbourhood of $x$, is contained in $P$.}
\begin{enumerate}
\item {\bf Location of the relative center.}
Let $L$ be the line through $x$ perpendicular to $K$.
If one of the open half-planes determined by $L$ contains $W_\alpha$,
then $x$ is not a relative center, and all relative centers lie on that side of $L$.
Otherwise, $x$ is a relative center.
\item {\bf Location of the center.}
Now suppose that $x$ is a relative geodesic center.
\revised{If $\alpha < \pi$ then $x$ is not a geodesic center, and all geodesic centers lie on the side of $K$ that contains the
\newestchanged{ray bisecting the angle of $W_\alpha$.}
If $\alpha > \pi$, then $x$ is the unique geodesic center, and furthermore, $x$ is determined by two or three vectors of $V$---the two that bound $W_\alpha$, plus one inside $W_\alpha$ unless $x$ is on the boundary of $P$. Finally, if $\alpha = \pi$ then $x$ is a geodesic center (though not necessarily unique), and furthermore, $x$ is determined by the two vectors of $V$ that bound $W_\alpha$. }
\end{enumerate}
\end{lemma}
\newestchanged{
\begin{proof}
We prove the two parts separately.
\begin{enumerate}
\item
Suppose $W_\alpha$ lies in an open half-plane determined by $L$ (say, the left side of $L$).
Then moving $x$ an epsilon distance left along $K$ gives a point with smaller geodesic radius since the distance to any half-polygon in $\cal F$ decreases, and no other half-polygon becomes a farthest half-polygon.
Therefore $x$ is not a relative center. Furthermore, because the geodesic radius function $r(x,{\cal H})$ is convex on $K$ (by Corollary~\ref{cor:geodesic-convexity}), the relative center lies to the left on $K$.
Next suppose $W_\alpha$ does not lie in an open half-plane of $L$. Then any epsilon movement of $x$ along $K$ increases the distance to some half-polygon in $\cal F$, so $x$ is a local minimum on $K$ and therefore $x$ is the relative center (again using the fact that the geodesic radius function is convex on $K$).
\item Suppose $\alpha < \pi$. Let $\beta$ be the ray that bisects $W_\alpha$. (We use $\beta$ to avoid confusion with the endpoint $b$ of $K=ab$.) Moving $x$ an epsilon distance along $\beta$ gives a point $x'$ with smaller geodesic radius. Therefore $x$ is not the center.
Next we prove that the center $c$ lies on the side of $K$ that contains $\beta$.
Suppose not. Consider the geodesic $\pi(c,x')$. By Corollary~\ref{cor:geodesic-convexity}, the geodesic radius function is convex on $\pi(c,x')$. But then the point where the geodesic crosses $K$ has a smaller geodesic radius than $x$, a contradiction to $x$ being the relative center.
Next suppose $\alpha > \pi$. Let $v_1$ and $v_2$ be the two vectors that bound $W_\alpha$.
If $x$ is on the boundary of $P$ it must be at a reflex vertex of $P$.
Otherwise,
since no smaller wedge contains $V$, there must be a third vector $v_3$ in $V$, making an angle $< \pi$ with each of $v_1$ and $v_2$. In either case ($x$ on the boundary of $P$ or not)
any epsilon movement of $x$ in $P$ increases the distance to the half-polygon corresponding to one of the $v_i$'s.
Thus $x$ is a local minimum in $P$ and (by geodesic convexity of the radius function) $x$ is the center. Furthermore, $x$ is determined by $v_1$ and $v_2$---and $v_3$ if $x$ is interior to $P$.
Finally, suppose $\alpha = \pi$. As in the previous case, $x$ is a geodesic center
and is determined by the two vectors $v_1$ and $v_2$ of $V$ that bound $W_\alpha$. Furthermore, $x$ is unique unless the two corresponding half-polygons
have parallel defining chords, and $v_1$ and $v_2$ reach those chords at right angles. In this case the set of geodesic centers consists of a line segment through $x$ parallel to the chords.
\end{enumerate}
\end{proof}
}
\subsubsection{Shortest Paths to Half-Polygons}
\label{section:shortest_path_tree_half_polygons}
In this section we give a linear time algorithm to find the shortest path tree from point $a$ on the polygon boundary to all the half-polygons $\cal H$. Recall that each half-polygon is specified by an ordered pair of endpoints on $\partial P$, and the half-polygons are sorted in clockwise cyclic order by their first endpoints. From this, we identify the half-polygons that contain $a$, and we discard them---their distance from $a$ is 0.
Let $H_1, \ldots, H_{k'}$ be the remaining half-polygons where $H_i$ is bounded by endpoints $p_i q_i$, and the $H_i$'s are sorted by $p_i$, starting at $a$.
The idea is to first find the shortest path map $T_a$ from $a$ to the set consisting of the polygon vertices and the points $p_i$ and $q_i$. Recall that the shortest path map is an augmentation of the shortest path tree that partitions the polygon into triangular regions in which the shortest path from $a$ is combinatorially the same (see Figure~\ref{fig:shortest-paths}).
The shortest path map can be found in linear time~\cite{SPT_linear}.
Note that $T_a$ is embedded in the plane (none of its edges cross)
and the ordering of its leaves matches their ordering on $\partial P$.
Our algorithm will traverse $T_a$ in depth-first order, and visit the triangular regions along the way.
Our plan is to augment $T_a$ to a shortest path tree $\bar T_a$ that includes the shortest paths from $a$ to each half-polygon $H_i$. Note that $\bar T_a$ is again an embedded ordered tree.
We can find $\pi(a, H_i)$ by examining the regions of the shortest path map intersected by $p_i q_i$. These lie in
the \emph{funnel}
between the shortest paths
$\pi(a,p_i)$ and $\pi(a, q_i)$.
Note that edges of the shortest path map $T_a$ may cross the chord $p_i q_i$. Also,
the funnels for different half-polygons may overlap. The key to making the search efficient is the following lemma:
\begin{lemma}
The ordering $H_1, H_2, \ldots, H_{k'}$ matches the ordering of the paths $\pi(a,H_i)$ in the tree $\bar T_a$.
\end{lemma}
\begin{proof}
Consider two half-polygons $H_i = p_iq_i$ and $H_j = p_jq_j$, with $i < j$. We prove that $\pi(a,H_i)$ comes before $\pi(a,H_j)$ in $\bar T_a$.
If $H_i$ and $H_j$ are disjoint, the result is immediate since the corresponding funnels do not overlap. Otherwise (because neither half-polygon is contained in the other) $p_i q_i$ and $p_j q_j$ must intersect, say at point $x$. See Figure~\ref{fig:shortest-paths}.
Let $t_i$ and $t_j$ be the terminal points of the paths $\pi(a,H_i)$ and $\pi(a,H_j)$, respectively.
If $t_i$ lies in $p_ix$ and $t_j$ lies in $xq_j$ then the result follows since $t_i$ and $t_j$ lie in order on the boundary of the truncated polygon formed by removing $H_i$ and $H_j$.
So suppose that $t_j$ lies in $p_jx$ (the other case is symmetric). Then $\pi(a,t_j)$ crosses $p_i q_i$ at a point $z$ in $p_i x$.
From $z$ to \newcomment{$t_j$} the path $\pi(a,t_j)$ lies inside the
\newcomment{
cone
with apex $x$ bounded by the rays from $x$ through $z$ and from $x$ through $t_j$.
Within that cone, the path only turns left.}
The angle $\alpha_j$ at $t_j$ is $\ge 90^\circ$ (it may be $> 90^\circ$ if $t_j = p_j$), which implies that the angle $\alpha_i$ at $z$ is $> 90^\circ$. Therefore $t_i$ lies to the left of $z$, as required.
\end{proof}
\begin{figure}
\caption{The shortest path map $T_a$ (thin blue) and the augmentation (dashed red) to include shortest paths to the
two half-polygons bounded by chords $p_i q_i$ and $p_j q_j$ (thick red).
}
\label{fig:shortest-paths}
\end{figure}
Based on the Lemma, the algorithm
traverses the regions of the shortest path map $T_a$ in depth first search order, and traverses the half-polygons $H_i$ in order $i=1,2, \ldots, k'$.
It is easy to test if one region contains the shortest path to $H_i$
(either to $p_i$, or to $q_i$, or reaching an internal point of $p_i q_i$ at a right angle);
if it does, we increment $i$, and otherwise we proceed to the next region.
The total time is $O(n+k)$.
\subsubsection{Functions to Capture the Distance to Farthest Half-Polygons}
\label{section:functions_to_capture}
\changed{In this section we capture the geodesic radius function for points on a chord $K=ab$ as the upper envelope of functions defined on overlapping subintervals of $K$.
Besides extending the method of Pollack et al.~\cite{pollack_sharir} to deal with half-polygons (rather than vertices),
our aim is to give a clearer and easier-to-verify presentation.}
In more detail, we
give a linear time algorithm to find
a linear number of
\changed{easy-to-compute convex} functions defined on the chord $K=ab$ whose upper envelope is the geodesic radius function $r(x,{\cal H})$ for $x \in K$.
Specifically, a \defn{coarse cover} is
a set of
\changed{triples $(I,f,H)$}
where:
\begin{enumerate}
\squeezelist
\item
$I$ is a subinterval of $K$, $f$ is a function defined on domain $I$,
\fchanged{and $H \in {\cal H}$.}
\item
$f(x) = d(x,H)$
\fchanged{for all $x \in I$}, and
$f$
\newestchanged{
has one of the following forms:
\begin{itemize}
\squeezelist
\item $f(x) = 0$.
\item $f(x) = d_2(x,v) + \kappa$ where $d_2$ is Euclidean distance, $\kappa$ is a constant,
$v$ is
a vertex of $P$, and
the segment $xv$ is the first segment of the path $\pi(x,H)$.
\item $f(x) = d_2(x, {\bar h})$, where $d_2$ is Euclidean distance, $\bar h$ is the line through the defining chord of $H$, and the path $\pi(x,H)$ is the straight line segment from $x$ to $\bar h$ (meeting $\bar h$ at right angles).
\end{itemize}
}
\item
\newestchanged{For any point $x \in K$ and any half-polygon $H$ that is farthest from $x$, there is a triple $(I,f,H)$ in the coarse cover with $x \in I$---with the exception that if two triples have identical $I$ and
\attention{identical $f=d_2(x,v) + \kappa$},
then we may eliminate one of them.
In particular, this implies that the upper envelope of the functions is the geodesic radius, i.e.,
for any $x \in K$, the
maximum of $f(x)$ over intervals $I$ containing $x$ is equal to $r(x,{\cal H})$.
}
\end{enumerate}
For intuition, see Figure~\ref{fig:intervals-functions},
which shows several intervals and their associated functions.
\fchanged{We will find the elements of the coarse cover separately for the two pieces of the polygon on each side of $K$, and then take the union of the two sets.}
In this section we visualize $K$ as horizontal and deal with the upper piece of the polygon.
\begin{figure}
\caption{
An illustration of functions and intervals. For $x$ in interval $I_1$, $d(x,H_1) = d_2(x,u) + \kappa_1$. For $x$ in $I_2$, $d(x,H_1) = d_2(x,v) + \kappa_2$. For $x$ in $I_3$, $d(x,H_2) = d_2(x,H_2)$.
}
\label{fig:intervals-functions}
\end{figure}
\noindent{\bf A large coarse cover.}
We first describe a
coarse cover of $O(nk)$ triples
and then show how to
reduce to linear size.
Consider a half-polygon $H$ with defining chord $h$. Suppose first that $K$ does not intersect $H$, i.e., $a$ and $b$ lie outside $H$.
All shortest paths from points on $K$ to $H$ lie in the \defn{funnel} $Y(H)$ which is a subpolygon bounded by the chord $K$, the path $\pi(a,H)$ (which is a path in ${\bar T}_a$), the path $\pi(b,H)$ (in ${\bar T}_b$), and the segment along $h$ between the terminals of those two paths.
See Figure~\ref{fig:funnels}.
If the paths $\pi(a,H)$ and $\pi(b,H)$ are disjoint then they are both reflex paths and all vertices on the paths are visible from $K$ (see Figure~\ref{fig:funnels}(a)). Otherwise, the paths are reflex and visible from $K$ until they reach the first common vertex $u$, and then they have a common subpath from $u$ to $H$ that is not visible from $K$ (see Figure~\ref{fig:funnels}(b)).
\fchanged{Before describing how to obtain triples of the coarse cover from $Y(H)$, we first consider the case when $K$ intersects $H$, i.e., $a$ or $b$ lies inside $H$.
If both $a$ and $b$ are inside $H$, then
we add the triple $(I=ab,f=0,H)$ to the coarse cover.
If $b$ is outside but $a$ is inside (the other case is symmetric), then $h$ and $K$ intersect at a point $p$.
If $\pi(b,H)$ reaches $H$ below $K$, then $H$ will be handled when we deal with the piece of the polygon below $K$.
Otherwise (see Figure~\ref{fig:funnels}(c)) we
add the triple $(I=ap, f=0, H)$ to the coarse cover, and we deal with the $pb$ portion of the chord as in the general case above but modifying the funnel $Y(H)$ so that the path $\pi(a,H)$ is replaced by $p$.
}
\begin{figure}
\caption{The funnel $Y(H)$ (shaded) and its shortest path map which is bounded by edge extensions (dashed segments). (a)
The case of disjoint paths $\pi(a,H)$ and $\pi(b,H)$.
For $x \in I_1$, $d(x,H) = d_2(x,u) + \kappa$. For $x \in I_2$, $d(x,H) = d_2(x,{\bar h})$.}
\label{fig:funnels}
\end{figure}
Each funnel $Y(H)$ can be partitioned into its \defn{shortest path map
$M(H)$}
where two points are in the same region of
$M(H)$
if their paths to $H$ are combinatorially the same.
(We consider a path that arrives at an endpoint of $h$ and a path that arrives at an interior point of $h$ to be combinatorially different.)
Observe that boundaries of the regions of $M(H)$ are extensions of tree edges plus lines perpendicular to $h$.
See Figure~\ref{fig:funnels}.
The regions of
$M(H)$
are triangles, plus possibly one trapezoid.
A triangle region has
a base segment $I \subseteq K$, and
an apex vertex $u$ on $\pi(a,H)$ [or $\pi(b,H)$]; the shortest path from any point $x \in I$ to $H$ consists of the line segment $xu$ plus the path in ${\bar T}_a$ [or ${\bar T}_b$] from $u$ to $H$, so $d(x,H) = d_2(x,u) + \kappa$ where $\kappa$ is the tree distance from $u$ to the leaf corresponding to $H$.
A trapezoid region has
a base segment $I \subseteq K$, and
two sides orthogonal to $h$; the shortest path from any point $x \in I$ to $H$ consists of the line segment orthogonal to $h$ from $x$ to $h$,
so $d(x,H) = d_2(x,{\bar h})$ where $\bar h$ is the line through $h$.
Thus each region of $M(H)$ gives rise to a triple $(I,f,H)$ satisfying properties (1) and (2) of a coarse cover.
\changed{We claim that the set of triples defined above, i.e.,
all the triples
defined from $Y(H)$ together with the special triples when $H$ intersects $K$, form a coarse cover. Properties (1) and (2) are satisfied, and property (3) is satisfied
because}
we have captured all shortest paths from $x$ to $H$ for all $x \in K$ and all half-polygons $H$.
Since each $Y(H)$ has size $O(n)$, this coarse cover has size $O(nk)$.
\noindent{\bf Intuition for a linear-size coarse cover.}
The secret to reducing the size of the coarse cover is to observe that if the funnels
\newestchanged{for some
half-polygons ${\cal H}' \subseteq {\cal H}$}
share an edge $uv$ of ${\bar T}_a$ with $u$ closer to the root, and both $u$ and $v$ visible from $K$, then their shortest path maps share the same triangle with apex $u$,
\newestchanged{base $I$},
and sides bounded by the
\changed{extension of the edge from $v$ to $u$ and the extension of the edge from $u$ to its parent in ${\bar T}_a$ (see Figure~\ref{fig:coarse-cover}(a)).}
\newestchanged{In this case, we claim that for this triangle, we only need a coarse cover element for one of the half-polygons in ${\cal H}'$, specifically, for one
half-polygon that has the maximum distance from $v$ in the tree ${\bar T}_a$. This is because only half-polygons farthest from $v$ matter, and furthermore,
we need not keep more than one half-polygon that has the maximum distance because the interval $I$ and the function $f(x) = d_2(x,u) + \kappa$ are the same.}
We first specify the coarse cover precisely and then prove correctness, which makes the above observation formal.
\noindent{\bf Definitions.}
Let ${\bar T}_a$ and ${\bar T}_b$ be directed from root to leaves.
For any node $v$ in $\bar T_a$
define \defn{$\ell_a(v)$} to
be the maximum length of a directed path in $\bar T_a$ from $v$ to a leaf node
representing a terminal point on some half-polygon, and define
\defn{$F_a(v)$} to be that farthest half-polygon
\newestchanged{(breaking ties arbitrarily)}.
Define functions \defn{$\ell_b$} and
\defn{$F_b$} similarly.
We can compute these functions in linear time in leaf-to-root order. In particular,
we compute $\ell_a(u)$ for the nodes $u$ of $\bar T_a$ as follows. Initialize $\ell_a(u)$ to 0 if $u$ represents a terminal point of a half-polygon chord, and to $-\infty$ otherwise. Then, in leaf-to-root order, update $\ell_a(u)$ to $\max \{\ell_a(u), \max \{|uv| + \ell_a(v) : v \text{ a child of } u \}\}$.
We can compute $\ell_b(u)$ similarly. The runtime is $O(n+k)$.
Define \defn{$p_a(u)$} and \defn{$p_b(u)$} to be the parents of node $u$ in $\bar T_a$ and $\bar T_b$, respectively.
As noted by
Pollack et al.~\cite{pollack_sharir},
a vertex $u$ is visible from some point on $K$ if and only if $p_a(u) \ne p_b(u)$.
Furthermore, we note that if $u$ is visible from some point on $K$, then extending the edge from $u$ through $p_a(u)$ reaches a point
\defn{$x_a(u)$}
on $K$ from which $u$ is visible.
Similarly, extending the edge from $u$ through $p_b(u)$ reaches a point
\defn{$x_b(u)$}
on $K$ from which $u$ is visible.
\changed{
In defining the shortest path map $M(H)$, we added boundary lines orthogonal to the defining chord $h$ at the terminals of the paths $\pi(a,H)$ and $\pi(b,H)$. If a path terminates at an internal point of $h$ then the last edge of the path is orthogonal to $h$, and the boundary line extends the last edge.
In order to avoid special cases, it will be convenient if all boundary lines are extensions of tree edges, i.e.,
to assume that even the paths that terminate at endpoints of $h$ end with a segment orthogonal to $h$. We add $0$-length segments to the trees
${\bar T}_a$ and ${\bar T}_b$ to make this true. The extension of such a $0$-length edge is orthogonal to $h$.
Note that it is possible that both $\pi(a,H)$ and $\pi(b,H)$ terminate at the same endpoint of $h$, in which case the added 0-length segment is common to both trees, so we regard the terminal point of the paths as \emph{not} visible from $K$.
See Figure~\ref{fig:coarse-cover}(c).
(The other endpoint of the 0-length segment may or may not be visible.)}
\noindent{\bf The coarse cover $\cal T$.}
Define \defn{$\cal T$} to have elements
of the following
\fchanged{four}
types. See Figure~\ref{fig:coarse-cover}.
\begin{enumerate}
\squeezelist
\setcounter{enumi}{-1}
\item
\fchanged{For each half-polygon $H$ that intersects $K$ there is a coarse cover element $(I,f,H)$ where $I = K \cap H$ and $f(x) = 0$.}
\item For each edge $(u,v)$ in ${\bar T}_a$ where $u = p_a(v)$,
\changed{$u \ne a$}, and $u$ and $v$ are both visible from $K$, there is an associated
{\bf $a$-side triangle} that has apex $u$ and base $I = [x_a(u), x_a(v)] \subseteq K$.
The associated coarse cover element is $(I,f,H)$ where $H = F_a(v)$ and
\attention{$f(x) = d_2(x,u) + |uv| + \ell_a(v)$.}
Define {\bf $b$-side triangles} and their associated coarse cover elements symmetrically.
\item For each edge $(u,v)$ that is common to ${\bar T}_a$ and ${\bar T}_b$ where $u$ is visible from $K$ and $v$ is not
(i.e., $u = p_a(v) = p_b(v)$) there is an associated
{\bf central triangle} that has apex $u$ and
base $I = [x_a(u), x_b(u)] \subseteq K$.
The associated coarse cover element is $(I,f,H)$ where $H = F_a(v)= F_b(v)$ and
\attention{$f(x) = d_2(x,u) + |uv| + \ell_a(v)$.}
\item For each half-polygon $H$ such that the terminal points \defn{$t(a,H)$} of $\pi(a,H)$ and \defn{$t(b,H)$} of $\pi(b,H)$ are distinct,
there is a {\bf central trapezoid}
with base $I \subseteq K$ bounded by the
two lines perpendicular to $h$ emanating from $t(a,H)$ and $t(b,H)$---these lines are the extensions of the (possibly $0$-length) last edges of the paths.
The associated coarse cover element is $(I,f,H)$ where $H$ is the given half-polygon and $f(x) = d_2(x,{\bar h})$ where $\bar h$ is the line through the defining chord of $H$.
\end{enumerate}
\begin{figure}
\caption{
Elements of the coarse cover: (a) $I$ is the base of an $a$-side triangle associated with edge $uv$; (b) $I$ is the base of a central triangle associated with edge $uv$;
(c) $I$ is the base of a central triangle associated with $0$-length edge $uv$;
(d) $I_1$ is the base of a central trapezoid associated with terminal points $v$ and $v'$ on $H$; $I_2$ is the base of an $a$-side triangle associated with $0$-length edge $uv$.
}
\label{fig:coarse-cover}
\end{figure}
Note that we include $0$-length edges in cases 1 and 2 above.
Altogether, $\cal T$ contains $O(n+k)$ triples---at most one associated with each edge of the trees, and at most
\fchanged{two}
associated with each half-polygon $H \in {\cal H}$.
\begin{lemma}\label{lemma:coarse_cover}
$\cal T$ is a coarse cover.
\end{lemma}
\begin{proof}
There are three properties for a coarse cover.
For each triple $(I,f,H)$ it is clear that $I$ is a subinterval of $K$ and $f$ is defined on $I$---this is property (1). For property (2),
\newestchanged{$f$ is defined to have one of the three forms.}
\changed{Furthermore, we claim that for each triple
$(I,f,H)$, and each $x \in I$,
$f(x) = d(x,H)$.
\fchanged{This is clear for Case 0. For the other three Cases,}
the triangle or trapezoid is part of the shortest path map $M(H)$, so the formula for $f(x)$ matches the distance $d(x,H)$.}
Finally, we must prove property (3).
\newestchanged{Let ${\cal T}^0$ be the initial large coarse cover defined above, consisting of the set of triples $(I,f,H)$ from Case 0 and
the union of all the
\newestchanged{triples arising from the}
shortest path maps $\{M(H): H \in {\cal H}\}$.
Then ${\cal T} \subseteq {\cal T}^0$, and
we must show that no triple of ${\cal T}^0$ that is omitted from $\cal T$ causes a violation of property (3).}
Any triple from the shortest path maps corresponds to a triangle that arises in Case 1 or 2 (i.e., with the same $u,v,I$), or to a trapezoid considered in Case 3.
No trapezoids are omitted in $\cal T$, so it suffices to consider Cases 1 and 2.
We first examine Case 1.
Consider an edge $(u,v)$ in ${\bar T}_a$ where $u = p_a(v)$ and $u$ and $v$ are both visible from $K$, and consider the interval $I = [x_a(u),x_a(v)]$.
Suppose that for some $f,H$, there is a triple $(I,f,H)$ that is
included in the triples from the shortest path maps, but omitted from $\cal T$. Then there is a directed path in ${\bar T}_a$ from $v$ to a leaf corresponding to $H$, and $f(x)$ is $d_2(x,u) + |uv| + \kappa$ where $\kappa$ is the length of the tree path from $v$ to the leaf corresponding to $H$.
But then $f(x) \le d_2(x,u) + |uv| + \ell_a(v)$, since $\ell_a(v)$ is the maximum distance from $v$ to a leaf
\newestchanged{corresponding to the farthest half-polygon $F_a(v)$. If the inequality is strict, then $H$ is not a farthest half-polygon from any $x \in I$, so property (3) is satisfied without the
triple $(I,f,H)$.
And if equality holds, then property (3) allows us to omit the triple $(I,f,H)$
since the triple $(I,f,F_a(v))$ has the same $I$ and $f$.}
The case of triples omitted in Case 2 is similar.
\end{proof}
\subsubsection{Finding the Relative Geodesic Center on a Chord}
\label{sec:find-rel-center}
The last step of the chord oracle is exactly the same as in Pollack et al.~\cite{pollack_sharir}.
Given a chord $K$ and the
\changed{coarse cover $\cal T$ from Section~\ref{section:functions_to_capture}---which provides a set of $O(n+k)$ functions whose upper envelope is the geodesic radius function on chord $K$---we want to find the relative center, $c_K$, that minimizes the geodesic radius function.}
Pollack et al.~use a technique of Megiddo's to do this in $O(n+k)$ time
\newestchanged{by recursively reducing to a smaller subinterval of $K$ while eliminating elements
of the coarse cover whose functions are strictly dominated by others.}
In brief, the idea is to pair up the functions,
define a set of at most 6 ``extended intersection points'' for each pair,
and test medians of those points in order to \changed{restrict the search to a subinterval of $K$ and} eliminate a constant fraction of the functions.
\newestchanged{
Testing median points is done via
the test from Section~\ref{sec:testing_center}
of whether the relative center is left/right of a query point $x$ on $K$.
This test depends on having
the first segments of the shortest paths from $x$ to its farthest half-polygons.
Observe that
the initial coarse cover from Section~\ref{section:functions_to_capture} captures these segments, and they are preserved throughout the recursion because only strictly dominated functions are eliminated.
}
We fill in a bit more detail.
In each round we have a
subinterval $K'$ of $K$ that contains $c_K$ and a subset ${\cal T}'$ of the coarse cover $\cal T$
\fchanged{such that any function omitted from ${\cal T}'$ is strictly dominated on interval $K'$ by a function of ${\cal T}'$}.
\newestchanged{We want to eliminate a constant fraction of ${\cal T}'$ in time $O(|{\cal T}'|)$.}
We pair up the functions of ${\cal T}'$.
Consider a pair of functions $f_1$ and $f_2$. Each function is defined on a subinterval of $K$ and we define it to be $- \infty$ outside its interval.
The upper envelope of $f_1$ and $f_2$ switches between $f_1$ and $f_2$ at \defn{extended intersection points} which include the points where $f_1$ and $f_2$
intersect \fchanged{(are equal)},
and also possibly the endpoints of their intervals.
\newestchanged{If a subinterval of $K$ does not contain an extended intersection point for $f_1$ and $f_2$, then}
one of $f_1, f_2$ is irrelevant because it is dominated by the other (or both are $- \infty$).
We know from Section~\ref{section:functions_to_capture} that each function
has the form \fchanged{$f(x) =0$} or $f(x) = d_2(x,s) + \kappa$ where $\kappa$ is a constant,
$d_2$ is Euclidean distance, and
$s$ is
a point or line.
This implies that there are at most two intersection points of the functions $f_1$ and $f_2$, and thus at most six extended intersection points. In fact, a closer examination shows that there are at most four extended intersection points.
Pollack et al.~show how to successively test three medians of extended intersections in order to reduce the interval $K'$ and eliminate a constant fraction of the functions of ${\cal T}'$.
\fchanged{The first median test reduces the domain to a subinterval containing half the extended intersections, so three successive median tests reduce the domain to a subinterval containing one eighth of the extended intersections. This implies that for at least half the pairs $f_1, f_2$, all four of their extended intersections lie outside the domain, and one of $f_1, f_2$ is dominated by the other and can be eliminated.}
This completes one round of their procedure,
\newestchanged{with a runtime of $O(|{\cal T}'|)$.}
When ${\cal T}'$ is reduced to constant size, the relative center $c_K$ can be found directly.
The total run time is then $O(n+k)$.
\subsection{Finding the Geodesic Center of Half-Polygons}\label{section:center_using_oracle}
In this section we show how to use the
$O(n+k)$ time chord oracle from Section~\ref{section:chord_oracle} to find the geodesic center of the $k$ half-polygons in $O((n+k) \log (n+k))$ time.
The basic structure of the algorithm is the same as that of Pollack et al.~\cite{pollack_sharir}.
In the first step we use the chord oracle to restrict
the search for the geodesic center
to a small region where the problem reduces to a Euclidean problem
\changed{of finding a minimum radius disk that intersects some half-planes and contains some disks}.
\changed{This step takes $O((n+k) \log (n+k))$ time.}
\newcomment{In the second step we solve the resulting Euclidean problem
\fchanged{in linear time}, which}
involves some
new ingredients to handle our case of half-polygons.
\subsubsection{Finding a Region that Contains the Geodesic Center}
\label{section:restrict}
\label{section:triangle}
Triangulate $P$ in linear time~\cite{chazelle1991triangulating}.
Choose a chord of the triangulation that splits the polygon into two subpolygons so that the number of triangles on each side is balanced \changed{(the dual of a triangulation is a tree of maximum degree 3, which has a balanced cut vertex).}
Run the chord oracle on this chord, and recurse in the appropriate subpolygon.
In $O(\log n)$ iterations, we narrow our search down to one triangle $T^*$ of the triangulation. This step takes $O((n + k) \log n)$ time.
Next, we refine $T^*$ to a
region
$R$ that contains the center and such that
$R$ is \defn{homogeneous}, meaning that
for any $H \in \cal H$ the shortest paths from points in $R$ to $H$ have the same combinatorial structure, i.e., the same sequence of polygon vertices along the path.
The idea is to subdivide $T^*$ by $O(n+k)$ lines so that each cell in the resulting line arrangement is homogeneous, and then to find the cell containing the center.
Construct the shortest path trees to $\cal H$ from each of the three corners of triangle $T^* = (a^*,b^*,c^*)$ using the algorithm of Section~\ref{section:shortest_path_tree_half_polygons}. For each edge $(u,v)$ of each tree, add the line through $uv$ if it intersects $T^*$. (In fact, we do not need all these lines---\changed{as in the construction of the coarse cover in Section~\ref{section:functions_to_capture}, it suffices to use tree edges $(u,v)$ such that $u$ is visible from an edge of $T^*$.)}
We add three more lines for each half-polygon $H \in {\cal H}$, specifically,
the chord $h$ that defines $H$,
and the two lines perpendicular to $h$ through the endpoints of $h$.
The result is a set $L$ of $O(n+k)$ lines that we obtain in time $O(n+k)$. It is easy to
see
that the resulting line arrangement has homogeneous regions.
All that remains is to find the cell of the arrangement that contains the geodesic center.
It is simpler to state the algorithm in terms of $\epsilon$-nets
instead
of the rather involved description of Megiddo's technique used by Pollack et al.~\cite{pollack_sharir}.
\changed{
For background on $\epsilon$-nets see the survey by
Mustafa and Varadarajan~\cite[Chapter 47]{toth2017handbook} or the book by Mustafa~\cite{mustafa2022sampling}.}
\changed{The
\newestchanged{high-level} idea is to define a range space with ground set $L$ and to find a constant-sized $\epsilon$-net in time $O(|L|)$.
Then the lines of the $\epsilon$-net divide our region into a
\newestchanged{constant}
number of subregions and we can find which subregion contains the geodesic center by applying the chord oracle $O(1)$ times. By the property of $\epsilon$-nets the subregion is intersected by only a constant fraction of the lines of $L$,
so repeating this step for $O(\log (n+k))$ times, we arrive at a region $R$ with the required properties.}
\changed{We fill in a bit more detail.
The range space has ground set $L$. To define the ranges, let}
$\mathcal{T}$ be the
\newestchanged{(infinite) set of all
triangles contained in} $T^*$.
\revised{For $t \in \mathcal{T}$, let $\Delta_t = \{ \ell \in L : \ell \ {\rm intersects}\ t\}$.
Let $\Delta = \{ \Delta_t: t \in {\cal T} \}$.}
\changed{Then the range space is $S = (L,\Delta)$.
To show that constant-sized $\epsilon$-nets exist, we must show that $S$ has constant VC-dimension, or constant shattering dimension.
We argue
that the shattering dimension is 6, i.e., that for any subset $L'$ of $L$ of size $m$ the number of ranges is $O(m^6)$.
The lines intersecting a triangle $t$ are the same as the lines intersecting the convex hull of the three cells of the arrangement of $L'$ that contain the endpoints of $t$. There are $O(m^2)$ cells in the arrangement and we choose three of them, giving the bound of $O(m^6)$ possible ranges.}
Thus, a constant sized $\epsilon$-net of size $O(\frac{1}{\epsilon} \log (\frac{1}{\epsilon}))$ exists for our range space.
\changed{In order to construct an $\epsilon$-net in deterministic $O(|L|)$ time, we need a subspace oracle that, given a subset $L'$ of $L$ of size $m$, computes the set of ranges of $L'$ in time proportional to the output size,
$O(m^{6+1})$. Begin by finding, for each line in $L'$, which cells of the arrangement lie to each side of the line. Then, for every choice of three cells (there are $O(m^6)$ choices), the lines intersecting their convex hull can be listed in $O(m)$ time.}
\changed{For the algorithm, we choose $\epsilon = \frac{1}{2}$, and construct an $\epsilon$-net $N$.}
\changed{Triangulate each cell of the arrangement of $N$---this is a constant time operation since the arrangement has constant complexity.}
We can locate the triangle $T'$ of this triangulated arrangement that contains the visibility center in $O(n+k)$ time by running the chord oracle a constant number of times.
By the $\epsilon$-net property, no more than $\epsilon \cdot |L|$ lines of $L$ intersect the interior of $T'$.
Thus, in $O(|L|)$ time, we have halved the number of lines going through our domain of interest (the region that contains the geodesic center).
Repeating the same sequence of steps $O(\log |L|) = O(\log (n+k))$ times, we will arrive at a triangle containing a constant number of these lines.
At this point, a brute force method suffices to locate a
region $R$ that satisfies the properties stated in the beginning of this section.
In each iteration of the process, we
apply the $O(n+k)$ chord oracle a constant number of times and thus the
total runtime for this step is
$O((n+k) \log (n+k))$.
\subsubsection{Solving an Unconstrained Problem}\label{section:unconstrained_problem}
At this point,
\changed{we have a
homogeneous polygonal region $R$ that}
contains a geodesic center of the set $\mathcal{H}$ of half-polygons.
Our goal is to find
the point $x \in R$ that minimizes the maximum over $H \in {\cal H}$ of $d(x,H)$.
\newestchanged{We give a linear time algorithm (in this final step there is no need for an extra logarithmic factor).}
\changed{We show that the problem reduces to one in the Euclidean plane, i.e., the polygon no longer matters.}
Pick an \changed{arbitrary} point $p$ in $R$ and find the shortest path tree from $p$ to all half-polygons (this takes linear time).
\changed{If $p$ has distance 0 to half-polygon $H$, then the same is true for all points in $R$, so $H$ is irrelevant and can be discarded.
If $\pi(p,H)$ \newestchanged{consists of a single line segment that} reaches an internal point of the chord defining $H$ (we denote these half-polygons by ${\cal H}_1$), then
$d(x,H) = d_2(x, {\bar H})$ for all $x \in R$, where $\bar H$ is the half-plane defined by $H$.
And if the first segment of $\pi(x,H)$
reaches
a vertex $u$ (we denote these half-polygons by ${\cal H}_2$), then $d(x,H) = d_2(x,u) + \kappa$ for all $x \in R$, where $\kappa$ is a constant.
}
\changed{Thus we seek a point $x = (x_1,x_2)$
and a value $\rho$
to solve:
\begin{equation*}
\begin{array}{ll@{}ll}
\text{minimize} & \rho &\\
\text{subject to}&
d_2(x,{\bar H}) \le \rho & H \in {\cal H}_1\\
&d_2(x,u) + \kappa \le \rho \ \ \ &
\text{ for point $u$ and constant $\kappa$ corresponding to } H \in {\cal H}_2\\
\end{array}
\end{equation*}
Because $x$ is guaranteed to lie in the region $R$,
we can completely disregard the underlying polygon $P$ in solving the problem.
}
In the Euclidean plane, the problem may be reinterpreted in a geometric manner.
We wish to find the
\changed{disk}
of smallest radius $\rho$ that \textit{intersects}
each of a given set of \changed{half-planes}
and \textit{contains}
each of a given set of disks.
\changed{For $H \in {\cal H}_1$, we have $d(x,H) \le \rho$ if and only if the disk of radius $\rho$ centered at $x$ intersects $\bar H$.
For $H \in {\cal H}_2$, with $d(x,H) = d_2(x,u) + \kappa$, we have
$d(x,H) \le \rho$ if and only if the disk of radius $\rho$ centered at $x$ contains the disk of radius $\kappa$ centered at $u$.
}
\changed{We will call this Euclidean problem the ``minimum feasible disk'' problem.}
\newestchanged{The constraints of the problem that correspond to the set of half-polygons ${\cal H}_1$ will be referred to as \defn{half-plane constraints}, while the constraints for ${\cal H}_2$ will be called \defn{disk constraints}.}
\changed{We observe here that the minimum feasible disk problem belongs to the class of `LP-type' problems described by Sharir and Welzl~\cite{sharir1992combinatorial}.
In fact, it satisfies the computational assumptions that allow a derandomization of the Sharir-Welzl algorithm yielding a \textit{deterministic} linear-time algorithm for the problem (see Chazelle and Matousek~\cite{chazelle1996linear}).
However, as this approach is rather complex, we will outline a more direct linear-time algorithm to solve the problem.
}
\changed{The minimum feasible disk problem is a combination of two well-known problems that have linear time algorithms.
\newestchanged{If all the constraints are half-plane constraints,}
then, because each such constraint
can be written as a linear inequality,
we have a 3-dimensional linear program, which can be solved in linear time as shown by Megiddo~\cite{megiddo_linear}
and independently by Dyer~\cite{dyer1984linear}.
On the other hand,
\newestchanged{if all the constraints are disk constraints,}
then this is the ``spanning circle problem''---to find the smallest disk that contains some given disks. This problem arose from the geodesic vertex center problem~\cite{pollack_sharir} and generalizes the Euclidean 1-center problem where the disks degenerate to points. The problem was solved in linear time by Megiddo~\cite{megiddo_spanned_ball} using an approach similar to that for the 1-center problem and for linear programming.
Because the approaches are similar, it is not difficult to combine them, as we show below.
}
\changed{We begin by describing the main idea of Megiddo's prune-and-search approach for both linear programming
\newestchanged{in 3D}
and for the spanning circle problem (also see the survey by Dyer et al.~in the Handbook of Discrete and Computational Geometry~\cite[Chapter 49]{dyer2017linear}).
The goal is to spend linear time to prune away a constant fraction of the constraints that do not define the final answer, and to repeat this until there are only a constant number of constraints left, after which a brute force method may be employed.
The idea is to pair up the constraints, and for each pair of constraints $c_1,c_2$ compute a ``bisecting'' plane $\Pi$ such that on one side of the $\Pi$ the constraint $c_1$ is redundant, and on the other side of $\Pi$ the constraint $c_2$ is redundant.
If we could identify which side of $\Pi$ contains the optimum solution, then one of the constraints $c_1, c_2$ can be removed.
We address the
existence of such bisecting planes below.
There are two other issues. Issue 1 is to identify which side of a plane contains the optimum solution, a subproblem that Megiddo calls an ``oracle''. This is done by finding the optimum point restricted to the plane (a problem one dimension down), from which the side of the plane can be decided. (The Chord Oracle from Section~\ref{section:chord_oracle} was doing a similar thing.) Issue 2 is to identify the position of the optimum point relative to ``many'' of the bisecting planes, while testing only a ``small'' sample of them---\newestchanged{this can be done using \emph{cuttings}.}
We will not discuss these two issues since they are the same as in Megiddo's papers~\cite{megiddo_linear,megiddo1984linear} (or see the survey by Dyer et al.~\cite{dyer2017linear}).}
\changed{For our minimum feasible disk problem,
\newestchanged{we have two types of constraints---half-plane constraints and disk constraints.}
\newestchanged{Megiddo's prune-and-search approach based on pairing up the constraints can still be applied so long as we pair each constraint with another constraint of the same type.}
\newestchanged{(We note that this idea was previously used by Bhattacharya et al.~\cite{bhattacharya1994optimal} in their linear time algorithm to find the smallest disk that contains some given points and intersects some lines, a problem they call the ``intersection radius problem''.)}
Thus it suffices to describe what are the bisecting planes for the two types of constraints in our minimum feasible disk problem.}
\changed{
\newestchanged{A half-plane constraint}
has the form
$d_2(x, {\bar H}) \le \rho$. If the
halfplane $\bar H$ is given by $a_i^T x \le b_i $,
normalized so that $||a_i|| = 1$,
then the constraint is $a_i^T x \le b_i + \rho$, a linear inequality.
For two such constraints indexed by $i$ and $j$, the bisecting plane is given by $(a_i^T - a_j^T) x - (b_i - b_j) = 0$.
}
\changed{
\newestchanged{A disk constraint}
has the form $d_2(x,u_i) + \kappa_i \le \rho$, corresponding to a disk with center $u_i$ and radius $\kappa_i$.
As Megiddo~\cite{megiddo_spanned_ball} noted, by adding the constraint $\rho \ge \kappa_i$, this can be written as
$$ ||x - u_i||^2 \le (\rho - \kappa_i)^2
$$
or as
$$f_i(x,\rho) \le 0$$ where $f_i$ is defined as
$$ f_i(x,\rho) = ||x||^2 - 2u_i^T x + ||u_i||^2 - \rho^2 + 2\kappa_i \rho - \kappa_i^2.$$
This is not a linear constraint, but
for $i \ne j$, the equation $f_i(x,\rho) = f_j(x,\rho)$ defines a plane since the quadratic terms, $||x||^2$ and $\rho^2$, cancel out. So the bisecting plane is
$f_i(x,\rho) = f_j(x,\rho)$.}
\newestchanged{This completes the summary of how to solve the minimum feasible disk problem in linear time, and completes our algorithm to find the geodesic center of half-polygons.}
\section{Conclusions}
We introduced the notion of the visibility center of a set of points in a polygon and gave an algorithm with run time $O((n+m) \log (n+m))$ to find the visibility center of $m$ points in an $n$-vertex polygon. To do this, we gave an algorithm \changed{with run time $O((n+k)\log(n+k))$} to find the geodesic center of a given set of $k$ half-polygons inside a polygon, a problem of independent interest. We conclude with some open questions.
Can the visibility center of a simple polygon be found more efficiently?
Note that the geodesic center of the vertices of a simple polygon can be found in linear time~\cite{linear_time_geodesic}.
Our current method involves ray shooting
and sorting (Section~\ref{section:essential-half-polygons} and the preprocessing in Section~\ref{section:half-polygon-center}),
which are serious barriers. A more reasonable goal
\newcomment{is to find the visibility center of $m$ points in a polygon in time $O(n + m \log m)$.}
Is there a more efficient algorithm to find the geodesic center of (sorted) half-polygons?
In forthcoming work we give a linear time algorithm for the special case of finding the geodesic center of the \emph{edges} of a polygon (this is the case where the half-polygons hug the edges).
How hard is it to find
the farthest visibility Voronoi diagram of a polygon?
Finally, what about the 2-visibility center of a polygon, where we can deploy two guards instead of one?
\begin{appendix}
\end{appendix}
\end{document} |
\begin{document}
% NOTE(review): the single-letter macro names below were reconstructed from a
% garbled source in which each control sequence had been textually replaced by
% its expansion; verify the names against the original source.
\def\R{{\mathbb R}}
\def\Z{{\mathbb Z}}
\def\C{{\mathbb C}}
\newcommand{\tr}{\rm trace}
\newcommand{\E}{{\mathbb{E}}}
\newcommand{\PP}{{\mathbb{P}}}
\newcommand{\cE}{{\cal E}}
\newcommand{\cF}{{\cal F}}
\newtheorem{df}{Definition}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{pr}{Proposition}
\newtheorem{co}{Corollary}
\def\n{\nu}
\def\sign{\mbox{ sign }}
\def\a{\alpha}
\def\N{{\mathbb N}}
\def\cA{{\cal A}}
\def\cL{{\cal L}}
\def\cX{{\cal X}}
\def\cF{{\cal F}}
\def\c{\bar{c}}
\def\n{\nu}
\def\d{\delta}
\def\diam{\mbox{\rm dim}}
\def\vol{\mbox{\rm Vol}}
\def\b{\beta}
\def\t{\theta}
\def\l{\lambda}
\def\e{\varepsilon}
\def\colon{{:}\;}
\def\pf{\noindent {\bf Proof : \ }}
\def\endpf{ \begin{flushright}
$ \Box $ \\
\end{flushright}}
\title[Hyperplane inequality for measures]{A $\sqrt{n}$ estimate for measures of hyperplane sections of convex bodies}
\author{Alexander Koldobsky}
\address{Department of Mathematics\\
University of Missouri\\
Columbia, MO 65211}
\email{koldobskiya@missouri.edu}
\begin{abstract} The hyperplane (or slicing) problem asks whether there exists
an absolute constant $C$ so that for any origin-symmetric convex body $K$ in ${\mathbb R}^n$
$$
|K|^{\frac {n-1}n} \le C \max_{\xi \in S^{n-1}} |K\cap \xi^\bot|,
$$
where $\xi^\bot$ is the central hyperplane in ${\mathbb R}^n$ perpendicular to $\xi,$ and
$|K|$ stands for volume of proper dimension. The problem is still open, with the best-to-date estimate $C\sim n^{1/4}$ established
by Klartag, who slightly improved the previous estimate of Bourgain. It is much easier to get a weaker estimate with $C=\sqrt{n}.$
In this note we show that the $\sqrt{n}$ estimate holds for arbitrary measure in place of volume. Namely,
if $L$ is an origin-symmetric convex body in ${\mathbb R}^n$ and $\mu$ is a measure
with non-negative even continuous density on $L,$ then
$$\mu(L)\ \le\ \sqrt{n} \frac n{n-1} c_n\max_{\xi \in S^{n-1}}
\mu(L\cap \xi^\bot)\ |L|^{1/n} \ ,$$
where $c_n= \left|B_2^n\right|^{\frac{n-1}n}/ \left|B_2^{n-1}\right| < 1,$
and $B_2^n$ is the unit Euclidean ball in ${\mathbb R}^n.$ We deduce this inequality from a stability
result for intersection bodies.
\end{abstract}
\maketitle
\section{Introduction}
The hyperplane (or slicing) problem \cite{Bo1, Bo2, Ba, MP} asks whether there exists
an absolute constant $C$ so that for any origin-symmetric convex body $K$ in ${\mathbb R}^n$
\begin{equation} \label{hyper}
|K|^{\frac {n-1}n} \le C \max_{\xi \in S^{n-1}} |K\cap \xi^\bot|,
\end{equation}
where $\xi^\bot$ is the central hyperplane in ${\mathbb R}^n$ perpendicular to $\xi,$ and
$|K|$ stands for volume of proper dimension.
The problem is still open, with the best-to-date estimate $C\sim n^{1/4}$ established
by Klartag \cite{Kl}, who slightly improved the previous estimate of Bourgain \cite{Bo3}.
We refer the reader to [BGVV] for the history and
current state of the problem.
In the case where $K$ is an intersection body (see definition and properties below),
the inequality (\ref{hyper}) can be proved with
the best possible constant (\cite[p. 374]{G2}):
\begin{equation}\label{hyper-inter}
|K|^{\frac {n-1}n} \le \frac{\left|B_2^n\right|^{\frac{n-1}n}}{\left|B_2^{n-1}\right|}
\max_{\xi \in S^{n-1}} |K\cap \xi^\bot|,
\end{equation}
with equality when $K=B_2^n$ is the unit Euclidean ball. Here $|B_2^n|= \pi^{n/2}/\Gamma(1+n/2)$
is the volume of $B_2^n.$ Throughout the paper, we denote the constant in (\ref{hyper-inter}) by
$$c_n= \frac{\left|B_2^n\right|^{\frac{n-1}n}}{\left|B_2^{n-1}\right|} .$$
Note that $c_n<1$ for every $n\in {\mathbb N};$ this is an easy consequence of the log-convexity
of the $\Gamma$-function.
It was proved in \cite{K3} that inequality (\ref{hyper}) holds for intersection bodies
with arbitrary measure in place of volume. Let $f$ be an even continuous non-negative
function on ${\mathbb R}^n,$ and denote by $\mu$ the measure on ${\mathbb R}^n$ with density $f$.
For every closed bounded set $B\subset {\mathbb R}^n$ define
$$\mu(B)=\int\limits_B f(x)\ dx.$$ Suppose that $K$ is an intersection body in ${\mathbb R}^n.$ Then,
as proved in \cite[Theorem 1]{K3} (see also a remark at the end of the paper \cite{K3}),
\begin{equation} \label{arbmeas}
\mu(K) \le \frac n{n-1} c_n \max_{\xi \in S^{n-1}} \mu(K\cap \xi^\bot)\ |K|^{1/n}.
\end{equation}
The constant in the latter inequality is the best possible.
This note was motivated by a question of whether one can remove the assumption that
$K$ is an intersection body and prove the inequality (\ref{arbmeas}) for all origin-symmetric convex
bodies, perhaps at the expense of a greater constant in the right-hand side. One would like this extra
constant to be independent of the body or measure. In this note we prove the following inequality.
\begin{theorem}\label{main} Let $L$ be an origin-symmetric convex body in ${\mathbb R}^n,$ and
let $\mu$ be a measure with even continuous non-negative density on $L.$ Then
\begin{equation} \label{sqrtn}
\mu(L)\ \le\ \sqrt{n} \frac n{n-1} c_n\max_{\xi \in S^{n-1}}
\mu(L\cap \xi^\bot)\ |L|^{1/n}.
\end{equation}
\end{theorem}
In the case of volume, the estimate (\ref{hyper}) with $C=\sqrt{n}$ can be proved relatively
easily (see \cite[p. 96]{MP} or \cite[Theorem 8.2.13]{G2}), and it is not optimal, as mentioned above.
The author does not know whether the estimate (\ref{sqrtn}) is optimal for arbitrary measures.
\section{Proof of Theorem \ref{main}}
We need several definitions and facts.
A closed bounded set $K$ in ${\mathbb R}^n$ is called a {\it star body} if
every straight line passing through the origin crosses the boundary of $K$
at exactly two points different from the origin, the origin is an interior point of $K,$
and the {\it Minkowski functional}
of $K$ defined by
$$\|x\|_K = \min\{a\ge 0:\ x\in aK\}$$
is a continuous function on ${\mathbb R}^n.$
The {\it radial function} of a star body $K$ is defined by
$$\rho_K(x) = \|x\|_K^{-1}, \qquad x\in {\mathbb R}^n.$$
If $x\in S^{n-1}$ then $\rho_K(x)$ is the radius of $K$ in the
direction of $x.$
If $\mu$ is a measure on $K$ with even continuous density $f$, then
\begin{equation} \label{polar-measure}
\mu(K) = \int_K f(x)\ dx = \int\limits_{S^{n-1}}\left(\int\limits_0^{\|\theta\|^{-1}_K} r^{n-1} f(r\theta)\ dr\right) d\theta.
\end{equation}
Putting $f=1$, one gets
\begin{equation} \label{polar-volume}
|K|
=\frac{1}{n} \int_{S^{n-1}} \rho_K^n(\theta) d\theta=
\frac{1}{n} \int_{S^{n-1}} \|\theta\|_K^{-n} d\theta.
\end{equation}
The {\it spherical Radon transform}
$R:C(S^{n-1})\mapsto C(S^{n-1})$
is a linear operator defined by
$$Rf(\xi)=\int_{S^{n-1}\cap \xi^\bot} f(x)\ dx,\quad \xi\in S^{n-1}$$
for every function $f\in C(S^{n-1}).$
The polar formulas (\ref{polar-measure}) and (\ref{polar-volume}), applied to a hyperplane section of $K$, express
volume of such a section in terms of the spherical Radon transform:
$$\mu(K\cap \xi^\bot) = \int_{K\cap \xi^\bot} f =
\int_{S^{n-1}\cap \xi^\bot} \left(\int_0^{\|\theta\|_K^{-1}} r^{n-2}f(r\theta)\ dr \right)d\theta$$
\begin{equation} \label{measure=spherradon}
=R\left(\int_0^{\|\cdot\|_K^{-1}} r^{n-2}f(r\ \cdot)\ dr \right)(\xi).
\end{equation}
and
\begin{equation} \label{volume=spherradon}
|K\cap \xi^\bot| = \frac{1}{n-1} \int_{S^{n-1}\cap \xi^\bot} \|\theta\|_K^{-n+1}d\theta =
\frac{1}{n-1} R(\|\cdot\|_K^{-n+1})(\xi).
\end{equation}
The spherical Radon
transform is self-dual (see \cite[Lemma 1.3.3]{Gr}), namely,
for any functions $f,g\in C(S^{n-1})$
\begin{equation} \label{selfdual}
\int_{S^{n-1}} Rf(\xi)\ g(\xi)\ d\xi = \int_{S^{n-1}} f(\xi)\ Rg(\xi)\ d\xi.
\end{equation}
Using self-duality, one can extend the spherical Radon transform to measures.
Let $\mu$ be a finite Borel measure on $S^{n-1}.$
We define the spherical Radon transform of $\mu$ as a functional $R\mu$ on
the space $C(S^{n-1})$ acting by
$$(R\mu,f)= (\mu, Rf)= \int_{S^{n-1}} Rf(x) d\mu(x).$$
By Riesz's characterization of continuous linear functionals on the
space $C(S^{n-1})$,
$R\mu$ is also a finite Borel measure on $S^{n-1}.$ If $\mu$ has
continuous density $g,$ then by (\ref{selfdual}) the
Radon transform of $\mu$ has density $Rg.$
The class of intersection bodies was introduced by Lutwak \cite{L}.
Let $K, L$ be origin-symmetric star bodies in ${\mathbb R}^n.$ We say that $K$ is the
intersection body of $L$ if the radius of $K$ in every direction is
equal to the $(n-1)$-dimensional volume of the section of $L$ by the central
hyperplane orthogonal to this direction, i.e. for every $\xi\in S^{n-1},$
\begin{equation} \label{intbodyofstar}
\rho_K(\xi)= \|\xi\|_K^{-1} = |L\cap \xi^\bot|.
\end{equation}
All bodies $K$ that appear as intersection bodies of different star bodies
form {\it the class of intersection bodies of star bodies}.
Note that the right-hand
side of (\ref{intbodyofstar}) can be written in terms of the spherical Radon transform using (\ref{volume=spherradon}):
$$\|\xi\|_K^{-1}= \frac{1}{n-1} \int_{S^{n-1}\cap \xi^\bot} \|\theta\|_L^{-n+1} d\theta=
\frac{1}{n-1} R(\|\cdot\|_L^{-n+1})(\xi).$$
It means that a star body $K$ is
the intersection body of a star body if and only if the function $\|\cdot\|_K^{-1}$
is the spherical Radon transform of a continuous positive function on $S^{n-1}.$
This allows us to introduce a more general class of bodies. A star body
$K$ in ${\mathbb R}^n$ is called an {\it intersection body}
if there exists a finite Borel measure \index{intersection body}
$\mu$ on the sphere $S^{n-1}$ so that $\|\cdot\|_K^{-1}= R\mu$ as functionals on
$C(S^{n-1}),$ i.e. for every continuous function $f$ on $S^{n-1},$
\begin{equation} \label{defintbody}
\int_{S^{n-1}} \|x\|_K^{-1} f(x)\ dx = \int_{S^{n-1}} Rf(x)\ d\mu(x).
\end{equation}
We refer the reader to the books \cite{G2, K2}
for more information about intersection bodies and their applications. Let us just say that
intersection bodies played a crucial role in the solution of the Busemann-Petty problem.
The class of intersection bodies is rather rich. For example, every origin-symmetric convex
body in ${\mathbb R}^3$ and ${\mathbb R}^4$ is an intersection body \cite{G1, Z}. The unit ball of any finite
dimensional subspace of $L_p,\ 0<p\le 2$ is an intersection body, in particular every polar
projection body is an intersection body \cite{K1}.
We deduce Theorem 1 from the following stability result for intersection bodies.
\begin{theorem}\label{stab}
Let $K$ be an intersection body in ${\mathbb R}^n,$ let $f$
be an even continuous function on $K,$ $f\ge 1$ everywhere on $K,$ and let $\varepsilon>0.$ Suppose that
\begin{equation}\label{comp1}
\int_{K\cap \xi^\bot} f \ \le\ |K\cap \xi^\bot| +\varepsilon,\qquad \forall \xi\in S^{n-1},
\end{equation}
then
\begin{equation}\label{comp2}
\int_K f\ \le\ |K| + \frac {n}{n-1}\ c_n\ |K|^{1/n}\varepsilon.
\end{equation}
\end{theorem}
\noindent {\bf Proof : \ } First, we use the polar formulas (\ref{measure=spherradon}) and (\ref{volume=spherradon}) to write
the condition (\ref{comp1}) in terms of the spherical Radon transform:
$$R\left(\int_0^{\|\cdot\|_K^{-1}} r^{n-2}f(r\ \cdot)\ dr \right)(\xi) \le \frac{1}{n-1} R(\|\cdot\|_K^{-n+1})(\xi) + \varepsilon.$$
Let $\mu$ be the measure on $S^{n-1}$ corresponding to $K$ by the definition of an intersection body (\ref{defintbody}).
Integrating both sides of the latter inequality over $S^{n-1}$ with the measure $\mu$ and using (\ref{defintbody}),
we get
$$\int_{S^{n-1}} \|\theta\|_K^{-1} \left(\int_0^{\|\theta\|_K^{-1}} r^{n-2}f(r\theta)\ dr \right)d\theta $$
\begin{equation} \label{eq11}
\le \frac{1}{n-1} \int_{S^{n-1}} \|\theta\|_K^{-n}\ d\theta + \varepsilon \int_{S^{n-1}} d\mu(\xi).
\end{equation}
Recall (\ref{polar-measure}), (\ref{polar-volume}) and the assumption that $f\ge 1.$ We write the integral in the left-hand side
of (\ref{eq11}) as
$$\int_{S^{n-1}} \|\theta\|_K^{-1} \left(\int_0^{\|\theta\|_K^{-1}} r^{n-2}f(r\theta)\ dr \right)d\theta $$
$$= \int_{S^{n-1}} \left(\int_0^{\|\theta\|_K^{-1}} r^{n-1}f(r\theta)\ dr \right)d\theta$$
$$+ \int_{S^{n-1}} \left(\int_0^{\|\theta\|_K^{-1}} (\|\theta\|_K^{-1} - r) r^{n-2}f(r\theta)\ dr \right)d\theta$$
$$\ge \int_K f + \int_{S^{n-1}} \left(\int_0^{\|\theta\|_K^{-1}} (\|\theta\|_K^{-1} - r) r^{n-2}\ dr \right)d\theta$$
\begin{equation} \label{eq22}
=\int_K f + \frac 1{(n-1)n} \int_{S^{n-1}} \|\theta\|_K^{-n}\ d\theta = \int_K f + \frac1{n-1} |K|.
\end{equation}
Let us estimate the second term in the right-hand side of (\ref{eq11}) by adding the Radon transform of the unit constant
function under the integral ($R1(\xi)=\left|S^{n-2}\right|$ for every $\xi \in S^{n-1}$),
using again the fact that $\|\cdot\|_K^{-1}=R\mu$ and then applying H\"older's inequality:
$$\varepsilon \int_{S^{n-1}} d\mu(\xi) = \frac{\varepsilon}{\left|S^{n-2}\right|} \int_{S^{n-1}} R1(\xi)\ d\mu(\xi)$$
$$=\frac{\varepsilon}{\left| S^{n-2} \right| } \int_{S^{n-1}} \|\theta\|_K^{-1}\ d\theta $$
$$ \le \frac{\varepsilon}{\left|S^{n-2}\right|} \left|S^{n-1}\right|^{\frac{n-1}n} \left(\int_{S^{n-1}} \|\theta\|_K^{-n}\ d\theta\right)^{\frac1n}$$
\begin{equation}\label{eq33}
= \frac{\varepsilon}{\left|S^{n-2}\right|} \left|S^{n-1}\right|^{\frac{n-1}n} n^{1/n}|K|^{1/n}= \frac n{n-1} c_n |K|^{1/n} \varepsilon.
\end{equation}
In the last step we used $|S^{n-1}|=n|B_2^n|, |S^{n-2}|=(n-1)|B_2^{n-1}|.$ Combining (\ref{eq11}), (\ref{eq22}),(\ref{eq33}) we get
$$\int_K f + \frac 1{n-1} |K| \le \frac n{n-1} |K| + \frac n{n-1} c_n |K|^{1/n} \varepsilon. \qed$$
\bigbreak
Now we prove our main result.
\smallbreak
\noindent {\bf Proof of Theorem \ref{main}: }
Let $g$ be the density of the measure $\mu,$ so $g$ is an even non-negative continuous function
on $L.$ By John's theorem \cite{J}, there exists an origin-symmetric ellipsoid $K$ such that
$$\frac 1{\sqrt{n}} K \subset L \subset K.$$
The ellipsoid $K$ is an intersection body (see for example \cite[Corollary 8.1.7]{G2}).
Let $f= \chi_K + g \chi_L,$ where $\chi_K,\ \chi_L$ are the indicator functions of $K$ and $L.$
Clearly, $f\ge 1$ everywhere on $K.$ Put
$$\varepsilon=\max_{\xi\in S^{n-1}} \left(\int_{K\cap \xi^\bot} f - |K\cap \xi^\bot| \right)= \max_{\xi\in S^{n-1}} \int_{L\cap \xi^\bot} g$$
and apply Theorem \ref{stab} to $f,K,\varepsilon$ (the function $f$ is not necessarily continuous on $K,$
but the result holds by a simple approximation argument). We get
$$\mu(L)= \int_L g = \int_K f -\ |K|$$
$$ \le \frac n{n-1} c_n |K|^{1/n}\max_{\xi\in S^{n-1}} \int_{L\cap \xi^\bot} g$$
$$ \le \sqrt{n}\ \frac n{n-1} c_n |L|^{1/n}\max_{\xi\in S^{n-1}} \mu(L\cap \xi^\bot),$$
because $|K|^{1/n}\le \sqrt{n}\ |L|^{1/n}.$ \qed
\bigbreak
{\bf Acknowledgement.} I wish to thank the US National Science Foundation for support through
grant DMS-1265155.
\begin{thebibliography}{99}
\bibitem[Ba]{Ba} {K.~Ball}, {\em Logarithmically concave functions
and sections of convex sets in $ R\sp n$}, Studia Math. {\bf 88} (1988), 69--84.
\bibitem[Bo1]{Bo1} {J.~Bourgain}, \textit{On high-dimensional maximal functions associated to
convex bodies}, Amer. J. Math. \textbf{108} (1986), 1467--1476.
\bibitem[Bo2]{Bo2} {J.~ Bourgain}, \textit{Geometry of Banach spaces and harmonic analysis},
Proceedings of the International Congress of Mathematicians (Berkeley, Calif., 1986), Amer. Math. Soc.,
Providence, RI, 1987, 871--878.
\bibitem[Bo3]{Bo3} {J.~ Bourgain}, \textit{On the distribution of polynomials on high-dimensional
convex sets}, Geometric aspects of functional analysis, Israel seminar (1989--90), Lecture Notes in Math.
\textbf{1469} Springer, Berlin, 1991, 127--137.
\bibitem[BGVV]{BGVV} {S.~Brazitikos, A.~Giannopoulos, P.~Valettas and B.~Vritsiou},
Notes on isotropic convex bodies, preprint.
\bibitem[G1]{G1} { R.~J.~Gardner}, \textit{A positive answer to the Busemann-Petty
problem in three dimensions}, Ann. of Math. (2) \textbf{140} (1994), 435--447.
\bibitem[G2]{G2} { R.~J.~Gardner}, \textit{Geometric tomography}, Second edition,
Cambridge University Press, Cambridge, 2006.
\bibitem[Gr]{Gr} { H.~Groemer}, \textit{Geometric applications of Fourier series
and spherical harmonics}, Cambridge University Press, New York, 1996.
\bibitem[J]{J} {F.~John}, \textit{Extremum problems with inequalities as subsidiary conditions}, Courant
Anniversary Volume, Interscience, New York (1948), 187--204.
\bibitem[Kl]{Kl} { B.~Klartag}, \textit{On convex perturbations with a bounded isotropic constant},
Geom. Funct. Anal. \textbf{16} (2006), 1274--1290.
\bibitem[K1]{K1} { A.~ Koldobsky}, \textit{Intersection bodies, positive definite
distributions and the Busemann-Petty problem}, Amer. J. Math. \textbf{120} (1998), 827--840.
\bibitem[K2]{K2} {A.~Koldobsky}, \textit{Fourier analysis in convex geometry},
Amer. Math. Soc., Providence RI, 2005.
\bibitem[K3]{K3} { A.~Koldobsky}, \textit{A hyperplane inequality for measures of convex bodies in ${\mathbb R}^n, n\le 4$},
Discrete Comput. Geom. \textbf{47} (2012), 538--547.
\bibitem[L]{L} { E.~Lutwak}, \textit{Intersection bodies and dual mixed volumes},
Adv. Math. \textbf{71} (1988), 232--261.
\bibitem[MP]{MP} {V.~Milman and A.~Pajor}, {\em Isotropic position
and inertia ellipsoids and zonoids of the unit ball of
a normed $n$-dimensional space}, in: Geometric Aspects
of Functional Analysis, ed. by J.~Lindenstrauss and V.~Milman,
Lecture Notes in Mathematics {\bf 1376}, Springer, Heidelberg, 1989, pp.~64--104.
\bibitem[Z]{Z} { Gaoyong Zhang}, \textit{A positive answer to the Busemann-Petty
problem in four dimensions}, Ann. of Math. (2) \textbf{149} (1999), 535--543.
\end{thebibliography}
\end{document}
\begin{document}
\newcommand{\comment}[1]{}
% NOTE(review): the \newcommand names below were reconstructed after a
% mangling pass replaced each macro name by its own expansion (e.g.
% "\newcommand{\begin{equation}}{...}" was clearly "\newcommand{\be}{...}").
% Confirm the chosen names against the original source; the document body
% uses the expanded forms, so it compiles either way.
\newcommand{\E}{\mathrm{E}}
\newcommand{\Var}{\mathrm{Var}}
\newcommand{\bra}[1]{\langle #1|}
\newcommand{\ket}[1]{|#1\rangle}
\newcommand{\braket}[2]{\langle #1|#2 \rangle}
\newcommand{\mean}[2]{\langle #1 #2 \rangle}
\newcommand{\be}{\begin{equation}}
\newcommand{\ee}{\end{equation}}
\newcommand{\bea}{\begin{eqnarray}}
\newcommand{\eea}{\end{eqnarray}}
\newcommand{\SD}[1]{{\color{magenta}#1}}
\newcommand{\rem}[1]{{\sout{#1}}}
\newcommand{\alert}[1]{\textbf{\color{red} \uwave{#1}}}
\newcommand{\Y}[1]{\textcolor{blue}{#1}}
\newcommand{\R}[1]{\textcolor{red}{#1}}
\newcommand{\B}[1]{\textcolor{blue}{#1}}
\newcommand{\C}[1]{\textcolor{cyan}{#1}}
\newcommand{\darkblue}{\color{darkblue}}
\newcommand{\intinf}{\int_{-\infty}^{\infty}\!}
\newcommand{\Tr}{\mathop{\rm Tr}\nolimits}
\newcommand{\const}{\mathop{\rm const}\nolimits}
\title{Preparing a mechanical oscillator in non-Gaussian quantum states}
\author{Farid Khalili}
\affiliation{Physics Faculty, Moscow State University, Moscow
119991, Russia}
\author{Stefan Danilishin}
\affiliation{Physics Faculty, Moscow State University, Moscow
119991, Russia}
\author{Haixing Miao}
\affiliation{School of Physics, University of Western Australia,
WA 6009, Australia}
\author{Helge M\"uller-Ebhardt}
\affiliation{Max-Planck Institut f\"ur Gravitationsphysik
(Albert-Einstein-Institut) and Leibniz Universit\"at Hannover,
Callinstr. 38, 30167 Hannover, Germany}
\author{Huan Yang}
\affiliation{Theoretical Astrophysics 130-33, California Institute
of Technology, Pasadena, CA 91125, USA}
\author{Yanbei Chen}
\affiliation{Theoretical Astrophysics 130-33, California Institute
of Technology, Pasadena, CA 91125, USA}
\begin{abstract}
We propose a protocol for coherently transferring non-Gaussian
quantum states from optical field to a mechanical oscillator.
The open quantum dynamics and continuous-measurement process, which
can not be treated by the stochastic-master-equation formalism, are
studied by a new path-integral-based approach. We obtain an elegant
relation between the quantum state of the mechanical oscillator
and that of the optical field, which is valid for general linear
quantum dynamics. We demonstrate the experimental feasibility of
such protocol by considering the cases of both large-scale
gravitational-wave detectors and small-scale cavity-assisted
optomechanical devices.
\end{abstract}
\maketitle
{\it Introduction.}---It is becoming experimentally possible to
prepare a macroscopic mechanical oscillator near its quantum ground
state by either active feedback or passive cooling in
optomechanical devices~\cite{Marquardt}. This activity has been
motivated by (i) the necessity to increase the sensitivity of
high-precision measurements with mechanical test bodies up to
and beyond the {\it Standard Quantum Limit} (SQL)~\cite{92BookBrKh},
and (ii) the test and interpretation of quantum theory, when
macroscopic degrees of freedom are involved. However, for unequivocal
evidence of quantum behavior, {\it merely} achieving quantum
ground state, or preparing coherent/squeezed states, or
overcoming the SQL is {\it insufficient}: In these situations,
the oscillator initially occupies a Gaussian state and remains
Gaussian, and therefore its Wigner function is positive and can
always be interpreted in terms of a classical probability.
{\it A true demonstration of the quantum behavior requires non-Gaussian
quantum states or nonlinear measurements} \cite{Bell1987, Braunstein2005}. A natural approach is
to create nonlinear coupling between a mechanical oscillator and
external degrees of freedom, e.g., probing mechanical energy
\cite{Thompson, Santamore, Martin, Miao}, coupling the oscillator to
a qubit \cite{Jacobs,Clerk,LaHaye} or (low) cavity photon number
\cite{Mancini, Bose, Marshall}. For optomechanical devices,
this generally requires zero-point uncertainty of the oscillator
displacement $x_q$ to be comparable to the cavity linear dynamical
range which is characterized by the optical wavelength $\lambda$
divided by the finesse $\cal F$, i.e.,
\begin{equation}\label{cond1}
\lambda/({\cal F} x_q)\lesssim 1.
\end{equation}
Since $\lambda\sim 10^{-6}$m and ${\cal F}\lesssim10^6$, we have
$x_q\gtrsim 10^{-12}\,{\rm m}$, which is several orders of magnitude
above the current technology ability.
\begin{figure}
\includegraphics[width=0.48\textwidth, bb=0 0 270 125,clip]{config.eps}
\caption{(Color online) Possible schemes for preparing non-Gaussian quantum
states of mechanical oscillators. The left is a Michelson interferometer,
similar to an advanced gravitational-wave detector with kg-scale suspended
test masses \cite{LIGO, AdvLIGOsite}. The right panel shows a small
coupled-cavity scheme with a ng-scale membrane inside a high-finesse cavity
\cite{Thompson}. In both cases, a non-Gaussian optical state (a photon pulse)
is injected into the dark port of the interferometer (local oscillator light
for homodyne detection is not shown).
\label{config}}
\end{figure}
In this article, we propose a protocol for preparations of non-Gaussian
quantum states which {\it does not require} nonlinear optomechanical
coupling. The idea is to inject a non-Gaussian optical state, e.g.,
a single-photon pulse created by cavity QED \cite{Kimble1, Kimble2, Walther},
into the optomechanical devices. Possible configurations are shown schematically
in Fig. \ref{config}. The radiation pressure induced by the photon pulse
is coherently amplified by the classical pumping at the bright port, and
the qualitative requirement for preparing a non-Gaussian state is
\begin{equation}\label{cond2}
\lambda /({\cal F}\,x_q)\lesssim \sqrt{N_\gamma}.
\end{equation}
Here $N_\gamma=I_0\,\tau/(\hbar \omega_0)$ ($I_0$ the pumping laser power and
$\omega_0$ the frequency) is the number of pumping photons within
the duration $\tau$ of the single-photon pulse, and we gain a significant
factor of $\sqrt{N_\gamma}$ compared with Eq.~\eqref{cond1}, which makes
it experimentally achievable. This radiation-pressure-mediated optomechanical coupling is similar to what
was considered in Refs. \cite{92BookBrKh,Zhang2003, Mancini2, Romero}.
However, there are significant differences: (i) This protocol includes both finite interaction time
and photon shape, in which case neither the rotating-wave approximation
\cite{Zhang2003} nor the three-mode
approach \cite{Mancini2} applies; (ii)
To better model an actual experiment, we consider a continuous measurement process rather than a
single measurement at some given instant as assumed in Ref. \cite{Romero}. This
takes into account all the information of the oscillator motion that is distributed in the output field, and thus
allows us to prepare a nearly {\it pure} non-Gaussian quantum state of the oscillator;
(iii) There are non-trivial quantum correlations at different times (non-Markovianity)
due to the finite-duration photon pulse, which cannot be treated by
the conventional {\it stochastic-master-equation} (SME) approach \cite{Hopkins, Gardiner, Milburn, Doherty1, Doherty2}.
Here we develop a path-integral-based approach, and it applies to general
linear quantum dynamics and continuous measurement process.
{\it A simple case.}---To illustrate the non-Gaussian state-preparation
procedure, we first make an order-of-magnitude estimate of experimental
requirements by considering a simple case where the cavity decay is
much faster than all other time scales and the oscillator can be
approximated as a free mass. The corresponding input-output relations,
in the Heisenberg picture, simply read:
\begin{align}
&\dot{\hat x}(t) = {\hat p}(t)/m \,,\quad\;\;
\dot{\hat p}(t) = \alpha\,\hat a_1(t) + \hat F_{\rm th}(t) \,,\label{1} \\
&\hat b_1(t) = \hat a_1(t)\,,\quad\quad
\hat b_2(t) =\hat a_2(t) + ({\alpha}/{\hbar})\hat x(t) \,. \label{2}
\end{align}
Here $\hat x$ and $\hat p$ are position and momentum; the coupling
constant {$\alpha\equiv 8\sqrt{2}({\cal F}/\lambda) \sqrt{\hbar I_0/\omega_0}$;
$\hat a_{1,2}$ and $\hat b_{1,2}$ are input and output optical
amplitude and phase quadratures,
with $[\hat a_1(t),\hat a_2(t')]=[\hat b_1(t),\hat b_2(t')] = i\,\delta(t-t')$;
$\alpha\, \hat a_1$ is the back-action noise; $\hat F_{\rm th}$ is the
force thermal noise.
Suppose at $t=-\tau$ the oscillator was prepared in some initial Gaussian
state $\ket{\psi_m}=\int_{-\infty}^{\infty}\!\psi_m(x)\ket{x}\,dx$ (the procedure is detailed
in Ref. \cite{state_pre}).
Subsequently, a photon pulse is injected into the dark port of the interferometer
and starts to interact with the oscillator. During this interaction, phase
quadrature $\hat b_2(t)$ is continuously measured by a homodyne detection,
until the photon pulse ends at $t=0$. If photon pulse (i.e., $\tau$) is short such that
oscillator position almost does not change, we obtain:
\begin{align}\label{3}
&\hat{X}(0)=\hat{X}(-\tau),\quad\hat{P}(0) = \hat{P}(-\tau) + \kappa\,\hat{A}_1
+ \hat P_{\rm th} \,, \\\label{4}
&\hat{B}_1 =\hat{A}_1,\quad\quad\quad\;\;\, \hat{B}_2 = \hat{A}_2 + \kappa\,\hat{X}(0) \,.
\end{align}
We have normalized the oscillator position and momentum by their zero-point
uncertainties: $\hat X\equiv \hat x/ x_q$ [$x_q\equiv\sqrt{\hbar/(2m\omega_m)}$]
and $\hat P\equiv \hat p/p_q$ [$p_q\equiv \sqrt{\hbar m\omega_m/2}$]; $\hat{A}_j =
\sqrt{1/\tau}\int_{-\tau}^0dt\,\hat{a}_j(t)\,(j=1,2)$ which has an uncertainty of unity
(i.e., $\Delta \hat A_j$=1); $\hat{B}_j = \sqrt{1/\tau}\int_{-\tau}^0dt\,\hat{b}_j(t)$;
$\hat{P}_{\rm th} = \int_{-\tau}^0 dt\,\hat{F}_{\rm th}(t)/p_q$; $
\kappa \equiv {\alpha\sqrt{\tau}}/{\hbar}=8\sqrt{2}\sqrt{N_{\gamma}}{{\cal F}\,x_q}/{{\lambda}}$.
Eqs.~\eqref{3} and \eqref{4} describe the joint evolution of the oscillator, the
optical field and heat bath in the Heisenberg picture (with $\hat B_j$ viewed
as the evolved versions of $\hat A_j$). They transform back into an evolution
operator of $\hat{U}= \exp[i(\kappa\hat{A}_1\hat{X} + \hat{P}_{\rm th}\hat{X})]$
in the Schr\"odinger picture. The corresponding density matrix of the system
at $t=0$ is given by $ \hat \rho=\hat{U}\ket{\psi_o}\ket{\psi_m}\hat{\rho}_{\rm th}\bra{\psi_m}\bra{\psi_o}\hat{U}^\dagger$, where $\ket{\psi_o} = \int_{-\infty}^{\infty}\!\psi_o(A_2)\ket{A_2}\,dA_2$ is the initial
non-Gaussian optical state, and $\hat{\rho}_{\rm th}$
describes the heat bath associated with $\hat F_{\rm th}$. Given homodyne
detection of $\hat B_2$ with a precise result $y$, the oscillator is projected
into the following conditional state: $\hat{\rho}_m(y) = \mathop{\rm Tr}\nolimits_{\rm th}\left[ \bra{y}\hat{U}\ket{\psi_o}\ket{\psi_m}\hat{\rho}_{\rm th}\bra{\psi_m}\bra{\psi_o}\hat{U}^\dagger \ket{y} \right]$.
In the ideal case of negligible thermal noise, the conditional
wave function $\psi_m^c(x)$ of the mechanical oscillator is simply
\begin{equation}
\psi_m^c(x)=\psi_o(y-\kappa x)\psi_m(x) \,
\end{equation}
---{\it the optical state is mapped onto the mechanical oscillator} as
illustrated in Fig. \ref{Int}. A complete mapping occurs when
$\psi_m(x)\approx\mathop{\rm const}\nolimits$, and this requires the momentum fluctuation
due to optomechanical coupling be larger than the initial one, namely,
$\kappa >1$ or equivalently
\begin{equation}\label{8}
{\lambda}/({{{\cal F}\,x_q}})<8\sqrt{2}{\sqrt{N_{\gamma}}},
\end{equation}
which justifies Eq.\,\eqref{cond2}.
\begin{figure}
\includegraphics[width=0.4\textwidth, bb=0 0 422 229,clip]{interaction.eps}
\caption{(Color online) A schematic of the non-Gaussian state-preparation
process. The interaction entangles the oscillator state and the optical state
(depicted by their Wigner functions). Subsequent measurements of the optical
fields disentangle the system and projects the oscillator into a non-Gaussian
conditional quantum state.
\label{Int}}
\end{figure}
When thermal noise is considered, non-Gaussianity can still remain, as long as thermal noise induces a
smaller momentum fluctuation than the optomechanical interaction. This condition, in the high-temperature limit
--- $\langle \hat F_{\rm th}(t)\hat F_{\rm th}(t')\rangle=4m\gamma_m k_B T\delta (t-t') $,
reads
\begin{equation}\label{9}
{\lambda}/({{\cal F}\,x_q})\sqrt{n_{\rm th}/Q_m}\sqrt{\omega_m \tau}< 8\sqrt{2}\sqrt{N_{\gamma}}
\end{equation}
with $Q_m\equiv \omega_m/\gamma_m$ the mechanical quality factor and
$n_{\rm th}\equiv k_B T/(\hbar\,\omega_m)$ the thermal occupation number.
{\it These two conditions set the benchmarks for a successful non-Gaussian
state-preparation experiment.} They can be satisfied with experimentally
feasible specifications as shown in Table \ref{tab1},
in which the first row is similar to the case of large-scale gravitational-wave detectors \cite{AdvLIGOsite}
\begin{table}[!h]
\caption{Possible experimental specifications}\label{tab1}
\begin{tabular}{l|ccccccc}
&$\lambda$& ${\cal F}$ & $m$ & $\omega_m/2\pi$ & $Q_m$ & $T$ & $\tau$\\
\hline
large scale & $1\mu{\rm m}$ & $6000$ & 4kg & 1Hz & $10^8$ & 300K & 1ms\\
small scale & $1\mu{\rm m}$ & $10^4$ & 1ng & $10^5$Hz & $10^7$ & 4K & 0.01ms
\end{tabular}
\end{table}
and the second row is for small-scale optomechanical devices (e.g., the
one in Ref. \cite{Thompson}). These qualitative results will be justified
by a rigorous treatment below.
{\it General formalism.}---In general, the optomechanical interaction strength is
finite and the oscillator has non-negligible displacement during the
interaction, the cavity bandwidth can be comparable to the mechanical
frequency, and thermal noises can be non-Markovian. All these factors
obstruct finding a finite set of variables similar to $(\hat X, \hat P, \hat A_1, \hat A_2)$
that satisfy a closed set of equations [cf. Eqs.~\eqref{3} and
\eqref{4}]. It is therefore hard to determine, {\it a priori}, the finite
number of observables that one has to measure to project the oscillator into
a desired conditional state.
To address these issues, we adopt the Heisenberg picture starting from $t=-\infty$, and write down
the initial density matrix as $\hat \rho_{in} =\hat \rho_m(-\infty)
\otimes\hat \rho_o\otimes\hat \rho_{\rm th}.$ Details of $\hat\rho_m(-\infty)$
for the oscillator and whether the initial state is truly a direct product, do not
matter, because the system is stable, and the initial position and momentum will decay away
after several mechanical relaxation
times. For the optical state, we consider an arbitrary spatial mode
given by $f(x/c)$, whose annihilation operator is
\begin{equation}
\hat \Gamma\equiv \textstyle \int_{-\infty}^0 dt{f(t)}[\hat a_1(t)+i\hat a_2(t)]/{\sqrt{2}}.
\end{equation}
A general state of this mode can be written in the P-representation as
$\hat \rho_o=\int d\bm\zeta\, P(\bm \zeta)|\zeta\rangle \langle \zeta|$,
where vector $\bm \zeta\equiv(\Re[\zeta],\Im[\zeta])$ and
$|\zeta\rangle\equiv\exp[\zeta\,\hat \Gamma^{\dag}-\zeta^*\hat\Gamma]|0\rangle$.
A continuous measurement of the output optical quadrature
$\hat y(t)\equiv \cos\theta \,\hat b_1(t)+\sin\theta \,\hat b_2(t)$
for $t \in (-\infty,0]$, {\it projects the entire system into a
conditional state:}
\begin{equation}\label{rhom}
\hat \rho_c[y(t)]={\hat {\cal P}_y\,\hat \rho_{in} \hat {\cal P}_y}
/{\mathrm{Tr}[\hat {\cal P}_y\,\hat \rho_{in} \hat {\cal P}_y]}.
\end{equation}
The operator $\hat {\cal P}_y$ projects the output field into the
subspace where $\hat y(t)$ agrees exactly with the measured results
$y(t)$. To simplify output correlations at different times, we can
{\it causally whiten} $\hat y(t)$ into $\hat z(t)$ such that
$\langle \hat z(t)\hat z(t')\rangle=\delta(t-t')$, as detailed in
Ref. \cite{state_pre}. Since the output quadratures at different
times also commute, i.e., $[\hat z(t), \hat z(t')]=0$, the projection
$\hat {\cal P}_y$ can then be expressed as the product of
Dirac-$\delta$ functions that project each $\hat z(t)$ into its
measured value $z(t)$:
\begin{align}\nonumber
\hat {\cal P}_y =\hat {\cal P}_z&=\prod_{-\infty<t<0}\delta[\hat z(t)-z(t)]\\&=\int{\cal D}[\xi]\exp\left\{i\mbox{$\int_{-\infty}^0$}dt\,\xi(t)[\hat z(t)-z(t)] \right\}.\label{12}
\end{align}
with $\int {\cal D}[\xi]$ denoting the path integral.
This allows us to take the entire measurement history
for $z$ (or equivalently $y$) and project into the corresponding
subspace in a single step, instead of having to successively project
output-field degrees of freedom continuously at each time step as
in the case of SME approach, thereby allowing a non-Markovian input field.
The generating function for the oscillator state is then
\begin{equation}
{\cal J}[\bm \alpha; z(t)]\equiv {\rm Tr}\left[e^{i \,\bm \alpha\,
\hat{\bm x}_0'}{\hat \rho_c[z(t)]}\right],
\end{equation}
where ${\bm \alpha}\equiv(\alpha_x, \alpha_p)$, $\hat{\bm x}_0\equiv
(\hat x(0),\hat p(0))$, and superscript $'$ denotes transpose. From
Eqs. \eqref{rhom} and \eqref{12}, we have
\begin{equation}
{\cal J}={\int d\bm \zeta\,{P(\bm \zeta)}\int{\cal D}[\xi]\,
e^{i[\zeta^*\hat\Gamma-\zeta\,\hat \Gamma^{\dag},\,\hat B]}\langle 0|e^{i\hat B} |0\rangle}
\label{J}
\end{equation}
with $\hat B\equiv \bm\alpha \,\hat{\bm x}_0' +\int_{-\infty}^0 dt\,\xi(t)[\hat z(t)-z(t)]$.
This can be evaluated by decomposing
$
\hat{\bm {x}}_0 \equiv \hat{\bm R}+\int_{-\infty}^0 dt\, \bm K(-t)\hat z(t)$
where $\bm K \equiv (K_x,K_p)$ are causal Wiener filters,
$\bm {K}(-t) = \langle 0 |\hat z(t) {\bm {\hat x}_0}|0\rangle$ and
$\hat{\bm R} \equiv (\hat R_x ,\hat R_p)$ are parts of displacement and
momentum uncorrelated with the output: $\bra{0}\hat R_{x,p} \hat z\ket{0}=0$.
Completing path integral, we obtain
{\begin{eqnarray}
{\cal J}\!=\!\!\!\int d\bm \zeta e^
{-[{\bm\alpha{\mathbb V}_c\bm \alpha' +\|z-2\,\bm \zeta\bm L' \|^2 }]/2+ i\,
\bm\alpha\,(\zeta^*\gamma'+\zeta\gamma^\dagger +\bm x_c')
}P(\bm \zeta). \;
\end{eqnarray}
Here $\|a\|^2\equiv\int_{-\infty}^0a(t) a^*(t) dt $ and we have
defined vectors $\bm{\gamma}\equiv [\hat\Gamma, \hat{ \bm R}]$ and
$\bm L\equiv (\Re[L], \Im[L])$ with $L(t)\equiv [\hat\Gamma, \hat z(t)]$,
which characterize the extent of photon mode influence on the
fluctuations of $\hat x(0)$ and $\hat p(0)$, and output field $\hat z$;
quantities ${\mathbb V}_c\equiv \langle 0|\bm {\hat R}^T\bm {\hat R}|0\rangle$
and $\bm x_c\equiv(x_c, p_c)=\int_{-\infty}^0 dt \bm K(-t)z(t)$ are
the conditional covariance matrix and means of $\hat x(0)$ and $\hat p(0)$
when the optical state is vacuum. The resulting {\it conditional} Wigner function reads
\begin{equation}
W[{\bm x}; z(t)] =\int d\bm\zeta e^{-[{{\bm \chi}{\mathbb V}_c^{-1}{\bm \chi}'
+ \|z-2\,\bm\zeta\bm L'\|^2}]/{2}}P(\bm\zeta)\,\label{W}
\end{equation}
with $\bm \chi\equiv {\bm x} - {\bm x}_c-\zeta^* {\bm \gamma}-\zeta {\bm \gamma}^*$.
{\it This formula directly relates the injected optical state to the state of
the mechanical oscillator.} In deriving it, we only use the linearity of
quantum dynamics rather than specific equations of motion. For cavity-assisted
optomechanical system, one can obtain $\bm\gamma$, ${\mathbb V}_c$, $\bm K$
and $L$ from input-output relations in Refs.~\cite{Marquardt2, Rae, Genes}
by using formalism developed in Ref.~\cite{state_pre}.}
{\it Single-photon case.}---As an example, we consider the simplest case of a single-photon injection,
with $\hat \rho_o=|1\rangle \langle 1|$ and $P(\bm\zeta) =e^{|\zeta|^2}
\partial^2 \delta^{(2)}(\zeta)/\partial \zeta \partial \zeta^*$.
From Eqs. \eqref{W}, it gives
\begin{eqnarray}\nonumber
W[{ \bm x}; z(t)]&=&\frac{1-\bm \gamma \mathbb V_c^{-1}\bm \gamma^{\dag}-\| L \|^2
+|\bm \gamma V_c^{-1}\delta{\bm x}'+Z|^2}{1-\| L\|^2+|Z|^2}\nonumber \\&&
\frac{1}{2\pi\sqrt{\det {\mathbb V}_c}}\exp\left[-\frac{1}{2}\delta {\bm x}
{\mathbb V}_c^{-1}\delta {\bm x}'\right]\label{Wig}
\end{eqnarray}
where $\delta \bm x\equiv {\bm x} - {\bm x}_c$ and $Z \equiv\int_{-\infty}^0dt\,z(t)L(t)$.
This Wigner function depends on the measurement result $z(t)$, $t\in(-\infty,0]$ through
four quantities, the two components of ${\bm x}_c$ (through $\delta \bm{x}$) and
the real and imaginary parts of $Z$: $Z$ determines the shape of $W$, and ${\bm x}_c$
describes the translation of $W$. The random vector $\bm Z= (\Re[Z], \Im[Z])$ has a
two-dimensional probability density of \begin{equation} w[\bm Z]=\frac{1-\| L\|^2 +\bm Z\bm Z'}{2\pi
\sqrt{\det {\mathbb V}_L}} \exp[-\bm Z {\mathbb V}^{-1}_L \bm Z'/2], \end{equation} where matrix
${\mathbb V}_L\equiv \int_{-\infty}^0 dt \bm{L}'\bm{L}$.
The pre-factor in the Wigner function [cf. Eq.\,\eqref{Wig}] is a second-order
polynomial in $\bm x$, which resembles that of a single-photon. For
strong non-Gaussianity, significant $\bm \gamma$ and $\|L\|^2$ (making $\bm \gamma$
terms in the pre-factor to prevail) are essential --- these physically
correspond to requiring that the photon mode must influence the fluctuation
of $\hat x$ and $\hat p$, as well as $\hat z$ strongly. It in turn requires the photon
coherence time to be comparable to the {\it measurement time scale} characterized
by $\bm K$. It is possible for small-scale optomechanical devices with
high-frequency mechanical oscillators. The corresponding photon can be generated
by a cavity QED scheme \cite{Kimble1, Kimble2, Walther}. While for large-scale
gravitational-wave detectors, the time scale is $\sim 10$ ms and it is challenging to create
photons with comparable coherent length. However, developments of low-frequency
squeezing source \cite{Corbitt2006} will eventually solve this issue.
\begin{figure}
\includegraphics[width=0.48\textwidth, bb=0 0 745 250,clip]{Wigner_fm_new.eps}
\includegraphics[width=0.48\textwidth, bb=0 0 745 250,clip]{Wigner_os_new.eps}
\caption{(Color online) Distributions of measurement results (left panels) and
the corresponding Wigner function of the oscillator given the most probable
measurement results (middle panels) and less probable results but with a significant
non-Gaussianity (right panels). The upper panels show the case of non-Gaussian
state-preparation with future gravitational-wave detectors, and the lower panels for small-scale
devices. We used normalized coordinates (with respect to $x_q$ and $p_q$) and
introduced $\Omega_q\equiv \sqrt{\hbar m/\alpha^2}$. \label{Wigner}}
\end{figure}
With Eq.\,\eqref{Wig}, we can justify the simple-case qualitative results. We
use the same specifications listed in Table \ref{tab1}. As an example, we assume
a photon shape of $f(t)=\sqrt{2\gamma_f}e^{(\gamma_f+i\omega_f)t}$ and specify
that $\omega_f/2\pi=\gamma_f/2\pi=70$ Hz in the case of future gravitational-wave detectors,
and $\omega_f/\omega_m=0.1,\,\gamma_f/\omega_m=0.3$ for small-scale experiments.
The Wigner functions for some given measurement results are shown in
Fig. \ref{Wigner}. In both cases, there are negative regions in the Wigner function,
which is a unique feature of the quantumness. The prepared non-Gaussian quantum state can be
independently verified using the quantum tomography protocol developed in Ref. \cite{state_ver}
that allows sub-Heisenberg accuracy of Wigner function reconstruction, which is crucial
for revealing those negative regions.
{\it Acknowledgment.} We thank our colleagues at Caltech Theoretical
Astrophysics group and LIGO Macroscopic-Quantum-Mechanics (MQM) group
for fruitful discussions. S.D., H.M.-E., H.Y. and Y.C. are supported by
the Alexander von Humboldt Foundation's Sofja Kovalevskaja Programme,
NSF grants PHY-0653653 and PHY-0601459, as well as the David and Barbara
Groce startup fund at Caltech. H.M. is supported by the Australian Research Council.
\begin{thebibliography}{99}
\bibitem{Marquardt} F. Marquardt, and S. M. Girvin, Physics {\bf 2}, 40 (2009) and references therein.
\bibitem{92BookBrKh} {V. B. Braginsky, and F. Khalili}, {\it Quantum Measurement},
Cambridge University Press (1992).
\bibitem{Bell1987} J. S. Bell, {\em Speakable and Unspeakable in Quantum Mechanics}, Cambridge University Press, Cambridge, 1987.
\bibitem{Braunstein2005} S. L. Braunstein, and P. van Loock, Rev. Mod. Phys. {\bf 77}, 513 (2005).
\bibitem{Santamore} D. H. Santamore {\it et al.}, Phys. Rev. B {\bf 70}, 144301 (2004).
\bibitem{Martin} I. Martin, and W. H. Zurek, Phys. Rev. Lett. {\bf 98}, 120401 (2007).
\bibitem{Thompson} J. Thompson {\it et al.}, Nature {\bf 452}, 72 (2008).
\bibitem{Miao} H. Miao {\it et al.}, Phys. Rev. Lett. {\bf 103}, 100402 (2009).
\bibitem{Jacobs} K. Jacobs {\it et al.}, Phys. Rev. Lett. {\bf 98}, 147201 (2007) and Phys. Rev. Lett. {\bf 99}, 117203 (2007).
\bibitem{Clerk} A. A. Clerk, and D. W. Utami, Phys. Rev. A {\bf 75}, 042302 (2007).
\bibitem{LaHaye} M. D. LaHaye {\it et al.}, Nature (London) {\bf 459}, 960 (2009).
\bibitem{Mancini} S. Mancini {\it et al.}, Phys. Rev. A {\bf 55}, 3042 (1997).
\bibitem{Bose} S. Bose {\it et al.}, Phys. Rev. A {\bf 56}, 4175 (1997).
\bibitem{Marshall} W. Marshall {\it et al.}, Phys. Rev. Lett. {\bf 91}, 130401 (2003).
\bibitem{Kimble1} C. K. Law, and H. J. Kimble, Journal of Modern Optics, {\bf 44}, 2067 (1997).
\bibitem{Kimble2} J. Mckeever {\it et al.}, Science {\bf 303}, 1992 (2004).
\bibitem{Walther} M. Keller {\it et al.}, Nature (London) {\bf 431}, 1075 (2004).
\bibitem{LIGO} LIGO Scientific Collaboration, New Journal of Physics {\bf 11}, 073032 (2009).
\bibitem{AdvLIGOsite} \url{http://www.ligo.caltech.edu/advLIGO}.
\bibitem{Zhang2003} J. Zhang {\it et al.}, Phys. Rev. A {\bf 68}, 013808 (2003).
\bibitem{Mancini2} S. Mancini {\it et al.}, Phys. Rev. Lett. {\bf 90}, 137901 (2003).
\bibitem{Romero} O. Romero-Isart, M. L. Juan, R. Quidant, and J. I. Cirac, arXiv: 0909.1469 (2009).
\bibitem{Hopkins} A. Hopkins {\it et al.}, Phys. Rev. B {\bf 68}, 235328 (2003).
\bibitem{Gardiner} C. Gardiner and P. Zoller, Quantum noise (Springer-Verlag, Berlin, 2004), 3rd ed.
\bibitem{Milburn} G. Milburn, Quantum Semiclass. Opt. {\bf 8}, 269 (1996).
\bibitem{Doherty1} A. Doherty {\it et al.}, Phys. Rev. A {\bf 60}, 2380 (1999).
\bibitem{Doherty2} A. Doherty, and K. Jacobs, Phys. Rev. A {\bf 60}, 2700 (1999).
\bibitem{state_pre} H. M\"{u}ller-Ebhardt {\it et al.}, Phys. Rev. A {\bf 80}, 043802 (2009).
\bibitem{Marquardt2} F. Marquardt {\it et al.}, Phys. Rev. Lett. {\bf 99}, 093902 (2007).
\bibitem{Rae} I. Wilson-Rae {\it et al.}, Phys. Rev. Lett. {\bf 99}, 093901 (2007).
\bibitem{Genes} C. Genes {\it et al.}, Phys. Rev. A {\bf 77}, 033804 (2008).
\bibitem{Corbitt2006} T. Corbitt {\it et al.}, Phys. Rev. A {\bf 73}, 023801 (2006).
\bibitem{state_ver} H. Miao {\it et al.}, \url{http://arxiv.org/abs/0905.3729}.
\end{thebibliography}
\end{document} |
\begin{document}
\title{Propagation of coupled dark-state polaritons and storage of light in a tripod medium}
\author{Stefan Beck and Igor E. Mazets}
\affiliation{
Vienna Center for Quantum Science and Technology, Atominstitut, TU~Wien,~Stadionallee~2,~1020~Vienna,~Austria; \\
Wolfgang Pauli Institute c/o Fakult\"{a}t f\"{u}r Mathematik,
Universit\"{a}t Wien, Oskar-Morgenstern-Platz 1, 1090 Vienna, Austria}
\begin{abstract}
We consider the slow light propagation in an atomic medium with a tripod level scheme. We show that the coexistence
of two types of dark-state polaritons leads to the propagation dynamics, which is qualitatively different from that
in a $\Lambda $-medium, and allows therefore
for very efficient conversion of signal photons into spin excitations. This
efficiency is shown to be very close to 1 even for very long
signal light pulses, which
could not be entirely compressed into a $\Lambda $-medium at a comparable strength of the control field.
\end{abstract}
\maketitle
\section{Introduction}
\label{s-i}
The phenomenon of the electromagnetically induced transparency (EIT) based on the creation of a coherent superposition
of long-living quantum states in a medium irradiated by two laser light fields has been known for a long time,
see, e.g., the review
\cite{HarrisPT}. The EIT has become especially interesting and promising for the quantum memory applications
\cite{qm} since
the discovery of the method to ``stop the light'' by conversion of photons of the weak (signal) field
into spin excitations of a medium by adiabatically turning off the second (control) field \cite{LF1,LF2}.
Experimental realizations followed the publication of the idea \cite{LF1}
immediately and employed as the EIT medium ultracold atoms \cite{Liu},
hot atomic vapor in a cell \cite{Phillips}, and doped crystal \cite{Turukhin}.
\begin{figure}\label{au-1}
\end{figure}
The $\Lambda $-scheme containing three quantum levels coupled to laser radiation is the simplest one that admits
coherent population trapping and the light propagation in a medium in the EIT regime. The tripod scheme that
contains three low-energy, stable (or metastable) sublevels supports two different quantum superpositions
that are decoupled from coherent three-component laser radiation resonant to the optically excited state.
Various aspects of the slow-light propagation and storage in a tripod medium have been
theoretically studied \cite{Paspalakis,Petrosyan,Mazets2005,R1,gradient-sto,log-op,Ruseckas}. There are numerous
experimental studies of the EIT in media with the tripod level scheme
\cite{tripod2000e,Karpa,tripod2009e,tripod2011e,tripod2011ee,tripod2014e}, culminating in the demonstration
of the storage and retrieval of light pulses at a single-photon level \cite{Pan}.
The dynamical coupling between dark-states potations of the two types arising due to the time dependence
of the control fields has been introduced in Ref. \cite{R1} but not fully investigated.
Indeed, the Hong-Ou-Mandel interferometer operation in a tripod medium \cite{R1} requires the change of
the control field parameters during the time interval of no signal photon coming. In this paper we consider the
situation of signal photons interacting with a tripod medium where the coupling between the two types
of dark-state polaritons is present.
The signal laser pulse can be fully converted into spin excitations in a conventional $\Lambda $-medium
only if the medium is long enough to
accommodate the whole slowed-down incoming pulse (spatially compressed in proportion to the ratio of its
group velocity in the EIT regime and in the vacuum) \cite{Liu,Phillips,Turukhin}.
In a tripod medium the existence of two coupled dark-state modes allows
for a conversion of almost the whole signal light field into spin
excitations under less restrictive conditions.
Note that in a similar case a conventional $\Lambda $-type medium irradiated by
a control field of comparable strength and characterized by a
comparable slow group velocity would accommodate only a part of
the signal pulse.
\section{Basic equations}
\label{s-ii}
We consider a medium consisting of atoms with the level scheme shown in Fig.~\ref{au-1}. The ground-state
sublevels $|0\rangle $, $|1\rangle $, and $|2\rangle $
are coupled to an excited state $|e\rangle $ by three coherent fields. The control fields driving the
transitions $|1\rangle \leftrightarrow |e\rangle $ and $|2\rangle \leftrightarrow |e\rangle $ are
characterized by Rabi frequencies $\Omega _1 \equiv \Omega \cos \beta $ and $\Omega _2 \equiv \Omega \sin \beta $,
respectively. These fields are phase-locked or obtained from a common source by acousto-optical modulator in
order to provide perfect cross-correlation of their noise and to prevent thus a noise-induced decay of the
quantum coherence between the states $|1\rangle $ and $ |2\rangle $ \cite{Dalton}.
The transition $|0\rangle \leftrightarrow |e\rangle $ is driven by a quantized signal field.
We can consider a field propagating freely through the atomic sample \cite{Pan} or
in a nanofiber \cite{Arno1,Kimble2012}; the propagation direction of the signal field defines the axis $z$.
The quantum field for the signal photons can
be expressed as $\hat{\cal E}(z,t)\exp [-i\omega _{e0}(t-z/c)]$, where
$\hat{\cal E}$ is its slowly varying amplitude subjected to the standard bosonic commutation rules,
$\omega _{e0}$ is the resonance frequency of the $|0\rangle \leftrightarrow |e\rangle $ atomic transition,
$c$ is the speed of light (in vacuum or in the nanofiber, depending on the type of the set-up).
In contrast to Ref. \cite{R1}, we assume the amplitudes of the two control fields to be constant, but instead
we introduce the detuning of the second field $\nu (t)$, which is time-dependent in a general case, see
Fig.~\ref{au-1}. It is convenient to express the atomic collective spin variables through bosonic fields
$\hat f_\alpha (z,t)$, where $\hat f_\alpha (z,t)$ annihilates an atom in the state
$|\alpha \rangle $, $\alpha =0,1,2,e$, at the point $z$ at time $t$. The set of equations for these bosonic fields
and the signal photons is
\begin{eqnarray}
\frac \partial {\partial t}\hat {\cal E }&=& -c\frac \partial {\partial z}\hat {\cal E }+i\kappa
\hat f_0^\dag \hat f_e, \label{e-E}\\
\frac \partial {\partial t}\hat {f}_0 &=& i\kappa \hat {\cal E}^\dag \hat f_e , \label{e-f0} \\
\frac \partial {\partial t}\hat {f}_e &=&i\kappa \hat{\cal E}\hat f_0+i\Omega (\cos \beta \hat f_1 +
\sin \beta \hat f_2), \label{e-fe} \\
\frac \partial {\partial t}\hat {f}_1&=& i\Omega \cos \beta \hat f_e , \label{e-f1} \\
\frac \partial {\partial t}\hat {f}_2&=&i\nu (t)\hat f_2+i\Omega \sin \beta \hat f_e , \label{e-f2}
\end{eqnarray}
which is an obvious generalization from the case of EIT in a $\Lambda $-medium \cite{Mazets2014}
to the case of tripod medium. The atom-field coupling constant $\kappa =
d_{e0} \sqrt{\omega _{e0}/(2\hbar \varepsilon _0A) } $, where $d_{e0}$ is the electric dipole moment of the
transition $|0\rangle \leftrightarrow |e\rangle $ and $A$ is the effective area of the signal beam, can be
expressed through the optical density $s$ of the medium for the resonant signal light as
$\kappa =\sqrt{ \gamma s c/(2N)}$, where $N$ is the number of atoms inside the interaction volume $AL$ and
$L$ is the atomic sample length in the propagation direction. The radiative decay rate $\gamma $ arises
due to the coupling of the $|0\rangle \leftrightarrow |e\rangle $ transition to side modes of the
electromagnetic field, which is not explicitly written, for the sake of brevity, in Eq. (\ref{e-fe}).
Integrating out the vacuum modes of the electromagnetic field, we would get, instead of Eq. (\ref{e-fe}),
\begin{equation}
\frac \partial {\partial t}\hat {f}_e =i\kappa \hat{\cal E}\hat f_0+i\Omega (\cos \beta \hat f_1 +
\sin \beta \hat f_2)-\gamma \hat f_e +\hat \varsigma _e(z,t) ,
\label{e-fe-g}
\end{equation}
where $\hat \varsigma _e(z,t)$ is a delta-correlated Langevin-type operator \cite{LF1,LF2} that
describes vacuum quantum noise and is needed to preserve bosonic commutation properties of $\hat f_e$ after
introducing the decay term $-\gamma \hat f_e$.
We work in the weak-pulse limit, i.e., assume that the linear density of dark-state polaritons is always
much less than the linear density of atoms $n_\mathrm{1D}=N/L$,
which are initially all in the state $|0\rangle $ \cite{Mazets2014,Kuang}. Then we linearize
Eqs. (\ref{e-E}--\ref{e-f2}) by replacing $\hat f_0$ by a number $\sqrt{n_\mathrm{1D}}$ and find in a standard way
\cite{LF1,LF2,R1}, i.e., by adiabatic elimination of excitation modes separated from the
dark-state polaritons by large energy gaps, the equations of motion
\begin{eqnarray}
\left( \frac \partial {\partial t}+v_\mathrm{g} \frac \partial {\partial z}\right) \hat \Psi &=&
i\tilde \nu (t) \sin \tilde \beta (\sin \tilde \beta \hat \Psi +\cos \tilde \beta \hat \Upsilon ) ,
\label{e-X} \\
\frac \partial {\partial t}\hat \Upsilon &=&
i\tilde \nu (t) \cos \tilde \beta (\sin \tilde \beta \hat \Psi +\cos \tilde \beta \hat \Upsilon )
\label{e-Y}
\end{eqnarray}
for two dark-state polariton fields
\begin{eqnarray}
\hat \Psi &=& \cos \theta \hat{\cal E}-\sin \theta (\cos \beta \hat f_1+\sin \beta \hat f_2) ,
\label{def-X} \\
\hat \Upsilon &=& \sin \beta \hat f_1-\cos \beta \hat f_2.
\end{eqnarray}
The mixing angle in Eq. (\ref{def-X}) is defined by the usual expression
$\tan \theta =\kappa \sqrt{n_\mathrm{1D}}/\Omega $, and $v_\mathrm{g}=c \cos ^2 \theta $ \cite{LF1,LF2}. Also we get
$\tilde \nu (t) =(\sin ^2\theta \sin ^2\beta +\cos ^2 \beta )\nu (t)$ and $\tan \tilde \beta =\sin \theta
\tan \beta $.
We assume the slow-light regime, $\Omega \ll \kappa \sqrt{n_\mathrm{1D}}$ and, hence, $\sin \theta \approx 1$,
$v_\mathrm{g}\ll c$. In this limit, $\tilde \nu (t)\approx \nu (t)$ and $\tilde \beta \approx \beta $.
In what follows, we do not distinguish therefore between the values with and without tilde
and omit this symbol over $\nu $ and $\beta $.
Before specifying the initial and boundary conditions to Eqs. (\ref{e-X}, \ref{e-Y}), we reformulate them
for classical complex variables $\Psi $ and $\Upsilon $. This may be done for coherent states of the dark-state
polariton fields as well as for single-quantum states. In the latter case, we use the Schr\"{o}dinger representation
and write the wave function of the system as $|\Xi (t)\rangle =\int _0^L dz\, [\Psi (z,t)\hat \Psi ^\dag (z,t)+
\Upsilon (z,t)\hat \Upsilon ^\dag (z,t)]|\mathrm{vac}\rangle +|\Xi _\mathrm{ph}(t)\rangle $, where
$|\mathrm{vac}\rangle $ is the vacuum state of excitations (all atoms being in their internal state $|0\rangle $),
and $|\Xi _\mathrm{ph}(t)\rangle $ describes a single photon either before entering the medium (at $z<0$) or
after leaving it (at $z>L$). The evolution of $|\Xi _\mathrm{ph}(t)\rangle $ is not interesting for us, and the
evolution of the remaining component of $|\Xi (t)\rangle $ is given by Eqs. (\ref{e-X}, \ref{e-Y}) with
the operators $\hat \Psi $ and $\hat \Upsilon $ replaced by the complex fields $\Psi $ and $ \Upsilon $,
respectively.
It is convenient to introduce new variables, $\tau =t-z/v_\mathrm{g}$ and $\zeta =z/v_\mathrm{g}$.
Then $\nu (t) =\nu (\tau +\zeta )$ and the
equations of motion for dark-state polaritons become
\begin{eqnarray}
\frac \partial {\partial \zeta } \Psi &=&
i\nu \sin \beta (\sin \beta \Psi +\cos \beta \Upsilon ) ,
\label{e-Xzeta} \\
\frac \partial {\partial \tau } \Upsilon &=&
i \nu \cos \beta (\sin \beta \Psi +\cos \beta \Upsilon ) .
\label{e-Ytau}
\end{eqnarray}
The boundary and initial conditions are
\begin{equation}
\Psi (0,\tau ) =\Psi _0(\tau ), \qquad \Upsilon (\zeta ,0)=0,
\label{bic}
\end{equation}
where the function $\Psi _0(\tau )$ is determined by the shape of the incoming signal
light pulse. We assume
that $\Psi _0(\tau )=0$ for $\tau \leq 0$.
\section{Propagation dynamics}
\label{sec-iii}
The main features of the dark-polariton dynamics can be determined from the solution of
Eqs. (\ref{e-Xzeta}--\ref{bic}) in the case of constant two-photon detuning, $\nu \equiv \nu _0 =\, \mathrm{const}$.
A simple phase transformation
\begin{eqnarray}
\Psi (\zeta ,\tau )&=&e^{i\chi (\zeta ,\tau )}\Psi ^\prime (\zeta ,\tau ), \quad
\Upsilon (\zeta ,\tau )=e^{i\chi (\zeta ,\tau )}\Upsilon ^\prime (\zeta ,\tau ), \nonumber \\
{\chi (\zeta ,\tau )}&=& \nu _0 (\sin ^2 \beta \, \zeta +\cos ^2 \beta \, \tau )
\label{phase-tr}
\end{eqnarray}
casts Eqs. (\ref{e-Xzeta}--\ref{e-Ytau}) into the form
\begin{eqnarray}
\frac \partial {\partial \zeta } \Psi ^\prime &=&
i\nu _0 \sin \beta \, \cos \beta \Upsilon ^\prime ,
\label{e-Xprime} \\
\frac \partial {\partial \tau } \Upsilon ^\prime &=&
i \nu _0 \sin \beta \, \cos \beta \Psi ^\prime .
\label{e-Yprime}
\end{eqnarray}
A similar set of equations has been derived in Ref. \cite{R1} for a different
driving protocol of the tripod medium where the coupling between
the two dark-state polaritons was induced by changing the angle $\beta $
in time. We note that the influence of this coupling on the pulse
propagation was not studied there.
Eqs. (\ref{e-Xprime}, \ref{e-Yprime})
can be easily solved by means of Laplace's transform. Also we can note that after elimination
of one of the fields the equation for the remaining one is reduced to the relativistic Klein-Gordon equation
\begin{eqnarray}
\frac \partial {\partial \zeta }\frac \partial {\partial \tau } \Psi ^\prime &=&
\frac 14 \left( \frac {\partial ^2}{\partial T^2 } -
\frac {\partial ^2}{\partial X^2 }\right) \Psi ^\prime \nonumber \\
&=&-(\nu _0 \sin \beta \, \cos \beta )^2\Psi ^\prime ,
\label{KGe}
\end{eqnarray}
where
$
\frac {\partial }{\partial T}\equiv \frac {\partial }{\partial \tau }+\frac {\partial }{\partial \zeta }
$ and $
\frac {\partial }{\partial X}\equiv \frac {\partial }{\partial \tau }-\frac {\partial }{\partial \zeta }
$.
The Green's function for the Klein-Gordon equation is well known \cite{Greiner}
and can be used to solve Eqs.
(\ref{e-Xprime},~\ref{e-Yprime}).
Finally, we obtain the solutions,
\begin{widetext}
\begin{eqnarray}
\Psi (\zeta ,\tau )&=& e^{i\nu_0\sin ^2\beta \, \zeta } \left[ \Psi _0(\tau ) -
\nu _0 \sin \beta \cos \beta \int _0^\tau d\tau ^\prime \, \Psi _0(\tau -\tau ^\prime )
e^{i\nu_0\cos ^2\beta \, \tau ^\prime } \sqrt{\frac \zeta {\tau ^\prime }}
J_1 (2\nu _0 \sin \beta \cos \beta \, \sqrt{\zeta {\tau ^\prime }})\right] ,
\label{sol-Psi} \\
\Upsilon (\zeta ,\tau )&=&i\nu _0 \sin \beta \cos \beta \, e^{i\nu_0\sin ^2\beta \, \zeta }
\int _0^\tau d\tau ^\prime \, \Psi _0(\tau -\tau ^\prime )
e^{i\nu_0\cos ^2\beta \, \tau ^\prime }J_0 (2\nu _0 \sin \beta \cos \beta \, \sqrt{\zeta {\tau ^\prime }}),
\label{sol-Y}
\end{eqnarray}
\end{widetext}
where $J_0$ and $J_1$ are the Bessel functions of the zeroth and first order, respectively.
First we analyze Eq. (\ref{sol-Psi}). The first term in its r.h.s. describes, apart from gaining a
$\zeta $-dependent phase shift, pulse propagation at the group velocity $v_\mathrm{g}$. However, this regime
holds only for small values of $\zeta $, where the integrand in the second term is small because of the small
values taken by the Bessel function $J_1 (2\nu _0 \sin \beta \cos \beta \, \sqrt{\zeta {\tau ^\prime }})$.
This propagation picture is typical for dark-state polaritons in a $\Lambda $-medium. However, it will be distorted
at larger distances, when the second term becomes important. What happens then, one can see from the analysis
of the dynamics of the $\Upsilon $ field.
This field corresponds to spin excitations, which possess zero group velocity and are induced by coupling to
the $\Psi $-type polaritons through the finite two-photon detuning $\nu _0$.
Eqs. (\ref{e-Xprime}, \ref{e-Yprime}) together with the initial condition $\Upsilon (\zeta ,0)=0$ yields a simple
conservation law
\begin{equation}
\int _0^{\zeta _L}\! d\zeta \, |\Upsilon (\zeta ,\tau )|^2 =
\int _0^\tau \! d\tau ^\prime |\Psi (0,\tau ^\prime )|^2
-\int _0^\tau \! d\tau ^\prime |\Psi (\zeta _L ,\tau ^\prime )|^2,
\label{c-law}
\end{equation}
where $\zeta _L=L/v_\mathrm{g}$. It relates the total population of the $\Upsilon $-type mode inside the
medium of the length $L$ to the loss of the output pulse energy at the exit from the medium compared to
the case of propagation in a $\Lambda $-medium at the group velocity $v_\mathrm{g}$. We define the
efficiency of the conversion of signal photons to $\Upsilon $-type spin excitations as
\begin{equation}
\eta (\tau ) = \frac{ \int _0^{\zeta _L}\! d\zeta \, |\Upsilon (\zeta ,\tau )|^2 }{\int _0^\infty
\! d\tau ^\prime |\Psi (0,\tau ^\prime )|^2}.
\label{def-eta}
\end{equation}
From Eq. (\ref{sol-Y}) we can evaluate the numerator of Eq. (\ref{def-eta}) if we recall the formula for
the integral of a product of two Bessel functions \cite{AbrSteg1}:
\\
\begin{widetext}
\begin{eqnarray}
\int _0^{\zeta _L} d\zeta \, |\Upsilon (\zeta ,\tau )|^2&=&
\nu _0\sin \beta \cos \beta \int _0^\tau d\tau _1 \int _0^\tau d\tau _2 \,
\Psi _0(\tau _1)\Psi _0^*(\tau _2) e^{-i\nu _0\cos^2\beta (\tau _1 -\tau _2)} \frac 1{\tau _1 -\tau _2}
\nonumber \\*
&& \times \Big{ \{ } \sqrt{\tau -\tau _2} J_0[2\nu _0\sin \beta \cos \beta \sqrt{ \zeta _L(\tau-\tau _1)}]
J_1[2\nu _0\sin \beta \cos \beta \sqrt{ \zeta _L(\tau-\tau _2) }] \nonumber \\*
&& - \sqrt{\tau -\tau _1} J_0[2\nu _0\sin \beta \cos \beta \sqrt{ \zeta _L(\tau-\tau _2)}]
J_1[2\nu _0\sin \beta \cos \beta \sqrt{ \zeta _L(\tau-\tau _1) }] \Big{ \} } .
\label{int-Y1}
\end{eqnarray}
\end{widetext}
\begin{figure}\label{au-2}
\end{figure}
\begin{figure}\label{au-3}
\end{figure}
Let $\tau _\mathrm{p}$ be the typical time scale of the incoming signal light pulse and consider Eq. (\ref{int-Y1})
for $\tau \gtrsim \tau _\mathrm{p}$. In this case the time integrals practically converge to their final values on
the scale $\tau _{1,2} \lesssim \tau $. Assume that
\begin{equation}
|\nu _0\sin \beta \cos \beta |\sqrt{ \zeta _L\tau }\gg 1.
\label{cond-1}
\end{equation}
In this case we can recall the asymptotic expression \cite{AbrSteg2}
for the Bessel function of the order $n$ of a large argument $x\rightarrow \infty $,
$J_n(x) =\sqrt{2/(\pi x)}\cos ( x -\frac {n\pi }2-\frac \pi 4)$, and find an approximation for Eq. (\ref{int-Y1})
\begin{eqnarray}
\int _0^{\zeta _L} d\zeta \, |\Upsilon (\zeta ,\tau )|^2=
\nu _0\sin \beta \cos \beta \int _0^\tau d\tau _1 \int _0^\tau d\tau _2 \,
\Psi _0(\tau _1) && \nonumber \\
\qquad \times \Psi _0^*(\tau _2) e^{-i\nu _0\cos^2\beta (\tau _1 -\tau _2)} ~\qquad ~ && \nonumber \\
\qquad \times
\frac {\sin [|\nu _0\sin \beta \cos \beta |\sqrt{ \zeta _L/\tau }\, (\tau _1-\tau _2)]}{\pi (\tau _1 -\tau _2)}.
~~~~~ &&
\label{int-Y2}
\end{eqnarray}
If we consider asymptotically long times, such that
\begin{equation}
|\nu _0\sin \beta \cos \beta |\sqrt{ \zeta _L/\tau } \ll \frac 1{\tau _\mathrm{p}},
\label{cond-2}
\end{equation}
we immediately see that asymptotically
\begin{eqnarray}
\int _0^{\zeta _L} d\zeta \, |\Upsilon (\zeta ,\tau )|^2\vert _{\tau \rightarrow +\infty } \approx
\frac {\nu _0 \sin \beta \cos \beta }\pi \sqrt{ \frac {\zeta _L}\tau } &&
\nonumber \\
~\qquad \times \left| \int _0^\infty d\tau ^\prime \, \Psi _0 (\tau ^\prime )
e^{-i\nu_0 \cos ^2 \beta \, \tau ^\prime } \right| ^2 .
&&
\label{int-Y3}
\end{eqnarray}
This means that the spin excitations in the medium, being coupled to the $\Psi $-type dark-state polariton mode
via the two-photon detuning $\nu _0$, decay in a very slow, non-exponential way, namely, proportionally
to $1/\sqrt{\tau }$.
What occurs for $\tau $ larger than but close to $\tau _\mathrm{p}$, requires further analysis.
Recall that we are interested in long pulses, which cannot be entirely fit into the medium. Therefore we
assume $\tau _\mathrm{p}\gtrsim \zeta _L=L/v_\mathrm{g}$.
Also we need, in order to satisfy the condition (\ref{cond-1}),
to have values of $\beta $ not too close to $0, ~\pm \frac \pi 2$, or $\pi $. In other words, we assume that
$\cos \beta $ and $\sin \beta $ are of the same order.
If the incoming signal photons are tuned exactly in resonance with the $|0\rangle \leftrightarrow |e\rangle $
transition, then $|\nu _0\sin \beta \cos \beta |\sqrt{ \zeta _L/\tau } \lesssim |\nu _0|\cos ^2 \beta $,
the convergence of the integrals in Eq. (\ref{int-Y2}) is achieved on the time scale of about
$1/(|\nu _0|\cos ^2 \beta )$ and Eq. (\ref{int-Y3}) remains a satisfactory estimation. The conversion
efficiency $\eta $ thus remains well below 1. However, if the detuning of the
signal pulse from the resonance is chosen such that $\Psi _0(\tau )=|\Psi _0 (\tau )| \exp (i\nu _0 \cos ^2\beta \,
\tau )$ then, in order to determine the time scale of convergence of the time integrals in Eq. (\ref{int-Y2}), we
have to compare $|\nu _0\sin \beta \cos \beta |\sqrt{ \zeta _L/\tau } \sim
|\nu _0\sin \beta \cos \beta |\sqrt{ \zeta _L/\tau _\mathrm{p} } $ with the spectral width of $|\Psi _0|$, i.e.,
with $1/\tau _\mathrm{p}$. By taking $|\nu _0|$ large enough, one may attain
$|\nu _0\sin \beta \cos \beta |\sqrt{ \zeta _L \tau _\mathrm{p} } \gg 1$. In this case,
the function $\sin [|\nu _0\sin \beta \cos \beta |\sqrt{\zeta _L/\tau }(\tau _1-\tau _2)]/[\pi (\tau _1-\tau _2)]$
can be approximated by a delta-function, $\delta (\tau _1-\tau _2)$. Therefore the efficiency of conversion
of signal photons into spin excitations of the $\Upsilon $-type is
\begin{equation}
\eta (\tau ) \approx
\frac{ \int _0^\tau \! d\tau ^\prime |\Psi (0,\tau ^\prime )|^2 }{\int _0^\infty
\! d\tau ^\prime |\Psi (0,\tau ^\prime )|^2}.
\label{extrem-eta}
\end{equation}
For $\tau \gtrsim \tau _\mathrm{p}$ (say, $\tau \approx 3 \tau _\mathrm{p}$) this efficiency may get very close to 1.
Of course, at very large times $\eta (\tau )$ decays, as we have shown, in proportion to $1/\sqrt{\tau }$. But
one can prevent such a long-time decay of the spin excitations
by radiating photons out of the medium
by sudden switching off the control fields (or by sudden changing $\nu $ from $\nu _0$ to 0 and
thus decoupling the $\Psi $-type and $\Upsilon $-type polaritons). This means that the use of a tripod medium permits
one to trap and convert into spin excitations very long signal light pulses, which would be only partially fit into a
medium with a standard $\Lambda $-scheme of atomic levels. The retrieval of the stored quanta may be implemented
using the standard protocol \cite{R1} (note also the observed modulation of the retrieved pulse shape \cite{Pan}). For example, one may retrieve stored signal photons by applying the two control
fields with the new amplitudes $\Omega _1^\mathrm{new}= \sin \beta \, \Omega $ and
$\Omega _2^\mathrm{new}=-\cos \beta \, \Omega $ and
zero detuning, $\nu ^\mathrm{new}(t)=0$. Then the spin excitation stored in the medium turns into
a dark-state polariton that moves at the group velocity $v_\mathrm{g}$ and leaves the medium without
coupling to the dark-state polariton of the other type.
The numerical evaluation of the efficiency of conversion of signal photons into spin excitations based on
Eq. (\ref{sol-Y}) is presented in Figs.~\ref{au-2} and \ref{au-3}. The incoming pulse used here is
slightly (at the level of $10^{-4}$)
modified in comparison to a Gaussian in order to formally
provide its continuity at $\tau =0$, since
$\Psi _0(0)=0$ by our assumption.
We can see from Fig.~\ref{au-2}
that for a perfectly resonant
signal light the maximum efficiency is always appreciably below 1. On the contrary, if the probe light is detuned
by $-\nu _0\cos ^2 \beta $ from the frequency $\omega _{e0}$, then the values of $\eta $ very close to 1 can be
attained, see Fig.~\ref{au-3}(a). Note that for the $L/(v_\mathrm{g}\tau _\mathrm{p})$ equal to 1.0 and 0.5 the
maximum fraction of the Gaussian incident pulse that can be simultaneously contained inside the medium is
0.843 and 0.521, respectively, while the maximum values of $\eta $ on the Fig.~\ref{au-3}(a) for the
respective lengths are 0.999 (solid line) and 0.990 (long-dashed line).
\section{Discussion}
\label{sec-iv}
Now we examine the effects of the signal light absorption on the propagation regime considered in the previous Section.
Eq. (\ref{cond-1}) ensures highly efficient conversion of signal photons into spin excitations on intermediate
time scales of about few incoming pulse duration times $\tau _\mathrm{p}$. This high efficiency implies
a large optical density of the medium, $s\gg 1$. Also we consider $\beta \approx \frac \pi 4$.
Since $v_\mathrm{g}\approx c\Omega ^2/(\kappa ^2 n_\mathrm{1D})\ll c$ and $\kappa ^2n_\mathrm{1D}L =
\gamma c s/2$, we can rewrite Eq. (\ref{cond-1}) as
\begin{equation}
|\nu _0 |\sqrt{ \frac {\sqrt{s} \tau _\mathrm{p}}{\Delta \omega _\mathrm{EIT}} }\gg 1,
\label{cond-3}
\end{equation}
where
\begin{equation}
\Delta \omega _\mathrm{EIT} = \frac {\Omega ^2}{\gamma \sqrt{s}}
\label{w-EIT}
\end{equation}
is the width of the EIT transmission window in an optically dense medium \cite{LF2,Gornyi} (see also
the review \cite{review1}). To minimize the signal pulse absorption, we have to assume its duration to be much longer
than $1/\Delta \omega _\mathrm{EIT}$:
\begin{equation}
\tau _\mathrm{p} =\frac {K_\mathrm{p}}{\Delta \omega _\mathrm{EIT}}, \qquad K_\mathrm{p}\gg 1.
\label{def-K}
\end{equation}
Also the two-photon detuning must be small compared to the width of the EIT window, $|\nu _0|\ll
\Delta \omega _\mathrm{EIT}$. This means that for a large optical depth and long enough pulses,
\begin{equation}
\sqrt{s}K_\mathrm{p} \gg \left| \frac {\Delta \omega _\mathrm{EIT}}{\nu _0}\right| ^2 ,
\label{cond-4}
\end{equation}
the condition (\ref{cond-1}) is satisfied.
To summarize, we investigated theoretically the propagation of weak signal pulses in a medium with a tripod
scheme of atomic levels in the slow-light regime.
Dark-state polaritons of two kinds exist in such a medium \cite{R1}. When they are mutually coupled
via non-zero detuning $\nu (t)$ of one of the control fields, the propagation becomes non-trivial. In the case
of constant detuning, $\nu (t)\equiv \nu _0$, the propagation bears analogy with the relativistic physics because
its Green's function is formally identical to that of the Klein--Gordon equation \cite{Greiner}. The light pulse leaving
the medium has a very long ``tail" decreasing as $1/\sqrt{\tau }$. Under certain conditions [Eq. (\ref{cond-4})
together with the detuning of the signal pulse by $-\nu _0 \cos ^2\beta $ from the single-photon resonance] it is
possible to trap temporally almost the entire incoming pulse even if it is so long that it cannot be accommodated in
a $\Lambda $-medium characterized by a comparable reduction of the group velocity, $v_\mathrm{g}/c =
\Omega ^2/(\kappa ^2n_\mathrm{1D})\ll 1$. Fast switching off the control fields or setting $\nu (t)$ to zero
prevents the spin excitations from decay through the radiation of photons in the forward direction and leads
to their storage in the medium.
Note that the proposed scheme may be termed ``passive", since it, unlike the conventional one
\cite{LF1,LF2,Liu,Phillips,Turukhin}, does not require gradual tuning of the control field
during the signal pulse propagation in the medium, but
implies instead rapid switching off of both the control fields or a fast change of $\nu (t)$ to zero,
as soon as the maximum conversion efficiency is achieved.
\begin{acknowledgments}
The authors thank N.J. Mauser, A. Rauschenbeutel, and Ph.
Schneewei{\ss } for helpful discussions.
We acknowledge financial support
by the Austrian Ministry of Science, Research and Economy (BMWFW)
via its grant for the Wolfgang Pauli Institute,
by the EU via the ERC advance grant QuantumRelax,
and by the Austrian Science Foundation
(FWF) via the projects P~25329-N27,
F41 (SFB ViCoM), and W1245 (DK ``Dissipation und Dispersion
in nichtlinearen partiellen Differentialgleichungen").
\end{acknowledgments}
\end{document}
\begin{document}
\maketitle
\begin{abstract}
We study two models of population with migration. We assume that we are given infinitely many islands with the same number $r$ of resources, each individual consuming one unit of resources. On an island lives an individual whose genealogy is given by a critical Galton-Watson tree. If all the resources are consumed, any newborn child has to migrate to find new resources. In this sense, the migrations are constrained, not random. We will consider first a model where resources do not regrow, so the $r$ first born individuals remain on their home island, whereas their children migrate. In the second model, we assume that resources regrow, so only $r$ people can live on an island at the same time, the supernumerary ones being forced to migrate. In both cases, we are interested in how the population spreads on the islands, when the number of initial individuals and available resources tend to infinity. This mainly relies on computing asymptotics for critical random walks and functionals of the Brownian motion.
\end{abstract}
\begin{scriptsize}
\noindent \textit{MSC 2010}: 60J80, 60F05, 60J70
\noindent \textit{Keywords}: Population model, random measure, weak convergence, branching process, migration, Brownian motion
\end{scriptsize}
\section{Introduction}
The incentive for this work is the series of three papers by Bertoin (see \cite{BertoinToA,BertoinAP,BertoinAR} and references therein) considering population models with neutral mutations, the latter occurring randomly. Mutations can also be viewed as migrations from an island to another, the individuals living on the same island being exactly those with the same alleles. While considering random mutations is natural, it is just as legitimate to assume that migrations do not happen randomly but are constrained: individuals will migrate when they need to find new resources to survive.
We shall study two models, which can be loosely described as follows. Individuals live on different islands, and each consumes one unit of resources to live. If one is born on an island where the resources have run out, they will migrate to a virgin island (that is, where no one has ever lived) and found their own colony. We shall thus assume that there are infinitely many virgin islands, and moreover, that each contains the same quantity of resources, denoted $r$ in the following. Our two models differ only by one point: in the first one, the resources do not regrow, whereas they do in the second. In other words, in the first model, when $r$ individuals have lived on an island, the next ones being born on this same island will migrate. In the second model, when $r$ individuals live on an island at the same time, and more individuals are born at that time, the latter have to migrate. Our models are reminiscent of the virgin island model of Hutzenthaler \cite{Hutzenthaler}, though once again, in our case, migrations are not random.
Our goal is to study how the population spreads on the different islands, that is, compute the number of islands where $k$ people live, for $k \in \mathbb{N}$. We do not wish to give a dynamical version of this, and we will thus wait until the population is extinct, and compute the number $Z_k$ of islands where $k$ people have lived. To this end, we shall study the measure
\[
\sum_{k \in \mathbb{N}} Z_k \delta_k.
\]
This is thus probably a good time to be more precise and introduce the quantities we will consider.
For both models, our study shall be fourfold. The first step is to construct precisely the object of interest. This will be called the tree of isles, in reference to the tree of alleles of \cite{BertoinToA}, and it will encode all the relevant information. This is a multitype tree, and when referring to it, we shall adopt an unusual language. A vertex of this tree corresponds to an occupied island and we will thus call its vertices ``islands''. A descendant of an island $\mathcal{I}$ is an island founded by a migrant coming from $\mathcal{I}$, so we call ``colonies'' these descendants. Finally, the type of an island will be the total number of individuals who lived on it, and we shall thus instead refer to this type as its ``population''.
The construction of the tree of isles is deterministic from a given finite tree. Consider the latter as the genealogical tree of the first existing individual, whom we shall impiously call Lucy\footnote{Which is the name given to the earliest known hominin, before the subsequent discovery of the earlier remains of ``Ardi''. Another reason for this choice is that lately, the literature has grown quite fond of the name ``Eve cluster'', designating thus a group of individuals, not a single one.}. Her children may have to migrate to find new resources -- obviously, this depends on the model. They move each to a different island and found their own colony, from which other individuals may migrate, and so on. Then, the root of the tree of isles corresponds to the home island of Lucy, its colonies to the islands founded by the migrants from this island, and so on. We conclude the construction of the tree of isles by attaching to each island its population.
The second step of our study is to find a relevant way to compute the population and number of colonies of the home island of Lucy, i.e. of the root of the tree of isles. We will be able to encode these quantities through an exploration process of the genealogical tree of Lucy, and for each model, we shall give an algorithm to construct this process in such a way that this information can be easily read on it. For the first model, the construction is simple since it is just the usual breadth-first search algorithm. Unfortunately, for the second model, we are led to writing quite abstruse definitions, but the figures should be enough to enlighten the reader.
The third step is now to add some randomness, in taking the genealogical tree of Lucy to be a Galton-Watson tree. Let us fix once and for all a reproduction law $\rho$, which is critical, i.e. has mean 1, and has a second moment $0 < \sigma^2 < + \infty$. In particular, this tree is a.s. finite, and we can construct the tree of isles $\omega$-wise. It will be clear that the branching property of the initial tree carries over to the tree of isles, which turns out to be a multitype Galton-Watson tree. This explains in particular why, in the previous paragraph, we were only interested in encoding the population and number of colonies of the root of the tree of isles: once we know this is a multitype Galton-Watson tree, these quantities are indeed enough to characterize it.
This ends what we can do starting from one individual and a fixed number of resources. Our last step is to pass to the limit, in different senses which will be made precise later. To this end, we will start from a number $N$, meant to go to infinity, of initial individuals spread on $N$ different islands. Their genealogies are assumed to be given by i.i.d. Galton-Watson trees with reproduction law $\rho$. We wish to rescale the number of resources so that, on every island, there is a probability of order $1/N$ that some people migrate. This is similar to the rate of mutation considered in \cite{BertoinToA}, or what is classically done in the Wright-Fisher model when considering rare mutations. In this case, there shall thus be a Poisson number of these initial islands which will have colonies. Clearly, this implies that there should be a number $r = r_N$ of resources tending to infinity, but the precise speed actually depends on the model. The main part of the work will be to compute the empirical measure describing how the population is spread on the different islands, more precisely its limit after a proper rescaling. We will also provide a direct way to construct this limiting measure, which enlightens the structure of the tree of isles.
Finally, for the first model, we will provide an extra result, concerning the limit of the tree of isles in terms of trees. In other words, the goal here is to keep track of the genealogy of the islands, which is lost in the mere computation of the empirical measure. A similar result could be obtained for the second model, but it would just be a technical modification of the first one, and would not bring, we think, more understanding of the model.
The paper is organized in four sections following this introduction. The second is devoted to defining the objects we will consider and explain the techniques we shall use. In the third, we will recall some definitions about exploration processes, most of it being rephrasing of known (but quite sparse in the literature) folklore. The fourth and fifth section then deal each with one model. The second model is more involved than the first one and we can thus only advise the reader to follow the given order. We however hope to show that the second model has an interest on its own, on the one hand because it is probably more natural to consider regrowing resources, and on the other hand because it leads to the computation of nice asymptotics for random walks and formulas for the Brownian motion.
\paragraph{Acknowledgments} The author would like to thank Jean Bertoin for suggesting to study this topic, and for his always accurate insights. Heartily thanks also to Lorenzo Zambotti, Amaury Lambert and Olivier H\'enard for stimulating discussions, as well as to Marc Yor for providing several less known results on Bessel processes.
\section{Definitions and techniques}
\subsection{Trees}
We shall always consider rooted and ordered trees, thus having natural notions of ancestors, descendants, or individuals being on the right or the left of another. We shall adopt the following classical formalism. First, define the universal tree
\[
\mathcal{U} = \bigcup_{n \in \mathbb{Z}^+} \mathbb{N}^n
\]
with $\mathbb{N}^0=\{\emptyset\}$, and let $\mathcal{U}^*=\mathcal{U} \backslash \{\emptyset\}$. If $u \in \mathbb{N}^k$, we say that $u$ is at generation $k$ and write $|u|=k$. The root is $\emptyset$, and the children of an individual $u=(u_1,\dots,u_k)$ at generation $k$ are $u\,j:=(u_1,\dots,u_k,j)$ for $j \in \mathbb{N}$. We call a tree rooted at $\emptyset$ a subset $\mathcal{T}$ of $\mathcal{U}$ such that
\begin{itemize}
\item $\emptyset \in \mathcal{T}$;
\item if $v \in \mathcal{T}$ and $v = u \, j$ for some $u \in \mathcal{U}$ and $j \in \mathbb{N}$, then $u \in \mathcal{T}$;
\item for every $u \in \mathcal{T}$, there exists a number $k_u(\mathcal{T}) \in \mathbb{Z}^+$ such that $u \, j \in \mathcal{T}$ if and only if $j \leq k_u(\mathcal{T})$.
\end{itemize}
The quantity $k_u(\mathcal{T})$ is the number of children, or descendants, of $u$ ($k$ stands for ``kids''). A tree rooted at $u \in \mathcal{U}$ is a subset $\mathcal{T}$ of $\mathcal{U}$ which can be written as $u \, \mathcal{T}'$, where $\mathcal{T}'$ is a tree rooted at $\emptyset$. If no detail is given, a tree will always be assumed to be rooted at $\emptyset$, though we will just call tree a tree rooted at another vertex if the context is clear. A Galton-Watson tree (with types or not) can thus be defined in a natural way, see \cite{DuquesneLeGall,LegallMBPB}. We let $s(\mathcal{T})$ be the size of $\mathcal{T}$, that is, its cardinality. A labeling of a tree $\mathcal{T}$ is a bijection between $\mathcal{T}$ and $\{1, \dots, s(\mathcal{T}) \}$, and, given a labeling, we may refer to ``vertex $i$'' instead of ``the vertex labeled $i$''.
We call these trees discrete and will also use, for the second model, continuous trees, that is trees whose branches have lengths, interpreted as life-lengths of the individuals: the latter give birth to all of their children just before dying. Let us not dwell on an obscure formal definition, since once again, figures shall make things lucid.
A multitype tree $\mathcal{A}$ is a tree such that a type $\mathcal{A}_u$ is attached to each vertex $u$ of the tree. The types here will be positive, and we will give type 0 to the vertices which do not belong to $\mathcal{A}$, i.e. define $\mathcal{A}_u = 0$ for $u \in \mathcal{U} \backslash \mathcal{A}$. In particular, the set of vertices with non-zero type is a tree, and we will thus say that $u \in \mathcal{A}$ if $\mathcal{A}_u \neq 0$.
\subsection{Construction of the tree of isles} \label{sec:treeofisles}
Let us start by recalling that the trees involved in the first model will be discrete, whereas they shall be continuous for the second model. Let us fix a tree $T$, interpreted as the genealogical tree of an individual founding an island $\mathcal{I}$. For each model, we will define (respectively in Section \ref{sec:treeofisles1} and \ref{sec:treeofisles2}) the migrant children of a tree $T$, which is just a subset of vertices of $T$. We denote $C(T)$ their number, which is, in our terminology, the number of colonies of $\mathcal{I}$, though we may also refer to it as the number of colonies of $T$. These individuals are roots of disjoint\footnote{This will be clear from the precise definition of these migrant children.} trees which we will denote $\mathbb{T}_{\mathrm{migr}}^1, \dots, \mathbb{T}_{\mathrm{migr}}^{C(T)}$. After we cut off these subtrees from $T$, we obtain a pruned tree, which describes the genealogy of this individual restricted to its offspring living on $\mathcal{I}$. The size of this pruned tree shall be denoted $P(T)$, and is the population of $\mathcal{I}$, or of $T$. We start with the genealogical tree $\mathcal{T}$ of Lucy.
This notation obviously depends on the model, but we shall not specify it: clearly, it refers to the first model in Section \ref{sec:firstmodel} and to the second one in Section \ref{sec:secondmodel}. However, it depends on the number of resources we consider, and we shall add an index $r$ if necessary, writing $P_r(\mathcal{T})$ and $C_r(\mathcal{T})$ to specify that we deal with $r$ resources.
The tree of isles of $\mathcal{T}$, $\mathcal{A}$, can then be constructed recursively as follows. Recall that it is a multitype tree, whose vertices are called ``islands'', types ``population'' and descendants ``colonies''.
\begin{itemize}
\item First, take $X = \{ (\emptyset,\mathcal{T}) \}$.
\item At each step, if $X = \emptyset$ then stop. Otherwise, pick some $(v,T)$ in $X$, and remove it from $X$. Add island $v$ to $\mathcal{A}$, and give it population $P(T)$ and $C(T)$ colonies (if any). Then, for $i \in \{ 1, \dots, C(T) \}$, add $(v \, i, \mathbb{T}_{\mathrm{migr}}^i)$ to $X$, and go to the next step.
\end{itemize}
It should be clear that this algorithm provides a multitype tree, which is, by definition, the tree of isles constructed from $\mathbf{m}athcal{T}$.
It is already worth keeping in mind the following properties of the tree of isles, which shall be proven later:
\begin{itemize}
\item its construction is deterministic;
\item the information which is relevant to our study is encoded in the tree of isles;
\item if the initial genealogical tree is a Galton-Watson tree, then the tree of isles is a multitype Galton-Watson tree.
\end{itemize}
Let us finally take advantage of this section to fix some notation. A deterministic tree shall be denoted $\mathcal{T}$, and its tree of isles $\mathcal{A}$ (from French ``arbre'', tree). A random tree (with no type, and it will always be a Galton-Watson tree) will be denoted $\mathbb{T}$, and the corresponding tree of isles $\mathbb{A}$. Note once again that the latter is just obtained deterministically from $\mathbb{T}$: each $\mathbb{T}(\omega)$ gives rise to an $\mathbb{A}(\omega)$.
\subsection{Notation and empirical measure} \label{sec:empmeas}
Let us first introduce some notation.
\begin{itemize}
\item $C_K$ is the space of nonnegative continuous functions with compact support in $(0, + \infty)$;
\item $\mathcal{M}^+$ is the space of nonnegative Radon measures on $(0,+ \infty)$, endowed with the vague topology (i.e. the space of test functions is $C_K$);
\item we denote $\Rightarrow$ the convergence in $\mathcal{M}^+$, and $\convlaw$ the usual convergence in distribution of real random variables;
\item for $\mathcal{P} \in \mathcal{M}^+$ and $f \in C_K$, $\langle \mathcal{P}, f \rangle$ is the integral of $f$ with respect to $\mathcal{P}$;
\item finally, for notational simplicity, in the whole document, we do not write the integer parts.
\end{itemize}
As mentioned in the introduction, we want to start with a large number of Galton-Watson trees with reproduction law $\rho$, which has mean 1 and a second moment $0 < \sigma^2 < + \infty$. We will thus pick $(\Omega, \mathcal{F}, \mathbb{P})$ a probability space where an i.i.d. family $(\mathbb{T}^k)_{k \geq 0}$ of those trees can be defined. We can then construct, for each model, the corresponding trees of isles $(\mathbb{A}^k(r))_{k \geq 0}$ corresponding to $r$ resources, which are also i.i.d. since the construction of the tree of isles is deterministic.
Remember that we wish to have the number $N$ of initial islands, as well as the number of resources $r_N$ on each island, tend to infinity, and to study the empirical measure of the population. We thus define, for each $r,n \in \mathbb{N}$,
\[
\mathcal{P}_n(r) = \sum_{k=1}^n \sum_{u \in \mathcal{U}} \delta_{\mathbb{A}_u^k(r)} \mathds{1}_{\{\mathbb{A}_u^k(r) \neq 0\}}.
\]
In other words, the mass of $\mathcal{P}_n(r)$ at $l \in \mathbb{N}$ is the number of islands where $l$ people have lived, when starting from $n$ islands and considering $r$ resources. We are interested in the convergence of $\mathcal{P}_N(r_N)$, but to obtain a nontrivial limit, we will need to rescale it, by setting
\[
\cP^{(N)}_n(r) = \sum_{k=1}^n \sum_{u \in \mathcal{U}} \delta_{\mathbb{A}_u^k(r)/N^2} \mathds{1}_{\{\mathbb{A}_u^k(r) \neq 0\}}.
\]
This rescaling actually implies (see Lemma \ref{lem:tightness}) the tightness of $(\cP^{(N)}_N(r_N))$ in $\mathcal{M}^+$. Clearly, $\cP^{(N)}_n(r)$ is just the sum of $n$ independent copies of the reference measure $\cP^{(N)}(r) := \cP^{(N)}_1(r)$. We also define $\mathbb{T} := \mathbb{T}^1$ and $\mathbb{A}(r) := \mathbb{A}^1(r)$, so $(\mathbb{T}^k)$ (resp. $(\mathbb{A}^k(r))$) is a family of i.i.d. copies of $\mathbb{T}$ (resp. $\mathbb{A}(r)$). Finally, we let, for $p \geq 1$, $i \geq 0$,
\[
Z_{i,p}(r) = \# \{ u \in \mathcal{U}, \, |u| = i, \, \mathbb{A}_u(r) = p \},
\]
so we can rewrite
\[
\cP^{(N)}(r) = \sum_{p \geq 1} \delta_{p/N^2} \sum_{i \geq 0} Z_{i,p}(r),
\]
which will turn out to be very useful to write an equation solved by the cumulant of $\cP^{(N)}_N(r_N)$: recall that the cumulant of a random measure $\mathcal{P}$ on $\mathcal{M}^+$ is
\[
\kappa(f) = - \ln \mathbb{E} \left ( \exp - \langle \mathcal{P} , f \rangle \right )
\]
for $f \in C_K$, and that the knowledge of $\kappa(f)$ for every $f \in C_K$ characterizes $\mathcal{P}$.
\subsection{General technique of proof} \label{sec:technique}
To compute the limit of $(\cP^{(N)}_N(r_N))$, we will proceed in several steps. The first is to prove the tightness, which is easy. The second is to prove the uniqueness of the limit. To this end, we first give an equation solved by the cumulant $\kappa_N(f)$ of $\cP^{(N)}_N(r_N)$, which is obtained thanks to the branching property of the tree of isles. We then pass to the limit in this equation to obtain an equation solved by any subsequential limit of $\kappa_N(f)$, which is readily proved to have a unique solution. This guarantees, with the tightness, the convergence of $(\cP^{(N)}_N(r_N))$.
The main part of this work is to compute the limit of the unknown quantity involved in the aforementioned equation, namely the population and number of colonies of the root of the tree of isles. As we mentioned, these quantities can be read on a random walk (which is the exploration process of the genealogical tree of Lucy), and our work will then boil down to finding limits of functionals of random walks. This is undoubtedly the most technical part of the work; however, if we leave aside the necessary computations, most of the results should not come as a surprise.
\section{Exploration processes}
In this section, we shall provide a general way to build the exploration process of a tree. The goal of this construction is that, if this tree is a Galton-Watson tree, then its exploration process is a random walk. These matters are classical, and we shall not dwell on the proofs.
\subsection{A general construction of the exploration process} \label{sec:explproc}
Consider a finite tree $\mathcal{T}$, with size $s = s(\mathcal{T})$. There are several ways to label it, i.e. write a bijection between the set of vertices and $\{1,\dots,s\}$, the most common being by breadth-first and depth-first search. From a labeling, we may construct the exploration process as follows: denote $k_i$ the number of children of (the individual labeled) $i$. Then the exploration process is given by $S_0 = 0$ and
\[
S_i = (k_1 - 1) + \dots + (k_i - 1), \quad i = 1 \dots s,
\]
and we let $S_i = -1$ for $i > s$ for the sake of definiteness. We will extend this process to the whole of $\mathbb{R}^+$ by interpolating linearly for the first model, whereas we will instead set $S_t = S_{\lfloor t \rfloor}$ for the second model. When $\mathcal{T}$ is a Galton-Watson tree with reproduction law $\rho$, and when it is labeled by breadth-first or depth-first search, it is well-known that $(S_n)$ is a left-continuous random walk, with step distribution $\tilde{\rho}$, absorbed at $-1$ at time $s$, where $\tilde{\rho}(k) = \rho(k+1)$ for $k \geq -1$.
Let us now explain a more general way to label $\mathcal{T}$ so as to conserve this last property. Let $\mathcal{T}$ be a discrete or continuous tree. We say that $\ell$ is a \emph{line} in $\mathcal{T}$ if $\ell$ is a set of vertices such that every path from the root to a leaf contains at most one vertex of $\ell$. We now define $\mathcal{T}_{\ell}$ the tree pruned at level $\ell$, as follows.
\begin{itemize}
\item In the discrete case, $\mathcal{T}_{\ell}$ is the connected component of the root when we delete the descendants of the individuals of $\ell$: see Figure \ref{fig:pruneddisctree}.
\item In the continuous case, $\mathcal{T}_{\ell}$ is the connected component of the root when we delete the descendants of the individuals of $\ell$, but keep the life-lengths of all the individuals in $\ell$: see Figure \ref{fig:prunedconttree}.
\end{itemize}
\begin{figure}[htb]
\centering
\includegraphics[width=0.75 \columnwidth]{Pruned_disc_tree.pdf}
\caption{A discrete tree $\mathcal{T}$, a line $\ell$ (the circled vertices), and the tree $\mathcal{T}_{\ell}$ pruned along this line.}
\label{fig:pruneddisctree}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=0.75 \columnwidth]{Pruned_cont_tree.pdf}
\caption{A continuous tree $\mathcal{T}$, a line $\ell$ (the circled vertices), and the tree $\mathcal{T}_{\ell}$ pruned along this line. Note that we cut the descendants of the individuals in $\ell$, not the individuals in $\ell$, so the right-most individuals still remain in $\mathcal{T}_{\ell}$.}
\label{fig:prunedconttree}
\end{figure}
We will label our tree $\mathcal{T}$ by giving labels $1$ to $s(\mathcal{T})$ in this order, the label 1 going to the root. To explain which vertex we label $i+1$ after we have given labels $1$ to $i$, we use what we call a \emph{Markovian rule}.
\begin{defn} \label{def:rule}
Let $\mathcal{L}$ be the set of nonempty lines in $\mathcal{T}$. A Markovian rule $\mathcal{R}$ is a mapping from $\mathcal{L}$ to the set of vertices of $\mathcal{T}$ such that, for every $\ell \in \mathcal{L}$, $\mathcal{R}(\ell) \in \ell$ and $\mathcal{R}(\ell)$ depends only on $\mathcal{T}_{\ell}$.
\end{defn}
The labeling of $\mathcal{T}$ corresponding to this rule is then given by the following algorithm.
\begin{enumerate}
\item At step 0, take $\ell$ to be the root of $\mathcal{T}$, and go to step $1$.
\item At step $i$:
\begin{itemize}
\item if $\ell = \emptyset$, then stop;
\item if not, choose the vertex $v$ according to the rule $\mathcal{R}$, i.e. take $v = \mathcal{R}(\ell)$. Give $v$ the label $i$. Then remove $v$ from $\ell$, and add to $\ell$ the children of $v$;
\item then go to step $i+1$.
\end{itemize}
\end{enumerate}
It should be clear that when the algorithm stops, the tree is labeled. Let $(S_n)$ be the exploration process corresponding to this labeling. We claim the following.
\begin{lemma} \label{lem:explproc}
\begin{itemize}
\item The size $s(\mathcal{T})$ of the tree is the hitting time of $-1$ by $(S_n)$.
\item If $\mathcal{T} = \mathbb{T}$ is a (discrete or continuous) Galton-Watson tree with reproduction law $\rho$, then, at each step of the labeling algorithm, conditionally on $\ell$, the subtrees rooted at the vertices of $\ell$ are i.i.d. with the same law as $\mathbb{T}$. In particular, $(S_n)$ has the law of a random walk with step distribution $\tilde{\rho}$, absorbed when first hitting $-1$.
\end{itemize}
\end{lemma}
\begin{proof}
The first part is classical and easily proven by induction. The second part is just the observation that, at each step of the labeling algorithm, the fact that we choose a Markovian rule ensures that the set of edges above $\ell$ is a stopping line, in the terminology of \cite{Chauvin} (see also the nice informal explanation in \cite{BertoinToA}). This ensures that the branching property holds for the subtrees rooted at $\ell$, i.e. that conditionally on $\ell$, the subtrees rooted at the vertices of $\ell$ are i.i.d. with the same law as $\mathbb{T}$. This clearly implies the statement about the exploration process.
\end{proof}
This result obviously encompasses the two known cases mentioned. The rule in the breadth-first search case is ``pick the leftmost vertex at the lowest generation in $\ell$'', and ``pick the smallest vertex in the lexicographical order in $\ell$'' in the depth-first search case. We may make up a lot of valid rules, and this construction will be mostly useful when we study the second model, since the rule then is quite involved.
\subsection{Death-first search algorithm} \label{sec:deathfirst}
Let us now introduce a particular labeling of a continuous tree by what we shall call ``death-first search''. Consider a continuous tree $\mathcal{T}$, such that two events (birth or death) do not occur at the same time. Our rule is, for a line $\ell$, to pick the individual in $\ell$ who dies first. Since $\mathcal{T}_{\ell}$ keeps track of the life-lengths of the individuals in $\ell$, this is clearly a Markovian rule. See Figure \ref{fig:death_first} for an example, and Figure \ref{fig:death_first_expl_proc} for the corresponding exploration process $(S_n)$.
\begin{figure}[htb]
\centering
\includegraphics[width=0.5 \columnwidth]{Death_first.pdf}
\caption{Labeling of a tree by death-first search.}
\label{fig:death_first}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=0.5 \columnwidth]{Death_first_expl_proc.pdf}
\caption{Exploration process corresponding to the labeling of the tree in Figure \ref{fig:death_first}.}
\label{fig:death_first_expl_proc}
\end{figure}
Now, let $0 = \tau_0 < \tau_1 < \dots < \tau_k < \tau_{k+1} = + \infty$ be the times of the successive events of birth or death. Then $1 = 1+S_0$ is the number of individuals living on $[\tau_0,\tau_1)$. Then the number of children of 1 is $1 + S_1$, so the number of people alive on $[\tau_1,\tau_2)$ is $1 + S_1$. The number of children of 2 is $1 + (S_2 - S_1)$, so the number of individuals alive on $[\tau_2,\tau_3)$ is
\[
\underbrace{1 + S_1}_{\text{individuals alive at $\tau_2^-$}} + \overbrace{- 1}^{\text{death of 2}} + \underbrace{1 + S_2 - S_1}_{\text{number of children of 2}} = 1 + S_2.
\]
By a clear induction, one readily sees that $1+S_i$ is the number of individuals alive on $[\tau_i,\tau_{i+1})$, for $i \in \{ 0, \dots, k \}$. We will use a variation of this algorithm and this observation when studying our second model.
\section{First model: fossil resources} \label{sec:firstmodel}
Let us recall informally our first model, which involves fossil resources, i.e. non-regrowing resources. Each island $\mathcal{I}$ contains a number $r \in \mathbb{N}$ of resources, which are consumed by the first $r$ individuals living on $\mathcal{I}$. Every new individual born on $\mathcal{I}$ after these $r$ first will migrate, each to a new virgin island with $r$ resources, to found its own colony.
\subsection{Tree of isles} \label{sec:treeofisles1}
Let us start by defining the tree of isles $\mathcal{A}(r)$ from a discrete tree $\mathcal{T}$. According to Section \ref{sec:treeofisles}, to this end, all we need to do is to explain how we choose its migrant children, which should be clear from the informal description of the model. Since several children may be born at the same time, the only issue is to choose which children migrate after the resources on the island have been exhausted, and we will pick arbitrarily the right-most children.
To make this precise, if $s(\mathcal{T}) \leq r$, then $\mathcal{T}$ has no migrant children. Else, label $\mathcal{T}$ by breadth-first search. Then the migrant children are the individuals with label in $\{r+1, r+2, \dots \}$, which are descendants of individuals with label in $\{1, \dots, r \}$. See Figures \ref{fig:disc_tree} and \ref{fig:tree_of_isles_1} for examples.
\begin{figure}[htb]
\centering
\includegraphics[width=0.75 \columnwidth]{Disc_tree.pdf}
\caption{A tree $\mathcal{T}$ labeled by breadth-first search. Its migrants for $r=5$ are shown in a square, the individuals in a circle remaining on the island. Here $P_r(\mathcal{T})=5$ and $C_r(\mathcal{T}) = 4$.}
\label{fig:disc_tree}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=0.5 \columnwidth]{Tree_of_isles_1.pdf}
\caption{The tree of isles $\mathcal{A}(r)$ obtained from the tree $\mathcal{T}$ of Figure \ref{fig:disc_tree}, for $r=5$.}
\label{fig:tree_of_isles_1}
\end{figure}
Let us go on with our plan: we wish to read the population and number of colonies of the root of $\mathcal{A}(r)$ on the exploration process of $\mathcal{T}$. To this end, we shall naturally consider $(S_n)$ the exploration process associated to the labeling by breadth-first search, see Figure \ref{fig:disc_expl_proc} for an example. We let
\[
\varsigmainf = \inf \{i \geq 0, \, S_i = - 1 \}, \quad \inf \emptyset := + \infty,
\]
the hitting time of $-1$ by $(S_n)$. We now claim the following.
\begin{lemma} \label{lem:popcolRW1}
The following equalities hold:
\[
P_r(\mathcal{T}) = \varsigmainf \wedge r, \quad C_r(\mathcal{T}) = 1 + S_{\varsigmainf \wedge r}.
\]
\end{lemma}
\begin{proof}
Recall from Lemma \ref{lem:explproc} that $\varsigmainf = s(\mathcal{T})$. Hence, when there are no migrants, i.e. $s(\mathcal{T}) = \varsigmainf \leq r$, then $P_r(\mathcal{T}) = s(\mathcal{T}) = \varsigmainf = \varsigmainf \wedge r$ and $C_r(\mathcal{T}) = 0 = 1 + S_{\varsigmainf} = 1 + S_{\varsigmainf \wedge r}$.
When there are migrants, i.e. $s(\mathcal{T}) = \varsigmainf > r$, then by definition $P_r(\mathcal{T}) = r = \varsigmainf \wedge r$. Now, by an easy induction as in Section \ref{sec:deathfirst}, one can check that for each $i \in \{1, \dots, s(\mathcal{T})\}$, $1 + S_i$ is precisely the number of children of the individuals with label in $\{ 1,\dots,i \}$ whose label is in $\{ i+1, i+2, \dots \}$. Hence, by definition, $C_r(\mathcal{T}) = 1 + S_r = 1 + S_{\varsigmainf \wedge r}$.
\end{proof}
\begin{figure}[htb]
\centering
\includegraphics[width=0.75\columnwidth]{Disc_expl_proc.pdf}
\caption{The exploration process obtained from the tree $\mathcal{T}$ of Figure \ref{fig:disc_tree}. Check that, for $r = 5$, $\varsigmainf \wedge r = 5 = P_r(\mathcal{T})$ and $1 + S_r = 4 = C_r(\mathcal{T})$.}
\label{fig:disc_expl_proc}
\end{figure}
Let us use the notation of Section \ref{sec:empmeas}, so $\mathbb{T}$ is a Galton-Watson tree with reproduction law $\rho$ and $\mathbb{A}(r)$ its tree of isles when we consider $r$ resources. Instead of considering the walk $(S_n)$ as being constructed from $\mathbb{T}$, we rather take $(S_n)$ to be an actual random walk with step distribution $\tilde{\rho}$, defined on all of $\mathbb{Z}^+$. We thus just have to replace the equalities in Lemma \ref{lem:popcolRW1} by equalities in law. Our objective now is to prove that $\mathbb{A}(r)$ is a multitype Galton-Watson tree. Taking this for granted, the only information we need to characterize it is the law of the type (i.e. population in our terminology) and number of children (or colonies) of the root, i.e. the law of the couple $(P_r(\mathbb{T}),C_r(\mathbb{T}))$. We let $\pi_r$ be the law of $P_r(\mathbb{T})$, and $\gamma_r$ the law of $C_r(\mathbb{T})$ knowing that $P_r(\mathbb{T}) = r$. By definition, $C_r(\mathbb{T}) = 0$ when $P_r(\mathbb{T}) < r$, so the law of $(P_r(\mathbb{T}),C_r(\mathbb{T}))$ is indeed specified by $\pi_r$ and $\gamma_r$.
\begin{lemma} \label{lem:treeGW1}
The tree of isles $\mathbb{A}(r)$ is an $r$-type Galton-Watson tree, described as follows.
\begin{itemize}
\item The population of the root is chosen according to $\pi_r$;
\item the number of colonies of the islands of population $r$ has law $\gamma_r$, and each colony chooses independently its population according to $\pi_r$;
\item the islands of population $1,\dots,r-1$ do not have any colonies.
\end{itemize}
\end{lemma}
\begin{proof}
Let us take a look at the algorithm defining the tree of isles $\mathbb{A}(r)$. The root has, by definition, population given by $\pi_r$. If $\mathbb{A}_{\emptyset}(r) < r$, then $\mathbb{T}$ has no migrant children and $\mathbb{A}_u(r) = 0$ for $u \in \mathcal{U}^*$. Else, $\mathbb{A}_{\emptyset}(r) = r$, and $\mathbb{T}$ has a number of migrant children given by $\gamma_r$. Conditioned on these migrant children, the second statement of Lemma \ref{lem:explproc} tells us that the subtrees $\mathbb{T}_{\mathrm{migr}}^1,\dots,\mathbb{T}_{\mathrm{migr}}^{C_r(\mathbb{T})}$ are independent with the same law as $\mathbb{T}$. A simple induction then yields the result.
\end{proof}
A straightforward corollary of this result concerns $(Z_{i,p}(r),\, p = 1, \dots, r)_{i \geq 0}$, as defined in Section \ref{sec:empmeas}.
\begin{lemma} \label{lem:procGW1}
The process $(Z_{i,p}(r),\, p = 1, \dots, r)_{i \geq 0}$ is an $r$-type Galton-Watson process, described as follows.
\begin{itemize}
\item The $r$-tuple $(Z_{0,1}(r),\dots,Z_{0,r}(r))$ has the same law as $(\mathds{1}_{\{P_r(\mathbb{T})=1\}},\dots,\mathds{1}_{\{P_r(\mathbb{T})=r\}})$;
\item the number of children of the individuals of type $r$ has law $\gamma_r$, and each child chooses independently its type according to $\pi_r$;
\item the individuals of type $1,\dots,r-1$ do not have any children.
\end{itemize}
\end{lemma}
\subsection{Rescaling}
Let us keep on with our program. We now start from $N$ independent islands and $r_N$ resources, both meant to tend to infinity. We wish to rescale $r_N$ so that each island has a probability of order $1/N$ to have colonies. From Lemma \ref{lem:popcolRW1},
\begin{equation} \label{eq:tailvsinf}
\mathbb{P}(C_{r_N}(\mathbb{T}) > 0) = \mathbb{P}(\varsigmainf > r_N) \sim \sqrt{\frac{2}{\pi \sigma^2}} \frac{1}{\sqrt{r_N}}.
\end{equation}
The last equivalent stems from well-known facts about hitting times for random walks, see e.g. \cite{Spitzer}, p.~382. We shall thus assume that, for some $c > 0$,
\begin{equation} \label{eq:hyprN1}
\lim_{N \to + \infty} \frac{r_N}{N^2} = c > 0.
\end{equation}
Let us introduce some notation. We denote for simplicity
\[
\lambda = \sqrt{\frac{2}{\pi \sigma^2 c}}.
\]
Let
\[
P_N = P_{r_N}(\mathbb{T}), \quad C_N = C_{r_N}(\mathbb{T}), \quad p_N = \mathbb{P}(P_N = r_N),
\]
their conditioned laws
\[
\pi^{(N)} = \mathcal{L}(P_N \mid P_N < r_N), \quad \gammaN = \mathcal{L}( C_N \mid P_N = r_N ),
\]
and their rescaled versions
\[
\widetilde{\pi}^{(N)} = \mathcal{L} \left ( \frac{P_N}{N^2} \middle | P_N < r_N \right ), \quad \widetilde{\g}^{(N)} = \mathcal{L} \left ( \frac{C_N}{N} \middle | P_N = r_N \right ).
\]
Let us first study the limit, in different senses, of the two latter. We define here
\[
\mu(\mathrm{d}x) = \frac{1}{2} \frac{1}{x^{3/2}} \: \mathrm{d}x, \quad \mu^c(\mathrm{d}x) = \mathds{1}_{\{x \in (0,c)\}} \mu(\mathrm{d}x)
\]
and
\[
\theta = \mathcal{L}(\sqrt{c} \sigma W_1^+), \quad \mathbb{P}(W_1^+ \in \mathrm{d}x) = x e^{-x^2/2} \mathds{1}_{\{x > 0\}} \: \mathrm{d}x.
\]
The notation $W_1^+$ just stems from the usual notation $(W^+_t)_{t \in [0,1]}$ for the standard Brownian meander, and $W_1^+$ is its tip, whose law is the Rayleigh law $x e^{-x^2/2} \mathds{1}_{\{x > 0\}} \: \mathrm{d}x$.
\begin{lemma} \label{lem:convmeas}
As $N \to + \infty$,
\[
p_N \sim \frac{\lambda}{N}
\]
and moreover, the convergences
\[
N \widetilde{\pi}^{(N)} \Rightarrow \lambda \mu^c, \quad \widetilde{\g}^{(N)} \convlaw \theta
\]
hold, respectively in $\mathcal{M}^+$ and in law.
\end{lemma}
\begin{proof}
The first statement is just \eqref{eq:tailvsinf} written with a non-strict inequality. For the first convergence, note that more generally, \eqref{eq:tailvsinf} and \eqref{eq:hyprN1} ensure that, for every $a > 0$,
\[
N \mathbb{P} ( \varsigmainf > a N^2 ) \to \frac{\lambda}{\sqrt{a}} = \frac{\lambda}{2} \int_a^{+ \infty} \frac{1}{x^{3/2}} \: \mathrm{d}x,
\]
so one readily sees by standard approximations that
\[
N \mathbb{P} \left ( \frac{\varsigmainf}{N^2} \in \mathrm{d}x \right ) \Rightarrow \frac{\lambda}{2} \frac{1}{x^{3/2}} \: \mathrm{d}x.
\]
Recall also from Lemma \ref{lem:popcolRW1} that $P_N$ has the same law as $\varsigmainf \wedge r_N$, and thus, for $f \in C_K$,
\begin{align*}
& \int_0^{+ \infty} f(x) N \widetilde{\pi}^{(N)}(\mathrm{d}x) \\
& = \mathbb{P} ( P_N < r_N )^{-1} \int_0^{(r_N - 1)/N^2} f(x) N \mathbb{P} ( P_N / N^2 \in \mathrm{d}x) \\
& = (1-p_N)^{-1} \int_0^{(r_N - 1)/N^2} f(x) N \mathbb{P} ( \varsigmainf / N^2 \in \mathrm{d}x) \\
& = (1-p_N)^{-1} \left ( \int_0^c f(x) N \mathbb{P} ( \varsigmainf / N^2 \in \mathrm{d}x) + \int_c^{(r_N - 1) / N^2} f(x) N \mathbb{P} ( \varsigmainf / N^2 \in \mathrm{d}x) \right ) \\
& \to \lambda \int_0^c f(x) \mu(\mathrm{d}x)
\end{align*}
using the computation above and the fact that the second term is easily seen to tend to $0$ since $r_N/N^2 \to c$.
For the second convergence, note that Lemma \ref{lem:popcolRW1} implies that $\widetilde{\g}^{(N)}$ is the law of
\[
\left. \frac{1 + S_{r_N}}{N} \right | \varsigmainf \geq r_N.
\]
It is well-known (see \cite{Bolt}) that a centered random walk with a second moment, conditioned to stay positive, converges to the Brownian meander $(W^+_t)_{t \in [0,1]}$, and in particular
\[
\left. \frac{S_{r_N}}{\sigma \sqrt{r_N}} \right | \varsigmainf \geq r_N \convlaw W^+_1,
\]
whence the result follows after noticing (see e.g. \cite{Igle}) that $W^+_1$ has the Rayleigh law.
\end{proof}
\subsection{Heuristics and result} \label{sec:heuristics}
Before stating the result, we will, with the help of the previous results, discuss some heuristics. Consider the forest
\[
\left ( \mathbb{A}^1(r_N),\dots,\mathbb{A}^N(r_N) \right ) / N^2,
\]
where dividing by $N^2$ means rescaling the population of every island by $1/N^2$. The islands with (rescaled) population $1/N^2, \dots, (r_N - 1) / N^2$ do not have colonies, whereas those with population $r_N / N^2 \approx c$ may\footnote{And probably do: an island of population $r_N/N^2$ does not have colonies only in the (rare) case where the founder of this island has precisely $r_N - 1$ descendants.}. We shall thus call the latter type of islands \emph{fertile}. According to Lemma \ref{lem:convmeas}, the number of colonies of a fertile island is approximately $\mathrm{f} N$, where $\mathrm{f}$ has law $\theta$. We will then say that this island has fertility $\mathrm{f}$.
Now, by Lemma \ref{lem:treeGW1}, the populations $t_1,\dots,t_{\mathrm{f} N}$ of these $\mathrm{f} N$ islands are chosen independently. Each has a probability $p_N \sim \lambda / N$ to be fertile, and the population of any other island has law $\widetilde{\pi}^{(N)}$, with $N \widetilde{\pi}^{(N)} \Rightarrow \lambda \mu^c$. Hence, the measure
\[
\sum_{i = 1}^{\mathrm{f} N} \delta_{t_i}
\]
is approximately a Poisson measure with intensity
\[
\mathrm{f} \lambda \left ( \mu^c + \delta_c \right ).
\]
By the superposition property of Poisson measures, this can be reformulated by saying that the descendants of a fertile individual with fertility $\mathrm{f}$ are
\begin{itemize}
\item either fertile, and there is approximately a Poissonian number with parameter $\mathrm{f} \lambda$ of those;
\item or not fertile, and those contribute to the empirical measure as approximately a Poisson random measure with intensity $\mathrm{f} \lambda \mu^c$.
\end{itemize}
Finally, since the initial number of individuals is $N$, we may link them to a virtual ancestor of fertility $1$.
This invites us to introduce the following measure $\eta$. We first construct a Galton-Watson tree $T$ with fertilities\footnote{This is, once again, just another way to speak of types, but we would rather avoid the confusion.} in $(0,+ \infty)$. The number of children of an individual with fertility $\mathrm{f}$ is Poisson with parameter $\mathrm{f} \lambda$, and each child has a fertility chosen independently according to $\theta$. We start from an individual with fertility $1$, and denote $\mathrm{f}_u$ the fertility of $u \in T$.
Obviously, if we forget about the types, $T$ has the same law as a Galton-Watson tree, constructed as follows:
\begin{itemize}
\item the reproduction law of the ancestor is Poisson with parameter $\lambda$;
\item the other individuals have reproduction law $\gammaTh$, which is a Cox law: it is a mixture of Poisson laws, where the random parameter is chosen as $\lambda \sqrt{c} \sigma W_1^+$, where we recall that $\sqrt{c} \sigma W_1^+$ has law $\theta$.
\end{itemize}
The point of this alternative construction is to note that $\lambda \sqrt{c} \sigma W_1^+ = \sqrt{2/\pi}\, W_1^+$, so $\gammaTh \neq \delta_1$ and has mean $1$, so that $T$ is a.s. finite. For matters concerning the construction of these variables and the measurability of the functions and variables we consider, we refer to \cite[ch. III]{Harris} and \cite[ch. VI]{Atney}.
Finally, we define a measure $\eta$ as follows: consider $(\nu_u(\mathrm{f}), \, \mathrm{f} \geq 0)_{u \in \mathcal{U}}$ an i.i.d. family, consisting of collections indexed by $\mathbb{R}^+$ of random variables, such that, for each $u \in \mathcal{U}$ and $\mathrm{f} \geq 0$, $\nu_u(\mathrm{f})$ is a Poisson measure with intensity $\mathrm{f} \lambda \mu^c$. This can for instance be constructed using a family of i.i.d. Poisson processes indexed by $\mathcal{U}$, see Section \ref{sec:threelemmas}. Then we define
\[
\eta = \sum_{u \in T} ( \nu_u(\mathrm{f}_u) + \delta_c) - \delta_c = \sum_{u \in T} \nu_u(\mathrm{f}_u) + (\# T - 1) \delta_c.
\]
Subtracting $\delta_c$ is just to take into account that the ancestor is a virtual one. We may now state our result.
\begin{thm} \label{th:conv1}
Under the assumption \eqref{eq:hyprN1}, the sequence $(\cP^{(N)}_N(r_N))$ converges in distribution in $\mathcal{M}^+$ to a random measure with the same law as $\eta$.
\end{thm}
\subsection{Proof}
We first fix a sequence $(r_N)$ such that \eqref{eq:hyprN1} is verified and proceed as explained in Section \ref{sec:technique} by first proving tightness.
\begin{lemma} \label{lem:tightness}
The sequence $(\cP^{(N)}_N(r_N))$ is tight in $\mathcal{M}^+$.
\end{lemma}
\begin{proof}
This fact can be proven as in Lemma 5 in \cite{BertoinAR}. Indeed, since our test functions have compact support in $(0,+ \infty)$, one can readily check that the tightness follows from that of $\langle \cP^{(N)}_N(r_N), \mathbb{I}d \rangle$. But the latter is the total population of a Galton-Watson forest started from $N$ ancestors, renormalized by $1/N^2$, and it is well-known that this converges to the total population of a Feller diffusion, see e.g. \cite{LeGallBook}.
\end{proof}
Our next step is to derive here an equation solved by the cumulant of $\cP^{(N)}_N(r_N)$, given by
\[
\kappa_N(f) = - \ln \mathbb{E} \left ( \exp - \langle \cP^{(N)}_N(r_N) , f \rangle \right ) = - N \ln \mathbb{E} \left ( \exp - \langle \cP^{(N)}(r_N) , f \rangle \right ),
\]
where we recall that $\cP^{(N)}_N(r_N)$ is the sum of $N$ independent copies of $\cP^{(N)}(r_N)$.
\begin{lemma} \label{lem:eqcumu}
The cumulant $\kappa_N(f)$ solves the following equation
\begin{equation}\label{eq:cumuN}
\exp - \kappa_N(f) = \mathbb{E} \left ( \exp - \left ( f \left ( \frac{P_N}{N^2} \right ) + \frac{C_N}{N} \kappa_N(f) \right ) \right )^N
\end{equation}
for every $f \in C_K$.
\end{lemma}
\begin{proof}
Lemma \ref{lem:treeGW1} and the branching property show that $(Z_{0,p}(r_N))_{p=1,\dots,r_N}$ has the same law as $(\mathds{1}_{\{P_N=1\}},\dots,\mathds{1}_{\{P_N=r_N\}})$, and that, conditionally on $P_N$ and $C_N$, $(Z_{i,\cdot}(r_N))_{i \geq 1}$ is independent from $Z_{0,\cdot}$ and has the same law as the sum of $C_N$ independent copies of $(Z_{i,\cdot}(r_N))_{i \geq 0}$. Hence, conditioning on $P_N$ and $C_N$, we may write
\begin{align*}
\exp - \kappa_N(f) & = \mathbb{E} \left ( \exp - \langle \cP^{(N)}_N(r_N) , f \rangle \right ) \\
& = \mathbb{E} \left ( \exp - \sum_{i \geq 0} \sum_{p=1}^{r_N} Z_{i,p}(r_N) f(p/N^2) \right )^N \\
& = \mathbb{E} \left ( \exp - \sum_{p=1}^{r_N} Z_{0,p}(r_N) f(p/N^2) \times \mathbb{E} \left ( \exp - \sum_{i \geq 0} \sum_{p=1}^{r_N} Z_{i,p}(r_N) f(p/N^2) \right )^{C_N} \right )^N \\
& = \mathbb{E} \left ( \exp - \sum_{p=1}^{r_N} \mathds{1}_{\{P_N = p\}} f(p/N^2) \times \mathbb{E} ( \exp - \langle \cP^{(N)}(r_N), f \rangle )^{C_N} \right )^N \\
& = \mathbb{E} \left ( \exp - f(P_N/N^2) \times \mathbb{E} ( \exp - \langle \cP^{(N)}_N(r_N), f \rangle )^{C_N/N} \right )^N \\
& = \mathbb{E} \left ( \exp - f(P_N/N^2) \times (\exp - \kappa_N(f))^{C_N/N} \right )^N
\end{align*}
and the result follows.
\end{proof}
This lemma allows us to give an equation solved by any limit point of $(\kappa_N(f))$ (which exist by Lemma \ref{lem:tightness}).
\begin{prop} \label{prop:limcumu}
For $f \in C_K$, any limit point $\kappa(f)$ of $(\kappa_N(f))$ is the unique solution to the following equation
\begin{equation} \label{eq:cumu}
\exp - \kappa(f) = \lambda \left (\int_0^{+ \infty} (1-e^{-f(x)}) \mu^c(\mathrm{d}x) + \int_0^{+ \infty} \left ( 1 - e^{-f(c)- \kappa(f) x} \right ) \theta(\mathrm{d}x) \right ).
\end{equation}
\end{prop}
\noindent Note that the first term on the right-hand side is the cumulant of a Poisson random measure with intensity $\lambda \mu^c$.
\begin{proof}
We assume for simplicity that $(\kappa_N(f))$ converges to some $\kappa(f)$. Let us investigate the behavior of the right-hand term of Equation \eqref{eq:cumuN}. The expectation therein tends to $1$, so we just have to study
\[
\begin{split}
N \mathbb{E} \left (1 - e^{-f (P_N/N^2) - \kappa_N(f) C_N/N} \right ) = & N \mathbb{E} \left ( \left ( 1 - e^{-f(P_N/N^2)} \right ) \mathds{1}_{\{P_N < r_N\}} \right ) \\
& + N \mathbb{E} \left ( \left ( 1 - e^{-f (r_N/N^2)-\kappa_N(f) C_N/N} \right ) \mathds{1}_{\{P_N = r_N\}} \right ) .
\end{split}
\]
\begin{enumerate}[fullwidth]
\item We may rewrite the first term on the RHS as
\[
\mathbb{P}(P_N < r_N) N \mathbb{E} \left ( 1 - e^{-f(P_N/N^2)} \middle | P_N < r_N \right ) = (1-p_N) \int_0^{+ \infty} \left ( 1 - e^{-f(x)} \right ) N \widetilde{\pi}^{(N)}(\mathrm{d}x).
\]
Since $x \mapsto 1 - e^{-f(x)} \in C_K$, Lemma \ref{lem:convmeas} ensures that this tends to
\[
\lambda \int_0^{+ \infty} \left ( 1 - e^{-f(x)} \right ) \mu^c(\mathrm{d}x).
\]
\item Now, the second term on the RHS is
\begin{align*}
& N \mathbb{E} \left ( \left ( 1 - e^{-f (r_N/N^2) - \kappa_N(f) C_N/N} \right ) \mathds{1}_{\{P_N = r_N\}} \right ) \\
& = N p_N \int_0^{+ \infty} \left ( 1 - e^{-f (r_N/N^2) - \kappa_N(f) x} \right ) \widetilde{\g}^{(N)}(\mathrm{d}x) \\
& := N p_N \int_0^{+ \infty} g_N(x) \widetilde{\g}^{(N)}(\mathrm{d}x).
\end{align*}
The sequence $(g_N)$ converges pointwise to
\[
g \colon x \mapsto 1 - e^{-f(c)-\kappa(f) x}
\]
and all the $g_N$'s are bounded and uniformly Lipschitz-continuous. Since $\widetilde{\g}^{(N)} \to \theta$ by Lemma \ref{lem:convmeas}, it is then straightforward that
\[
\int_0^{+ \infty} g_N(x) \widetilde{\g}^{(N)}(\mathrm{d}x) \to \int_0^{+ \infty} g(x) \theta(\mathrm{d}x).
\]
The result then follows from the convergence $N p_N \to \lambda$.
\item It only remains to prove that the equation has a unique solution. Taking $x = e^{-\kappa(f)}$ as the unknown, we may rewrite it
\[
x = A - B \mathbb{E} (x^{\sigma \sqrt{c} W_1^+}),
\]
with $B > 0$. The function $x \mapsto x - A + B \mathbb{E} (x^{\sigma \sqrt{c} W_1^+})$ has derivative
\[
1 + B \sigma \sqrt{c}\, \mathbb{E} ( W_1^+ x^{\sigma \sqrt{c} W_1^+ - 1}).
\]
This last quantity is positive, whence the result follows. \qedhere
\end{enumerate}
\end{proof}
The last step is to check that the measure $\eta$ defined in Section \ref{sec:heuristics} has the same law as the limit measure we just obtained, and to this end, one only needs to prove the following.
\begin{prop} \label{prop:samecumu}
The cumulant of the random measure $\eta$ solves Equation \eqref{eq:cumu}.
\end{prop}
\begin{proof}
We denote by $T(\mathrm{f})$ a tree constructed as in Section \ref{sec:heuristics}, but starting instead from an ancestor of fertility $\mathrm{f}$, and let $\eta(\mathrm{f})$ be the corresponding measure. For $g \in C_K$, consider
\[
\phi(\mathrm{f}) = \mathbb{E} ( \exp - \langle \eta(\mathrm{f}) + \delta_c, g \rangle ).
\]
Note that we count $\delta_c$ for the virtual ancestor to simplify the computations (in particular, the cumulant of $\eta \eqlaw \eta(1)$ is $- \ln \phi(1) - g(c)$). Indeed, the branching property shows that in the tree $T(\mathrm{f})$, conditionally on the number $k$ of individuals at the first generation, which is Poissonian with parameter $\lambda \mathrm{f}$, each subtree rooted at the first generation accounts for a measure $\eta^j$ such that
\begin{itemize}
\item the $\eta^j$'s are independent from the measure $\nu_{\emptyset}(\mathrm{f}_{\emptyset}) + \delta_c$ generated by the root,
\item $\eta^j$ has the same law as $\eta(\mathrm{f}_j) + \delta_c$, where the $\mathrm{f}_j$'s are i.i.d. with law $\theta$.
\end{itemize}
Hence, conditioning on the number of children at the first generation and using the exponential formula for Poisson measures, we have
\begin{align*}
\phi(\mathrm{f}) & = \mathbb{E} (\exp - \langle \nu_{\emptyset}(\mathrm{f}) + \delta_c , g \rangle ) \sum_{k \geq 0} e^{-\lambda \mathrm{f}} \frac{(\lambda \mathrm{f})^k}{k!} \left ( \int_0^{+ \infty} \mathbb{E} ( \exp - \langle \eta(s) + \delta_c,g \rangle ) \theta(\mathrm{d}s) \right)^k \\
& = e^{- \lambda \mathrm{f}} e^{-g(c)} \mathbb{E} ( \exp - \langle \nu_{\emptyset}(\mathrm{f}), g \rangle ) \sum_{k \geq 0} \frac{(\lambda \mathrm{f})^k}{k!} \left ( \int_0^{+ \infty} \phi(s) \theta(\mathrm{d}s) \right)^k \\
& = e^{- \lambda \mathrm{f}} e^{-g(c)} \exp \left ( - \mathrm{f} \lambda \int_0^{+ \infty} (1 - e^{-g(s)}) \mu^c(\mathrm{d}s) \right ) \exp \left ( \lambda \mathrm{f} \int_0^{+ \infty} \phi(s) \theta(\mathrm{d}s) \right ) \\
& = \exp - \left ( g(c) + \mathrm{f} \lambda \int_0^{+ \infty} (1 - e^{-g(s)}) \mu^c(\mathrm{d}s) + \lambda \mathrm{f} \left ( 1 - \int_0^{+ \infty} \phi(s) \theta(\mathrm{d}s) \right ) \right ) .
\end{align*}
This shows that
\[
\phi(\mathrm{f})e^{g(c)} = \left ( \phi(1) e^{g(c)} \right )^{\mathrm{f}}.
\]
Plugging this in the above formula readily shows that $\Phi := - \ln \phi(1)$ solves
\[
\Phi = g(c) + \lambda \int_0^{+ \infty} (1-e^{-g(s)}) \mu^c(\mathrm{d}s) + \lambda \left ( 1 - \int_0^{+ \infty} e^{-g(c)-s (\Phi-g(c))} \theta(\mathrm{d}s) \right )
\]
so that $\Phi - g(c)$, which is the cumulant of $\eta$, also solves \eqref{eq:cumu}.
\end{proof}
\subsection{Genealogy of the islands}
\subsubsection{Introduction}
The arguments and heuristics mentioned in Section \ref{sec:heuristics} should make it clear that a result concerning the genealogy of the islands could be obtained, which is lost when considering merely the empirical measure $\cP^{(N)}_N(r_N)$. We shall thus now give an idea of what the ``genealogical tree'' of the islands looks like in the limit when $N \to + \infty$. Obviously, this tree is infinite, since e.g. we start from $N \to + \infty$ islands. On the other hand, the heuristics of Section \ref{sec:heuristics} suggest that the tree consisting only of fertile islands should be finite, more precisely be a Galton-Watson tree with the (critical) Cox reproduction law $\gammaTh$, so that the whole genealogical tree of the islands has a.s. finite height (but infinite width). Inspired by Definition 1 in \cite{BertoinToA}, we will now introduce the definition of a tree-indexed Continuous State Branching Process with types\footnote{The types being what we called populations, but we wish to give the most general definition here.}.
Let us define in the following $\mathcal{M}_{\mathrm{inf}}$ as the subset of $\mathcal{M}^+$ of measures which integrate $1$ at infinity. In particular, if $\mu \in \mathcal{M}_{\mathrm{inf}}$, we can rank the atoms of a Poisson measure with intensity $\mu$ in the decreasing order, which allows the following definition to make sense. If $\mu$ is finite, there is only a finite number of such atoms, and we shall always complete this decreasing sequence with an infinite sequence of zeros.
\begin{defn}
Consider a measurable space $T$, and a family of $\sigma$-finite measures $(\rho_t)_{t \in T}$ on $T \times (0,+ \infty)$, such that, for every $t \in T$, $\rho_t ( T \times \cdot) \in \mathcal{M}_{\mathrm{inf}}$. Fix $t_0 \in T$ and $\mathrm{f}_0 \geq 0$. A tree-indexed CSBP with types, with reproduction laws $(\rho_t)_{t \in T}$, started from $(t_0,\mathrm{f}_0)$, is a process $(\mathcal{Z}_u)_{u \in \mathcal{U}}$ indexed by the universal tree $\mathcal{U}$, with values in $T \times (0,+ \infty)$, such that
\begin{itemize}
\item $\mathcal{Z}_{\emptyset}= (t_0,\mathrm{f}_0)$ a.s.;
\item for every $k \in \mathbb{Z}^+$, conditionally on $(\mathcal{Z}_u, u \in \mathcal{U}, |u| \leq k)$,
\begin{itemize}
\item the sequences $(\mathcal{Z}_{u \, j})_{j \in \mathbb{N}}$, for $|u| = k$, are independent;
\item for $|u| = k$, writing $\mathcal{Z}_u = (t,\mathrm{f})$, the sequence $(\mathcal{Z}_{u \, j})_{j \in \mathbb{N}}$ is distributed as the family of the atoms of a Poisson measure with intensity $\mathrm{f} \rho_t$, where atoms are repeated according to their multiplicity and ranked in the decreasing order of their second coordinate.
\end{itemize}
\end{itemize}
\end{defn}
Notice that the branching property holds with respect to the second variable, that is, the independent sum of a CSBP with parameters $(t_0,\mathrm{f}_0,(\rho_t))$ and one with parameters $(t_0,\mathrm{f}'_0,(\rho_t))$ is a CSBP with parameters $(t_0,\mathrm{f}_0+\mathrm{f}'_0,(\rho_t))$.
\begin{figure}[htb]
\centering
\includegraphics[width=\columnwidth]{Tree_of_isles_reordered.pdf}
\caption{The tree of isles from Figure \ref{fig:tree_of_isles_2}, and its reordering.}
\label{fig:tree_of_isles_reordered}
\end{figure}
Let us define the object we shall study. As before, we now call the types ``population'', the vertices ``islands'' and the descendants ``colonies''. We have a forest of i.i.d. trees of isles
\[
\left ( \mathbb{A}^1(r_N),\dots,\mathbb{A}^N(r_N) \right ).
\]
We root these trees at $1, \dots, N$, and link them to $\emptyset$, to which we give population $r_N$. We call $\mathbb{F}^N$ the tree thus obtained and once again, we give population $0$ to the islands in $\mathcal{U} \backslash \mathbb{F}^N$. We define the tree $\mathbb{F}^{N\downarrow}$ by reordering the colonies of each island, along with their subtrees, in the decreasing order of their population, leaving unchanged the initial order if ties occur: see Figure \ref{fig:tree_of_isles_reordered}. The following result deals with its convergence. We may actually prove a slightly stronger result and to this end, let us first introduce the $\mathcal{U}$-indexed processes $\mathcal{Z}^N$ and $\mathcal{Z}^{N\downarrow}$ defined by
\[
\mathcal{Z}^N_u = \left ( \mathbb{F}^N_u / N^2, k_u(\mathbb{F}^N)/N \right ), \quad \mathcal{Z}^{N\downarrow}_u = \left ( \mathbb{F}^{N\downarrow}_u / N^2, k_u(\mathbb{F}^{N\downarrow})/N \right ), \quad u \in \mathcal{U},
\]
where we recall that $k_u(\mathbb{F}^N)$ is the number of colonies of $u$ in $\mathbb{F}^N$, or in formulas,
\[
k_u(\mathbb{F}^N) = \# \{ j \in \mathbb{N}, \mathbb{F}^N_{u \, j} \neq 0 \}.
\]
We shall prove the following result, where we denote by $q_1$ the projection on the first coordinate.
\begin{thm} \label{th:convtree}
The process $(\mathcal{Z}^{N\downarrow})$ converges as $N \to + \infty$, in the sense of finite-dimensional distributions, to $\mathcal{Z}$, where $\mathcal{Z}$ is a tree-indexed CSBP with types, started from $(c,1)$, with reproduction law $\lambda (\mu^c + \delta_c) \otimes \theta$ for the type $c$, and $\delta_0 \otimes \delta_0$ for the others. In particular $(\mathbb{F}^{N\downarrow})$ converges to $q_1(\mathcal{Z})$.
\end{thm}
It is worth noticing two features of this result. First, one cannot construct the limit of $(\mathbb{F}^{N\downarrow})$ directly. To this end, we need first to construct $\mathcal{Z}$, and then project it on the first coordinate; but the knowledge of the second coordinate is necessary to get the whole process. This is why we prove the more general convergence of the two-coordinate process in order to obtain the convergence of $(\mathbb{F}^{N\downarrow})$.
Note also that this result agrees with Theorem \ref{th:conv1}, in that the measure generated by the tree $\mathcal{Z}$ (except for its root)
\[
\eta' := \sum_{u \in \mathcal{U}^*} \delta_{q_1(\mathcal{Z}_u)} \mathds{1}_{\{q_1(\mathcal{Z}_u) \neq 0\}}
\]
has the same law as the measures of Theorem \ref{th:conv1}. Indeed, similar calculations as in the proof below and the proof of Proposition \ref{prop:samecumu} can be carried out to show that the cumulant of $\eta'$ solves Equation \eqref{eq:cumu}. However, since we only show a result dealing with the convergence of finite-dimensional marginals, we cannot deduce Theorem \ref{th:conv1} from Theorem \ref{th:convtree}. Doing this would require to introduce a relevant topology on tree-indexed CSBPs (with types or not) and prove the associated tightness results. This technical and long detour would not bring, we believe, much more understanding of the model.
\subsubsection{Some results about Poisson random measures} \label{sec:threelemmas}
Let us start with three preliminary lemmas. The first is essentially a classical fact (see e.g. \cite{BertoinToA}), and merely rephrases Theorem 16.18 in \cite{Kallenberg}.
\begin{lemma} \label{lem:convpoisson}
Let $(\nu^{(N)})$ be a sequence of probability measures on $(0,+ \infty)$, and assume that
\[
N \nu^{(N)} \Rightarrow \nu
\]
as $N \to + \infty$, for some $\nu \in \mathcal{M}_{\mathrm{inf}}$. Let, for each $N$, $(Y^{(N)}_i)_{i \geq 0}$ be a sequence of i.i.d. random variables with law $\nu^{(N)}$, and take, for $\mathrm{f} > 0$, $(\mathrm{a}^N_1(\mathrm{f}),\dots,\mathrm{a}^N_{\mathrm{f} N}(\mathrm{f}))$ the reordering of $(Y^{(N)}_i)_{i = 1, \dots, \mathrm{f} N}$ in the decreasing order. Then, for every fixed $k \geq 1$,
\[
(\mathrm{a}^N_1(\mathrm{f}),\dots,\mathrm{a}^N_k(\mathrm{f})) \to (\mathrm{a}_1(\mathrm{f}),\dots,\mathrm{a}_k(\mathrm{f})),
\]
where $(\mathrm{a}_i(\mathrm{f}))_{i \geq 0}$ is the reordering in the decreasing order of the atoms of a Poisson random measure with intensity $\mathrm{f} \nu$.
\end{lemma}
Consider in the following $\nu \in \mathcal{M}_{\infty}$, and let $(a_i(\mathrm{f}))_{i \geq 1}$ be as in the previous statement. To make sense of the coming results, and since it is of use in the proofs, let us recall how we can construct a Poisson measure with intensity $\mathrm{f} \nu$ in a measurable way. Let $(A_i)_{i \geq 1}$ be a partition of $\mathbb{R}^+$ in measurable subsets of finite $\nu$-measure. Define for $i \geq 1$ such that $\nu(A_i) > 0$,
\[
\lambda_i = \nu(A_i), \quad \nu_i = \nu(\cdot \cap A_i)/\lambda_i.
\]
Independently for each $i$, let $(N^i(\mathrm{f}))_{\mathrm{f} \geq 0}$ be a Poisson process with intensity $\lambda_i$ and $(X^i_j)_{j \geq 1}$ a sequence of i.i.d. random variables with law $\nu_i$, and define
\[
\xi^i(\mathrm{f}) = \sum_{j=1}^{N^i(\mathrm{f})} \delta_{X^i_j}, \quad \xi = \sum_{i \geq 1} \xi^i.
\]
Then $\xi(\mathrm{f})$ is a Poisson random measure with intensity $\mathrm{f} \nu$.
\begin{lemma} \label{lem:contpoisson}
For every continuous $g \colon \mathbb{R}^+ \to \mathbb{R}$ with compact support and every $r \in \mathbb{N}$, the mapping
\[
\mathrm{f} \mapsto \mathbb{E} ( g ( a_r(\mathrm{f})))
\]
is continuous on $\mathbb{R}^+$.
\end{lemma}
\begin{proof}
Let $\varepsilon > 0$. The measure $\nu$ is in $\mathcal{M}_{\infty}$, so $\alpha := \nu( ( \varepsilon, + \infty ) )$ is finite. Then $a_r(\mathrm{f}) > \varepsilon$ if and only if there are $r$ or more atoms of the Poisson measure in $(\varepsilon,+ \infty)$, so
\[
\mathbb{P} ( a_r (\mathrm{f}) \in (\varepsilon , + \infty) ) = \sum_{k \geq r} e^{- \mathrm{f} \alpha} \frac{(\mathrm{f} \alpha)^k}{k!}
\]
which is clearly continuous in $\mathrm{f}$. Then, the complementary probability $\mathbb{P}(a_r(\mathrm{f}) \in [0, \varepsilon))$ is also continuous. The result thus holds for any indicator function, and the result follows by standard approximations.
\end{proof}
\begin{lemma} \label{lem:unifconvpoisson}
In the notation of Lemma \ref{lem:convpoisson}, for every continuous $g \colon \mathbb{R}^+ \to \mathbb{R}$ with bounded variation and compact support, and every $r \in \mathbb{N}$,
\[
\mathbb{E}(g(a^{(N)}_r(\mathrm{f}))) \to \mathbb{E}(g(a_r(\mathrm{f})))
\]
uniformly on the compact sets of $\mathbb{R}^+$.
\end{lemma}
\noindent This is in particular true, and that is all we will use, for a Lipschitz-continuous $g$ with compact support.
\begin{proof}
The function $g$ has bounded variation, so it may be written as $g = g^+ - g^-$, where $g^+$ and $g^-$ are nondecreasing and continuous. Take $A > 0$ and consider the mappings
\[
\phi_N^{\pm} \colon \mathrm{f} \mapsto \mathbb{E}(g^{\pm}(a^{(N)}_r(\mathrm{f})))
\]
on $[0,A]$. It is obvious, in the construction above, that $\mathrm{f} \mapsto a^{(N)}_r(\mathrm{f})$ is nondecreasing, and thus so are the $\phi_N^{\pm}$. Lemma \ref{lem:convpoisson} ensures that $a^{(N)}_r(\mathrm{f}) \to a_r(\mathrm{f})$, so by dominated convergence, $\phi_N^{\pm}$ converges pointwise to $\phi^{\pm}$, where
\[
\phi^{\pm}(\mathrm{f}) = \mathbb{E}(g^{\pm}(a_r(\mathrm{f}))).
\]
Now, Lemma \ref{lem:contpoisson} shows that $\phi^{\pm}$ are continuous, so the result follows from Dini's second theorem.
\end{proof}
\subsubsection{Proof of Theorem \ref{th:convtree}}
The proof will rely heavily on Lemmas \ref{lem:treeGW1} and \ref{lem:convmeas}, which we will use without further notice.
\begin{enumerate}[fullwidth]
\item Let us start with some preliminary definitions. Recall that we say that $u \in \mathbb{F}^{(N)}$ if $\mathbb{F}^{(N)}_u \neq 0$. For $u \in \mathbb{F}^{(N)}$, $\mathbb{F}^{(N)}_u$ is called its population, instead of type, and $k_u(\mathbb{F}^{(N)})/N$ its fertility. An island in $\mathbb{F}^{(N)}$ of population $r^{(N)} := r_N/N^2$ is said to be fertile. For a function $\mathcal{Z}$ indexed by $\mathcal{U}$ and $u \in \mathcal{U}$, $\mathcal{Z}^{u +}$ is the function obtained by shifting the subtree rooted at $u$ back to $\emptyset$, so for instance, $\mathcal{Z}^{u+}_{\emptyset} = \mathcal{Z}_u$.
Let $\mathcal{F}$ be the set of functions from $\mathcal{U}$ to $(\mathbb{R}^+)^2$. For $k \in \mathbb{Z}^+$, we define $E_k$ to be the set of functions $g$ from $\mathcal{F}$ to $\mathbb{R}^+$ which can be written as
\begin{equation} \label{Ek}
g(\mathcal{T}) = g_{\emptyset} (\mathcal{T}_{\emptyset}) g_1(\mathcal{T}_1) \dots g_s(\mathcal{T}_s) g_{1 \, 1}(\mathcal{T}_{1 \, 1}) \dots g_{s \dots s}(\mathcal{T}_{s \dots s})
\end{equation}
for every $\mathcal{T} \in \mathcal{F}$, for some $s \in \mathbb{N}$, where the $g_u$'s are Lipschitz-continuous with compact support in $(\mathbb{R}^+)^2$, and the last index consists of $k$ letters $s$. In particular, such a $g$ depends only on the $k$ first generations. If $\mathcal{T}$ consists only of a root with population $p$, that is if $\mathcal{T}_{\emptyset} = (p,0)$ and $\mathcal{T}_u = (0,0)$ for $u \in \mathcal{U}^*$, then we denote $g(p) := g(\mathcal{T}) = g_{\emptyset}(p,0)$.
\item In the sequel, we will need to consider $\mathbb{F}^{(N)}(\mathrm{f})$ a tree constructed as $\mathbb{F}^{(N)}$, but with $\mathrm{f} N$ islands at generation $1$ (so $\mathbb{F}^{(N)}$ has the law of $\mathbb{F}^{(N)}(1)$). We say that this tree has fertility $\mathrm{f}$. More generally, we may take $\mathrm{f}$ to be random with some law $\nu$, and we write $\mathbb{F}^{(N)}(\nu)$ for the corresponding tree. We may also construct as above $\mathbb{F}^{(N)}_r(\nu)$, $\mathcal{Z}^{(N)}(\nu)$ and $\mathcal{Z}^{(N)}_r(\nu)$. Rather than $\mathbb{E}(g(\mathcal{Z}^{(N)}_r(\nu)))$, we may write $\mathbb{E}_{\nu}(g(\mathcal{Z}^{(N)}))$. We need these trees because of the following observation: consider a fertile island $u \in \mathbb{F}^{(N)}(\nu)$, $u \neq \emptyset$. Then $u$ has a fertility chosen according to $\widetilde{\gamma}^{(N)}$, so the branching property shows that $\mathbb{F}^{(N),u+}$ has the law of $\mathbb{F}^{(N)}(\widetilde{\gamma}^{(N)})$, and $\mathcal{Z}^{(N),u+}$ has the law of $\mathcal{Z}^{(N)}(\widetilde{\gamma}^{(N)})$.
Now, take an island $u \in \mathbb{F}^{(N)}(\nu)$, and assume it is fertile with fertility $\mathrm{f}$. Amongst $u \, 1, \dots, u \, \mathrm{f} N$, each has independently a probability $p_N$ to be fertile, so the number of fertile islands is binomial with parameters $\mathrm{f} N$ and $p_N$. The fertile trees have the law of $\mathbb{F}^{(N)}(\gamma^{(N)})$, and the other ones have the law of the trivial tree with only a root and population chosen according to $\widetilde{\pi}^{(N)}$. Let us condition on $u$ having $k$ fertile colonies, and consider the populations $\mathbb{Y}^{(N)}_1(\mathrm{f}),\dots,\mathbb{Y}^{(N)}_{\mathrm{f} N - k}(\mathrm{f})$ of the non-fertile children. $(\mathbb{Y}^{(N)}_1(\mathrm{f}),\dots,\mathbb{Y}^{(N)}_{\mathrm{f} N - k}(\mathrm{f}))$ is then an i.i.d. sequence with law $\widetilde{\pi}^{(N)}$.
Let us reorder the tree and take a look at the first $s$ colonies of the root of $\mathbb{F}^{(N)}_r(\nu)$. Take $\mathbb{X}^{(N)}_{\mathrm{f}}$ a binomial variable with parameters $\mathrm{f} N$ and $p_N$. For $1 \leq i \leq s$, the analysis above shows that with probability $\mathbb{P}(\mathbb{X}^{(N)}_{\mathrm{f}} \wedge s = i)$, the following happens:
\begin{itemize}
\item the $i$ first islands are fertile;
\item the population of the $s-i$ following islands has the law of $(a^{(N)}_{1}(\mathrm{f}),\dots,a^{(N)}_{s-i}(\mathrm{f}))$, where we denote by $(a^{(N)}_1(\mathrm{f}),\dots,a^{(N)}_{\mathrm{f} N - i}(\mathrm{f}))$ the reordering in decreasing order of a $(\mathrm{f} N - i)$-sample of law $\widetilde{\pi}^{(N)}$.
\end{itemize}
\item Let us now reason by induction, the assumption $\mathcal{P}_r$ at rank $r$ being that for every $f \in E_k$ and all laws $\nu^{(N)}, \nu$ on $\mathbb{R}^+$ such that $\nu^{(N)} \convlaw \nu$, we have
\begin{equation} \label{rec}
\mathbb{E}_{\nu^{(N)}} \left ( f \left ( \mathcal{Z}^{(N)}_r \right ) \right ) \to \mathbb{E}_{\nu} \left ( f (\mathcal{Z}) \right ),
\end{equation}
where $\mathcal{Z}$ is defined in Theorem \ref{th:convtree}. This obviously implies the result by picking $\nu^{(N)} = \nu = \delta_1$.
$\mathcal{P}_0$ is easy to check by weak convergence of $\nu^{(N)}$ to $\nu$ and uniform continuity of $f_{\emptyset}$. So assume $\mathcal{P}_{r-1}$, and take $g \in E_r$, which we write
\[
g(\mathcal{Z}) = g_{\emptyset}(\mathcal{Z}_{\emptyset}) g_1(\mathcal{Z}^{1+}) \dots g_s(\mathcal{Z}^{s+}),
\]
with $g_1,\dots,g_s \in E_{r-1}$.
By the analysis above, we may condition the tree $\mathbb{F}^{(N)}(\nu^{(N)})$ on the fertility of the root and its number of fertile children, to obtain
\begin{align*}
\mathbb{E}_{\nu^{(N)}}(g(\mathcal{Z}^{(N)})) & = \int \nu^{(N)}(\mathrm{d}\mathrm{f}) g_{\emptyset}(r^{(N)},\mathrm{f}) \times \\
& \left ( \sum_{i=0}^{s-1} \mathbb{P}(\mathbb{X}^{(N)}_{\mathrm{f}} = i) \mathbb{E}_{\widetilde{\gamma}^{(N)}}(g_1(\mathcal{Z}^{(N)})) \dots \mathbb{E}_{\widetilde{\gamma}^{(N)}}(g_i(\mathcal{Z}^{(N)})) \right. \times \\
& \quad \mathbb{E}(g_{i+1}(a^{(N)}_1(\mathrm{f}))) \dots \mathbb{E}(g_s(a^{(N)}_{s-i}(\mathrm{f}))) \\
& + \left. \sum_{i \geq s} \mathbb{P}(\mathbb{X}^{(N)}_{\mathrm{f}} = i) \mathbb{E}_{\widetilde{\gamma}^{(N)}}(g_1(\mathcal{Z}^{(N)})) \dots \mathbb{E}_{\widetilde{\gamma}^{(N)}}(g_s(\mathcal{Z}^{(N)})) \right ) \\
& = \sum_{i=0}^{s-1} \mathbb{E}_{\widetilde{\gamma}^{(N)}}(g_1(\mathcal{Z}^{(N)})) \dots \mathbb{E}_{\widetilde{\gamma}^{(N)}}(g_i(\mathcal{Z}^{(N)})) \times \\
& \int \nu^{(N)}(\mathrm{d}\mathrm{f}) g_{\emptyset}(r^{(N)},\mathrm{f})\mathbb{P}(\mathbb{X}^{(N)}_{\mathrm{f}} = i) \mathbb{E}(g_{i+1}(a^{(N)}_1(\mathrm{f}))) \dots \mathbb{E}(g_s(a^{(N)}_{s-i}(\mathrm{f}))) \\
& + \mathbb{E}_{\widetilde{\gamma}^{(N)}}(g_1(\mathcal{Z}^{(N)})) \dots \mathbb{E}_{\widetilde{\gamma}^{(N)}}(g_s(\mathcal{Z}^{(N)})) \int g_{\emptyset}(r^{(N)},\mathrm{f}) \mathbb{P}(\mathbb{X}^{(N)}_{\mathrm{f}} \geq s) \nu^{(N)}(\mathrm{d}\mathrm{f}).
\end{align*}
By the induction hypothesis $\mathcal{P}_{r-1}$ and Lemma \ref{lem:convmeas},
\[
\mathbb{E}_{\widetilde{\gamma}^{(N)}}(g_i(\mathcal{Z}^{(N)})) \to \mathbb{E}_{\theta}(g_i(\mathcal{Z})).
\]
Define $\mathbb{X}_{\mathrm{f}}$ a variable with Poisson law with parameter $\lambda \mathrm{f}$. Let us prove that the quantity
\begin{equation} \label{eq:lastterm}
\int \nu^{(N)}(\mathrm{d}\mathrm{f}) g_{\emptyset}(r^{(N)},\mathrm{f})\mathbb{P}(\mathbb{X}^{(N)}_{\mathrm{f}} = i) \mathbb{E}(g_{i+1}(a^{(N)}_1(\mathrm{f}))) \dots \mathbb{E}(g_s(a^{(N)}_{s-i}(\mathrm{f})))
\end{equation}
converges to
\begin{equation} \label{eq:limlastterm}
\int \nu(\mathrm{d}\mathrm{f}) g_{\emptyset}(c,\mathrm{f})\mathbb{P}(\mathbb{X}_{\mathrm{f}} = i) \mathbb{E}(g_{i+1}(a_1(\mathrm{f}))) \dots \mathbb{E}(g_s(a_{s-i}(\mathrm{f}))),
\end{equation}
where $(a_i(\mathrm{f}))_{i \geq 1}$ is the reordering in decreasing order of the atoms of a Poisson measure with intensity $\mathrm{f} \mu^c$. According to Lemmas \ref{lem:convmeas} and \ref{lem:unifconvpoisson}, we obtain that
\[
\mathbb{E}(g_{i+j}(a^{(N)}_j(\mathrm{f}))) \to \mathbb{E}(g_{i+j}(a_j(\mathrm{f}))), \quad j=1 \dots s-i,
\]
uniformly on the compact sets of $\mathbb{R}^+$. Using this fact and the weak convergence of $\nu^{(N)}$ to $\nu$, it is easy to see that \eqref{eq:lastterm} has the same limit, if any, as
\begin{equation} \label{eq:lastterm2}
\int \nu^{(N)}(\mathrm{d}\mathrm{f}) g_{\emptyset}(r^{(N)},\mathrm{f})\mathbb{P}(\mathbb{X}^{(N)}_{\mathrm{f}} = i) \mathbb{E}(g_{i+1}(a_1(\mathrm{f}))) \dots \mathbb{E}(g_s(a_{s-i}(\mathrm{f}))).
\end{equation}
The difference between \eqref{eq:lastterm2} and \eqref{eq:limlastterm} is bounded, up to a constant, by
\[
\begin{split}
& \int \left |g_{\emptyset} (r^{(N)},\mathrm{f}) - g_{\emptyset}(c,\mathrm{f})\right | \nu^{(N)}(\mathrm{d}\mathrm{f}) + \sup \left | g_{\emptyset} \right | \int_{\mathrm{supp} \: g_{\emptyset}} \nu^{(N)}(\mathrm{d}\mathrm{f}) \left | \mathbb{P}(\mathbb{X}^{(N)}_{\mathrm{f}} = i) - \mathbb{P}(\mathbb{X}_{\mathrm{f}} = i) \right | \\
+ & \left | \int g_{\emptyset} (c,\mathrm{f}) \nu^{(N)}(\mathrm{d}\mathrm{f}) - \int g_{\emptyset}(c,\mathrm{f}) \nu(\mathrm{d}\mathrm{f}) \right |.
\end{split}
\]
The first and last terms tend to 0 by uniform continuity of $g_{\emptyset}$ and weak convergence of $\nu^{(N)}$ to $\nu$. For the second one, Le Cam's inequality \cite{LeCam} gives
\[
\left | \mathbb{P}(\mathbb{X}^{(N)}_{\mathrm{f}} = i) - \mathbb{P}(\mathbb{X}_{\mathrm{f}} = i) \right | \leq \sum_{k=1}^{\mathrm{f} N} p_N^2 = \mathrm{f} \, O \left ( \frac1N \right ),
\]
where the $O$ is uniform in $\mathrm{f}$, which ensures the expected convergence. By the same reasoning, we also have that
\[
\int g_{\emptyset}(r^{(N)},\mathrm{f}) \mathbb{P}(\mathbb{X}^{(N)}_{\mathrm{f}} \geq i) \nu^{(N)}(\mathrm{d}\mathrm{f}) \to \int g_{\emptyset}(c,\mathrm{f}) \mathbb{P}(\mathbb{X}_{\mathrm{f}} \geq i) \nu(\mathrm{d}\mathrm{f}).
\]
We hence get that $\mathbb{E}_{\nu^{(N)}}(g(\mathcal{Z}^{(N)}))$ tends to
\[
\begin{split}
& \sum_{i=0}^{s-1} \mathbb{E}_{\theta}(g_1(\mathcal{Z})) \dots \mathbb{E}_{\theta}(g_i(\mathcal{Z})) \times \int g_{\emptyset}(c,\mathrm{f}) \nu(\mathrm{d}\mathrm{f}) \mathbb{E}(g_{i+1}(a_1(\mathrm{f}))) \dots \mathbb{E}(g_s(a_{s-i}(\mathrm{f}))) \mathbb{P}(\mathbb{X}_{\mathrm{f}} = i) \\
& + \mathbb{E}_{\theta}(g_1(\mathcal{Z})) \dots \mathbb{E}_{\theta}(g_s(\mathcal{Z})) \int g_{\emptyset}(c,\mathrm{f}) \mathbb{P}(\mathbb{X}_{\mathrm{f}} \geq s) \nu(\mathrm{d}\mathrm{f}).
\end{split}
\]
By a similar computation as the one just done, we can see that this is precisely $\mathbb{E}_{\nu}(g(\mathcal{Z}))$, which shows the result.
\end{enumerate}
\section{Second model: regrowing resources} \label{sec:secondmodel}
We shall now present our second model of migration under constraints, which is arguably more natural than the first one. The only difference is the migration rule: now, we shall assume that individuals will migrate when there are too many of them living at the same time on the same island. In other words, we assume that each island has regrowing resources, enough to feed $r$ people at the same time. If a birth happens when $r$ people coexist on a given island, the newborns will each migrate to a different virgin island, containing $r$ resources.
To make this model more natural, we will now consider continuous time trees, so that we can precisely tell which birth event forces certain individuals to migrate. Otherwise, imagine that at some generation, more than $r$ individuals coexist. We would then have to choose which children of which individual migrate. Doing this in a relevant way would force us to choose the migrant individuals uniformly at random, which would introduce some randomness in the construction of the tree of isles from a given (deterministic) tree, which we want to avoid.
\subsection{Tree of isles} \label{sec:treeofisles2}
Start from a finite continuous tree $\mathcal{T}$, such that two events (birth or death) do not occur at the same time (which is a.s. the case for a continuous Galton-Watson tree). Once again, to define the tree of isles, we just need to define its migrant children. To this end, consider the process $(N_t)$ counting the number of people alive at time $t$, which we assume to be c\`adl\`ag. Define $\tau$ to be the first time (if any) such that
\begin{itemize}
\item for $t \in [0,\tau)$, $N_t \leq r$;
\item $N_{\tau} > r$.
\end{itemize}
Then the $N_{\tau}- r$ rightmost individuals born at $\tau$ will be migrant children. After this, cut off the trees rooted at these individuals, and proceed identically with the pruned tree, until it is impossible to find other migrant children. See Figure \ref{fig:cont_tree} for an example.
\begin{figure}[htb]
\centering
\includegraphics[width=0.75\columnwidth]{Cont_tree.pdf}
\caption{A tree $\mathcal{T}$ and its labeling, for $r=4$. The individuals in a circle remain on the island, those in a square migrate. Here $P_r(\mathcal{T}) = 12$ and $C_r(\mathcal{T}) = 3$.}
\label{fig:cont_tree}
\end{figure}
This thus allows us to construct the tree of isles as explained in Section \ref{sec:treeofisles}. An example is given in Figure \ref{fig:tree_of_isles_2}.
\begin{figure}[htb]
\centering
\includegraphics[width=0.25\columnwidth]{Tree_of_isles_2.pdf}
\caption{The tree of isles $\mathcal{A}(r)$ drawn from the tree $\mathcal{T}$ of Figure \ref{fig:cont_tree}, for $r = 4$.}
\label{fig:tree_of_isles_2}
\end{figure}
We shall now explain how to label our tree so that the relevant information, namely its population $P(\mathcal{T})$ and number of colonies $C(\mathcal{T})$, can easily be read on the corresponding exploration process $(S_n)$. According to Section \ref{sec:explproc}, to define this labeling, we only need to specify the (Markovian) rule on how to choose the next vertex to be labeled.
We shall take the following rule $\mathcal{R}$, defined for every line $\ell$. For such a line, consider the pruned tree $\mathcal{T}_{\ell}$, for which we can define migrant individuals thanks to the above algorithm. Take $\ell = \ell^+ \cup \ell^0 \cup \ell^-$ a partition of $\ell$ into
\begin{itemize}
\item the children of migrant individuals,
\item the migrant individuals,
\item and the other individuals.
\end{itemize}
Then $\mathcal{R}$ is defined as follows:
\begin{itemize}
\item if $\ell^+ \neq \emptyset$, then $\mathcal{R}(\ell)$ is the individual in $\ell^+$ which will die first;
\item if $\ell^+ = \emptyset$ and $\ell^0 \neq \emptyset$, then $\mathcal{R}(\ell)$ is the individual in $\ell^0$ which will die first;
\item else $\mathcal{R}(\ell)$ is the individual of $\ell^-$ which will die first.
\end{itemize}
By construction, this is clearly a Markovian rule.
This algorithm is just a modification of the death-first search algorithm. Informally, we apply the latter until we observe more than $r$ people coexisting, say $r'$ of them. When this is the case, $r'-r$ newborns migrate, and for the sake of definiteness, we choose the $r' - r$ rightmost ones. We then explore their subtrees one after another by death-first search, starting with the migrant individual who dies first. When all of these subtrees are explored, we resume the exploration of the initial tree by death-first search, until we see other migrant individuals or the exploration is over. All of this is probably clearer on a picture, see Figure \ref{fig:cont_tree}.
To this labeling, we may thus associate a walk $(S_n)$. As shown on Figure \ref{fig:cont_expl_proc}, let us define
\[
\sigma_1(r) = 0, \quad \varsigma_{\infty} = \inf \{ n \geq 1, S_n = - 1 \},
\]
where as usual $\inf \emptyset = + \infty$, and successively
\[
\varsigma_i(r) = \inf \{ n > \sigma_i, S_n > r - 1 \} \wedge \varsigma_{\infty}, \quad \sigma_{i+1}(r) = \inf \{ n > \varsigma_i, S_n = r - 1 \} \wedge \varsigma_{\infty},
\]
so that the walk first hits $[r,+ \infty)$ at $\varsigma_1(r)$, then makes excursions\footnote{We shall use this word in a very loose sense, only the precise definitions should be taken as rigorous. However, and thankfully, ``Brownian excursion'' will have its usual meaning.} above $r$ on each interval $[\varsigma_i(r),\sigma_{i+1}(r))$, and below $r$ on each interval $[\sigma_{i+1}(r),\varsigma_{i+1}(r))$, before hitting $-1$ at $\varsigma_{\infty}$. To have convenient formulas, we have let all the $\sigma_i(r)$ and $\varsigma_i(r)$ be equal to $\varsigma_{\infty}$ after the walk has hit $-1$. Define as well
\[
\mathcal{O}_i(r) = (S_{\varsigma_i(r)} - (r-1)) \vee 0, \quad i \geq 1,
\]
to be the overshoot above level $r-1$ (and by definition $0$ after $-1$ is hit), and
\[
\ell_i(r) = \varsigma_i(r) - \sigma_i(r), \quad i \geq 1,
\]
so the sum of the $\ell_i(r)$'s is the time spent below $r - 1$ before hitting $-1$. The relation between the population and number of colonies of the initial island and the corresponding exploration process is the following.
\begin{figure}[htb]
\centering
\includegraphics[width=\columnwidth]{Cont_expl_proc.pdf}
\caption{The exploration process attached to the labeling of the tree of Figure \ref{fig:cont_tree}, for $r = 4$ (forgetting about the extra notation $r$). Check that $\mathcal{O}_1(r)+ \mathcal{O}_2(r) = 3 = C_r(\mathcal{T})$ and $\ell_1(r) + \ell_2(r) + \ell_3(r) = 12 = P_r(\mathcal{T})$.}
\label{fig:cont_expl_proc}
\end{figure}
\begin{lemma} \label{lem:popcolRW2}
The equalities
\[
P_r(\mathcal{T}) = \sum_{i=1}^{+ \infty} \ell_i(r), \quad C_r(\mathcal{T}) = \sum_{i=1}^{+ \infty} \mathcal{O}_i(r)
\]
hold.
\end{lemma}
\noindent In words, $P_r(\mathcal{T})$ is the time spent by $(S_n)$ under $r-1$, and $C_r(\mathcal{T})$ the sum of the overshoots above level $r-1$, both before hitting $-1$. The quantities involved are easy to follow on Figures \ref{fig:cont_tree} and \ref{fig:cont_expl_proc}, and carefully following in parallel the walk and the tree on these examples should make the following proof quite obvious.
\begin{proof}
As explained above, our labeling algorithm works as follows. As long as there are no migrant children, it is just the death-first search algorithm. As we mentioned in Section \ref{sec:deathfirst}, $1 + S_i$ is precisely the number of individuals alive between the $(i-1)$-th and the $i$-th event. Hence, the first time $i$ that $(1 + S_n)$ is greater than $r$ is the first time when more than $r$ people coexist. The supernumerary $1 + S_i - r$ migrate, and this quantity is precisely the first overshoot $\mathcal{O}_1(r)$, whereas the $i = \ell_1(r)$ first individuals visited remain on the initial island.
Then, we modify our algorithm to explore (by death-first search) the $\mathcal{O}_1(r)$ subtrees of the migrant children. According to the first part of Lemma \ref{lem:explproc}, the first exploration goes from $1 + S_i$ to $1 + S_i - 1$ while remaining above $1 + S_i - 1$, the second from $1 + S_i - 1$ to $1 + S_i - 2$ and remains above $1 + S_i - 2$, \dots, and the $\mathcal{O}_1(r)$-th and last from $1 + S_i - \mathcal{O}_1(r) + 1 = r$ to $1 + S_i - \mathcal{O}_1(r) = r - 1$ while remaining above $r-1$. Hence, when all these explorations are done, no new overshoot has been observed, and no time below $r$ has been accumulated. These subtrees have thus all been visited, and we may just as well cut them off.
The exploration of the initial tree then resumes, and the same argument can then be applied to the pruned tree, so we can thus conclude by a simple induction on the size of the tree.
\end{proof}
From now on, we replace $\mathcal{T}$ by $\mathbb{T}$, a Galton-Watson tree with reproduction law $\rho$, and construct its tree of isles $\mathbb{A}(r)$ for $r$ resources. We also replace $(S_n)$ by an actual random walk defined on the whole of $\mathbb{Z}^+$, with step distribution $\tilde{\rho}$, so Lemma \ref{lem:popcolRW2} can be rewritten as an equality in law. Unlike the first model, an island with colonies can have an arbitrarily large population. Letting $\pi_r$ be the law of the pair $(P_r(\mathbb{T}),C_r(\mathbb{T}))$, we cannot anymore write $\pi_r$ as (roughly) a product measure. It should be clear that an analogue of Lemmas \ref{lem:treeGW1} and \ref{lem:procGW1} holds. We call an $\mathbb{N}$-type Galton-Watson tree (resp. process) a multitype Galton-Watson tree (resp. process) with types in $\mathbb{N}$, and we still adopt the notation of Section \ref{sec:empmeas}.
\begin{lemma} \label{lem:GW2}
The tree of isles $\mathbb{A}(r)$ is an $\mathbb{N}$-type Galton-Watson tree, such that the population and number of colonies of each island have law $\pi_r$. Similarly, the process $(Z_{i,p}(r),\, p \geq 1)_{i \geq 0}$ is an $\mathbb{N}$-type Galton-Watson process, such that the type and number of children of each individual have law $\pi_r$.
\end{lemma}
\subsection{Result}
As in Section \ref{sec:empmeas}, we now start from $N$ independent islands and $r_N$ resources, and employ the same notation. Let us first define, for a real function $X$ and a real $x$, $\tau_x(X)$ to be the hitting time of $(x,+ \infty)$ by $X$, i.e.
\[
\tau_x(X) = \inf \{ t \geq 0, X_t > x \}, \quad \inf \emptyset = + \infty.
\]
Let $C_N = C_{r_N}(\mathbb{T})$ and $P_N = P_{r_N}(\mathbb{T})$. From Lemma \ref{lem:popcolRW2}, the probability that an island has colonies is
\begin{equation} \label{eq:probacol}
\mathbb{P}(C_N > 0) = \mathbb{P}(\tau_{r_N-1}(S) < \varsigma_{\infty}) \sim \frac{1}{r_N}.
\end{equation}
The last equivalent is an extension of the classical gambler's ruin estimates, and follows e.g. from Theorem 2, p.~18, in \cite{Takacs} and a Tauberian theorem (pp.~203--204 of \cite{Takacs}). We shall thus assume that, for some $c > 0$,
\begin{equation} \label{eq:hyprN2}
\lim_{N \to + \infty} \frac{r_N}{N} = c > 0.
\end{equation}
Beware that, as we mentioned, this is a different rescaling from \eqref{eq:hyprN1} for the first model. We shall prove the following, where $\tilde{c} := c / \sigma$ and we recall that $\mu$ is the measure on $(0,+ \infty)$ with density $1/(2 x^{3/2})$.
\begin{thm} \label{th:conv2}
Under the assumption \eqref{eq:hyprN2}, the sequence $(\mathcal{P}^{(N)}_N(r_N))$ converges in distribution in $\mathcal{M}^+$ to a random measure $\mathcal{P}$, characterized by a cumulant $\kappa(f)$, unique solution to
\begin{equation} \label{eq:cumu2}
\exp - \kappa(f) = \lambda \int_0^{+ \infty} \left ( 1 - e^{-f(x)} \right ) \mu(\mathrm{d}x) + \frac1c \mathbb{E} \left ( 1 - e^{-f(P) - \kappa(f) C} \right )
\end{equation}
for every $f \in C_K$, where $(P,C)$ is a couple of random variables with Laplace transform
\begin{equation} \label{eq:lapltransPC}
\mathbb{E}(\exp - \alpha P - \beta C) = \left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\sinh \sqrt{2 \alpha} \tilde{c}} \right )^2 \frac{1}{\beta c + \sqrt{2 \alpha} \tilde{c} \coth \sqrt{2 \alpha} \tilde{c}}.
\end{equation}
\end{thm}
We shall follow the same route as for Theorem \ref{th:conv1}: write an equation for the cumulant of $\mathcal{P}^{(N)}(r_N)$, study its convergence, and prove that it has a unique solution. Obviously, we also need to prove the tightness, but it is obtained as for Lemma \ref{lem:tightness}.
\subsection{Interpretation} \label{sec:interpretation}
As for the first model, let us try to give a reasonable interpretation of the limiting measure $\mathcal{P}$. First, let us study $(P,C)$. To this end, notice that we may rewrite
\[
\mathbb{E}(\exp - \alpha P - \beta C) = \int_0^{+ \infty} \frac{1}{c} e^{-x/c} e^{- \beta x} \left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\sinh \sqrt{2 \alpha} \tilde{c}} \right )^2 \exp - x \frac{1}{c} \left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\mathop{\mathrm{th}} \sqrt{2 \alpha} \tilde{c}} - 1 \right ) \: \mathrm{d}x.
\]
This makes clear the following facts.
\begin{itemize}
\item $C$ has an exponential $\mathcal{E}(1/c)$ law (surprisingly, this quantity does not depend on the second moment of $\rho$).
\item Conditionally on $C$, $P$ has the law of the sum of three independent variables:
\begin{itemize}
\item two whose law has Laplace transform
\[
\frac{\sqrt{2 \alpha} \tilde{c}}{\sinh \sqrt{2 \alpha} \tilde{c}};
\]
\item a third one whose conditional Laplace transform is
\[
\exp - C \frac{1}{c} \left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\mathop{\mathrm{th}} \sqrt{2 \alpha} \tilde{c}} - 1 \right ).
\]
\end{itemize}
\end{itemize}
Note, and the reason for its appearance will be clear in the proofs, that $\sqrt{2 \alpha} \tilde{c} / \sinh \sqrt{2 \alpha} \tilde{c}$ is the Laplace transform of the law of the hitting time of $\tilde{c}$ by a Bessel 3 process. This distribution has a complicated density given by Theta functions, and we shall thus not dwell on this matter.
Let us then construct a random measure $\eta$ as follows. We first build a tree $T$ with fertilities, which is nothing else than a Galton-Watson tree with types in $\mathbb{R}^+$. Each individual of fertility $\mathrm{f}$ has a number of descendants distributed as a Poisson distribution with parameter $\mathrm{f}$. Each of these children chooses independently a fertility which is exponential $\mathcal{E}(1/c)$. Conditionally on its fertility $\mathrm{f}$, we attach to each individual a population $P(\mathrm{f})$ distributed as a variable with Laplace transform
\[
\mathbb{E}(\exp - \alpha P(\mathrm{f})) = \left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\sinh \sqrt{2 \alpha} \tilde{c}} \right )^2 \exp - \mathrm{f} \frac{1}{c} \left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\mathop{\mathrm{th}} \sqrt{2 \alpha} \tilde{c}} - 1 \right ).
\]
We start from a (virtual) individual of fertility $1$ and population, say, 1. We have then constructed a tree $T$, to each vertex $v$ of which a fertility $\mathrm{f}_v$ and a population $P_v$ are attached. One readily checks that the reproduction law is critical and not $\delta_1$, so this tree is almost surely finite.
Then, consider $(\eta_u(\mathrm{f}), \mathrm{f} \geq 0)_{u \in \mathcal{U}}$ a family of independent variables, such that $\eta_u(\mathrm{f})$ is a Poisson random measure with intensity $\mathrm{f} \lambda \mu$. Define
\[
\eta = \sum_{u \in T} (\eta_u(\mathrm{f}_u) + \delta_{P_u}) - \delta_1.
\]
Subtracting $\delta_1$ just means that we do not take into account the population of the virtual initial individual. The reader will check, as for the first model, that the cumulant of $\eta$ solves the same equation \eqref{eq:cumu2} as the cumulant of $\mathcal{P}$, so that these two measures have the same law.
\subsection{Equation of the cumulant}
Let us first notice that, as for the first model, the cumulant $\kappa_N(f)$ of $\cP^{(N)}(r_N)$, given by
\[
\kappa_N(f) = - \ln \mathbb{E} \left ( \exp - \langle \cP^{(N)}_N(r_N) , f \rangle \right ) = - N \ln \mathbb{E} \left ( \exp - \langle \cP^{(N)}(r_N) , f \rangle \right ),
\]
for $f \in C_K$, also solves Equation \eqref{eq:cumuN}. The proof is identical.
\begin{lemma} \label{lem:eqcumu2}
The cumulant $\kappa_N(f)$ solves the following equation
\begin{equation}\label{eq:cumuN2}
\exp - \kappa_N(f) = \mathbb{E} \left ( \exp - \left ( f \left ( \frac{P_N}{N^2} \right ) + \frac{C_N}{N} \kappa_N(f) \right ) \right )^N
\end{equation}
for every $f \in C_K$.
\end{lemma}
We now assume that $\kappa_N(f) \to \kappa(f)$. Once again, the RHS of \eqref{eq:cumuN2} has the same limit, if any, as
\begin{align*}
N \mathbb{E} \left ( 1 - \exp - \left ( f \left ( \frac{P_N}{N^2} \right ) \right. \right. & \left. \left. + \frac{C_N}{N} \kappa_N(f) \right ) \right ) \\
& = N \mathbb{E} \left ( \left ( 1 - \exp - f \left ( \frac{P_N}{N^2} \right ) \right ) \mathds{1}_{\{C_N = 0\}} \right ) \\
& \quad + N \mathbb{P}(C_N > 0) \mathbb{E} \left ( \left. 1 - \exp - \left ( f \left ( \frac{P_N}{N^2} \right ) + \frac{C_N}{N} \kappa_N(f) \right ) \right \vert C_N > 0 \right ).
\end{align*}
The first part is easy to deal with. Note indeed that by the Cauchy-Schwarz inequality,
\[
N \mathbb{E} \left ( \left ( 1 - \exp - f \left ( \frac{P_N}{N^2} \right ) \right ) \mathds{1}_{\{C_N = 0\}} \right )^2 \leq N \mathbb{P}(C_N = 0) \mathbb{E} \left ( \left ( 1 - \exp - f \left ( \frac{P_N}{N^2} \right ) \right )^2 \right ).
\]
By \eqref{eq:probacol} and \eqref{eq:hyprN2}, the first term in the RHS is bounded. By Lemma \ref{lem:popcolRW2}, $P_N \leq \varsigmainf$, so $P_N / N^2 \to 0$ a.s. and the second term thus tends to 0 by dominated convergence. Hence
\[
N \mathbb{E} \left ( \left ( 1 - \exp - f \left ( \frac{P_N}{N^2} \right ) \right )\mathds{1}_{\{C_N = 0\}} \right ) \sim N \mathbb{E} \left ( 1 - \exp - f \left ( \frac{P_N}{N^2} \right ) \right ) \to \int_0^{+ \infty} (1-e^{-f(x)}) \mu(\mathrm{d}x)
\]
as for Lemma \ref{lem:convmeas}. On the other hand, note that by \eqref{eq:probacol} and \eqref{eq:hyprN2}
\begin{align*}
& N \mathbb{P}(C_N > 0) \mathbb{E} \left ( \left. 1 - \exp - \left ( f \left ( \frac{P_N}{N^2} \right ) + \frac{C_N}{N} \kappa_N(f) \right ) \right \vert C_N > 0 \right ) \\
& \sim \frac1c \mathbb{E} \left ( \left. 1 - \exp - \left ( f \left ( \frac{P_N}{N^2} \right ) + \frac{C_N}{N} \kappa_N(f) \right )\right \vert C_N > 0 \right ).
\end{align*}
We are thus led to compute the weak limit of the couple of random variables $(P_N/N^2,C_N/N)$ knowing that $C_N > 0$. This is more involved than for our first model, and this will be the goal of the next section.
\subsection{Population and number of colonies of a fertile island}
We shall now give a formula for the population and number of colonies of a fertile island, by proving the following result.
\begin{prop} \label{prop:convPNCN}
Conditionally on $\{ C_N > 0 \}$, the variable $(P_N/N^2,C_N/N)$ converges in law to a variable $(P,C)$ whose Laplace transform is given by \eqref{eq:lapltransPC}.
\end{prop}
\begin{remark}
In the proof, we shall actually assume that $r_N = \lfloor c N \rfloor$. The results hold without this restriction, but writing the proofs would then require some more pages of technical convolutions on which we do not wish to dwell. The essential ingredient to get rid of this restriction is to sandwich our processes, depending on $r_N/N$, between two processes depending on $c-\varepsilon$ and $c+\varepsilon$, where $\varepsilon > 0$ can be chosen arbitrarily small, and to see that these two processes actually converge to the process depending on $c$ when $\varepsilon \to 0$. Depending on the cases, this is due to the absolute continuity of the laws considered or the continuity of the local time in the space variable.
\end{remark}
The proof of this result will be twofold. First, $(P_N/N^2,C_N/N)$ can be reformulated, thanks to Lemma \ref{lem:popcolRW2}, in terms of functionals of a critical random walk with a second moment. At the limit, this quantity can thus be written as a functional of the Brownian motion. More precisely, we wish to condition on $C_N > 0$, which corresponds to conditioning this random walk to hit $[r_N, + \infty)$ before $-1$. The limit will thus actually be in terms of a functional of a Brownian excursion, conditioned on hitting $(c,+ \infty)$. The second part of the proof is to compute the Laplace transform of this functional, which can be done through Williams' decomposition theorem of the excursion, along with a Ray-Knight theorem and some results of Pitman and Yor \cite{PitmanYor} concerning the Laplace transforms of functionals of Bessel bridges.
\subsubsection{Convergence to the excursion measure}
Let us first write $P_N$ and $C_N$ in terms of a functional of the random walk $(S_n)$, and to make this precise, we shall introduce some more notation. Let $\mathcal{E}$ be the space of excursions, that is of nonnegative c\`adl\`ag functions on $\mathbb{R}^+$ such that, for every $\mathbf{e} \in \mathcal{E}$,
\[
\zeta(\mathbf{e}) := \sup \{t \geq 0, \, \mathbf{e}_t > 0 \} \in [0,+ \infty).
\]
This space is endowed with the distance
\[
\delta(\mathbf{e},\mathbf{e}') = \sup_{t \geq 0} |\mathbf{e}_t - \mathbf{e}'_t| + |\zeta(\mathbf{e}) - \zeta(\mathbf{e}')|,
\]
which makes it a Polish space, see \cite{LeGallExcursion}. Let $\mathbb{D}^0$ be the space of c\`adl\`ag functions on $\mathbb{R}_+$ vanishing at $0$. For $d > 0$ and $f \in \mathbb{D}^0$, recall that $\tau_d(f)$ is the hitting time of $(d,+ \infty)$, and let
\[
\tau_{d,-}(f) = \inf \{ t \leq \tau_d(f), \, f(t) = 0 \; \& \; \forall s \in [t,\tau_d) \, f(s) \geq 0 \}
\]
and finally
\[
\tau_{d,+}(f) = \inf \{ t \geq \tau_d(f), f(t) < 0 \}.
\]
These quantities are depicted in Figure \ref{fig:exc_full}.
\begin{figure}[htb]
\centering
\includegraphics[width=\columnwidth]{Exc_full.pdf}
\caption{The quantities $\tau_{d,-}(f)$, $\tau_d(f)$ and $\tau_{d,+}(f)$ shown on the exploration process of Figure \ref{fig:cont_expl_proc}.}
\label{fig:exc_full}
\end{figure}
We let $\mathbb{D}^0_d$ be the subset of $\mathbb{D}^0$ such that $\tau_d$ and $\tau_{d,+}$ are finite. For $(X,Y) \in \mathbb{D}^0_d \times \mathbb{D}^0$, we define $e_d$ the function which extracts from $X$ the first excursion which goes above level $d$, and shifts $Y$ accordingly, in formulas:
\[
e_d(X,Y) = \left ( X_{(\tau_{d,-}(X)+t)\wedge \tau_{d,+}(X)} \vee 0,Y_{(\tau_{d,-}(X)+t)\wedge \tau_{d,+}(X)} - Y_{\tau_{d,-}(X)} \right )_{t \geq 0}.
\]
Clearly, $e_d(X,Y) \in \mathcal{E} \times \mathbb{D}^0$. Let us finally define, for $(X,Y) \in \mathcal{E} \times \mathbb{D}^0$,
\[
\phi_d(X,Y) = \left ( \int_0^{\zeta(X)} \mathds{1}_{\{X_s < d\}} \: \mathrm{d}s, Y_{\zeta(X)} \right ) \in \mathbb{R}_+ \times \mathbb{R},
\]
which computes the total time spent by $X$ below level $d$, as well as the value of $Y$ at the final point of $X$. This is probably a good time to state the important feature of these mappings. We let here $\mathbb{W}$ be the law of the Brownian motion (which is in particular a law on $\mathbb{D}^0_d$).
\begin{lemma} \label{lem:contmap}
\begin{itemize}
\item For $\mathbb{W}$-a.e. $X$ and every $Y \in \mathbb{D}^0$, the mapping $e_d : \mathbb{D}^0_d \times \mathbb{D}^0 \to \mathcal{E} \times \mathbb{D}^0$ is continuous at $(X,Y)$.
\item For any $X \in \mathcal{E}$ and continuous $Y \in \mathbb{D}^0$, the function $\phi_d$ is continuous at $(X,Y)$.
\end{itemize}
\end{lemma}
\begin{proof}
The first part of the statement just stems from the fact that a.s., when hitting a value, the Brownian motion oscillates around it. Hence, the times $\tau_{d,-}$, $\tau_d$ and $\tau_{d,+}$ are continuous at $\mathbb{W}$-almost every $X$, and the continuity of $e_d$ then readily follows from the continuity of the Brownian motion. The second part is trivial by definition of the distance $\delta$ and dominated convergence.
\end{proof}
From Lemma \ref{lem:popcolRW2}, it is now natural to define
\[
L_t = \sum_{i, \, \varsigma_i \leq t} \mathcal{O}^N_i(r_N)
\]
to be the sum of the overshoots above level $r_N$ of $(S_n)$ up to time $t \geq 0$. Now, let us define our rescaled random walk
\[
S^{(N)}_t = \frac{1}{\sigma N} S_{N^2t}, \quad t \geq 0,
\]
and the rescaled overshoot process
\[
L^{(N)}_t = \frac{1}{\sigma N} L_{N^2t}, \quad t \geq 0.
\]
Recall that $\tilde{c} = c / \sigma$. We may then reformulate Lemma \ref{lem:popcolRW2}, by stating that, whenever $C_N > 0$,
\begin{equation} \label{eq:PNCN}
\left. \left ( \frac{1}{N^2} P_N, \frac1N C_N \right ) \right | \{ C_N > 0 \} \eqlaw \phi_{\tilde{c}}(e_{\tilde{c}}(S^{(N)},\sigma L^{(N)})).
\end{equation}
We are now in a good position to state the main result of this section, which is quite similar to the main result of \cite{LeGallExcursion}. To this end, recall from Williams' description of It\^o's measure (see e.g. Th. 4.5 p. 499 in \cite{RY}) that the It\^o measure of the set of excursions with a maximum greater than $\tilde{c}$ is finite. Hence, we may define $\mathbf{n}^{> \tilde{c}}$ a probability law on $\mathcal{E}$, which is the law of an excursion conditioned on having a maximum greater than $\tilde{c}$. This is in particular a semi-martingale, and we can thus define its local time. More generally, for a semi-martingale $X$, we let $\ell^a_t(X)$ be its local time at level $a$ up to time $t$, and we obviously consider a modification of $(\ell^a_t(X), a \in \mathbb{R}, t \geq 0)$ which is a.s. continuous in $t$ and c\`adl\`ag in $a$, see \cite{RY}.
\begin{lemma} \label{lem:limPNCN}
The convergence in distribution
\begin{equation} \label{eq:limPNCN}
\left. \left ( \frac{1}{N^2} P_N, \frac1N C_N \right ) \right | \left \{ C_N > 0 \right \} \to \left ( \int_0^{\zeta(\mathbf{e})} \mathds{1}_{\{\mathbf{e}_s < \tilde{c}\}} \: \mathrm{d}s, \frac{\sigma}{2} \ell^{\tilde{c}}_{\infty}(\mathbf{e}) \right )
\end{equation}
holds, where $\mathbf{e}$ has law $\mathbf{n}^{> \tilde{c}}$.
\end{lemma}
\begin{proof}
This is obviously obtained by passing to the limit in \eqref{eq:PNCN}, though this requires some care. It should be intuitively clear that $(S^{(N)},L^{(N)})$ converges to a Brownian motion and its local time at $\tilde{c}$, which is precisely the content of Theorem 1.3 of \cite{PerkinsLT}, which we may reformulate, if we are careful of the different normalization from \cite{RY}, as
\[
(S^{(N)},L^{(N)}) \to \left ( B,\frac12 \ell^{\tilde{c}}(B) \right )
\]
weakly in $D([0,+ \infty),\mathbb{R}^2)$, where $B$ is a standard Brownian motion. One should just take note of the two following facts:
\begin{itemize}
\item our walk is left-continuous, so the definition of $L^{(N)}$ we give is precisely Formula (1.2) in \cite{PerkinsLT}, with $x = \tilde{c}$;
\item the only slight difference is that Theorem 1.3 of \cite{PerkinsLT} deals with versions of the random walk and the local time which are linearly interpolated, unlike ours, but since the limit is continuous, and hence the limit holds for the topology of uniform convergence on the compacts, it clearly does not make any difference.
\end{itemize}
Now, the latter, the continuity of $e_{\tilde{c}}$ and the continuous mapping theorem ensure that
\[
e_{\tilde{c}}(S^{(N)},\sigma L^{(N)}) \to e_{\tilde{c}} \left ( B,\frac{\sigma}{2} \ell^{\tilde{c}}(B) \right )
\]
weakly in $\mathcal{E} \times \mathbb{D}^0$. But the definition of $e_{\tilde{c}}$ ensures that the first coordinate of $e_{\tilde{c}}(B,\sigma \ell^{\tilde{c}}(B)/2)$ is the first excursion of a Brownian motion which goes above level $\tilde{c}$, and has thus law $\mathbf{n}^{> \tilde{c}}$. The second coordinate is $\sigma/2$ times its total local time at $\tilde{c}$. The result then follows from the continuity of $\phi_{\tilde{c}}$.
\end{proof}
\subsubsection{Time spent under a level and local time of an excursion}
The last part of the proof of Proposition \ref{prop:convPNCN} is then to compute the Laplace transform of the RHS of \eqref{eq:limPNCN}. Hence, from now on, $\mathbf{e}$ is an excursion conditioned on having a maximum greater than $\tilde{c}$, that is a process with law $\mathbf{n}^{> \tilde{c}}$. We let
\[
(P,C) = \left ( \int_0^{\zeta(\mathbf{e})} \mathds{1}_{\{\mathbf{e}_s < \tilde{c}\}} \: \mathrm{d}s, \frac{\sigma}{2} \ell_{\infty}^{\tilde{c}}(\mathbf{e}) \right ).
\]
\begin{lemma}
The variable $(P,C)$ has Laplace transform given by \eqref{eq:lapltransPC}.
\end{lemma}
\begin{proof}
Let us explain how to construct such an excursion $\mathbf{e}$. From Williams' decomposition (see \cite[Th 4.5 p. 499]{RY}), conditionally on its maximum $M$, which has a ``law'' with density $1/(2x^2) \mathds{1}_{\{x > 0\}}$, the Brownian excursion has the law of two independent Bessel 3 processes, until they hit $M$, put back-to-back. Hence, if $R$ is a Bessel 3 process, then
\begin{equation} \label{eq:williams}
\mathbb{E}(\exp - \alpha P - \beta C) = 2 \tilde{c} \int_{\tilde{c}}^{+ \infty} \frac{1}{2x^2} \mathbb{E} \left ( \exp - \alpha \int_0^{\tau_x(R)} \mathds{1}_{\{R_s < \tilde{c}\}} \: \mathrm{d}s - \frac{\beta \sigma}{2} \ell_{\tau_x(R)}^{\tilde{c}}(R) \right )^2 \: \mathrm{d}x
\end{equation}
and we are thus led to compute, for a fixed $x \geq \tilde{c}$,
\[
g(\alpha,\beta) := \mathbb{E} \left ( \exp - \alpha \int_0^{\tau_x(R)} \mathds{1}_{\{R_s < \tilde{c}\}} \: \mathrm{d}s - \frac{\beta \sigma}{2} \ell_{\tau_x(R)}^{\tilde{c}}(R) \right ).
\]
Now, recall that the Bessel processes have the Brownian scaling property, i.e. $R$ has the same law as $Q=(x R_{t/x^2})_{t \geq 0}$, so the occupation time formula provides
\[
\ell^a_{\tau_x(Q)}(Q) = x \ell^{a/x}_{\tau_1(R)}(R)
\]
and thus, still by Brownian scaling,
\begin{align*}
g(\alpha,\beta) & = \mathbb{E} \left ( \exp - \alpha \int_0^{\tau_x(Q)} \mathds{1}_{\{Q_s < \tilde{c}\}} \: \mathrm{d}s - \frac{\beta \sigma}{2} \ell_{\tau_x(Q)}^{\tilde{c}}(Q) \right ) \\
& = \mathbb{E} \left ( \exp - \alpha \int_0^{x^2 \tau_1(R)} \mathds{1}_{\{R_{s/x^2} < \tilde{c}/x\}} \: \mathrm{d}s - \frac{\beta \sigma}{2} x \ell^{\tilde{c}/x}_{\tau_1(R)}(R) \right ) \\
& = \mathbb{E} \left ( \exp - \alpha x^2 \int_0^{\tau_1(R)} \mathds{1}_{\{R_s < \tilde{c}/x\}} \: \mathrm{d}s - \frac{\beta \sigma}{2} x \ell^{\tilde{c}/x}_{\tau_1(R)}(R) \right ).
\end{align*}
Hence, we are now led to compute, for $u,v \geq 0$ and $0 \leq r \leq 1$,
\[
h(u,v) := \mathbb{E} \left ( \exp - u \int_0^{\tau_1(R)} \mathds{1}_{\{R_s < r\}} \: \mathrm{d}s - v \ell_{\tau_1(R)}^r(R) \right ).
\]
By the occupation time formula (note that the quadratic variation of a Bessel process is $t$),
\[
h(u,v) = \mathbb{E} \left ( \exp - u \int_0^r \ell_{\tau_1(R)}^x(R) \: \mathrm{d}x - v \ell_{\tau_1(R)}^r(R) \right ),
\]
which we may rewrite as
\[
\mathbb{E} \left ( \exp - \int_0^1 \ell_{\tau_1(R)}^x(R) \nu(\mathrm{d}x) \right )
\]
where $\nu(\mathrm{d}x) = u \mathds{1}_{[0,r]}(\mathrm{d}x) + v \delta_r$. From a Ray-Knight Theorem\footnote{Surprisingly hard to find in the literature: it can be seen as a consequence of Theorem 4 in \cite{WilliamsDecomp} along with the Williams' decomposition, Theorem 3.11 in \cite{RY}. This statement is also given in \cite{MansuyYor}, p.42, along with a direct proof.}, the process $(\ell_{\tau_1(R)}^x(R), x \in [0,1])$ is a squared Bessel 2 bridge, and thus, the latter quantity is precisely computed in Prop. 5.10 in \cite{PitmanYor}. After easy but long computations, one obtains
\[
\mathbb{E} \left ( \exp - \int_0^1 \ell_{\tau_1(R)}^x(R) \nu(\mathrm{d}x) \right ) = \frac{\sqrt{2 u}}{(1-r)(2 v \sinh \sqrt{2 u} r + \sqrt{2 u} \cosh \sqrt{2 u} r) + \sinh \sqrt{2 u} r}.
\]
Hence
\begin{align*}
g(\alpha,\beta) & = \frac{x \sqrt{2 \alpha}}{(1-\tilde{c}/x)(\beta \sigma x \sinh \sqrt{2 \alpha} \tilde{c} + x \sqrt{2 \alpha} \cosh \sqrt{2 \alpha} \tilde{c}) + \sinh \sqrt{2 \alpha} \tilde{c}} \\
& = \frac{\sqrt{2 \alpha}}{\sinh \sqrt{2 \alpha} \tilde{c}} \frac{x}{1 + (x-\tilde{c})(\beta \sigma + \sqrt{2 \alpha} \coth \sqrt{2 \alpha} \tilde{c})}.
\end{align*}
The result then readily follows from plugging this formula in \eqref{eq:williams}.
\end{proof}
\begin{remark}
Recall the interpretation we gave of the variable $(P,C)$ in Section \ref{sec:interpretation}. These formulas can thus be interpreted by saying that the excursion can be split into three parts: a part between 0 and the hitting time of $\tilde{c}$, which has the law of a Bessel 3 process. Another part between the last hitting time of $\tilde{c}$ and 0, with the same law. And in between, there is an exponential ``quantity'' of excursions above and below $\tilde{c}$; each contributes independently a microscopic random amount, given by the above Laplace transform, to the time spent below $\tilde{c}$. See also the next section for a discrete version of these heuristics.
\end{remark}
Proposition \ref{prop:convPNCN} then follows from the result just proven and Lemma \ref{lem:limPNCN}. To conclude the proof of Theorem \ref{th:conv2}, all we need to check now is that Equation \eqref{eq:cumu2} has a unique solution, which is done as for Theorem \ref{th:conv1}.
\subsection{Another way to the result}
To conclude, let us present another way to compute the limit of $(P_N/N^2,C_N/N)$ knowing that $C_N > 0$. This method is more elementary but requires more steps, and might be seen as more natural -- at least to the author, who first used it to derive the result. Once again, the goal is to compute the time spent under $r_N$, and the sum of the overshoots, both up to the hitting time of $-1$, for the random walk $(S_n)$ conditioned on hitting $[r_N,+ \infty)$ before $-1$.
To obtain such an excursion of the random walk, we wait until we see $(S_n)$ hit $[r_N,+ \infty)$, and we consider the excursion from $0$ to $-1$ straddling this time. We can cut this excursion in three pieces:
\begin{itemize}
\item a first piece, where the walk goes from 0 to $[r_N,+ \infty)$;
\item a third piece, where the walk goes from $r_N$ to $-1$;
\item a second piece in between these two, where the walk goes from $r_N$ to $r_N$ without going back to 0, a certain amount of time.
\end{itemize}
By the same reasonings as above, it should be clear that the first piece, after rescaling, converges to the first piece of a Brownian motion going from 0 to $\tilde{c}$ while remaining positive. From Williams' decomposition, Theorem 3.11 in \cite{RY}, this has the law of a Bessel 3 process $R$, and thus the time it takes to hit $\tilde{c}$ has the law of $\tau_{\tilde{c}}(R)$, which has, as we mentioned, Laplace transform
\[
\frac{\sqrt{2 \alpha} \tilde{c}}{\sinh \sqrt{2 \alpha} \tilde{c}}.
\]
The third part obviously accounts for the same independent quantity.
Now, let us study what happens in the middle. We may forget\footnote{However, this relies on the walk having a second moment. Otherwise, it could have a macroscopic jump from below $r_N$ to above.} about the time for the random walk to go from $[r_N,+ \infty)$ back to $r_N - 1$, and first consider the walk at $r_N - 1$. From Formulas (3) p. 187 and (b) p. 181 in \cite{Spitzer}, the size of an overshoot has finite mean $\sigma^2/2$, so \eqref{eq:hyprN2} and a gambler's ruin estimate (see Lemma 5.1.3 in \cite{LawlerLimic}) show that the walk goes to $-1$ before coming back at $r_N$ with probability $\sigma^2/(2cN)$. Hence, if we let $L_N$ be the number of such excursions from $r_N$ to $r_N$ without going back to $-1$, we deduce therefrom that $L_N / N$ converges to a variable $L$ with an exponential law $\mathcal{E}(\sigma^2/(2c))$.
Now, conditionally on $L$, i.e., loosely, $L_N \approx L N$, the number of colonies $C_N$ is the sum of $L_N$ independent overshoots, and thus, by the law of large numbers, $C_N/N$ converges to $L$ times the mean size of an overshoot, i.e. $C = \sigma^2 L /2$.
Finally, still conditionally on $L$, we want to know the time spent below $r_N$ by this piece of the walk. At the limit, after rescaling and by similar reasonings as in the above section, it can be seen as the time spent below 0 by a Brownian motion
\begin{itemize}
\item up until it has accumulated a local time $\sigma L$ at 0,
\item and conditioned on not hitting $- \tilde{c}$ before this time (which has positive probability).
\end{itemize}
We thus have to compute this quantity. But to construct a Brownian motion up to a local time of $\ell$, all we need is a Poisson point process $(e_s)_{s \in [0,\ell]}$ with intensity $\mathbf{n}$, the It\^o measure, and then glue these excursions together (see \cite{RY}). Hence, by thinning, to construct such a conditioned Brownian motion, we take a Poisson point process $(e_s)_{s \in [0,\ell]}$ with intensity $\mathbf{n}_{> - \tilde{c}}$, the restriction of $\mathbf{n}$ to the excursions with minimum greater than $- \tilde{c}$, and then glue these excursions together. Let $R(e)$ be the length of an excursion $e$. Then the time spent by this conditioned Brownian motion under $0$ is
\[
\sum_{0 \leq s \leq \ell} R(e_s) \mathds{1}_{\{\inf e_s < 0\}}
\]
and, by symmetry and using the exponential formula for Poisson point processes (\cite{RY}, p. 476), for $\alpha \geq 0$,
\begin{align*}
\mathbb{E} \left ( \exp - \alpha \sum_{0 \leq s \leq \ell} R(e_s) \mathds{1}_{\{\inf e_s < 0\}} \right ) & = \exp - \ell \int (1 - e^{- \alpha R(u)}) \mathds{1}_{\{\inf u < 0\}} \: \mathbf{n}_{> - \tilde{c}}(\mathrm{d}u) \\
& = \exp - \ell \int (1 - e^{- \alpha R(u)}) \mathds{1}_{\{0 < \sup u < \tilde{c}\}} \: \mathbf{n}(\mathrm{d}u) \\
& = \exp - \ell \int_{x = 0}^{\tilde{c}} \int_{y=0}^{+ \infty} (1 - e^{- \alpha y}) \: \mathbf{m}(\mathrm{d}x,\mathrm{d}y)
\end{align*}
where $\mathbf{m}$ is the image of $\mathbf{n}$ (or $\mathbf{n}_+$, the It\^o measure of the unsigned excursion) by $e \mapsto (\sup e,R(e))$. Once again, Williams' decomposition of the Brownian excursion tells that its maximum $M$ has ``law'' $1/(2x^2) \mathds{1}_{\{x > 0\}} \: \mathrm{d}x$, and that conditioned on this maximum, it has the law of two independent Bessel 3 processes $R$ and $R'$ put back to back. Therefore
\begin{align*}
\int_{x = 0}^{\tilde{c}} \int_{y=0}^{+ \infty} \left ( 1 - e^{- \alpha y} \right ) \: \mathbf{m}(\mathrm{d}x,\mathrm{d}y) & = \int_{x = 0}^{\tilde{c}} \frac{1}{2 x^2} \mathbb{E} \left ( 1 - e^{- \alpha (\tau_x(R) + \tau_x(R'))} \right ) \: \mathrm{d}x \\
& = \int_{x = 0}^{\tilde{c}} \frac{1}{2 x^2} \left ( 1 - \mathbb{E}(e^{- \alpha \tau_x(R)})^2 \right ) \: \mathrm{d}x \\
& = \int_{x = 0}^{\tilde{c}} \frac{1}{2 x^2} \left ( 1 - \frac{2 \alpha x^2}{\sinh^2 x \sqrt{2 \alpha}} \right ) \: \mathrm{d}x \\
& = \frac{1}{2 \tilde{c}} \left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\mathop{\mathrm{th}} \sqrt{2 \alpha} \tilde{c} } - 1 \right ).
\end{align*}
Putting the pieces together, this is just saying that, conditionally on $L = 2 C / \sigma^2$, the variable $P$ has Laplace transform
\[
\left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\sinh \sqrt{2 \alpha} \tilde{c}} \right )^2 \exp - \sigma L \frac{1}{2 \tilde{c}} \left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\mathop{\mathrm{th}} \sqrt{2 \alpha} \tilde{c} } - 1 \right ) = \left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\sinh \sqrt{2 \alpha} \tilde{c}} \right )^2 \exp - C \frac1c \left ( \frac{\sqrt{2 \alpha} \tilde{c}}{\mathop{\mathrm{th}} \sqrt{2 \alpha} \tilde{c} } - 1 \right )
\]
which is precisely what we remarked after Theorem \ref{th:conv2}. The main advantage of this method is thus probably that it provides directly the law of $P$ knowing $C$, which is not obvious when merely looking at Formula \eqref{eq:lapltransPC}.
\bibliographystyle{abbrv}
\bibliography{Bibli}
\end{document} |
\begin{document}
\title{A Gentle Introduction to a Beautiful Theorem of Molien}
\begin{abstract}
The purpose of this note is to give an accessible proof of
Molien's Theorem in Invariant Theory, in the language of
today's Linear Algebra and Group Theory, in order to
prevent this beautiful theorem from being forgotten.
\end{abstract}
\tableofcontents
\section*{Introduction}\label{sintro}
We present some memories of a visit to the ring zoo in 2004. This time
we met an animal looking like a unicorn, known by the name of
invariant theory. It is rare, old, and very beautiful. The purpose
of this note is to give an almost self-contained introduction to
and clarify the proof of the amazing theorem of Molien, as
presented in \cite{Sloane1}. An introduction into
this area, and much more, is contained in \cite{Sturmfels}.
There are many very short proofs of this theorem, for instance in
\cite{Stanley}, \cite{Hu2}, and \cite{Tambour}. \par
Informally, Molien's Theorem is a power series generating function formula for
counting the dimensions of subrings of homogeneous polynomials
of certain degree which are invariant under the action of a finite
group acting on the variables.
As an appetizer, we display this stunning formula:
$$
\Phi_G(\lambda) := \frac{1}{|G|} \sum_{g\in G} \frac{1}{\det(\mathrm{id} - \lambda T_g)}
$$
We can immediately see elements of linear algebra, representation theory, and
enumerative combinatorics in it, all linked together.
The paper \cite{Sloane1} nicely shows how
this method can be applied in Coding theory. For
Coding Theory in general, see \cite{jubi}.\par
Before we can formulate the Theorem, we need to set the
stage by looking at some Linear Algebra (see \cite{Roman}),
Group Theory (see \cite{Hu}), and Representation Theory (see \cite{Sagan} and \cite{Tambour}).
\section{Preliminaries}\label{spreliminaries}
Let $V\cong {\mathbf{C}}^n$ be a finite dimensional complex inner product space with
orthonormal basis $\mathcal{B} = (\mathbf{e}_1,\dots,\mathbf{e}_n)$ and let $\mathbf{x} = (x_1,\dots,x_n)$
be the orthonormal basis of the algebraic dual space $V^\ast$ satisfying
$\forall 1\le i,j \le n : x_i(\mathbf{e}_j) = \delta_{ij}$. Let
$G$ be a finite group acting unitarily linear on $V$ from the left, that is,
for every $g\in G$ the mapping $V \to V, \mathbf{v} \mapsto g. \mathbf{v}$ is
a unitary bijective linear transformation. Using coordinates, this can be expressed as
$[g. \mathbf{v}]_\mathcal{B} = [g]_{\mathcal{B},\mathcal{B}} [\mathbf{v}]_\mathcal{B}$, where $[g]_{\mathcal{B},\mathcal{B}}$ is unitary.
Thus, the action is a unitary representation of $G$, or in other words, a $G$--module.
Note that we are using \inx{left composition} and column vectors,
i.e. $\mathbf{v} = (v_1, \dots , v_n) \istmit{convention} [v_1 \, v_2 \, \dots \, v_n]^\top$,
c.~f.~\cite{Anton}.
\par
The elements of
$V^\ast$ are \inx{linear forms}(linear functionals), and the elements $x_1,\dots, x_n$,
looking like variables, are also linear forms, this will be important later. \par
Thinking of $x_1,\dots, x_n$ as variables, we may view (see \cite{Tambour}) $S(V^\ast)$,
the \kwd{symmetric algebra} on $V^\ast$ as the algebra
$R := {\mathbf{C}}[\mathbf{x}] := {\mathbf{C}}[x_1,\dots, x_n] $ of polynomial functions $V\to {\mathbf{C}}$
or polynomials in these variables (linear forms).
It is naturally graded by degree as
$R = \bigoplus_{d \in {\mathbf{N}}} R_d$, where $R_d$ is the vector space
spanned by the polynomials of (total) degree $d$,
in particular, $R_0 = {\mathbf{C}}$, and
$R_1 = V^\ast$.\par
The action of $G$ on $V$ can be lifted to an action on $R$.
\begin{proposition}\label{pindact}
Let $V$, $G$, $R$ as above.
Then the mapping $. : G \times R \to R, (g,f) \mapsto g. f$
defined by $(g. f) (\mathbf{v}) := f(g^{-1} . \mathbf{v})$ for $\mathbf{v} \in V$
is a left action.
\end{proposition}
\begin{proof} For $\mathbf{v} \in V$, $g,h \in G$, and $f \in R$ we check
\begin{enumerate}
\item $(1. f)(\mathbf{v}) = f(1^{-1}. \mathbf{v}) = f(1. \mathbf{v}) = f(\mathbf{v})$
\item \begin{multline*}
((hg). f)(\mathbf{v}) = f((hg)^{-1} . \mathbf{v}) =
f((g^{-1} h^{-1}). \mathbf{v}) =\\ f (g^{-1} . (h^{-1} . \mathbf{v}))
= (g. f)(h^{-1} . \mathbf{v}) = (h. (g. f))(\mathbf{v})
\end{multline*}
\end{enumerate}
\end{proof}
In fact, we know more.
\begin{proposition}\label{pindact2}
Let $V$, $G$, $R$ as above.
For every $g \in G$, the mapping $T_g: R \to R, f \mapsto g. f$
is an algebra automorphism preserving the grading, i.e.
$g.R_d \subset R_d$ (here we do not bother about surjectivity).
\end{proposition}
\begin{proof} For $\mathbf{v} \in V$, $g\in G$, $c\in {\mathbf{C}}$, and $f,f' \in R$ we check
\begin{enumerate}
\item \begin{multline*}
(g. (f+f'))(\mathbf{v}) =
(f+f')(g^{-1} . \mathbf{v}) =
f(g^{-1} . \mathbf{v}) + f'(g^{-1} . \mathbf{v}) =\\
(g. f)(\mathbf{v}) + (g. f')(\mathbf{v}) =
(g. f + g. f')(\mathbf{v})
\textrm {, thus } g. (f+f') =g. f + g. f'
\end{multline*}
\item \begin{multline*}
(g. (f\cdot f'))(\mathbf{v}) =
(f\cdot f')(g^{-1} . \mathbf{v}) =
f(g^{-1} . \mathbf{v}) \cdot f'(g^{-1} . \mathbf{v}) =\\
(g. f)(\mathbf{v}) \cdot (g. f')(\mathbf{v}) =
(g. f \cdot g. f')(\mathbf{v})
\textrm {, thus } g. (f\cdot f') =g. f \cdot g. f'
\end{multline*}
\item $
(g. (cf))(\mathbf{v}) = (cf)(g^{-1} . \mathbf{v}) = c (f(g^{-1} . \mathbf{v})) =
c ((g. f)(\mathbf{v})) = (c (g. f))(\mathbf{v})
$
\item By part $2.$ it is clear that the grading is preserved.
\item To show that $f \mapsto g. f$ is bijective it is enough to
show that this mapping is injective on the finite dimensional
homogeneous components
$R_d$. Let us introduce a name for this mapping, say
$T_g^d : R_d \to R_d, f \mapsto g. f$. Now
$f \in \ker(T_g^d)$ implies that
$g. f = 0 \in R_d$, i.e. $g. f$ is a polynomial mapping from
$V$ to ${\mathbf{C}}$ of degree $d$ vanishing identically,
$\forall \mathbf{v} \in V: (g. f)(\mathbf{v}) = 0$. By definition of the
extended action we have
$\forall \mathbf{v} \in V: f(g^{-1} . \mathbf{v}) = 0$.
Since $G$ acts on $V$ this implies that
$\forall \mathbf{v} \in V : f(\mathbf{v}) = 0$, so $f$ is the zero mapping.
Since our ground field has characteristic $0$, this implies that
$f$ is the zero polynomial, which we may view as an element of every $R_d$.
See for instance \cite{Cox}, proposition 5 in section 1.1.
\item Note that every $T_g^d$ is also surjective, since all group elements have their inverse in $G$.
\end{enumerate}
\end{proof}
Both propositions together give us a homomorphism from $G$ into
$\ensuremath{\mathrm{Aut}}(R)$. They also clarify the r\^ole of the \emph{induced}
matrices, which are classical in this area, as mentioned in \cite{Sloane1}.
Since the monomials $x_1,\dots,x_n$ of degree one form a basis for
$R_1$, it follows from the proposition that their products
$\mathbf{x}_2 := (x_1^2,x_1 x_2,x_1 x_3,\dots,x_1 x_n, x_2^2,x_2 x_3,\dots)$
form a basis for $R_2$, and, in general, the monomials of degree $d$
in the linear forms (!) $x_1,\dots,x_n$ form a basis $\mathbf{x}_d$ of
$R_d$. Clearly, they certainly span $R_d$, and by the last observation
in the last proof they are linearly independent.\par
\begin{definition}\label{dinduced}
In the context from above, that is $g \in G$, $f \in R_d$, and $\mathbf{v} \in V$,
we define
$$
T_g^d : R_d \to R_d, f \mapsto g. f : V \to {\mathbf{C}}, \mathbf{v} \mapsto f(g^{-1} . \mathbf{v}) = f(T_{g^{-1}} (\mathbf{v}))
.$$
\end{definition}
\begin{remark}\label{rinduced}
In particular, we have $(T_g^1 (f))(\mathbf{v}) = f(T_{g^{-1}}(\mathbf{v}) ),$ see
proposition \ref{pprop0} below.
\end{remark}
Keep in mind that a function $f \in R_d$ maps to $T_g^d (f) = g. f$.
Setting $A_g := [T_g^1]_{\mathbf{x},\mathbf{x}}$, then
$A_g^{[d]} := [T_g^d]_{\mathbf{x}_d,\mathbf{x}_d}$ is the $d$--th induced matrix
in \cite{Sloane1}, because $T_g^1(f\cdot f') = T_g^1(f)\cdot T_g^1(f')$.
Also, if $f,f'$ are eigenvectors of $T_g^1$ corresponding to the eigenvalues
$\lambda,\lambda'$, then $f\cdot f'$ is an eigenvector of $T_g^2$
with eigenvalue $\lambda \cdot \lambda'$, because
$T_g(f\cdot f') = T_g(f) \cdot T_g(f') = (\lambda f) \cdot (\lambda' f')
= (\lambda \lambda')(f \cdot f')$. All this generalizes to $d>2$, we
will get back to that later. \par
We end this section by verifying two little facts needed in the next section.
\begin{proposition}\label{ppropd}
The \kwd{first induced operator} of the inverse of a group element $g\in G$
is given by $T_{g^{-1}}^1 = (T_g^1)^{-1}$.
\end{proposition}
\begin{proof}
Since $\dim(V^\ast) < \infty$, it is sufficient to prove
that $T_{g^{-1}}^1 \circ T_{g}^1 = \mathrm{id}_{V^\ast}$.
Keep in mind that $(T_g^1 (f))(\mathbf{v}) = f (T_{g^{-1}} (\mathbf{v}))$. For arbitrary
$f \in V^\ast$ we see that
\begin{align*}
(T_{g^{-1}}^1 \circ T_{g}^1 )(f) = T_{g^{-1} }^1 ( T_{g}^1 (f)) = T_{g^{-1}}^1 ( g. f)
= g^{-1} . ( g. f) = (g^{-1} g). f = f.
\end{align*}
\end{proof}
We will be mixing group action notation and composition
freely, depending on the context. The following observation is a
translation device.
\begin{proposition}\label{pprop0}
For $g \in G$ and $f \in V^\ast$ the following holds:
$$
T_g^1(f) = g. f = f \circ T_{g^{-1}}.
$$
\end{proposition}
\begin{proof}
For $\mathbf{v} \in V$ we see
$(T_g^1(f))(\mathbf{v}) = (g. f)(\mathbf{v}) \istmit{def} f(g^{-1} . \mathbf{v} ) = f( T_{g^{-1}}(\mathbf{v}) ).$
\end{proof}
\section{The Magic Square}\label{ssquare}
Remember that we require a unitary representation of $G$,
that is the operators $T_g : V \to V$ need to be unitary,
i.e. $\forall g \in G : (T_g)^{-1} = (T_g)^\ast$.
The first goal of this section is to show that this implies
that the induced operators $T_g^d : R_d \to R_d, f \mapsto g. f$
are also unitary. We saw that $R^1 = V^\ast$, the algebraic
dual of $V$. In order to understand the operator duals of
$V$ and $V^\ast$ we need to look on their inner products first.
We may assume that the operators $T_g$ are unitary with respect to
the standard inner product
$\scp{\mathbf{u}}{\mathbf{v}} = [\mathbf{u}]_{\mathcal{B}} \bullet \overline{[\mathbf{v}]_{\mathcal{B}}}$,
where $\bullet$ denotes the dot product.\par
Before we can speak of unitarity of the induced operators $T_g^d$
we have to make clear which inner product applies on $R^1 = V^\ast$.
Quite naively, for $f,g \in V^\ast$ we are tempted to define
$\scp{f}{g} = [f]_{\mathbf{x}} \bullet \overline{[g]_{\mathbf{x}}}$. \par
We will motivate this in a while, but first we take a look
at the diagram in \cite{Roman}, chapter10, with our objects:
$$
\begin{CD}
\quad @<T_g^\times<< \quad\\
R^1 = V^\ast @>T_g^1>>V^\ast = R^1 \\
@VVPV@VVPV\\
V@>T_g>>V\\
\quad @<T_g^\ast<< \quad\\
\end{CD}
$$
Here $P$ (\lq \lq Rho\rq\rq\ ) denotes the \inx{Riesz map}, see \cite{Roman},
Theorem 9.18, where it is called $R$, but $R$ denotes already our big ring.
We started by looking at the operator $T_g$, which is unitary, so
its inverse is the Hilbert space adjoint $T_g^\ast$. Omitting the names
of the bases we have $[T_g^\ast] = [T_g]^\ast $. We
also see the operator adjoint $T_g^\times$ with matrix
$[T_g^\times] = [T_g]^\top$, the transpose.
However, the arrow for $T_g^1$ is not in the original diagram, but
soon we will see it there, too. \par
Fortunately, the Riesz map $P$ turns a linear form into a vector
and its inverse $\tau : V \to V^\ast$ maps a vector to
a linear form, both are conjugate isomorphisms.
This is mostly all we need in order to show that $T_g^1$ is unitary.
In the following three propositions we use
that $V$ has the orthonormal basis $\mathcal{B}$ and that $V^\ast$ has the orthonormal basis
$\mathbf{x}$.
\begin{proposition}\label{ppropa}
For every $f \in V^\ast$ the coordinates of its Riesz
vector are given by
$$[P(f)]_\mathbf{e} = (\overline{f(\mathbf{e}_1)}, \dots , \overline{f(\mathbf{e}_n)}).$$
\end{proposition}
\begin{proof}
Writing $\tau$ for the inverse of $P$, we need to show that
$$
P(f) = \sumn{i}{\overline{f(\mathbf{e}_i)}}\mathbf{e}_i
$$
which is equivalent to
$$
f = \tau \left ( \sumn{i}{\overline{f(\mathbf{e}_i)}}\mathbf{e}_i \right ).
$$
It is sufficient to show the latter for values of $f$ on the basis
vectors $\mathbf{e}_j$, $1 \le j \le n$. We obtain
\begin{align*}
\left (\tau \left ( \sumn{i}{\overline{f(\mathbf{e}_i)}}\mathbf{e}_i \right )\right ) (\mathbf{e}_j) &=
\scp{\mathbf{e}_j}{ \left ( \sumn{i}{\overline{f(\mathbf{e}_i)}}\mathbf{e}_i \right )}
=
\sumn{i}{\scp{\mathbf{e}_j}{ \left ( {\overline{f(\mathbf{e}_i)}}\mathbf{e}_i \right )} } \\
&= \sumn{i}{ \overline{\overline{f(\mathbf{e}_i)}} \scp{\mathbf{e}_j}{ \mathbf{e}_i } }
= f(\mathbf{e}_j).
\end{align*}
\end{proof}
In particular, this implies that $P(x_i) = \mathbf{e}_i$.
\begin{proposition}\label{ppropb}
Our makeshift inner product on $V^\ast$ satisfies
$$
\scp{f}{g} = \scp{P(f)}{P(g)}
,$$
where $f,g \in V^\ast$.
\end{proposition}
\begin{proof}
By our vague definition we have
$\scp{f}{g} = [f]_{\mathbf{x}} \bullet \overline{[g]_{\mathbf{x}}}$.
It is enough to show that
$\scp{x_i}{x_j} = \scp{P(x_i)}{P(x_j)}$. From the comment after the proof of
Proposition \ref{ppropa} we obtain
$$\scp{P(x_i)}{P(x_j)} = \scp{\mathbf{e}_i}{\mathbf{e}_j} = \delta_{ij} = \mathbf{e}_i \bullet \mathbf{e}_j
= [x_i]_{\mathbf{x}} \bullet \overline{[x_j]_{\mathbf{x}}}
.$$
\end{proof}
Hence, our guess for the inner product on $V^\ast$ was correct.
We will now relate the Riesz vector of $f \in V^\ast$ to the
Riesz vector of $f \circ T_g^{-1}$. Recall that the Riesz vector of $f \in V^\ast$
is the unique vector $\mathbf{w} = P(f)$ such that $f(\mathbf{v}) = \scp{\mathbf{v}}{\mathbf{w}}$ for
all $\mathbf{v} \in V$. If $f \ne 0$ it can be found by scaling any nonzero
vector in the orthogonal complement of $\ker(f)$, which is one--dimensional, see \cite{Roman},
in particular Theorem 9.18.
\begin{proposition}\label{pprope}
Let $T_g : V \to V$ be unitary, $f \in V^\ast$, and $\mathbf{w} = P(f)$ the Riesz vector of $f$.
Then $T_g(\mathbf{w})$ is the Riesz vector of $f \circ T_g^{-1}$, i.e. the Riesz vector of $T^1_g(f)$.
\end{proposition}
\begin{proof}
We may assume that $f \ne 0$.
Using the notation $\gen{\mathbf{w}}$ for the one--dimensional subspace spanned by $\mathbf{w}$,
we start with a little diagram:
$$
\gen{\mathbf{w}} \odot \ker(f) \overset{T_g}{\longrightarrow} \gen{T_g(\mathbf{w})} \odot \ker(f \circ T_g^{-1} ),
$$
where $\odot$ denotes the orthogonal direct sum.\par
We need to show that $f \circ T_g^{-1} = \scp{\cdot}{T_g(\mathbf{w})}$, i.e.
that $(f \circ T_g^{-1})(\mathbf{v}) = \scp{\mathbf{v}}{T_g(\mathbf{w})}$ for all $\mathbf{v} \in V$.
Since $\mathbf{w} = P(f)$ is the Riesz vector of $f$, we have $f(\mathbf{v}) = \scp{\mathbf{v}}{\mathbf{w}}$ for all $\mathbf{v} \in V$.
We obtain
\begin{align*}
(f \circ T_g^{-1})(\mathbf{v}) &= \scp{T_g^{-1}(\mathbf{v})}{\mathbf{w}} \istmit{T_g \,\,\mathrm{unitary} }
\scp{\mathbf{v}}{T_g(\mathbf{w})}.
\end{align*}
From remark \ref{rinduced} we conclude that $f \circ T_g^{-1} = T^1_g(f)$.
\end{proof}
Observe that proposition \ref{pprope} implies the commutativity of the following
two diagrams.
$$
\begin{CD}
V^\ast @>T_g^1>>V^\ast\\
@VVPV@VVPV\\
V@>T_g>>V\\
\end{CD}
\qquad \text{and} \qquad
\begin{CD}
V^\ast @>(T_g^1)^{-1}>>V^\ast\\
@VVPV@VVPV\\
V@>(T_g)^{-1}>>V\\
\end{CD}
$$
Indeed, \ref{pprope} implies
\begin{align}
P \circ T_g^1 &= T_g \circ P \\
P \circ (T_g^1)^{-1} &= (T_g)^{-1} \circ P
\end{align}
\begin{proposition}\label{pproplink}
The first induced operator $T_g^1$ is unitary.
\end{proposition}
\begin{proof}
We may use that $T_g$ is unitary, that is,
$$
\scp{T_g(\mathbf{v})}{\mathbf{w}} = \scp{\mathbf{v}}{(T_g)^{-1}(\mathbf{w})}
= \scp{\mathbf{v}}{(T_{g^{-1}})(\mathbf{w})} \qquad (\ast)
.$$
Let $f,h \in V^\ast$ arbitrary, $\mathbf{w} := P(f)$, and $\mathbf{u} := P(h)$.
We need to check that $\scp{(T_g^1)(f)}{h} = \scp{f}{(T_g^1)^{-1}(h)}$.
We see that
\begin{align*}
\scp{(T_g^1)(f)}{h} &\istmit{\mathrm{proposition }\ref{ppropb}}
\scp{(P\circ T_g^1)(f)}{P(h)} \istmit{(1)} \scp{(T_g\circ P )(f)}{P(h)} \\
&= \scp{T_g(P(f))}{P(h)} = \scp{T_g(\mathbf{w})}{\mathbf{u}} \istmit{\ast} \scp{\mathbf{w}}{T_g^{-1}(\mathbf{u})} \\
&= \scp{P(f)}{T_g^{-1}(P(h))} = \scp{P(f)}{(T_g^{-1} \circ P ) (h)}\\
&\istmit{(2)} \scp{P(f)}{( P \circ (T_g^1)^{-1}) (h)} = \scp{P(f)}{ P ((T_g^1)^{-1} (h))}\\
&= \scp{f}{(T_g^1)^{-1}(h)}
\end{align*}
\end{proof}
After having looked at eigenvalues we will see that this generalizes to higher degree,
that $T_g^d$ is diagonalizable for all $d\in {\mathbf{Z}}^+$. But first let us look at the matrix version
of proposition \ref{pproplink}.
\begin{proposition}\label{ppropf}
$$
[T^1_g]_{\mathbf{x},\mathbf{x}} = \overline{[T_g]_{\mathcal{B},\mathcal{B}}}
$$
\end{proposition}
\begin{proof}
Let $A := [T_g]_{\mathcal{B}, \mathcal{B}} = [A_1| \cdots |A_i| \cdots | A_n] = [a_{i,j}]$ and
$B := [T_g^1]_{\mathbf{x},\mathbf{x}} = [B_1| \cdots |B_i| \cdots | B_n] = [b_{i,j}]$.
We will use the commutativity of the diagram, i.e.
$P^{-1} \circ T_g \circ P = T_g^1$, which we will mark as $\square$. No, the proof
is not finished here.
We get $T_g(\mathbf{e}_i) = A_i = \sumn{k}{a_{k,i}} \mathbf{e}_k$ and
\begin{align*}
T_g^1 (x_i) &\istmit{\square} (P^{-1} \circ T_g \circ P)(x_i) = P^{-1} ( T_g ( P (x_i)) \\
&\istmit{\ref{ppropa}} P^{-1} ( T_g ( \mathbf{e}_i)) = P^{-1} \left ( \sumn{k}{a_{k,i}} \mathbf{e}_k \right)
\istmit{\textrm{konj.}} \sumn{k}{ \overline{a_{k,i}} P^{-1} \left ( \mathbf{e}_k \right) }\\
&\istmit{\ref{ppropa}} \sumn{k}{\overline{a_{k,i}} x_k}
\end{align*}
On the other hand,
$[T^1_g(x_i)]_{\mathbf{x}} = [T^1_g]_{\mathbf{x},\mathbf{x}} \mathbf{e}_i = B_i$ implies
$T^1_g(x_i) = \sumn{k}{b_{k,i} x_k}$.
Together we obtain $b_{k,i} = \overline{a_{k,i}}$, and the proposition follows.
\end{proof}
\section{Averaging over the Group}\label{sreynolda}
Now we apply averaging to obtain self-adjoint operators.
\begin{definition}\label{dreynolds} We define the following operators:
\begin{enumerate}
\item $\displaystyle \hat{T} : V\to V, \mathbf{v} \mapsto \hat{T}(\mathbf{v})
:= \sumg{g}{T_g(\mathbf{v})}$
\item $\displaystyle \hat{T^1} : V^\ast\to V^\ast, f \mapsto \hat{T^1}(f)
:= \sumg{g}{T^1_g(f)}$
\end{enumerate}
\end{definition}
These are sometimes called the \kwd{Reynolds} operator of $G$.
\begin{proposition}\label{preynolds}
The operators $ \hat{T}$ and $\hat{T^1}$ are self-adjoint (Hermitian).
\end{proposition}
\begin{proof}
The idea of the averaging trick is that if $g\in G$ runs through all group
element and $g' \in G$ is fixed, then the products $g'g$ run also through all group elements.
We will make use of the facts that every $T_g$ and every $T^1_g$ is unitary.
\begin{enumerate}
\item We need to show that $\scp{\hat{T}(\mathbf{v})}{\mathbf{w}} = \scp{\mathbf{v}}{\hat{T}(\mathbf{w})}$ for
arbitrary $\mathbf{v},\mathbf{w} \in V$. We obtain
\begin{align*}
\scp{\hat{T}(\mathbf{v})}{\mathbf{w}} &=\scp{\sumg{g}{T_g(\mathbf{v})}}{\mathbf{w}} = \sumg{g}{\scp{T_g(\mathbf{v})}{\mathbf{w}}}\\
&\istmit{unit.} \sumg{g}{\scp{\mathbf{v}}{(T_g)^{-1}(\mathbf{w})}} = \sumg{g}{\scp{\mathbf{v}}{(T_{g^{-1}})(\mathbf{w})}} \\
&= \sumg{g'}{\scp{\mathbf{v}}{(T_{g'})(\mathbf{w})}} = \scp{\mathbf{v}}{\hat{T}(\mathbf{w})}
\end{align*}
\item The same proof, \emph{mutatis mutandis}, replacing $\hat{T} \leftrightarrow \hat{T^1}$,
$T_g \leftrightarrow T_g^1$, $\mathbf{v} \leftrightarrow f$, and $\mathbf{w} \leftrightarrow h$ shows that
$\scp{\hat{T^1}(f)}{h} = \scp{f}{\hat{T^1}(h)}.$
\end{enumerate}
\end{proof}
Consequently, $ \hat{T}$ and $\hat{T^1}$ are unitarily diagonalizable with real spectrum.
\begin{proposition}\label{pproph}
The operators $ \hat{T}$ and $\hat{T^1}$ are \inx{idempotent}, i.e.
\begin{enumerate}
\item $ \hat{T} \circ \hat{T} = \hat{T} $
\item $ \hat{T^1} \circ \hat{T^1} = \hat{T^1} $ .
\end{enumerate}
In particular, the eigenvalues of both operators are either $0$ or $1$.
\end{proposition}
\begin{proof}
Again, we show only one part, the other part is analog. To begin with,
let $s \in G$ be fixed. Then
\begin{align*}
T_s \circ \hat{T} &= T_s \circ \sumg{g}{T_g} = \sumg{g}{T_s \circ T_g} \\
&= \sumg{g}{T_{sg} } = \sumg{g'}{T_{g'} } = \hat{T}.
\end{align*}
From this it follows that
\begin{align*}
\hat{T} \circ \hat{T} &= \left (\sumg{g}{T_g} \right) \circ \hat{T}
= \sumg{g}{T_g\circ \hat{T} } \istmit{above}
\sumg{g}{ \hat{T} } \\
&= \frac{1}{|G|} \cdot |G| \cdot\hat{T} = \hat{T}.
\end{align*}
From $ \hat{T} \circ \hat{T} = \hat{T} $ we conclude that $ \hat{T} \circ (\hat{T} - \mathrm{id}) = 0 $.
Thus the minimal polynomial of $\hat{T}$ divides the polynomial $\lambda (\lambda - 1)$, so
all eigenvalues are contained in $\set{0,1}$.
\end{proof}
We will now look at the eigenvalues of $T_g$ and $T^1_g$ and their
interrelation. Since both operators are unitary, their eigenvalues
have absolute value $1$.
\begin{proposition}\label{pvictor}
\begin{enumerate}
\item If $\mathbf{v} \in V$ is an eigenvector of $T_g$ for the eigenvalue $\lambda$,
then $\mathbf{v}$ is an eigenvector of $T_{g^{-1}}$ for the eigenvalue $\overline{\lambda} = \frac{1}{\lambda}$.
\item If $f\in V^\ast$ is an eigenvector of $T^1_g$ for the eigenvalue $\lambda$,
then $f$ is an eigenvector of $T^1_{g^{-1}}$ for the eigenvalue $\frac{1}{\lambda}$.
\item If $f\in V^\ast$ is an eigenvector of $T^1_g$ for the eigenvalue $\lambda$,
then $P(f) \in V$ is an eigenvector of $T_g$ for the eigenvalue $\overline{\lambda} = \frac{1}{\lambda}$.
\item If $\mathbf{v} \in V$ is an eigenvector of $T_g$ for the eigenvalue $\lambda$,
then $P^{-1} (\mathbf{v}) \in V^\ast$ is an eigenvector of $T^1_g$ for the eigenvalue $\overline{\lambda}=\frac{1}{\lambda}$.
\end{enumerate}
\end{proposition}
\begin{proof}
We will make use of the commutativity of Proposition \ref{pprope}. Observe that $g.\mathbf{v} = T_g(\mathbf{v})$
and $g. f = f \circ T_{g^{-1}}$.
\begin{enumerate}
\item \quad
\begin{align*}
T_g(\mathbf{v}) &= g.\mathbf{v} = \lambda \mathbf{v} \implies g^{-1} . g.\mathbf{v} = g^{-1} . \lambda \mathbf{v}
\implies g^{-1} . g.\mathbf{v} = \lambda g^{-1} .\mathbf{v} \\
& \implies \mathbf{v} = \lambda g^{-1} .\mathbf{v} \implies T_{g^{-1}}(\mathbf{v}) = g^{-1} . \mathbf{v} = \frac{1}{\lambda} \mathbf{v}
\end{align*}
\item \quad
\begin{align*}
T^1_g(f) &= g. f = \lambda f \implies g^{-1} . g. f = g^{-1} . \lambda f
\implies g^{-1} . g. f = \lambda g^{-1} . f \\
& \implies f = \lambda g^{-1} . f \implies T^1_{g^{-1}}(f) = g^{-1} . f = \frac{1}{\lambda} f
\end{align*}
\item \quad
\begin{align*}
T^1_g(f) = \lambda f &\folgtmit{P\circ} P(T^1_g(f)) = P(\lambda f) \folgtmit{(1)} T_g(P(f)) = P(\lambda f) \\
&\implies T_g(P(f)) = \overline{\lambda} P( f) =\frac{1}{\lambda} P( f)
\end{align*}
\item \quad
\begin{align*}
T_g(\mathbf{v}) = \lambda \mathbf{v} &\folgtmit{P^{-1} \circ} P^{-1}(T_g(\mathbf{v})) = P^{-1} (\lambda \mathbf{v})
\folgtmit{\square} (T_g^1 \circ P^{-1})(\mathbf{v}) = \overline{\lambda} P^{-1} (\mathbf{v}) \\
&\implies T_g^1 ( P^{-1} (\mathbf{v})) = \frac{1}{\lambda} P^{-1} (\mathbf{v})
\end{align*}
\end{enumerate}
\end{proof}
This implies that if we consider the union of the spectra over all $g\in G$,
then we obtain the same (multi)set, no matter if we take $T_g$ or $T^1_g$. \par
\section{Eigenvectors and eigenvalues}\label{svictor}
Now we continue from where we left at the end of section \ref{spreliminaries},
fixing one group element $g \in G$ and compare $T_g^1$ with $T_g^d$ for $d > 1$.
By a method called \kwd{stars and bars} it is easy to see that $$\tilde{d} := \dim_{\mathbf{C}}(R_d)
= \frac{(n+d-1)!}{(n-1)!d!} .$$
Remember that every $T_g^1$ is unitarily diagonalizable with eigenvalues of absolute value $1$.
If $\ensuremath \mathrm{spec}(T_g^1) = (\omega_1,\dots , \omega_n) \in U(1)^n $,
then $V^\ast$ has an orthonormal basis $\mathbf{y}_g^1 := (y_{1}, \dots ,y_{n} )$,
such that $T_g^1 (y_{i}) = \omega_i \cdot y_{i} $ for all $1 \le i \le n$,
and $[T_g^1]_{\mathbf{y}_g^1,\mathbf{y}_g^1} = \ensuremath \mathrm{diag}(\omega_1,\dots , \omega_n)$.
Moreover,
$$
[T_g^1]_{\mathbf{y}_g^1,\mathbf{y}_g^1} = [\mathrm{id}]_{\mathbf{y}_g^1, \mathbf{x}} \cdot [T_g^1]_{\mathbf{x},\mathbf{x}} \cdot [\mathrm{id}]_{ \mathbf{x}, \mathbf{y}_g^1}
= \ensuremath \mathrm{diag}(\omega_1,\dots , \omega_n) ,
$$
where $[\mathrm{id}]_{\mathbf{y}_g^1, \mathbf{x}} = [\mathrm{id}]_{ \mathbf{x}, \mathbf{y}_g^1}^\ast$ is unitary. \par
For $d>1$ put
$$
\mathbf{x}^d := (x_1^d, x_2^d, \dots , x_n^d, x_1^{d-1}x_2 ,x_1^{d-1}x_3 , \dots ,x_1^{d-1}x_n, \dots )
=: (\tilde{x_1}, \dots, \tilde{x}_{\tilde{d}})
,$$
all monomials in the $x_i$ of total degree $d$, numbered from $1$ to $\tilde{d}$.
These are certainly linear independent, since we have
no relations amongst the variables, and span $R_d$, since
every monomial of total degree $d$ can be written as a linear combination of these.
So they form a basis for $R_d$. We will not require that this can be made into an
orthonormal basis, we do not even consider any inner product on $R_d$ for $d>1$.
We rather want to establish that
$$
\mathbf{y}^d := (y_1^d, y_2^d, \dots , y_n^d, y_1^{d-1}y_2 ,y_1^{d-1}y_3 , \dots ,y_1^{d-1}y_n, \dots )
=: (\tilde{y_1}, \dots, \tilde{y}_{\tilde{d}})
$$
is a basis of eigenvectors of $T_g^d$ diagonalizing $T_g^d$, using the
same numbering.
Arranging the eigenvalues of $T_g^1$ in the same way we put
$$
\mathbf{\omega}^d := (\omega_1^d, \omega_2^d, \dots , \omega_n^d, \omega_1^{d-1}\omega_2 ,\omega_1^{d-1}\omega_3 ,
\dots ,\omega_1^{d-1}\omega_n, \dots )
=: (\tilde{\omega_1}, \dots, \tilde{\omega}_{\tilde{d}}).
$$
Now we establish that the $\tilde{y_i}$, $1\le i \le \tilde{d}$, are
the eigenvectors for the eigenvalues $\tilde{\omega_i}$ of $T_g^d$.
\begin{proposition}\label{pinducedeigen}
In the context above,
$$
T_g^d (\tilde{y_i}) = \tilde{\omega_i} \cdot \tilde{y_i}
$$
for all $1\le i \le \tilde{d}$.
\end{proposition}
\begin{proof}
The key is proposition \ref{pindact2}, as in the preliminary observations
at the end of section \ref {spreliminaries}. Let
$$
\tilde{y_i} = \prod_{j=1}^{n} y_j^{\epsilon_j}
$$
and
$$
\tilde{\omega_i} = \prod_{j=1}^{n} \omega_j^{\epsilon_j}
,$$
where $\epsilon_j \in {\mathbf{N}}$ and the sum of these exponents is $d$.
Then
\begin{align*}
T_g^d (\tilde{y_i}) &= T_g^d \left ( \prod_{j=1}^{n} y_j^{\epsilon_j} \right )
= \prod_{j=1}^{n} \left ( T_g^1 ( y_j ) \right )^{\epsilon_j}
= \prod_{j=1}^{n} \omega_j^{\epsilon_j} y_j^{\epsilon_j}
= \tilde{\omega_i} \cdot \tilde{y_i}
\end{align*}
\end{proof}
As a consequence, $R_d$ has a basis of eigenvectors of $T_g^d$ and
$T_g^d$ is similar to the \inx{diagonal matrix}
$\ensuremath \mathrm{diag}(\tilde{\omega_1}, \dots, \tilde{\omega}_{\tilde{d}})$.
\section{Molien's Theorem}\label{sstart}
We will now make some final preparations and then present
the proof of Molien's Theorem.\par
For $f \in R$ and $g \in G$ we say that $f$ is an \kwd{invariant}
of $g$ if $g. f = f$ and that $f$ is a (simple) invariant
of $G$ if $\forall g \in G : g. f = f$.
The method of averaging from section \ref{sreynolda} can also be applied to create invariants:
\begin{proposition}\label{ppropg}
For $f\in V^\ast$ put $\hat{f} := \hat{T^1} (f)$. Then $\hat{f}$ is an
invariant of $G$.
\end{proposition}
\begin{proof}
Let $g \in G$ be arbitrary. We will show that $g.\hat{f} = \hat{f}$. Clearly,
from proposition \ref{pprop0} we get that
\begin{align*}
g.\hat{f} &= \hat{f} \circ T_{g^{-1}} = (\hat{T^1} (f)) \circ T_{g^{-1}} \\
&= \left( \sumg{s}{T_s^1(f) }\right )\circ T_{g^{-1}} = \left( \sumg{s}{f \circ T_{s^{-1}} }\right )\circ T_{g^{-1}}\\
&= \sumg{s}{f \circ T_{s^{-1}} \circ T_{g^{-1}}} = \sumg{t}{f \circ T_{t^{-1}}} = \hat{f}.
\end{align*}
\end{proof}
Now, we call
$$
R^G := \genset{f\in R}{\forall g \in G : g. f = f}
$$
the \kwd{algebra of invariants} of $G$.
\begin{proposition}\label{pinvalg}
$R^G$ is a subalgebra of $R$.
\end{proposition}
\begin{proof}
Since the mapping $f \mapsto g.f$ is linear for every $g\in G$,
$R^G$ is the intersection of subspaces, and hence a subspace.
Let us check the subring conditions in more detail.
For arbitrary $g \in G$, $f,h \in R^G$, and $\mathbf{v} \in V$ we have
$g. f = f$, $g. h = h$
\begin{enumerate}
\item For the zero $0 \in R$ we obtain $(g. 0)(\mathbf{v}) = 0(g^{-1}. \mathbf{v} ) = 0(\mathbf{v})$,
so $0 \in R^G$.
\item We see
\begin{align*}
g. (f-h)(\mathbf{v}) &= (f-h)(g^{-1} . \mathbf{v}) = f(g^{-1} . \mathbf{v}) - h(g^{-1} . \mathbf{v}) \\
&= (g . f)(\mathbf{v}) - (g . h)(\mathbf{v}) = f(\mathbf{v}) - h(\mathbf{v}) = (f-h)(\mathbf{v})
\end{align*}
\item Likewise,
\begin{align*}
g. (f\cdot h)(\mathbf{v}) &= (f\cdot h)(g^{-1} . \mathbf{v}) = f(g^{-1} . \mathbf{v}) \cdot h(g^{-1} . \mathbf{v}) \\
&= (g . f)(\mathbf{v}) \cdot (g . h)(\mathbf{v}) = f(\mathbf{v}) \cdot h(\mathbf{v}) = (f\cdot h)(\mathbf{v}).
\end{align*}
\end{enumerate}
\end{proof}
Our subalgebra $R^G$ is graded in the same way as $R$.
\begin{proposition}\label{pinvalggraded}
The algebra of invariants of $G$ is naturally graded as
$$
R^G = \bigoplus_{d \in {\mathbf{N}}} R^G_d,
$$
where $R^G_d = \genset{f\in R_d}{\forall g \in G : g. f = f}$,
called the $d$--th \kwd{homogeneous component} of $R^G$.
\end{proposition}
\begin{proof}
This follows directly from proposition \ref{pindact} and proposition \ref{pindact2}.
\end{proof}
\begin{definition}[Molien series]\label{dmolien}
Viewing $R^G_d$ as a vector space, we define
$$
a_d := \dim_{\mathbf{C}} R^G_d,
$$
the number of linearly independent homogeneous invariants of degree $d\in {\mathbf{N}}$, and
$$
\Phi_G(\lambda) := \sum_{d\in{\mathbf{N}}} a_d \lambda^d,
$$
the \kwd{Molien series} of $G$.
\end{definition}
Thus, the Molien series of $G$ is an ordinary power series generating
function whose coefficients are the numbers of linearly independent homogeneous invariants of degree $d$.
The following beautiful formula gives these numbers, its proof is the
aim of this paper.
\begin{theorem}[Molien, 1897]\label{tmolien}
$$
\Phi_G(\lambda) := \frac{1}{|G|} \sum_{g\in G} \frac{1}{\det(\mathrm{id} - \lambda T_g)}
$$
\end{theorem}
Following \cite{Sloane1} we first look at the number $a_1$ of linearly independent homogeneous invariants of degree $1$.
\begin{theorem}[Theorem 13 in \cite{Sloane1}]\label{t13}
$$
a_1 = \ensuremath{\mathrm{Tr}} (\hat{T}) = \ensuremath{\mathrm{Tr}} (\hat{T^1})
$$
\end{theorem}
\begin{proof}
First, we note that the equation $\ensuremath{\mathrm{Tr}} (\hat{T}) = \ensuremath{\mathrm{Tr}} (\hat{T^1}) $ follows from
the remark at the end of section \ref{sreynolda}, since the sum for the
trace runs over all group elements. Remember that the trace is independent
of the choice of basis.
From proposition \ref{pproph} we know that both operators are idempotent hermitian
and $V^\ast$ has an orthonormal basis $\mathbf{f} = (\mathbf{f}_1,\dots, \mathbf{f}_n)$
of eigenvectors of $\hat{T^1}$, corresponding to the eigenvalues
$\lambda_1, \dots , \lambda_n \in \set{0,1}$, so
$$
[\hat{T^1}]_{\mathbf{f},\mathbf{f}} = \ensuremath \mathrm{diag}(\lambda_1, \dots , \lambda_n).
$$
Let us say that this matrix has $r$ entries $1$ and the remaining $n-r$ entries $0$.
By rearranging the eigenvalues and eigenvectors we may assume that the
first $r$ entries are $1$ and the remaining $n-r$ are $0$, i.e.
$$
\left ([\hat{T^1}]_{\mathbf{f},\mathbf{f}}\right )_{i,i} =
\begin{cases}
1 & : 1 \le i \le r\\
0 & : r+1 \le i \le n.
\end{cases}
$$
Hence $\hat{T^1} (f_i) = f_i$ for $1 \le i \le r$ and $\hat{T^1} (f_i) = 0$ for $r+1 \le i \le n$.
Any linear invariant of $G$ is certainly fixed by $\hat{T^1}$,
so $a_1 \le r$. On the other hand, by proposition \ref{ppropg},
$\hat{f_i} := \hat{T^1} (f_i) = \lambda_i f_i$ is an invariant of $G$ for every $1\le i\le r$,
so $a_1 \ge r$. Together, $a_1 = r$.
\end{proof}
Before the final proof, let us introduce a handy notation.
\begin{definition}\label{dcorfficient}
Let $p(\lambda) \in {\mathbf{C}}[\lambda]$ or $p(\lambda) \in {\mathbf{C}}[[\lambda]]$. Then
$[\lambda^i]:p(\lambda)$ denotes the \inx{coefficient} of $\lambda^i$ in $p(\lambda)$.
\end{definition}
So, for example $[x^2]: 2x^3 + 42x^2 - 6 = 42$ and $[\lambda^d]:\Phi_G(\lambda) = a_d$.
\begin{proof}[Proof of Molien's Theorem]
We just established the case $d = 1$, so the reader is probably
expecting a proof by induction over $d$. But this is \emph{not} the case.
Rather, the case $d = 1$ applies to all $d > 1$.
Note that $a_d$ is equal to the number of linearly independent
invariants of all of the $T_g^d$. So Theorem \ref{t13} gives us
\begin{align*}
a_1 &= \ensuremath{\mathrm{Tr}} (\hat{T}) = \ensuremath{\mathrm{Tr}} (\hat{T^1}) \qquad \mathrm{ and} \qquad\\
a_d &= \ensuremath{\mathrm{Tr}} (\hat{T^d}),
\end{align*}
where the latter includes the first.
From definition \ref{dreynolds} we also have
$$
\hat{T^1} = \sumg{g}{T^1_g}
\quad
\textrm{and in general}
\quad
\hat{T^d} = \sumg{g}{T^d_g} ,
$$
so we already know that
$$
a_d = \sumg{g}{\ensuremath{\mathrm{Tr}}(T^d_g)}.
$$
So all we need to show is
$$
[\lambda^d]:\frac{1}{|G|} \sum_{g\in G} \frac{1}{\det(\mathrm{id} - \lambda T^1_g)} = \sumg{g}{\ensuremath{\mathrm{Tr}}(T^d_g)}.
$$
We will show that for every summand (group element) the equation
$$
[\lambda^d]: \frac{1}{\det(\mathrm{id} - \lambda T^1_g)} = \ensuremath{\mathrm{Tr}}(T^d_g)
$$
holds. From proposition \ref{pinducedeigen} we get for every $g\in G$ that
\begin{align*}\ensuremath{\mathrm{Tr}}(T^d_g) &= \ensuremath{\mathrm{Tr}}(\ensuremath \mathrm{diag}(\tilde{\omega_1}, \dots, \tilde{\omega}_{\tilde{d}})) \\&=
\tilde{\omega_1} + \dots + \tilde{\omega}_{\tilde{d}},
\end{align*}
which is the sum of the products
of the $\omega_1, \omega_2, \dots ,\omega_n $, taken $d$ of them at a time.
On the other hand, for the same $g\in G$ we
obtain from section \ref{svictor} that
$[T_g^1]_{\mathbf{y}_g^1,\mathbf{y}_g^1} = \ensuremath \mathrm{diag}(\omega_1,\dots , \omega_n)$
so that
\begin{align*}
\det(\mathrm{id} - \lambda T^1_g) &= \det(\mathrm{id} - \lambda \cdot \ensuremath \mathrm{diag}(\omega_1,\dots , \omega_n) ) \\
&= (1 - \lambda \omega_1 )(1 - \lambda \omega_2 )\dots(1 - \lambda \omega_n ),
\end{align*}
so
\begin{align*}
\quad & \frac{1}{\det(\mathrm{id} - \lambda T^1_g)} = \frac{1}{(1 - \lambda \omega_1 )(1 - \lambda \omega_2 )\dots(1 - \lambda \omega_n )} \\
&= \frac{1}{(1 - \lambda \omega_1) } \cdot \frac{1}{(1 - \lambda \omega_2)} \cdot \dots \frac{1}{(1 - \lambda \omega_n)} \\
&= (1 + \lambda \omega_1 + \lambda^2 \omega_1^2 + \dots )(1 + \lambda \omega_2 + \lambda^2 \omega_2^2 + \dots ) \dots
(1 + \lambda \omega_n + \lambda^2 \omega_n^2 + \dots )
\end{align*}
and here the coefficient of $\lambda^d$ is also sum of the products
of $\omega_1, \omega_2, \dots ,\omega_n $, taken $d$ of them at a time.
Again, the last claim
$$
\frac{1}{|G|} \sum_{g\in G} \frac{1}{\det(\mathrm{id} - \lambda T_g)} =
\frac{1}{|G|} \sum_{g\in G} \frac{1}{\det(\mathrm{id} - \lambda T^1_g)}
$$
follows from
the remark at the end of section \ref{sreynolda}, since the sum runs over all group elements.
\end{proof}
\section{Symbol table}\label{ssymbol}
\begin{multicols}{2}
\begin{description}
\item[$a_d$] number of linearly independent homogeneous invariants of degree $d$
\item[$\tilde{d}$] Dimension of $R_d$
\item[$\mathcal{B}$] ON basis for $V$
\item[$G$] Finite group
\item[$\omega_i$] eigenvalue of $T_g^1$ (\cite{Sloane1} $= w_i$ )
\item[$P(f)$] \lq\lq Rho\rq\rq\ Riesz vector of $f$.
\item[$\rho$] Unitary representation $\rho : G \to U(V), g \mapsto T_g$
\item[$R$] Big algebra, direct sum of
\item[$R_d$] Direct summand of degree $d$
\item[$R^G$] Ring of invariants of $G$
\item[$R^G_d$] Degree $d$ summand
\item[$T_g$] representation of $g$ on $V$, (\cite{Sloane1} $ A_\alpha= [T_{g_\alpha}]_{\mathcal{B}, \mathcal{B}} $ )
\item[$V$] Complex inner product space
\item[$V^\ast$] Algebraic dual of $V$
\end{description}
\end{multicols}
\section{Lost and found}\label{slostfound}
Some things to explore from here:
\begin{itemize}
\item If we know the conjugacy classes of $G$, we may be able to say more, since every
unitary representation splits into irreducible components.
\item There seems to be a link to P\'olya enumeration.
\item We have GAP code, see \cite{GAP4}.
\item An example would be nice.
\item Relations on the generators in $S$ of the Cayley graph $\Gamma(G,S)$
should lead to conditions of the minimal polynomial of its adjacency operator $Q(\Gamma(G,S))$.
\item Also, Cayley graphs of some finite reflection groups \cite{Hu2} should become accessible.
\item Check some more applications, as mentioned in \cite{Sloane1}.
\item For finding invariants, check also \cite{Cox}, Gr\"obner bases.
\end{itemize}
\addcontentsline{toc}{section}{References}
\addcontentsline{toc}{section}{Index}
\printindex
\def\thefootnote{}
\footnote{\texttt{\jobname .tex} Typeset: \today }
\end{document} |
\begin{document}
\global\long\def\mathbf{I}{\mathbf{I}}
\global\long\def\ensuremath{\mathbb{R}}{\ensuremath{\mathbb{R}}}
\begin{comment}
The set of real numbers
\end{comment}
\global\long\def\ensuremath{\mathbb{R}}{\ensuremath{\mathbb{R}}}
\begin{comment}
The set of real numbers
\end{comment}
\global\long\def\ensuremath{\mathbb{C}}{\ensuremath{\mathbb{C}}}
\begin{comment}
The set of complex numbers
\end{comment}
\global\long\def\ensuremath{\mathbb{Z}}{\ensuremath{\mathbb{Z}}}
\begin{comment}
The set of integer numbers
\end{comment}
\global\long\def\ensuremath{\mathbb{N}}{\ensuremath{\mathbb{N}}}
\begin{comment}
The set of natural numbers
\end{comment}
\newcommandx\GL[2][usedefault, addprefix=\global, 1=\ensuremath{\mathbb{R}}]{\ensuremath{GL\left(#2,#1\right)}}
\begin{comment}
The \#1 dimensional general linear group over the field \#2
\end{comment}
\newcommandx\SL[2][usedefault, addprefix=\global, 1=\ensuremath{\mathbb{R}}]{\ensuremath{SL\left(#2,#1\right)}}
\begin{comment}
The \#1 dimensional special linear group over the field \#2
\end{comment}
\global\long\def\ogroup#1{O\left(#1\right)}
\global\long\def\sogroup#1{SO\left(#1\right)}
\global\long\def\SE#1{\ensuremath{SE\left(#1\right)}}
\begin{comment}
The \#1 dimensional special Euclidean group
\end{comment}
\global\long\def\SO#1{\ensuremath{SO\left(#1\right)}}
\begin{comment}
The \#1 dimensional special orthogonal group
\end{comment}
\global\long\def\SU#1{\ensuremath{SU\left(#1\right)}}
\begin{comment}
The \#1 dimensional special unitary group
\end{comment}
\global\long\def\mbox{SS}et#1{\ensuremath{S^{#1}}}
\global\long\def\ball#1#2{\ensuremath{\mathbb{B}_{#1}(#2)}}
\global\long\def\ensuremath{\text{Pr}}ojspace#1{\ensuremath{\mathcal{P}(#1)}}
\global\long\def\ensuremath{\text{sign}}{\ensuremath{\text{sign}}}
\global\long\def\text{diag}#1{\text{diag}\left(#1\right)}
\global\long\def\vecop#1{\text{vec}\left(#1\right)}
\global\long\def\inprod#1#2{\left\langle #1,#2\right\rangle }
\global\long\def\abs#1{\left|#1\right|}
\global\long\def\text{dist}{\text{dist}}
\global\long\def\norm#1{\left\Vert #1\right\Vert }
\global\long\def\ensuremath{\text{grad}}{\ensuremath{\text{grad}}}
\global\long\def\ensuremath{\text{Hess}}{\ensuremath{\text{Hess}}}
\global\long\deff{f}
\global\long\defh{h}
\global\long\defg{g}
\global\long\defF{F}
\global\long\def\axisangle#1#2{\ensuremath{R\left(#1,#2\right)}}
\begin{comment}
Axis-angle representation with axis \#1 and angle \#2
\end{comment}
\global\long\def\scalar#1{#1}
\global\long\def\vector#1{#1}
\begin{comment}
general vector notation
\end{comment}
\global\long\def\myvec#1{#1}
\global\long\def\myvec x{\myvec x}
\global\long\def\myvec y{\myvec y}
\global\long\def\myvec z{\myvec z}
\global\long\def\myvec u{\myvec u}
\global\long\def\myvec zonsta{\myvec a}
\global\long\def\myvec zonstb{\myvec b}
\global\long\def\myvec zenter{\myvec c}
\global\long\defA{A}
\global\long\defB{B}
\global\long\defC{C}
\global\long\defD{D}
\global\long\defE{E}
\global\long\def\matrixe{E}
\global\long\defI{I}
\global\long\defU{U}
\global\long\defU'{U'}
\global\long\def\ensuremath{\mathbb{H}}{\ensuremath{\mathbb{H}}}
\begin{comment}
The set of quaternions
\end{comment}
\global\long\def\mbox{SS}et 3{\mbox{SS}et 3}
\global\long\def\quat#1{\boldsymbol{#1}}
\global\long\def\hat{\imath}{\hat{\imath}}
\global\long\def\hat{\jmath}{\hat{\jmath}}
\global\long\def\hat{k}{\hat{k}}
\global\long\def\real#1{\mbox{Re}\left(#1\right)}
\global\long\def\imag#1{\mbox{Im}\left(#1\right)}
\global\long\def\boldsymbol{\imath_{m}}{\boldsymbol{\imath_{m}}}
\global\long\def\otimes{\otimes}
\global\long\def\otimes{\otimes}
\global\long\def\quat q{\quat q}
\global\long\def\quat p{\quat p}
\global\long\def\quat z{\quat z}
\global\long\def\mathscr{H}{\mathscr{H}}
\begin{comment}
The set of quaternions
\end{comment}
\global\long\def\ensuremath{\dqset^{\norm 1}}{\ensuremath{\mathscr{H}^{\norm 1}}}
\global\long\def\mathscr{P}{\mathscr{P}}
\global\long\def\mathscr{D}{\mathscr{D}}
\global\long\def\varepsilonvector#1{\underline{\boldsymbol{#1}}}
\global\long\def\dq#1{\underline{\boldsymbol{#1}}}
\global\long\def\varepsilon{\varepsilon}
\global\long\def\dq q{\dq q}
\global\long\def\dq p{\dq p}
\global\long\def\skewsymproduct#1{\ensuremath{\left\lfloor #1\right\rfloor _{\times}}}
\begin{comment}
Skew-symmetric matrix associated to cross-product with vector \#1
\end{comment}
\global\long\def\text{Tr}{\text{Tr}}
\global\long\def\text{diag}{\text{diag}}
\global\long\def\text{Tr}iag{\text{triag}}
\global\long\def\text{cdown}{\text{cdown}}
\global\long\def\mathscr{V}{\mathscr{V}}
\global\long\def\man#1{\boldsymbol{#1}}
\global\long\def\ensuremath{\varphi}{\ensuremath{\varphi}}
\global\long\def\ensuremath{\mathcal{X}}{\ensuremath{\mathcal{X}}}
\global\long\def\mathcal{Y}{\mathcal{Y}}
\global\long\def\mathcal{Z}{\mathcal{Z}}
\global\long\def\mathcal{Z}urvea{\ensuremath{\mathcal{X}}}
\global\long\def\mathcal{Z}urveb{\mathcal{Y}}
\global\long\def\mathcal{A}{\mathcal{A}}
\global\long\def\mathcal{B}{\mathcal{B}}
\global\long\def\mathcal{N}{\mathcal{N}}
\global\long\def\mathcal{R}{\mathcal{R}}
\global\long\def\mathcal{\mathcal{Y}}{\mathcal{\mathcal{Y}}}
\global\long\deff{f}
\global\long\def\mathcal{\mathcal{U}}{\mathcal{\mathcal{U}}}
\global\long\def\mathcal{V}{\mathcal{V}}
\global\long\defV{V}
\global\long\defV'{V'}
\global\long\defv{v}
\global\long\defu{u}
\global\long\defu{u}
\global\long\def\man a{\man a}
\global\long\def\man b{\man b}
\global\long\def\man u{\man u}
\global\long\def\man uentral{\man c}
\global\long\def\vfieldset#1{\mathscr{X}(#1)}
\global\long\def\mathscr{D}{\mathscr{D}}
\global\long\def\text{Id}{\text{Id}}
\global\long\def\mbox{PT}{\mbox{PT}}
\global\long\def\alpha{\alpha}
\global\long\def\curve{\alpha}
\global\long\def\exp{\exp}
\global\long\def\log{\log}
\global\long\def\logb#1{\overrightarrow{#1}}
\global\long\def\mathbb{L}{\mathbb{L}}
\global\long\def\mathbb{B}{\mathbb{B}}
\global\long\def\mathcal{C}{\mathcal{C}}
\global\long\defC{C}
\global\long\def\Omega{\Omega}
\global\long\def\text{inj}{\text{inj}}
\global\long\def\text{\ensuremath{\grad}}{\text{\ensuremath{\ensuremath{\text{grad}}}}}
\global\long\def\ensuremath{\mbox{p}}{\ensuremath{\mbox{p}}}
\global\long\def\ensuremath{\text{Pr}}{\ensuremath{\text{Pr}}}
\global\long\def\mathcal{E} {\mathcal{E} }
\global\long\def\mean#1{\bar{#1}}
\global\long\def\text{\ensuremath{\mu}}{\text{\ensuremath{\mu}}}
\global\long\defP{P}
\global\long\def\Sigma{\Sigma}
\global\long\defM {M }
\global\long\def\mbox{\ensuremath{\mathcal{M}}} {\mbox{\ensuremath{\mathcal{M}}} }
\global\long\def\ensuremath{\mathcal{B}}{\ensuremath{\mathcal{B}}}
\global\long\def\scalar{\sigma}{\scalar{\sigma}}
\global\long\defs{s}
\global\long\def\Phi{\Phi}
\global\long\defX{X}
\global\long\defY{Y}
\global\long\defZ{Z}
\global\long\def\est#1{\hat{#1}}
\global\long\def\outcome#1{\breve{#1}}
\global\long\defN{N}
\global\long\def\mathbb{E}{\mathbb{E}}
\global\long\def\text{\ensuremath{\mu}}set{\mathscr{E}}
\global\long\def\vector x{\vector x}
\global\long\def\vector y{\vector y}
\global\long\def\varpi{\varpi}
\global\long\def\vartheta{\vartheta}
\global\long\deff{f}
\global\long\defh{h}
\global\long\defn_{\state}{n_{\vector x}}
\global\long\defn_{\meas}{n_{\vector y}}
\global\long\defn_{\text{P}noise}{n_{\varpi}}
\global\long\defn_{\mnoise}{n_{\vartheta}}
\global\long\defn_{\state}aug{n_{a}}
\global\long\defPpnoise{Q}
\global\long\defPmnoise{R}
\global\long\defv{v}
\global\long\defN{N}
\global\long\defw{w}
\global\long\defwmatrix{W}
\global\long\defwm{w_{i}^{\left(m\right)}}
\global\long\defwc{w_{i}^{\left(c\right)}}
\global\long\defwcc{w_{i}^{\left(cc\right)}}
\global\long\def\chi{\chi}
\global\long\def\gamma{\gamma}
\global\long\def\xi{\xi}
\global\long\def\zeta{\zeta}
\global\long\def\sigma\text{R}{\sigma\text{R}}
\global\long\def\mbox{SS}{\mbox{SS}}
\global\long\def\sigma\text{R}sr{\text{SR}\sigma\text{R}}
\global\long\def\text{Ri}\sigma\text{R}{\text{Ri}\sigma\text{R}}
\global\long\def\text{UT}{\text{UT}}
\global\long\def\text{PaUT}{\text{PaUT}}
\global\long\def\text{RiUT}{\text{RiUT}}
\global\long\def\text{SRUT}{\text{SRUT}}
\global\long\def\text{RiSRUT}{\text{RiSRUT}}
\global\long\def\text{PaSRUT}{\text{PaSRUT}}
\global\long\def\nu{\nu}
\global\long\def\nuquat{\quat{\nu}}
\global\long\defG{G}
\global\long\def\text{VtoQ}{\text{VtoQ}}
\global\long\def\text{RoVtoQ}{\text{RoVtoQ}}
\global\long\def\text{GeRVtoQ}{\text{GeRVtoQ}}
\global\long\def\text{QuVtoQ}{\text{QuVtoQ}}
\global\long\def\text{QtoV}{\text{QtoV}}
\global\long\def\text{QtoRoV}{\text{QtoRoV}}
\global\long\def\text{QtoGeRV}{\text{QtoGeRV}}
\global\long\def\text{QtoQuV}{\text{QtoQuV}}
\global\long\def\text{QwMean}{\text{QwMean}}
\global\long\defI{I}
\global\long\defeps{eps}
\global\long\def\text{RMST}{\text{RMST}}
\global\long\def\text{RMSD}{\text{RMSD}}
\global\long\def\delta t{\delta t}
\global\long\def\oplus{\oplus}
\global\long\def\ominus{\ominus}
\global\long\def\odot{\odot}
\global\long\def\otimes{\otimes}
\global\long\def\bigoplus{\bigoplus}
\global\long\def\bigotimes{\bigotimes}
\global\long\def\gnorm#1{\norm{#1}_{\text{g}}}
\global\long\def\quat 1{\quat 1}
\global\long\def\ginprod#1#2{\left\langle #1,#2\right\rangle _{g}}
\global\long\def\glinset#1#2{\mathcal{L}^{g}(#1,#2)}
\global\long\def\text{P}{\text{P}}
\global\long\def\Phi^{g}{\Phi^{g}}
\global\long\def\gintprod#1#2{\left\langle #1,#2\right\rangle _{g}}
\global\long\defA{A}
\title{Unscented Kalman Filters for Riemannian State-Space Systems}
\author{Henrique M. T. Menegaz, João Y. Ishihara, Hugo T. M. Kussaba\thanks{H. M. T. Menegaz (henriquemenegaz@unb.br) is with the Faculdade Gama at the Universidade de Brasília (UnB), Brazil.
J. Y. Ishihara (ishihara@lara.unb.br) and Hugo T. M. Kussaba (kussaba@lara.unb.br) are with the Automation and Robotics Laboratory (LARA) at the UnB. Web-page: www.lara.unb.br.}}
\maketitle
\begin{abstract}
Unscented Kalman Filters (UKFs) have become popular in the research
community. \textit{\emph{Most UKFs work only with Euclidean }}systems,
but in many scenarios it is advantageous to consider systems with
state-variables taking values on \emph{Riemannian manifolds}. However,
we can still find some gaps in the literature's theory of UKFs for
Riemannian systems: for instance, the literature has not yet i) developed
Riemannian extensions of some fundamental concepts of the UKF theory
(e.g., extensions of $\sigma$-representation, Unscented Transformation,
Additive UKF, Augmented UKF, additive-noise system), ii) provided proofs of
some steps in their UKFs for Riemannian systems (e.g., proof of sigma
points parameterization by vectors, state correction equations, noise
statistics inclusion), and iii) established relations between their UKFs for Riemannian
systems. In this work, we attempt to develop a theory capable of filling
these gaps. Among other results, we propose Riemannian extensions
of the main concepts in the UKF theory (including closed forms), justify
all steps of the proposed UKFs, and provide a framework able to relate
UKFs for particular manifolds among themselves and with UKFs for Euclidean
spaces. Compared with UKFs for Riemannian manifolds of the literature,
the proposed filters are more consistent, formally-principled, and
general. An example of satellite attitude tracking illustrates the
proposed theory.
\end{abstract}
\section{Introduction}
\label{sec:Introduction}
When we want to know the value of some variables of a given system—e.g.,
the position and velocity of a car, the position and attitude of a
satellite, the temperature of a boiler, etc.—we can acquire data from
the system and develop a mathematical model of it. But measurements
are noisy, and models are always imperfect. Hence, to estimate the
desired variables, we often must use filters, such as Unscented Kalman
Filters (UKFs). Researchers have been applying UKFs in applications
of diverse fields: for example, in power electronics \cite{Meng2016},
aerospace \cite{Rahimi2015}, and automotive \cite{Vargas2016} systems.
These filters' success is partially explained by their good trade-off
between estimation quality and computational complexity compared with
similar techniques such as the Extended Kalman Filter (EKF) \cite{Julier2004}.
\textit{\emph{Most UKFs work only with Euclidean models (the so-called
state-space systems; cf. Section }}\ref{subsec:Additive-Unscented-Kalman}\textit{\emph{),
but sometimes modeling with }}\emph{Riemannian manifolds}\textit{\emph{
is }}better.\textit{\emph{ These manifolds can i) model more systems
(}}cf. Section \ref{subsec:Kalman-filtering-in}\textit{\emph{), ii)
provide better mathematical properties}} than Euclidean subspaces\textit{\emph{
(e.g., better metrics), and iii) be the set where measurements}} take
value from (cf. \cite{Hauberg2013,Absil2008,Pennec2006} and Section
\ref{subsec:Kalman-filtering-in}).
Although some works have introduced UKFs for Riemannian systems (e.g.,
\cite{Enayati2015,Gilitschenski2016,Lee2016,Hauberg2013}; cf. Section
\ref{subsec:Kalman-filtering-in}), we can still find some gaps in
the literature's theory for these UKFs. First, fundamental UKF concepts
still miss for Riemannian manifolds, such as $\sigma$-representation
($\sigma\text{R}$), Unscented Transformation (UT), Additive UKF, Augmented UKF,
additive-noise system, among others (cf. \cite{Menegaz2015}). Second,
some steps in UKFs for Riemannian manifolds are not formally justified,
such as when a UKF parameterizes sigma points by vectors, corrects
the predicted state estimate, or considers noise statistics (cf. \cite{Hauberg2013,Crassidis2003,Challa2016};
see Sections \ref{subsec:Kalman-filtering-in} and \ref{subsec:Relation-with-the}).
Third, we do not know how the literature's consistent UKFs for Riemannian
manifolds relate among themselves—do they follow from the same general
Riemannian UKF?—or with UKFs for Euclidean spaces—are these particular
cases of those?
In this work, by continuing the research of \cite{Hauberg2013}, we
aim to develop a formalized and systematized theory for UKFs on Riemannian
manifolds. Among other results, this theory introduces Riemannian
extensions of the main concepts in the UKF theory (including closed
forms), justifies all steps of the proposed UKFs, and provides a framework
able to relate UKFs for particular manifolds among themselves and
with UKFs for Euclidean spaces.
\subsection{Kalman filtering in Riemannian manifolds}
\label{subsec:Kalman-filtering-in}
\textit{\emph{Riemannian manifolds can model many applications; far
more than Euclidean spaces. For instance, we find i)}} special orthogonal
groups, special Euclidean groups, unit spheres (including the set
of unit quaternions), and the study quadric (the set of unit dual-quaternions)
applied to many robotics applications \cite{Bullo2004,Adorno2011c,Selig2005a,Barrau2017a,Bonnabel2008},
aerospace systems \cite{Curtis2014,Wie2008,Barrau2017a,Bonnabel2008,Crassidis2003},
bio-engineering \cite{Enayati2015,Pennec2006a}, among others; ii)
positive symmetric matrices applied to applications in image recognition,
image registration, image tracking, and surgery \cite{Pennec2006a};
iii) Grassmann and Stiefel manifolds applied to information theory
\cite{Pitaval2017}, machine learning \cite{Harandi2018}, visual
recognition \cite{Harandi2018,Hajati2017}, communication systems
\cite{Seddik2017}, and geology \cite{Chepushtanova2017}; and iv)
other Riemannian manifolds applied to quantum systems \cite{AT:12},
and special and general relativity \cite{Godinho2014}.
Some works in the literature have proposed KFs for \textit{particular}
Riemannian systems: the works \cite{Condomines2013,Condomines2014,Kim2007c,Gilitschenski2016}
and \cite{Crassidis2003} (among others) introduced EKFs and UKFs
for unit quaternions; and \cite{Lee2016,DeRuiter2014} and \cite{Markovic2016}
EKFs for special orthogonal groups. Other works have proposed KFs
for \textit{classes} of Riemannian systems: the works \cite{Barczyk2011},
\cite{Martin2009} and \cite{Bonnabel2007} introduced EKFs for Lie
groups; and \cite{Hauberg2013} a UKF for geodesically-complete Riemannian
manifolds.
Developing UKFs for Riemannian manifolds is difficult because, in
general, Riemannian manifolds lack some mathematical tools used in
most UKFs, such as multiplication and addition (cf. UKFs in \cite{Sarkka2013,Menegaz2016,Menegaz2015}).
An alternative is to use properties of an embedding Euclidean space
and afterwards perform operations to return to the working manifold.
For instance, an application on $\mbox{SS}et 3$ can use derivatives, sums,
multiplications, metrics of $\ensuremath{\mathbb{R}}^{4}$ and afterwards perform
a normalization. Many works take this embedding approach \cite{Vartiainen2014,Teixeira2009,Challa2016}.
However, this approach may i) lose the physical identification (e.g.,
an addition of unit quaternions yields a non unit quaternion, which
does not represent a rotation anymore), or ii) disregard the global
properties of the manifold leading to instability. To retain the estimates
on the working manifolds, literature UKFs use intrinsic manifold properties
(cf. \cite{Barczyk2011,Martin2009,Bonnabel2007})—meaning we do not
use properties of embedding Euclidean spaces.
In this work, we take this intrinsic approach; we combine the UKF
theory we developed in \cite{Menegaz2015} with the statistics for
Riemannian manifolds of \cite{Pennec1996} and some results of \cite{Hauberg2013}
to develop a theory of UKFs for any geodesically-complete Riemannian
manifolds.
\section{Riemannian manifolds}
\label{sec:Riemannian-manifolds}
In this section, we provide a\textit{ }\textit{\emph{general description}}
of the concepts from Riemannian Geometry used in this work and in
Appendix \ref{appendix:Riemannian-manifolds} their \textit{\emph{formal
definitions}}. This exposition is mainly based on \cite{DoCarmo1992},
and partially on \cite{Pennec2006} and \cite{Absil2008}.
A \emph{differentiable manifold} (Definition \ref{def:differentiable-manifold})
$\mathcal{N}$ (or $\mathcal{N}^{n}$) can be viewed as a set whose subsets are
identified through \emph{charts} (injective mappings) with subsets
of the $\ensuremath{\mathbb{R}}^{n}$. For every point $\man a$ on a differentiable
manifold $\mathcal{N}$, we can define the vector space of tangent vectors
at $\man a$ called \textit{tangent space} and denoted by $T_{\man a}\mathcal{N}$
(Definition \ref{def:tangent-space}).
A \emph{Riemannian manifold} $\mathcal{N}$ (Definition \ref{Definition:Riemannian-metric})
is a differentiable manifold endowed with a \emph{Riemannian metric}
(Definition \ref{Definition:Riemannian-metric}) $\inprod{\,}{\,}$
or $g$. For $\man a\in\mathcal{N}$ and $v\in T_{\man a}\mathcal{N}$,
the \emph{norm} of $v$ associated to $\man a$ is defined
by $\norm{v}_{\man a}:=\inprod{v}{v}_{\man a}^{1/2}$
\cite{Pennec2006}.
For two points $\man a$ and $\man b$ in $\mathcal{N}$ connected by
a curve $\curve:\mathbf{I}\rightarrow\mathcal{N}$, the \emph{distance} between
$\man a$ and $\man b$ is defined by, for $[a,b]\subset\mathbf{I}$,
\[
\text{dist}\left(\man a,\man b\right):=\min_{\curve}\mathbb{L}_{a}^{b}\left(\curve\right);\quad\curve(a)=\man a,\,\curve(b)=\man b,
\]
where $\mathbb{L}_{a}^{b}(\alpha)$ is the arc length (Definition
\ref{def:arc-length}) of $\alpha$ in the interval $\left[a,b\right]$.
A \emph{geodesic} \emph{ball} of center $\man a$ and radius $r$
is the set defined as
\[
\mathbb{B}(\man a,r):=\{\man x\in\mathcal{N}:\text{dist}(\man x,\man a)<r\}.
\]
Given a tangent vector $v_{0}\in T_{\alpha(t_{0})}\mathcal{N}$, $t_{0}\in\mathbf{I}$,
there exists only one parallel vector field $\mathcal{Z}urvea$ (Definition
\ref{def:vector-field}) along $\alpha$, such that $\mathcal{Z}urvea(t_{0})=v_{0}$;
$\mathcal{Z}urvea(t)$ is called the \emph{parallel transport} of $\mathcal{Z}urvea(t_{0})$
along $\alpha$.
A curve $\curve:\mathbf{I}\rightarrow\mathcal{N}$ is called a \emph{geodesic
at} $t_{0}\in\mathbf{I}$ if
\[
\frac{D}{dt}\Big(\curve'(t)\Big)=0
\]
at $t_{0}$, where $D/dt(\curve'(t))$ is the covariant derivative
of $\curve'(t)$ (Theorem \ref{thm:covariant-derivative-1}); if $\curve$
is a geodesic at $t$, for all $t\in\mathbf{I}$, we say $\curve$ is
a \emph{geodesic} \cite{DoCarmo1992}. If a curve minimizes the \emph{arc
length} between two points of the manifold, then this curve is a geodesic,
but the converse is \emph{only} valid \emph{locally}. If the definition
domain of all geodesics of $\mathcal{N}$ can be extended to $\ensuremath{\mathbb{R}}$,
then $\mathcal{N}$ is said to be \emph{geodesically-complete}. There exists
at least one geodesic connecting every two points of a geodesically-complete
manifold.
Given a point $\man a\in\mathcal{N}$, the \emph{exponential mapping} (Definition
\ref{def:exponential-map}), denoted by $\exp_{\man a}$, associates
a vector of $T_{\man a}\mathcal{N}$ to a point of $\mathcal{N}$. Geometrically,
$\exp_{\man a}(v)$ is a point of $\mathcal{N}$ obtained by going
out the length equal to $\norm{v}$, starting from $\man a$,
along a geodesic which passes through $\man a$ with velocity equal
to $v/\norm{v}$.
Assuming a geodesically-complete manifold, it is possible to follow
the geodesic $\exp_{\man a}(tv)$ from $t=0$ to $t\rightarrow\infty$.
It may happen, however, that from a particular value $t_{v}$
to $t\rightarrow\infty$, the geodesics $\exp_{\man a}(tv)$
\emph{do not minimize the arc length} between $\man a$ and $\exp_{\man a}(tv)$.
In this case, the subset $\{\exp_{\man a}(t_{v}v):v\in T_{\man a}\mathcal{N}\}\subset\mathcal{N}$
is called the \emph{cut locus} $\mathcal{C}(\man a)$ and the inverse
image $C(\man a):=\exp_{\man a}^{-1}[\mathcal{C}(\man a)]$
the \emph{tangential cut locus} \cite{Pennec2006}. The \emph{injectivity
radius} of $\mathcal{N}$ is defined as $\text{inj}(\mathcal{N}):=\inf_{\man p\in\mathcal{N}}\text{dist}(\man p,\mathcal{C}(\man p)).$
For every $\man a\in\mathcal{N}$, we can reduce the domain of $\exp_{\man a}$
to some subsets such that $\exp_{\man a}$ is a diffeomorphism.
The maximal of these subsets is called the \emph{maximal definition
domain} $\Omega(\man a)\subset T_{\man a}\mathcal{N}$; this set
is \emph{bounded} by $C(\man a)$ \cite{Pennec2006}.
The inverse mapping of $\exp_{\man a}$ is the (Riemannian) \emph{logarithm
mapping} (Definition \ref{def:exponential-map})\textit{\emph{ and}}
we denote it by either \emph{$\log_{\man a}\man b$} or $\overrightarrow{\man b\man a}$.
\section{Intrinsic Statistics on Riemannian Manifolds}
\label{sec:Intrinsic-Statistics-on}
UKFs are based on information of \emph{moments} of \emph{random vectors}
and of \emph{sample moments} of \emph{weighted sets}. To define UKFs
on Riemannian manifolds, we need extensions of these concepts.
\subsection{Statistics of random points}
\label{subsec:Statistics-of-random}
Riemannian extensions of random vectors are called \emph{(Riemannian)
random points} \cite{Pennec2006}; the set of all random points taking
values on a Riemannian manifold $\mathcal{N}$ is denoted by $\mbox{\ensuremath{\man{\Phi}}}_{\mathcal{N}}$.
Given a random point $\man{X}\in\mbox{\ensuremath{\man{\Phi}}}_{\mathcal{N}}$,
its \emph{probability density function (pdf)} is denoted by $\man{\ensuremath{\mbox{p}}}_{\man{X}}$,
and for a real-valued function $F:\mathcal{N}\rightarrow\ensuremath{\mathbb{R}}$
the \emph{expected value of $F$ relative to $\man{X}$} is
defined by
\begin{equation}
\man{\mathcal{E} }_{\man{X}}\left\{ F(\man{X})\right\} :=\int_{\mathcal{N}}F(\man{\man b})\man{\ensuremath{\mbox{p}}}_{\man{X}}(\man{\man b})d\mathcal{N}(\man{\man b}).\label{eq:expectation-of-real-valued-function}
\end{equation}
For functions taking values on manifolds, we cannot define the expected
value as in (\ref{eq:expectation-of-real-valued-function}); thus,
we define mean points following the Karcher expectation: they are
the \emph{local} minima of variances \cite{Pennec2006}.
Given a point $\man uentral\in\mathcal{N}$, the \emph{variance} $\scalar{\sigma}_{\man{X}}^{2}(\man uentral)$
is defined by $\scalar{\sigma}_{\man{X}}^{2}(\man uentral):=\man{\mathcal{E} }_{\man{X}}\{\text{dist}^{2}(\man uentral,\man{X})\}.$
If $\scalar{\sigma}_{\man{X}}^{2}(\man c)$ is finite for every point $\man uentral\in\mathcal{N}$,
then a point $\mean{\man{X}}\in\mathcal{N}$ is an \emph{expected point}
or \emph{mean} of $\man{X}$ if
\begin{equation}
\mean{\man{X}}=\arg\underset{\man c\in\mathcal{N}}{\min}\scalar{\sigma}_{\man{X}}^{2}(\man uentral).\label{eq:Riemannian-mean-definition}
\end{equation}
The set of all means of $\man{X}$ is denoted by $\mathbb{E}(\man{X})$.
A random point can have more than one mean\footnote{For a discussion about the existence and uniqueness of this expectation,
cf. Section 4.2 of \cite{Pennec2006}.}.
Let $\man{X}\in\mbox{\ensuremath{\man{\Phi}}}_{\mathcal{N}}$ be a
random point with a mean $\mean{\man{X}}\in\mathbb{E}(\man{X})$,
and consider a point $\man a\in\mathcal{N}$. If $\mean{\man{X}}\in\Omega(\man a)$,
then the \emph{$j$th (central) moment of $\man{X}$ with respect
to $\mean{\man{X}}$ at $\man a$} is defined by, for even $j$,
\begin{equation}
\man{M }_{\man{X},\mean{\man{X}}}^{\man a,j}:=\man{\mathcal{E} }_{\man{X}}\Big\{\Big[\Big(\overrightarrow{\man a\man{X}}-\logb{\man a\mean{\man{X}}}\Big)(\diamond)^{T}\Big]^{\otimes\frac{j}{2}}\Big\};\label{eq:Riemannian-central-moment}
\end{equation}
and for odd $j$,
\[
\man{M }_{\man{X},\mean{\man{X}}}^{\man a,j}:=\man{\mathcal{E} }_{\man{X}}\Big\{\Big[\Big(\overrightarrow{\man a\man{X}}-\logb{\man a\mean{\man{X}}}\Big)(\diamond)^{T}\Big]^{\otimes\frac{j-1}{2}}\otimes\Big(\overrightarrow{\man a\man{X}}-\logb{\man a\mean{\man{X}}}\Big)\Big\}.
\]
We define joint pdf {[}denoted by $\man{\ensuremath{\mbox{p}}}_{\man{X}\man Y}(\man x,\man y)${]},
joint expected moment ($\man{\mathcal{E} }_{\man{X}\man Y}\left\{ f(\man x,\man y)\right\} $)
and cross-covariance ($\man P_{\man{X}\man Y,(\mean{\man{X}},\bar{\man Y})}^{\man a\man b}$)
of two random points $\man{X}$ and $\man Y$ similarly (cf. \cite{Menegaz2016}).
The notation $\man{X}\sim(\mean{\man{X}},\man{M }_{\man{X},\mean{\man{X}}}^{\man a,2},...,\man{M }_{\man{X},\mean{\man{X}}}^{\man a,l})_{\mathcal{N}}$
stands for a Riemannian random point $\man{X}\in\man{\Phi}_{\mathcal{N}}$
with mean $\mean{\man{X}}\in\mathbb{E}(\man{X})$ and moments
$\man{M }_{\man{X},\mean{\man{X}}}^{\man a,2},...,\man{M }_{\man{X},\mean{\man{X}}}^{\man a,l}$.
The second moment ($j=2$) is called \emph{covariance} and denoted
by $\man P_{\man{X}\man{X},\mean{\man{X}}}^{\man a}:=\man{M }_{\man{X},\mean{\man{X}}}^{\man a,2}$.
If $\mathbb{E}(\man{X})=\{\mean{\man{X}}\},$ we can write $\man{M }_{\man{X}}^{\man a,j}:=\man{M }_{\man{X},\mean{\man{X}}}^{\man a,j}$
and $\man P_{\man{X}\man{X}}^{\man a}:=\man P_{\man{X}\man{X},\mean{\man{X}}}^{\man a}$,
or even $\man{M }_{\man{X}}^{j}:=\man{M }_{\man{X},\mean{\man{X}}}^{\mean{\man{X}},j}$
and $\man P_{\man{X}\man{X}}:=\man P_{\man{X}\man{X},\mean{\man{X}}}^{\mean{\man{X}}}$.
We represent statistics of \emph{Euclidean} manifolds without bold
notation. For $X\in\Phi_{\ensuremath{\mathbb{R}}^{n}}$, $X$ is symmetric if
{\small{}$\ensuremath{\mbox{p}}_{X}(\bar{X}+x)=\ensuremath{\mbox{p}}_{X}(\bar{X}-x)$} for every $x\in\mathbb{R}^{n}$.
If $X$ has a mean, then
\[
\mean{X}=\arg\underset{\myvec zenter\in\ensuremath{\mathbb{R}}^{n}}{\min}\scalar{\sigma}_{X}^{2}(\myvec zenter)=\mathcal{E} _{X}\{X\};
\]
and, for $j$ even,
\[
M _{X}^{j}:=\mathcal{E} _{X}\{[(\overrightarrow{\mean{X}X}-\logb{\mean{X}\mean{X}})(\diamond)^{T}]^{\otimes\frac{j}{2}}\}=\mathcal{E} _{X}\{[(X-\mean{X})(\diamond)^{T}]^{\otimes\frac{j}{2}}\}
\]
(similarly for $j$ odd and for sample cross-covariances).
\subsection{Statistics of weighted sets}
\label{sec:Statistics-of-weighted}
For a Riemannian manifold $\mathcal{N}$ and the natural numbers $l\geq2$
and $N\geq1$, consider the weighted set
\begin{multline*}
\man{\chi}:=\Big\{\man{\chi}_{i},w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}:\man{\chi}_{i}\in\mathcal{N};\\
j=1,...,l;\,w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}\in\ensuremath{\mathbb{R}}\Big\}_{i=1}^{N}.
\end{multline*}
The weights $w_{i}^{m}$ are associated (below) with the definition
of sample\emph{ }\textit{\emph{mean}}, $w_{i}^{c,j}$ with the $j$th
sample\emph{ }\textit{\emph{moment}}, and $w_{i}^{cc,j}$ with the $j$th
sample \textit{\emph{cross-moment}} of $\man{\chi}$.
The\emph{ sample variance }of $\man{\chi}$ with respect to a point
$\man c\in\mathcal{N}$ is defined by $s_{\mathbb{\man{\chi}}}^{2}(\man uentral):=\sum_{i=1}^{N}w_{i}^{m}\text{dist}^{2}(\man uentral,\man{\chi}_{i}).$
If the variance $s_{\mathbb{\man{\chi}}}^{2}(\man uentral)$ is
finite for every point $\man uentral\in\mathcal{N}$, then a \emph{sample}
\emph{expected point} or\emph{ sample mean} of $\man{\chi}$ is defined
by
\begin{equation}
\man{\mu}_{\man{\chi}}:=\arg\underset{\man c\in\mathcal{N}}{\min}s_{\mathbb{\man{\chi}}}^{2}(\man uentral).\label{eq:Riemannian-sample-mean-definition}
\end{equation}
The set of all sample means of $\man{\chi}$ is represented by $\text{\ensuremath{\mu}}set(\man{\chi})$.
A weighted set in the form of $\man{\chi}$ can have more than one
sample mean.
For a point $\man a\in\mathcal{N}$, if $\man{\mu}_{\man{\chi}},\man{\chi}_{1},\man{\chi}_{2},...,\man{\chi}_{N}\in\mathcal{N}-\mathcal{C}(\man a)$,
then the\emph{ $j$th sample moment of $\man{\chi}$ with respect
to $\man{\mu}_{\man{\chi}}$ at $\man a$} is defined by, for $j$ even,
\begin{equation}
\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi},\man{\mu}_{\man{\chi}}}^{\man a,j}:=\sum_{i=1}^{N}w_{i}^{c,j}\Big[\big(\logb{\man a\man{\chi}_{i}}-\logb{\man a\man{\mu}_{\man{\chi}}}\big)(\diamond)^{T}\Big]^{\otimes\frac{j}{2}};\label{eq:Riemannian-sample-moment-definition}
\end{equation}
and for $j$ odd,
\[
\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi},\man{\mu}_{\man{\chi}}}^{\man a,j}:=\sum_{i=1}^{N}w_{i}^{c,j}\Big[\big(\logb{\man a\man{\chi}_{i}}-\logb{\man a\man{\mu}_{\man{\chi}}}\big)(\diamond)^{T}\Big]^{\otimes\frac{j-1}{2}}\otimes\big(\logb{\man a\man{\chi}_{i}}-\logb{\man a\man{\mu}_{\man{\chi}}}\big).
\]
The sample moment ($j=2$) is called \emph{sample covariance} and
denoted by $\man{\Sigma}_{\man{\chi}\man{\chi},\man{\mu}_{\man{\chi}}}^{\man a}:=\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi},\man{\mu}_{\man{\chi}}}^{\man a,2}$.
If $\text{\ensuremath{\mu}}set\left(\man{\chi}\right)=\{\man{\mu}_{\man{\chi}}\},$
we can write $\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}}^{\man a,j}:=\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi},\man{\mu}_{\man{\chi}}}^{\man a,j}$
and $\man{\Sigma}_{\man{\chi}\man{\chi}}^{\man a}:=\man{\Sigma}_{\man{\chi}\man{\chi},\man{\mu}_{\man{\chi}}}^{\man a}$;
or even, $\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}}^{j}:=\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi},\man{\mu}_{\man{\chi}}}^{\man{\mu}_{\man{\chi}},j}$ and
$\man{\Sigma}_{\man{\chi}\man{\chi}}:=\man{\Sigma}_{\man{\chi}\man{\chi},\man{\mu}_{\man{\chi}}}^{\man{\mu}_{\man{\chi}}}$.
In addition, for i) the Riemannian manifold $\mathcal{R}$, ii) a function
$f:\mathcal{N}\rightarrow\mathcal{R}$, iii) the weighted set
\[
\mathbf{\man{\gamma}}:=\Big\{\man{\gamma}_{i},w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}:\man{\gamma}_{i}=f(\man{\chi}_{i});\,j=1,...,l\Big\}_{i=1}^{N},
\]
with a mean $\man{\mu}_{\man{\gamma}}$, and iv) the point $\man b\in\mathcal{R}$,
if $\man{\mu}_{\man{\gamma}},\man{\gamma}_{1},\man{\gamma}_{2},...,\man{\gamma}_{N}\in\mathcal{R}-\mathcal{C}(\man b)$,
then the \emph{$j$th} \emph{cross-moment of $\man{\chi}$ and $\man{\gamma}$
with respect to $(\man{\mu}_{\man{\chi}},\man{\mu}_{\man{\gamma}})$
at $(\man a,\man b)$} is defined by, for $j$ even,
\[
\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}\man{\gamma},\man{\mu}_{\man{\chi}}\man{\mu}_{\man{\gamma}}}^{j,\man a\man b}:=\sum_{i=1}^{N}w_{i}^{cc,j}\Big[\big(\logb{\man a\man{\chi}_{i}}-\logb{\man a\man{\mu}_{\man{\chi}}}\big)\big(\logb{\man b\man{\gamma}_{i}}-\logb{\man b\man{\mu}_{\man{\gamma}}}\big)^{T}\Big]^{\otimes\frac{j}{2}};
\]
and for $j$ odd,
\begin{multline*}
\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}\man{\gamma},\man{\mu}_{\man{\chi}}\man{\mu}_{\man{\gamma}}}^{j,\man a\man b}:=\sum_{i=1}^{N}w_{i}^{cc,j}\Big[\big(\logb{\man a\man{\chi}_{i}}-\logb{\man a\man{\mu}_{\man{\chi}}}\big)\\
\times\big(\logb{\man b\man{\gamma}_{i}}-\logb{\man b\man{\mu}_{\man{\gamma}}}\big)^{T}\Big]^{\otimes\frac{j-1}{2}}\otimes\big(\logb{\man a\man{\chi}_{i}}-\logb{\man a\man{\mu}_{\man{\chi}}}\big).
\end{multline*}
The second sample cross-moment ($j=2$) is called \emph{sample cross-covariance}
and denoted by $\man{\Sigma}_{\man{\chi}\man{\gamma},\man{\mu}_{\man{\chi}}\man{\mu}_{\man{\gamma}}}^{\man a\man b}:=\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}\man{\gamma},\man{\mu}_{\man{\chi}}\man{\mu}_{\man{\gamma}}}^{2,\man a\man b}$.
If $\text{\ensuremath{\mu}}set\left(\mathbf{\man{\chi}}\right)=\{\man{\mu}_{\man{\chi}}\}$
and $\text{\ensuremath{\mu}}set\left(\mathbf{\man{\gamma}}\right)=\{\man{\mu}_{\man{\gamma}}\},$
we can write $\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}\man{\gamma}}^{j,\man a\man b}:=\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}\man{\gamma},\man{\mu}_{\man{\chi}}\man{\mu}_{\man{\gamma}}}^{j,\man a\man b}$
and $\man{\Sigma}_{\man{\chi}\man{\gamma}}^{\man a\man b}:=\man{\Sigma}_{\man{\chi}\man{\gamma},\man{\mu}_{\man{\chi}}\man{\mu}_{\man{\gamma}}}^{\man a\man b}$;
or even, $\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}\man{\gamma}}^{j}:=\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}\man{\gamma}}^{j,\man{\mu}_{\man{\chi}}\man{\mu}_{\man{\gamma}}}$ and
$\man{\Sigma}_{\man{\chi}\man{\gamma}}:=\man{\Sigma}_{\man{\chi}\man{\gamma}}^{\man{\mu}_{\man{\chi}}\man{\mu}_{\man{\gamma}}}$.
We represent \emph{Euclidean} sets sample statistics without bold
notation. For a set $\chi$ with points $\chi_{i}\in\ensuremath{\mathbb{R}}^{n}$,
we have
\[
\text{\ensuremath{\mu}}_{\chi}=\arg\underset{\myvec zenter\in\ensuremath{\mathbb{R}}^{n}}{\min}s_{\mathbb{\chi}}^{2}(\myvec zenter)=\sum_{i=1}^{N}w_{i}^{m}\chi_{i};
\]
and, for $j$ even, $\mbox{\ensuremath{\mathcal{M}}} _{\chi}^{j}=\sum_{i=1}^{N}w_{i}^{c,j}[(\chi_{i}-\mu_{\chi})(\diamond)^{T}]^{\otimes\frac{j}{2}}$
(similarly for $j$ odd and for sample cross-moments).
\section{Unscented Kalman Filters}
\label{subsec:Additive-Unscented-Kalman}
There are two main concepts required to define UKFs, namely: $\sigma\text{R}$s
and $\text{UT}$s \cite{Menegaz2015}. Broadly, i) a $\sigma\text{R}$ is a set of
weighted points (the sigma points) approximating a random vector,
and ii) a UT is a function mapping two functionally related random
vectors to two sets that approximate their joint pdf.
For the natural numbers $l\geq2$ and $N\geq1$, consider i) a
function $f:\ensuremath{\mathbb{R}}^{n}\rightarrow\ensuremath{\mathbb{R}}^{\eta}$; ii) the random
vectors $X\sim(\mean{X},M _{X}^{2},...,M _{X}^{l})_{\ensuremath{\mathbb{R}}^{n}}$
and $Y:=f(X)\sim(\mean{Y},M _{Y}^{2},...,M _{Y}^{l})_{\ensuremath{\mathbb{R}}^{\eta}}$;
and iii) the sets\footnote{Compared with \cite{Menegaz2015}, here we consider \emph{simpler}
sets. With this consideration, we have a clearer text and \emph{do
not} \emph{lose generality} for the results relative to the UKFs.}
\begin{multline*}
\chi:=\Big\{\chi_{i},w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}:\chi_{i}\in\ensuremath{\mathbb{R}}^{n};\\
j=1,...,l;\,w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}\in\ensuremath{\mathbb{R}}\Big\}_{i=1}^{N};\text{ and}
\end{multline*}
\[
\gamma:=\Big\{\gamma_{i},w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}:\gamma_{i}=f(\chi_{i});\,j=1,...,l\Big\}_{i=1}^{N}.
\]
\begin{defn}[$\sigma$R. Definition 1 of \cite{Menegaz2015}]
\label{def:sigma_representacao_ext1} The set $\chi$ is an $l$\emph{th
order} $N$ \emph{points} $\sigma$R\emph{($l$th$N\sigma$R)
of} $X$ if, for every $j=1,\ldots,l$:
\begin{align}
w_{i}^{m} & \neq0,\,w_{i}^{c,j}\neq0,w_{i}^{cc,j}\neq0,\quad i=1,\ldots,N;\label{eq:sigma-rep-def-weights-condition}\\
\text{\ensuremath{\mu}}_{\chi} & =\mean{X};\label{eq:sigma-rep-def-mean-condition}\\
\mbox{\ensuremath{\mathcal{M}}} _{\chi}^{j} & =M _{X}^{j}.\label{eq:sigma-rep-def-moments-condition}
\end{align}
\end{defn}
\begin{defn}[UT. Definition 2 of \cite{Menegaz2015}]
\label{def:unscented-transform} If $\text{\ensuremath{\mu}}_{\chi}=\mean{X}$
and $\mbox{\ensuremath{\mathcal{M}}} _{\chi}^{j}=M _{X}^{j}$ for every $j=2,\ldots,l$;
then the $l$\emph{th order UT ($l$UT)} is defined by
\begin{multline*}
l\mbox{UT}:\big(f,\mean{X},M _{X}^{2},...,M _{X}^{l}\big)\mapsto\\
\big(\text{\ensuremath{\mu}}_{\gamma},\mbox{\ensuremath{\mathcal{M}}} _{\gamma}^{2},...,\mbox{\ensuremath{\mathcal{M}}} _{\gamma}^{l},\mbox{\ensuremath{\mathcal{M}}} _{\chi\gamma}^{2},...,\mbox{\ensuremath{\mathcal{M}}} _{\chi\gamma}^{l}\big).
\end{multline*}
$\chi$ is called the \emph{independent set} of an $l\mbox{UT}$,
and $\gamma$ its \emph{dependent} set.
\end{defn}
Every $l$th$N\sigma$R is an independent set of an $l\mbox{UT}$.
When calling an $l$th$N\sigma$R of $X$ or an $l\text{UT}$, the reference
to the $l$th order can be omitted if $l=2$. Also, the reference
to $N$ point and/or to $X$ can be omitted in case they are obvious
from the context or irrelevant to a discussion.
We can apply UTs in KF prediction-correction frameworks to form UKFs.
UKFs estimate the state of systems described either in the additive
form
\begin{equation}
\vector x_{k}=f_{k}\left(\vector x_{k-1}\right)+\varpi_{k},\,\vector y_{k}=h_{k}\left(\vector x_{k}\right)+\vartheta_{k};\label{eq:additive-system}
\end{equation}
or, more generally, in the form
\begin{equation}
\vector x_{k}=f_{k}\left(\vector x_{k-1},\varpi_{k}\right),\,\vector y_{k}=h_{k}\left(\vector x_{k},\vartheta_{k}\right),\label{eq:general-system}
\end{equation}
where $k$ is the time step; $\vector x_{k}$ $\in\Phi^{n_{\state}}$
is the internal state; $\vector y_{k}\in\Phi^{n_{\meas}}$ is the measured
output; and $\varpi_{k}\in\Phi^{n_{\text{P}noise}}$ and $\vartheta_{k}\in\Phi^{n_{\mnoise}}$
are the process and measurement noises respectively; the noise terms
$\varpi_{k}$ and $\vartheta_{k}$ are assumed to be uncorrelated.
In \cite{Menegaz2015}, we developed consistent UKFs for these systems:
the \emph{Additive UKF} (\emph{AdUKF},
Algorithm 6 of \cite{Menegaz2016}; see also \cite{Menegaz2015})
for (\ref{eq:additive-system}); and the \emph{Augmented UKF} (\emph{AuUKF},
Algorithm 7 of \cite{Menegaz2016}; see also \cite{Menegaz2015})
for (\ref{eq:general-system}). But how could we develop similar UKFs
when $\vector x_{k}$, $\vector y_{k}$, $\varpi_{k}$ and $\vartheta_{k}$
are Riemannian random points? In the next section, we begin a theory
towards this goal.
\section{Riemannian $\sigma$-representations}
\label{sec:Riemannian-sigma-representations}
In this section, first, we define \emph{Riemannian $\sigma$-representation}s
($\text{Ri}\sigma\text{R}$). They extend $\sigma\text{R}$s to Riemannian manifolds: $\sigma\text{R}$s approximate
random vectors, and $\text{Ri}\sigma\text{R}$s approximate Riemannian random points.
Then, we show a way of extending closed forms of $\sigma\text{R}$s to $\text{Ri}\sigma\text{R}$s.
Afterwards, we introduce results relative to the minimum number of
sigma points of an $\text{Ri}\sigma\text{R}$. At last, we introduce some particular
forms of $\text{Ri}\sigma\text{R}$s.
From now on, we make the following assumptions—we explain their implications
in Section \ref{subsec:Riemannian-Unscented-Filters}—:
\begin{enumerate}
\item \label{enu:assumption1}all Riemannian manifolds are \emph{geodesically-complete};
\item \label{enu:assumption2}all Riemannian exponential mappings are defined
with their domain allowing them to \emph{realize diffeomorphisms;}
\item \label{enu:assumption6}every set of weighted points belonging to
a Riemannian manifold admits \emph{one, and only one, Riemannian sample
mean.}
\end{enumerate}
For the point $\man a\in\mathcal{N}$ and the natural numbers $l\geq2$
and $N\geq1$, consider i) a random point $\man{X}\sim(\mean{\man{X}},\man{M }_{\man{X},\mean{\man{X}}}^{\man a,2},...,\man{M }_{\man{X},\mean{\man{X}}}^{\man a,l})_{\mathcal{N}^{n}}$
and ii) a weighted set $\man{\chi}:=\{\man{\chi}_{i},w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}|\man{\chi}_{i}\in\mathcal{N}\}{}_{i=1}^{N}$
with sample mean $\man{\text{\ensuremath{\mu}}}_{\man{\chi}}$ and sample moments $\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}}^{j}$,
$j=2$, ..., $l$.
\begin{defn}[\emph{Ri$\sigma\text{R}$. Definition 9.1 of \cite{Menegaz2016}}]
\label{def:Consider-a-Riemannian}The set $\man{\chi}$ is a \emph{Riemannian}
$l$\emph{th order} $N$ \emph{points} $\sigma$-\emph{representation
(Ri$l$th$N\sigma$R) of} $\man{X}$ if, for every $j=1,\ldots,l$:
\begin{align}
w_{i}^{m} & \neq0,\,w_{i}^{c,j}\neq0,w_{i}^{cc,j}\neq0,\quad i=1,\ldots,N;\label{eq:Riemannian-sigma-rep-definition-condition}\\
\man{\text{\ensuremath{\mu}}}_{\man{\chi}} & =\mean{\man{X}};\label{eq:Riemannian-sigma-rep-definition-mean-condition}\\
\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}}^{j} & =\man{M }_{\man{X}}^{j},\quad j=2,3,\ldots,l;\label{eq:Riemannian-sigma-rep-definition-moments-condition}
\end{align}
Moreover, assume $\man{\chi}$ is an Ri$l$th$N\sigma$R of $\man{X}$,
then:
\begin{itemize}[labelsep=0.1cm,leftmargin=0.30cm]
\item $\chi$ is \emph{normalized} if, for every $j=1,2,\ldots,l$:
\[
\sum_{i=1}^{N}w_{i}^{m}=\sum_{i=1}^{N}w_{i}^{c,j}=\sum_{i=1}^{N}w_{i}^{cc,j}=1.
\]
\item $\chi$ is \emph{homogeneous} if, for every $j=1,2,\ldots,l$, the
following equations are satisfied: for $N$ odd and every $i=1,...,N-1$:
\begin{equation}
w_{1}^{m}=w_{i}^{m},\,w_{1}^{c,j}=w_{i}^{c,j},\,w_{1}^{cc,j}=w_{i}^{cc,j};\label{eq:Riemannian-sigma-rep-def-homogeneous-odd}
\end{equation}
or, for $N$ even and every $i=1,...,N$:
\begin{equation}
w_{1}^{m}=w_{i}^{m},\,w_{1}^{c,j}=w_{i}^{c,j},\,w_{1}^{cc,j}=w_{i}^{cc,j}.\label{eq:Riemannian-sigma-rep-def-homogeneous-even}
\end{equation}
\item $\man{\chi}$ is \emph{symmetric (with respect to $\man{\chi}_{N}$,
without loss of generality)} if
\begin{multline}
\logb{\man{\text{\ensuremath{\mu}}}_{\man{\chi}}\man{\chi}_{i}}-\logb{\man{\text{\ensuremath{\mu}}}_{\man{\chi}}\man{\chi}_{N}}=-\Big(\logb{\man{\text{\ensuremath{\mu}}}_{\man{\chi}}\man{\chi}_{i+\text{int}\left(\frac{N}{2}\right)}}-\logb{\man{\text{\ensuremath{\mu}}}_{\man{\chi}}\man{\chi}_{N}}\Big),\\
w_{i}^{m}=w_{i+\text{int}\left(\frac{N}{2}\right)}^{m},\,w_{i}^{c,j}=w_{i+\text{int}\left(\frac{N}{2}\right)}^{c,j},\,w_{i}^{cc,j}=w_{i+\text{int}\left(\frac{N}{2}\right)}^{cc,j},\label{eq:Riemannian-sigma-rep-def-symmetric}
\end{multline}
for every $j=1,2,\ldots,l$ and $i=1,...,\text{int}(N/2)$, where
$\text{int}(N/2)$ stands for greatest integer less than or equal
to $N/2$.
\end{itemize}
\end{defn}
When calling an Ri$l$th$N\sigma$R of $\man{X}$, the reference
to the $l$th order can be omitted if $l=2$. Also, the reference
to $N$ points or to $\man{X}$ can be omitted if they are obvious
from the context or irrelevant to a discussion.
Ri$l$th$N\sigma$Rs are generalizations of $l$th$N\sigma$Rs;
every $l$th$N\sigma$R is an Ri$l$th$N\sigma$R, and every
Ri$l$th$N\sigma$R with Euclidean points is an $l$th$N\sigma$R.
This follows directly from the last paragraphs of Sections \ref{subsec:Statistics-of-random}
and \ref{sec:Statistics-of-weighted}.
Finding closed forms for $\text{Ri}\sigma\text{R}$s may be troublesome, but the next
theorem provides a way of obtaining them from closed forms of $\sigma\text{R}$s—the
reader will find several closed forms of $\sigma\text{R}$s in \cite{Menegaz2015,Menegaz2016,Sarkka2013}.
\begin{thm}[\emph{Theorem 9.1 of \cite{Menegaz2016}}]
\label{thm:Euclidean-to-Riemannian-sigma-rep}Suppose that, for every
$i=1,\ldots,N$,
\begin{enumerate}
\item $w_{i}^{m}>0$,
\item $\Omega(\man{\mean{X}})$ is convex, and
\item $\man{\chi}_{i}\in\mathbb{B}(\man{\mean{X}},r)\cap\mathcal{C}(\man{\mean{X}})$
\end{enumerate}
where $0<r\le\frac{1}{2}\min\{\text{inj}(\mathcal{N}),\pi/\sqrt{\kappa}\}$ and
$\kappa$ is an upper bound of the sectional curvatures of $\mathcal{N}$.
Then \textup{$\man{\chi}$} is a normalized Ri$l$th$N\sigma$R
of $\man{X}$ if, and only if,
\[
\chi:=\big(\log_{\man{\mean{X}}}\man{\chi}_{i},w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}\big)_{i=1}^{N}
\]
is a normalized $l$th$N\sigma$R of the random vector
\[
X\sim\big([0]_{n\times1},\man{M }_{\man{X}}^{2},\ldots,\man{M }_{\man{X}}^{l}\big)_{T_{\man{\mean{X}}}\mathcal{N}}.
\]
Moreover, the following statements are true:
\begin{enumerate}
\item \label{enu:theo:euclidean-to-Riemannian-sigma-rep-homoegeneous}$\man{\chi}$
is homogeneous if, and only if, $\chi$ is homogeneous;
\item \label{enu:theo:euclidean-to-Riemannian-sigma-rep-symmetric}$\man{\chi}$
is symmetric if, and only if, $\chi$ is symmetric.
\end{enumerate}
\end{thm}
The proof of Theorem \ref{thm:Euclidean-to-Riemannian-sigma-rep}
is given in Appendix \ref{proof:theoremRiSRs}; for conditions to
assure the convexity of $\Omega(\man{\mean{X}})$, see \cite{FGR:15}
and references therein.
With this theorem, we can extend some results from $l$th$N\sigma$Rs
to Ri$l$th$N\sigma$Rs, such as the minimum number of sigma points
of an Ri$l$th$N\sigma$R.
\begin{cor}[Corollary 9.1 of \cite{Menegaz2016}]
\label{cor:Riemannian-sr-minimum-numbers}Let i) $\man{\chi}$ be
a normalized Ri$l$th$N\sigma$R of $\man{X}$ with $w_{i}^{m}>0$
for every $i=1,\ldots,N$; and ii) the rank of the covariance $\man{P_{XX}}$
be $r\leq n$. Then the following statements are true:
\begin{enumerate}
\item \label{enu:Minimum number of sigma points1-2}$N\geq r+1$. If
$N=r+1$, then $\man{\chi}$ is called a \emph{minimum} Ri$l$th$N\sigma$R
of $\man{X}$.
\item \label{enu:Minimum number of sigma points1-1-1}If $\man{\chi}$ is
symmetric, then $N\geq2r$. If $\man{\chi}$ is symmetric and $N=2r$,
then $\man{\chi}$ is called a \emph{minimum symmetric} Ri$l$th$N\sigma$R
of $\man{X}$.
\end{enumerate}
Moreover, consider the set $\chi:=\big\{\logb{\mean{\man{X}}\man{\chi}_{i}},w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}\big\}_{i=1}^{N}$
and the random vector $X\sim\big([0]_{n\times1},\man{P}_{\man{X}\man{X}}\big)_{T_{\mean{\man{X}}}\mathcal{N}}.$
Then the following statements are true:
\begin{itemize}
\item If $\chi$ is a (normalized) homogeneous minimum symmetric $\sigma\text{R}$
of $X$ (\emph{HoMiSy$\sigma$R}, Corollary 3 of \cite{Menegaz2015}),
then $\man{\chi}$ is also minimum and symmetric and is called a \emph{Riemannian
(normalized) homogeneous minimum symmetric $\sigma$-representation
of $\man{X}$.}
\item If $\chi$ is a \emph{Rho Minimum $\sigma$R} of $X$ (it is
described in the 6th row of Table I of \cite{Menegaz2015} and referred
to there as the ``Minimum set of {[}12{]}''), then $\man{\chi}$ is also
minimum, and is called a \emph{Riemannian Rho Minimum $\sigma$-representation
(RiRhoMi$\sigma$R) of $\man{X}$}.
\item If $\chi$ is a Minimum $\sigma$R of $X$ (Theorem 3 of \cite{Menegaz2015}),
then $\man{\chi}$ is also minimum, and is called a \emph{Riemannian
Minimum $\sigma$-representation (RiMi$\sigma$R) of $\man{X}$}.
\end{itemize}
\end{cor}
The proof of Corollary \ref{cor:Riemannian-sr-minimum-numbers} is
given in Appendix \ref{proof:Corollary_particularRiSRs}.
With Theorem \ref{thm:Euclidean-to-Riemannian-sigma-rep} and Corollary
\ref{cor:Riemannian-sr-minimum-numbers}, we can find an $\text{Ri}\sigma\text{R}$
($w_{i}^{m}>0$ for every $i=1,\ldots,N$) by first finding
a normalized $\sigma\text{R}$ in the tangent space of the considered manifold;
each normalized $\sigma\text{R}$s (cf. \cite{Menegaz2015} and \cite{Menegaz2016})
have their associated $\text{Ri}\sigma\text{R}$s (cf. Corollary \ref{cor:Riemannian-sr-minimum-numbers}).
For instance, suppose we want to calculate the normalized $\mbox{RiMi\ensuremath{\sigma}R}$
of $\man{X}\in\man{\Phi}_{\mathcal{N}}$ (Corollary \ref{cor:Riemannian-sr-minimum-numbers});
that is, we want\footnote{For a set $\man{\xi}:=\{\man{\xi}_{i},w_{i}^{m,j},w_{i}^{c,j},w_{i}^{cc,j}\}$,
if $w_{i}^{m,j}=w_{i}^{c,j}=w_{i}^{cc,j}$ for every
$j=1,...,l$; then we write $w_{i}:=w_{i}^{m,j}$ and
$\{\man{\xi}_{i},w_{i}\}=\man{\xi}$.}
\[
\mbox{\ensuremath{\man{\chi}}}=\left\{ \man{\chi}_{i},w_{i}\right\} _{i=1}^{n_{\state}+1}=\mbox{RiMi\ensuremath{\sigma}R}\Big(\est{\man{\vector x}}_{k-1|k-1},\est{\man{P}}_{\man{\vector x\vector x}}^{k-1|k-1}\Big).
\]
We can compute the $\mbox{Mi\ensuremath{\sigma}R}$ (Theorem 3 of
\cite{Menegaz2015})
\[
\chi=\big\{\chi_{i},w_{i}\big\}_{i=1}^{n_{\state}+1}:=\mbox{Mi\ensuremath{\sigma}R}\Big([0]_{n_{\state}\times1},\est{\man{P}}_{\man{\vector x\vector x}}^{k-1|k-1}\Big),
\]
and then, from Theorem \ref{thm:Euclidean-to-Riemannian-sigma-rep},
we would have
\[
\mbox{\ensuremath{\man{\chi}}}=\big\{\exp_{\est{\man{\vector x}}_{k-1|k-1}}\chi_{i},w_{i}\big\}_{i=1}^{n_{\state}+1}.
\]
The work \cite{Hauberg2013} introduced this technique {[}cf. (11)
to (17) therein{]}, and here, with Theorem \ref{thm:Euclidean-to-Riemannian-sigma-rep}
and Corollary \ref{cor:Riemannian-sr-minimum-numbers}, we provide
its formal justification and required assumptions.
\section{Riemannian Unscented Transformations}
\label{sec:Riemannian-Unscented-Transformat}
Essentially, a UT is an approximation of the joint pdf of two functionally-related
random vectors by two weighted sets. For a Riemannian extension of
the UT, we develop likewise.
For the natural numbers $l\geq2$ and $N\geq1$, consider i) a
function $f:\mathcal{N}\rightarrow\mathcal{R}$, ii) the random points $\man{X}\sim(\mean{\man{X}},\man{M }_{\man{X}}^{2},...,\man{M }_{\man{X}}^{l}){}_{\mathcal{N}^{n}}$
and $\man{Y}:=f(\man{X})\sim(\mean{\man{Y}},\man{M }_{\man{Y}}^{2},...,M _{\man{Y}}^{l})_{\mathcal{R}^{\eta}}$,
and iii) the sets
\begin{multline*}
\man{\chi}:=\Big\{\man{\chi}_{i},w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}:\man{\chi}_{i}\in\mathcal{N};\\
j=1,...,l;\,w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}\neq0\Big\}_{i=1}^{N}\text{ and}
\end{multline*}
\[
\man{\gamma}:=\Big\{\man{\gamma}_{i},w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}:\man{\gamma}_{i}=f(\man{\chi}_{i});\,j=1,...,l\Big\}_{i=1}^{N}.
\]
\begin{defn}[\emph{Ri$l$UT; Definition 9.2 of \cite{Menegaz2016}}]
\label{def:Riemannian-Unscented-Transformation} If $\man{\text{\ensuremath{\mu}}}_{\man{\chi}}=\mean{\man{X}}$
and $\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}}^{j}=\man{M }_{\man{X}}^{j}$
for every $j=2,\ldots,l$; then the $l$\emph{th order Riemannian
Unscented Transformation (Ri$l$UT)} is defined by
\begin{multline*}
\mbox{Ri}l\mbox{UT}:\big(f,\mean{\man{X}},\man{M }_{\man{X}}^{2},...,\man{M }_{\man{X}}^{l}\big)\mapsto\\
(\man{\text{\ensuremath{\mu}}}_{\man{\gamma}},\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\gamma}}^{2},...,\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\gamma}}^{l},\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}\man{\gamma}}^{2},...,\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}\man{\gamma}}^{l}).
\end{multline*}
$\man{\chi}$ is called the \emph{independent set} of $\mbox{Ri}l\mbox{UT}$,
and $\man{\gamma}$ its \emph{dependent} set.
\end{defn}
Every Ri$l$th$N\sigma$R\emph{ }is an independent set of an $\mbox{Ri}l\mbox{UT}$.
If $l=2$ or $l$ is irrelevant for a given discussion, we can omit
the reference to $l$ and write $\text{RiUT}:=\mbox{Ri}2\mbox{UT}$.
$\mbox{Ri}l\mbox{UT}$s are generalizations of $l\text{UT}$s; every $l\text{UT}$
is an $\mbox{Ri}l\mbox{UT}$, and every $\mbox{Ri}l\mbox{UT}$ with
Euclidean points is an $l\text{UT}$. This follows directly from the last
paragraph of Sections \ref{subsec:Statistics-of-random} and of \ref{sec:Statistics-of-weighted}.
An Ri$l$UT can be viewed as a mapping from $2$ random points $\man{X}\in\Phi_{\mathcal{N}}$
and $\man{Y}:=\man f(\man X)$ to two Riemannian sets $\man{\chi}$
and $\man{\gamma}$ acting as a \textit{discrete approximation} of
the \textit{\emph{joint}} pdf of $(\man{X},\man{Y})$. For instance,
an Ri$2$UT can be viewed as the following approximation (this interpretation
is inspired on \cite{Sarkka2007})
\[
\left(\begin{array}{c}
\man{X}\\
\man{Y}
\end{array}\right)\approx\left(\begin{array}{c}
\tilde{\man{X}}\\
\tilde{\man{Y}}
\end{array}\right)\sim\left(\left(\begin{array}{c}
\man{\text{\ensuremath{\mu}}}_{\man{\chi}}\\
\man{\text{\ensuremath{\mu}}}_{\man{\gamma}}
\end{array}\right),\left(\begin{array}{cc}
\man{\Sigma}_{\man{\chi}\man{\chi}} & \man{\Sigma}_{\man{\chi}\man{\gamma}}\\
\man{\Sigma}_{\man{\chi}\man{\gamma}}^{T} & \man{\Sigma}_{\man{\gamma}\man{\gamma}}
\end{array}\right)\right).
\]
\section{Riemannian Unscented Kalman Filters}
\label{sec:Riemannian-Unscented-Filters}
At this point, we still need to develop i) Riemannian systems; and
ii) state correction equations. First, UKFs estimate systems with
random vectors {[}cf. (\ref{eq:additive-system}) and (\ref{eq:general-system}){]};
thus, for Riemannian UKFs (RiUKFs), we define \textit{systems with
Riemannian random points} (Section \ref{subsec:Riemannian-Systems}).
Second, three steps compose UKFs: 1) state prediction, 2) measurement
prediction, and 3) state correction (cf.\cite{Menegaz2015} and \cite{Menegaz2016}).
The Riemannian extensions of steps 1) and 2) are trivial: since UTs
compose steps 1 and 2, we extend them with \textit{$\text{RiUT}$s}. But
we still must extend step 3 (Section \ref{subsec:Correction-equations}).
In possession of these two results, we define RiUKFs and provide a
list of some particular forms (Section \ref{subsec:Riemannian-Unscented-Filters}).
\subsection{Riemannian Dynamics Systems}
\label{subsec:Riemannian-Systems}
Up to this point, we have focused on results regarding points on manifolds.
In this section, we focus on results for dynamic state-space systems
on Riemannian manifolds.
The \emph{Riemannian (stochastic discrete-time dynamic) system} in
its \emph{general} form is given by the following pair of equations:
\begin{equation}
\man{\vector x}_{k}=f_{k}\left(\man{\vector x}_{k-1},\man{\varpi}_{k}\right),\,\man{\vector y}_{k}=h_{k}\left(\man{\vector x}_{k},\man{\vartheta}_{k}\right)\label{eq:Riemannian-general-system}
\end{equation}
where $k$ is the time step; $\man{\vector x}_{k}\in\man{\Phi}_{\mathcal{N}_{\man{\vector x}}^{n_{\state}}}$
the internal state; $\man{\vector y}_{k}\in\man{\Phi}_{\mathcal{N}_{\man{\vector y}}^{n_{\meas}}}$
is the measured output; $\man{\varpi}_{k}\in\man{\Phi}_{\mathcal{N}_{\man{\varpi}}^{n_{\text{P}noise}}}$
the process noise; and $\man{\vartheta}_{k}\in\man{\Phi}_{\mathcal{N}_{\man{\vartheta}}^{n_{\mnoise}}}$
the measurement noise. The noises $\man{\varpi}_{k}$ and $\man{\vartheta}_{k}$
are uncorrelated, $\man{\varpi}_{k}$ has mean $\mean{\man{\varpi}}_{k}$
and covariance $\boldsymbol{Q}_{k}$, and $\man{\vartheta}_{k}$ mean
$\mean{\man{\vartheta}}_{k}$ and covariance $\boldsymbol{R}_{k}$.
We also want to consider an\emph{ additive variant of (\ref{eq:Riemannian-general-system})}
because filters for this class of systems are computationally cheaper.
This additive variant of (\ref{eq:Riemannian-general-system}) would
have i) $\man{\varpi}_{k}$ acting on $f_{k}(\man{\vector x}_{k-1})$
by ``adding'' its mean to the mean of $f_{k}(\man{\vector x}_{k-1})$
and its covariance to the covariance of $f_{k}(\man{\vector x}_{k-1})$,
and ii) $\man{\vartheta}_{k}$ acting similarly on $h_{k}(\man{\vector x}_{k})$.
We can work with sums in tangent spaces using the following proposition.
\begin{prop}[Proposition 8.2 of \cite{Menegaz2016}]
\label{prop:Addition of Riemannian random points}Consider a Riemannian
point $\man{X}\sim(\bar{\man{X}},\man{P}_{\man{X}\man{X}})_{\mathcal{N}^{n}}$
and a random vector $p\sim(\bar{p},P_{pp})_{T_{\bar{\man a}}\mathcal{N}^{n}}$.
If $\Omega(\man{\mean{X}})$ is convex, and $\bar{p}\in\mathbb{B}(\man{\mean{X}},r)\cap\mathcal{C}(\man{\mean{X}})$
where $0<r\le\frac{1}{2}\min\{\text{inj}(\mathcal{N}),\pi/\sqrt{\kappa}\}$ and
$\kappa$ is an upper bound of the sectional curvatures of $\mathcal{N}$;
then
\begin{equation}
\exp_{\mean{\man{X}}}\big[\logb{\mean{\man{X}}\man{X}}+p\big]\sim\big(\exp_{\bar{\man{X}}}\bar{p},\man{P}_{\man{X}\man{X}}+P_{pp}\big)_{\mathcal{N}_{\man{\vector x}}}.\label{eq:Riemannian-addition}
\end{equation}
\end{prop}
The proof of Proposition \ref{prop:Addition of Riemannian random points}
is in Appendix \ref{proof:proposition1}.
Consider this proposition twice: one for the process function with
$\man a=f_{k}(\man{\vector x}_{k-1})$ and $p=\varpi_{k}$ , and
the other for the measurement function with $\man a=h_{k}(\man{\vector x}_{k})$
and $p=\vartheta_{k}$. Using this reasoning, we define the \emph{additive}
\emph{Riemannian (stochastic discrete-time dynamic) system} as follows
\{equation (9.20) of \cite{Menegaz2016}\}:
\begin{align}
\man{\vector x}_{k} & =\mbox{\ensuremath{\exp}}_{\overline{f_{k}\big(\man{\vector x}_{k-1}\big)}}\left[\log_{\overline{f_{k}\big(\man{\vector x}_{k-1}\big)}}f_{k}\big(\man{\vector x}_{k-1}\big)+\varpi_{k}\right]\nonumber \\
\man{\vector y}_{k} & =\mbox{\ensuremath{\exp}}_{\overline{h_{k}\big(\man{\vector x}_{k}\big)}}\left[\log_{\overline{h_{k}\big(\man{\vector x}_{k}\big)}}h_{k}\big(\man{\vector x}_{k}\big)+\vartheta_{k}\right];\label{eq:Riemannian-additive-system}
\end{align}
where $\man{\vector x}_{k}\in\man{\Phi}_{\mathcal{N}_{\man{\vector x}}^{n_{\state}}}$
, $\man{\vector y}_{k}\in\man{\Phi}_{\mathcal{N}_{\man{\vector y}}^{n_{\meas}}}$
, $\varpi_{k}\in T_{f_{k}\left(\man{\vector x}_{k-1}\right)}\mathcal{N}_{\man{\vector x}}^{n_{\state}}$,
and $\vartheta_{k}\in T_{h_{k}\left(\man{\vector x}_{k}\right)}\mathcal{N}_{\man{\vector y}}^{n_{\meas}}$.
The noise $\varpi_{k}$ has mean $\mean{\varpi}_{k}\in T_{f_{k}\left(\man{\vector x}_{k-1}\right)}\mathcal{N}_{\man{\vector x}}^{n_{\state}}$
and covariance $Ppnoise_{k}\in T_{f_{k}\left(\man{\vector x}_{k-1}\right)}\mathcal{N}_{\man{\vector x}}^{n_{\state}}\times T_{f_{k}\left(\man{\vector x}_{k-1}\right)}\mathcal{N}_{\man{\vector x}}^{n_{\state}}$,
and $\vartheta_{k}$ mean $\mean{\vartheta}_{k}\in T_{h_{k}\left(\man{\vector x}_{k}\right)}\mathcal{N}_{\man{\vector y}}^{n_{\meas}}$
and covariance $Pmnoise_{k}\in T_{h_{k}\left(\man{\vector x}_{k}\right)}\mathcal{N}_{\man{\vector y}}^{n_{\meas}}\times T_{h_{k}\left(\man{\vector x}_{k}\right)}\mathcal{N}_{\man{\vector y}}^{n_{\meas}}$.
Note that $\varpi_{k}$ is defined in the tangent space $T_{f_{k}\left(\man{\vector x}_{k-1}\right)}\mathcal{N}_{\man{\vector x}}^{n_{\state}}$
and $\vartheta_{k}$ in $T_{h_{k}\left(\man{\vector x}_{k}\right)}\mathcal{N}_{\man{\vector y}}^{n_{\meas}}$.
In Remark \ref{rem:Alternative-additive-Riemannian-system}, we discuss
an alternative definition in which these noises belong to Riemannian
manifolds. An example with the unit sphere manifold of dimension 3,
$\mathbb{S}^{3}$, is provided in Section \ref{sec:Numerical-Example}.
To the best of our knowledge, (\ref{eq:Riemannian-additive-system})
is the \emph{first consistent additive-noise Riemannian system}. Although
the literature has introduced additive-noise discrete-time UKFs for
some Riemannian manifolds, we could not find any additive-noise system
retaining the random point in the working manifolds; even for simple
manifolds such as $\mathbb{S}^{3}$ (cf. \cite{Crassidis2003,Chang2016,Vartiainen2014}).
If $\mathcal{N}_{\man{\vector x}}^{n_{\state}}=\ensuremath{\mathbb{R}}^{n_{\state}}$ and $\mathcal{N}_{\man{\vector y}}^{n_{\meas}}=\ensuremath{\mathbb{R}}^{n_{\meas}}$
then (\ref{eq:Riemannian-additive-system}) is the \emph{additive
system} (\ref{eq:additive-system}). This is a direct consequence
of the following results: for $a,b\in\ensuremath{\mathbb{R}}^{n}$, $\log_{a}b=b-a$
and $\mbox{\ensuremath{\exp}}_{a}b=b+a$.
Sometimes, only one of the two equations in (\ref{eq:Riemannian-general-system})
can be written with additive-noise as in (\ref{eq:Riemannian-additive-system}).
In this case, we define the following two partially-additive Riemannian
systems:
\begin{align}
\man{\vector x}_{k} & =f_{k}\left(\man{\vector x}_{k-1},\man{\varpi}_{k}\right)\nonumber \\
\man{\vector y}_{k} & =\mbox{\ensuremath{\exp}}_{\overline{h_{k}\big(\man{\vector x}_{k}\big)}}\left[\log_{\overline{h_{k}\big(\man{\vector x}_{k}\big)}}h_{k}\big(\man{\vector x}_{k}\big)+\vartheta_{k}\right];\label{eq:Riemannian-partiallly-additive-system1}
\end{align}
and
\begin{align}
\man{\vector x}_{k} & =\mbox{\ensuremath{\exp}}_{\overline{f_{k}\big(\man{\vector x}_{k-1}\big)}}\left[\log_{\overline{f_{k}\big(\man{\vector x}_{k-1}\big)}}f_{k}\big(\man{\vector x}_{k-1}\big)+\varpi_{k}\right]\nonumber \\
\man{\vector y}_{k} & =h_{k}\left(\man{\vector x}_{k},\man{\vartheta}_{k}\right).\label{eq:Riemannian-partiallly-additive-system2}
\end{align}
\begin{rem}[]
\label{rem:Alternative-additive-Riemannian-system}System (\ref{eq:Riemannian-additive-system})
is defined with tangent space process and measurement noises. An alternative
definition in which these noises belong to Riemannian manifolds is
the following:
\begin{align*}
\man{\vector x}_{k} & =\mbox{\ensuremath{\exp}}_{\overline{f_{k}\left(\man{\vector x}_{k-1}\right)}}\left[\log_{\overline{f_{k}\left(\man{\vector x}_{k-1}\right)}}f_{k}\left(\man{\vector x}_{k-1}\right)+\log_{\overline{f_{k}\left(\man{\vector x}_{k-1}\right)}}\man{\varpi}_{k}\right]\\
\man{\vector y}_{k} & =\mbox{\ensuremath{\exp}}_{\overline{h_{k}\left(\man{\vector x}_{k}\right)}}\left[\log_{\overline{h_{k}\left(\man{\vector x}_{k}\right)}}h_{k}\left(\man{\vector x}_{k}\right)+\log_{\overline{h_{k}\left(\man{\vector x}_{k}\right)}}\man{\vartheta}_{k}\right];
\end{align*}
where $\man{\vector x}_{k}\in\man{\Phi}_{\mathcal{N}_{\man{\vector x}}^{n_{\state}}}$
, $\man{\vector y}_{k}\in\man{\Phi}_{\mathcal{N}_{\man{\vector y}}^{n_{\meas}}}$
, $\man{\varpi}_{k}\in\man{\Phi}_{\mathcal{N}_{\man{\vector x}}^{n_{\state}}}$,
and $\man{\vartheta}_{k}\in\man{\Phi}_{\mathcal{N}_{\man{\vector y}}^{n_{\meas}}}$.
In this case, it would be interesting to assume one of the following
two cases:
\begin{enumerate}
\item That the following are known: i) the means of $\man{\varpi}_{k}$ and $\man{\vartheta}_{k}$—e.g.,
$\mean{\man{\varpi}}_{k}\in\mathcal{N}_{\man{\vector x}}^{n_{\state}}-\mathcal{C}(\overline{f_{k}\left(\man{\vector x}_{k-1}\right)})$
and $\mean{\man{\vartheta}}_{k}\in\mathcal{N}_{\man{\vector y}}^{n_{\meas}}-\mathcal{C}(\overline{h_{k}\left(\man{\vector x}_{k}\right)})$—;
ii) the covariance of $\man{\varpi}_{k}$ with respect to $\mean{\man{\varpi}}_{k}$
at $\overline{f_{k}\left(\man{\vector x}_{k-1}\right)}$; and iii)
the covariance of $\man{\vartheta}_{k}$ with respect to $\mean{\man{\vartheta}}_{k}$
at $\overline{h_{k}\left(\man{\vector x}_{k}\right)}$.
\item That the means and covariances of $\log_{\overline{f_{k}\left(\man{\vector x}_{k-1}\right)}}\man{\varpi}_{k}$
and $\log_{\overline{h_{k}\left(\man{\vector x}_{k}\right)}}\man{\vartheta}_{k}$
are known—e.g., the means $\mean{\varpi}_{k}\in T_{\overline{f_{k}\left(\man{\vector x}_{k-1}\right)}}\man{\Phi}_{\mathcal{N}_{\man{\vector x}}^{n_{\state}}}$
and $\mean{\vartheta}_{k}\in T_{\overline{h_{k}\left(\man{\vector x}_{k}\right)}}\man{\Phi}_{\mathcal{N}_{\man{\vector y}}^{n_{\meas}}}$;
and the covariances $Ppnoise_{k}\in T_{\overline{f_{k}\left(\man{\vector x}_{k-1}\right)}}\man{\Phi}_{\mathcal{N}_{\man{\vector x}}^{n_{\state}}}\times T_{\overline{f_{k}\left(\man{\vector x}_{k-1}\right)}}\man{\Phi}_{\mathcal{N}_{\man{\vector x}}^{n_{\state}}}$
and $Pmnoise_{k}\in T_{\overline{h_{k}\left(\man{\vector x}_{k}\right)}}\man{\Phi}_{\mathcal{N}_{\man{\vector y}}^{n_{\meas}}}\times T_{\overline{h_{k}\left(\man{\vector x}_{k}\right)}}\man{\Phi}_{\mathcal{N}_{\man{\vector y}}^{n_{\meas}}}$.
\end{enumerate}
\end{rem}
\subsection{Correction equations}
\label{subsec:Correction-equations}
In this section, we introduce Riemannian extensions of the UKFs correction
equations. Finding these extensions is not trivial because their Euclidean
versions include vector operations (cf. \cite{Menegaz2015}), which
are not defined for all Riemannian manifolds. Thus, we proceed by
first considering the simpler case $\man{\Phi}_{\mathcal{N}_{\man{\vector x}}^{n_{\state}}}=\man{\Phi}_{\mathcal{N}_{\man{\vector y}}^{n_{\meas}}}$.
\subsubsection{State and measurement in the same manifold}
\label{subsec:State-and-measurement}
Suppose that $\mathcal{N}_{\man{\vector x}}^{n_{\state}}=\mathcal{N}_{\man{\vector y}}^{n_{\meas}}$
and the measurements $\outcome{\man{\vector y}}_{1}$, ..., $\outcome{\man{\vector y}}_{k}$
have been acquired. Define the following random points\footnote{For the random points $\man{X}$ and $\man{Y}$ and the outcomes
$\outcome{\man{Y}}_{1}$, ..., $\outcome{\man{Y}}_{l}$ of $\man{Y}$;
the random point $\man{X}|\outcome{\man{Y}}_{1:l}$ stands
for $\man{X}$ conditioned on $\man{Y}_{i}=\outcome{\man{Y}}_{i}$
for every $i=1$, ..., $l$.}
\begin{align*}
\man{\vector x}_{k|k-1} & :=\man{\vector x}_{k}|\outcome{\man{\vector y}}_{1:k-1}\\
\man{\vector x}_{k|k} & :=\man{\vector x}_{k}|\outcome{\man{\vector y}}_{1:k}\\
\man{\vector y}_{k|k-1} & :=\man{\vector y}_{k}|\outcome{\man{\vector y}}_{1:k-1},
\end{align*}
and the following projections on the tangent space of $\man{\vector x}_{k|k-1}$
\begin{align}
\vector x_{k|k-1}^{TM} & :=\log_{\mean{\man{\vector x}}_{k|k-1}}\man{\vector x}_{k|k-1}\label{eq:corrected-state-in-tangent-space}\\
\vector x_{k|k}^{TM} & :=\log_{\mean{\man{\vector x}}_{k|k-1}}\man{\vector x}_{k|k}\nonumber \\
\vector y_{k|k-1}^{TM} & :=\log_{\mean{\man{\vector x}}_{k|k-1}}\man{\vector y}_{k|k-1}\label{eq:predicted-measurement-in-tangent-space}\\
\outcome{\vector y}_{k}^{TM} & :=\log_{\mean{\man{\vector x}}_{k|k-1}}\outcome{\man{\vector y}}_{k}.\label{eq:realization-of-measurement-in-tangent-space}
\end{align}
Let i) $\man{\vector x}_{k|k-1}$ and $\man{\vector y}_{k|k-1}$ be characterized
by their projection on the tangent space of $\man{\vector x}_{k|k-1}$
according to the following equation:
\begin{multline}
\left[\begin{array}{cc}
\vector x_{k|k-1}^{TM} & \vector y_{k|k-1}^{TM}\end{array}\right]^{T}\sim\\
N\left(\left[\begin{array}{c}
[0]_{n_{\state},1}\\
\mean{\vector y}_{k|k-1}^{TM}
\end{array}\right],\left[\begin{array}{cc}
\man{P}_{\man{\vector x\vector x}}^{k|k-1} & \man{P}_{\man{\vector x\vector y}}^{k|k-1}\\
\left(\man{P}_{\man{\vector x\vector y}}^{k|k-1}\right)^{T} & \man{P}_{\man{\vector y\vector y}}^{k|k-1}
\end{array}\right]\right);\label{eq:predicted-tangential-statistics-are-normal}
\end{multline}
and ii) the projection $\vector x_{k|k}^{TM}$ be given by the following
linear correction of $\vector x_{k|k-1}^{TM}$
\begin{equation}
\vector x_{k|k}^{TM}=\vector x_{k|k-1}^{TM}+\man{G}_{k}\left(\outcome{\vector y}_{k}^{TM}-\vector y_{k|k-1}^{TM}\right),\label{eq:correction-equation-of-xk-tangent}
\end{equation}
where $\man{G}_{k}\in\ensuremath{\mathbb{R}}^{n_{\state}\times n_{\state}}$ is
a gain matrix. From known results of the Kalman filtering theory (cf.
\cite{Anderson1979}), we have
\begin{equation}
\man{G}_{k}:=\man{P}_{\man{\vector x\vector y}}^{k|k-1}\left(\man{P}_{\man{\vector y\vector y}}^{k|k-1}\right)^{-1},\label{eq:Riemannian-kalman-gain}
\end{equation}
and $\vector x_{k|k}^{TM}\sim N(\mean{\vector x}_{k|k}^{TM},\man{P}_{\man{\vector x\vector x}}^{k|k,\mean{\man{\vector x}}_{k|k-1}})$
where
\begin{align}
\mean{\vector x}_{k|k}^{TM} & :=\man{G}_{k}\left(\outcome{\vector y}_{k}^{TM}-\mean{\vector y}_{k|k-1}^{TM}\right)\label{eq:Riemannian-corrected-mean}\\
\man{P}_{\man{\vector x\vector x}}^{k|k,\mean{\man{\vector x}}_{k|k-1}} & :=\man{P}_{\man{\vector x\vector x}}^{k|k-1}-\left(\man{G}_{k}\right)\man{P}_{\man{\vector y\vector y}}^{k|k-1}\left(\man{G}_{k}\right)^{T}.\label{eq:Riemannian-corrected-cov-in-tangent-of-xk_k-1}
\end{align}
From (\ref{eq:corrected-state-in-tangent-space}), we have
\begin{equation}
\mean{\man{\vector x}}_{k|k}=\exp_{\mean{\man{\vector x}}_{k|k-1}}\vector x_{k|k}^{TM}.\label{eq:Riemannian-corrected-mean-1}
\end{equation}
The matrix $\man{P}_{\man{\vector x\vector x}}^{k|k,\mean{\man{\vector x}}_{k|k-1}}$
is the covariance of $\man{\vector x}_{k|k}$ relative to $\mean{\man{\vector x}}_{k|k}$
\emph{at} $\mean{\man{\vector x}}_{k|k-1}$. We want the covariance $\man{P}_{\man{\vector x\vector x}}^{k|k}:=\man{P}_{\man{\vector x\vector x}}^{k|k,\mean{\man{\vector x}}_{k|k}}$
of $\man{\vector x}_{k|k}$ \emph{at} $\mean{\man{\vector x}}_{k|k}$, and
the following theorem from \cite{Hauberg2013} provides the mechanism
to obtain $\man{P}_{\man{\vector x\vector x}}^{k|k,\mean{\man{\vector x}}_{k|k}}$
from $\man{P}_{\man{\vector x\vector x}}^{k|k,\mean{\man{\vector x}}_{k|k-1}}$.
\begin{thm}[Parallel Transport of a Bilinear Mapping \cite{Hauberg2013}]
\label{thm:Hauberg-matrix-parellel-transport}Let $P$ be a \emph{symmetric}
\emph{bilinear} mapping on the tangent space $T_{\man a}\mathcal{N}$ of
the Riemannian manifold $\mathcal{N}$ at $\man a\in\mathcal{N}$, and $\alpha:[0,1]\rightarrow\mathcal{N}$
a differentiable curve on $\mathcal{N}$ with $\alpha(0)=\man a$. Since
$P$ is symmetric, it can be written as
\[
P=\sum_{i=1}^{n}\lambda_{i}v_{i}v_{i}^{T}
\]
where ($v_{1},$ ..., $v_{n}$) is an orthonormal basis of $T_{\man a}\mathcal{N}$,
and each $\lambda_{i}$ is the eigenvalue of $P$ associated with
the eigenvector $v_{i}$. Let $v_{i}(t)$ be the parallel transport
of $v_{i}$ along $\alpha(t)$ (Definition \ref{def:parallel-transport}).
With this,
\begin{equation}
P_{t}:=\sum_{i=1}^{n}\lambda_{i}v_{i}(t)v_{i}(t)^{T}\label{eq:parallel-transport-theorem}
\end{equation}
is the \emph{parallel transport of $P$} along $\alpha(t)$.
\end{thm}
When we do not know the closed form of a tangent vector parallel transport,
we can use a numerical approach such as the \emph{Schild's Ladder}
(cf. \cite{Hauberg2013}; see \cite{Lorenzi2013} for other implementations
and algorithms of parallel transports).
We obtain $\man{P}_{\man{\vector x\vector x}}^{k|k}$ by performing the
parallel transport of $\man{P}_{\man{\vector x\vector x}}^{k|k,\mean{\man{\vector x}}_{k|k-1}}$ from
$\mean{\man{\vector x}}_{k|k-1}$ to $\mean{\man{\vector x}}_{k|k}$ as
follows:
\begin{equation}
\man{P}_{\man{\vector x\vector x}}^{k|k}=\mbox{PT}\Big(\man{P}_{\man{\vector x\vector x}}^{k|k,\mean{\man{\vector x}}_{k|k-1}},\mean{\man{\vector x}}_{k|k-1},\mean{\man{\vector x}}_{k|k}\Big),\label{eq:Riemannian-corrected-cov}
\end{equation}
where
\begin{eqnarray*}
\mbox{PT}: & \textrm{Sym}\left(T_{\man a}\mathcal{N}\right)\times\mathcal{N}\times\mathcal{N} & \rightarrow\textrm{Sym}\left(T_{\man b}\mathcal{N}\right)\\
 & \left(P^{\man a},\man a,\man b\right) & \mapsto P^{\man b}
\end{eqnarray*}
is the function mapping $\textrm{Sym}(T_{\man a}\mathcal{N})\times\mathcal{N}\times\mathcal{N}$
to $\textrm{Sym}(T_{\man b}\mathcal{N})$ according to (\ref{eq:parallel-transport-theorem}),
and $\textrm{Sym}(T_{\man a}\mathcal{N})$ denotes the space of symmetric
matrices of $T_{\man a}\mathcal{N}$.
With this, we can define a UKF for Riemannian systems when $\mathcal{N}_{\man{\vector x}}^{n_{\state}}=\mathcal{N}_{\man{\vector y}}^{n_{\meas}}$.
Let us now consider the original more general case.
\subsubsection{State and measurement in different manifolds}
\label{subsec:State-and-measurement=00005B}
If $\man{\vector x}_{k}$ belongs to a manifold $\man{\Phi}_{\mathcal{N}_{\man{\vector x}}^{n_{\state}}}$
and $\man{\vector y}_{k}$ to another manifold $\man{\Phi}_{\mathcal{N}_{\man{\vector y}}^{n_{\meas}}}$,
then we cannot define $\vector y_{k|k-1}^{TM}$ as in (\ref{eq:predicted-measurement-in-tangent-space})
and $\outcome{\vector y}_{k}^{TM}$ as in (\ref{eq:realization-of-measurement-in-tangent-space});
consequently, neither $\vector x_{k|k}^{TM}$ as in (\ref{eq:correction-equation-of-xk-tangent}).
Since we know the correction equations when $\mathcal{N}_{\man{\vector x}}^{n_{\state}}=\mathcal{N}_{\man{\vector y}}^{n_{\meas}}$,
we can look for a manifold of which both $\mathcal{N}_{\man{\vector x}}$ and
$\mathcal{N}_{\man{\vector y}}$ are submanifolds. The simplest such manifold
is $\mathcal{N}_{\man{\vector x}}\times\mathcal{N}_{\man{\vector y}}$—the Cartesian
product of two Riemannian manifolds is a Riemannian manifold \cite{DoCarmo1992}.
Suppose $\vector x_{k|k-1}^{TM}$ and $\vector y_{k|k-1}^{TM}$ are jointly
Gaussian random vectors according to (\ref{eq:predicted-tangential-statistics-are-normal}).
Define i) the Riemannian Manifold $\mathcal{N}_{\man{\vector x},\man{\vector y}}:=\mathcal{N}_{\man{\vector x}}\times\mathcal{N}_{\man{\vector y}}$;
ii) the points $\man c:=(\man c_{\man{\vector x}},\man c_{\man{\vector y}})\in\mathcal{N}_{\man{\vector x},\man{\vector y}}$,
$\man b_{\man{\vector x}}\in\mathcal{N}_{\man{\vector x}}$, and $\man b_{\man{\vector y}}\in\mathcal{N}_{\man{\vector y}}$
(these points are chosen); and the following random vector belonging
to $T_{\man c}\mathcal{N}_{\man{\vector x},\man{\vector y}}$:
\begin{multline*}
\vector x_{k|k,**}^{T_{\man c}\mathcal{N}_{\man{\vector x},\man{\vector y}}}:=\log_{\man c}\big[\man{\vector x}_{k|k-1},\,\man b_{\man{\vector y}}\big]^{T}\\
+\man{G}_{k,**}\big(\log_{\man c}[\man b_{\man{\vector x}},\,\outcome{\man{\vector y}}_{k}]^{T}-\log_{\man c}[\man b_{\man{\vector x}},\man{\vector y}_{k|k-1}]^{T}\big)
\end{multline*}
where $\man{G}_{k,**}\in\ensuremath{\mathbb{R}}^{\left(n_{\state}+n_{\meas}\right)\times\left(n_{\state}+n_{\meas}\right)}$
is a gain matrix. The tangent vector $\vector x_{k|k,**}^{T_{\man c}\mathcal{N}_{\man{\vector x},\man{\vector y}}}$
is clearly related with $\vector x_{k|k}^{TM}$ by
\begin{equation}
\vector x_{k|k}^{TM}:=\left[\hat{\vector x}_{k|k,**}^{T_{\man c}\mathcal{N}_{\man{\vector x},\man{\vector y}}}\right]_{1:n_{\state},1}.\label{eq:augmented-corrected-state-to-non-augmented}
\end{equation}
By finding the mean and covariance of $\vector x_{k|k,**}^{T_{\man c}\mathcal{N}_{\man{\vector x},\man{\vector y}}}$,
we find the mean and covariance of $\vector x_{k|k}^{TM}$.
Since $\vector x_{k|k-1}^{TM}$ and $\vector y_{k|k-1}^{TM}$ are jointly
Gaussian random vectors, it follows that—we use the same reasoning
used to obtain (\ref{eq:Riemannian-kalman-gain}), (\ref{eq:Riemannian-corrected-mean}),
(\ref{eq:Riemannian-corrected-cov-in-tangent-of-xk_k-1}), (\ref{eq:Riemannian-corrected-mean-1}),
and (\ref{eq:Riemannian-corrected-cov})—
\begin{align*}
\man{P}_{\man{\vector x}\man{\vector x},**}^{k|k-1} & :=\man{\mathcal{E} }\left\{ \left(\log_{\man c}\left[\begin{array}{c}
\man x\\
\man b_{\man{\vector y}}
\end{array}\right]-\log_{\man c}\left[\begin{array}{c}
\mean{\man{\vector x}}_{k|k-1}\\
\man b_{\man{\vector y}}
\end{array}\right]\right)\left(\diamond\right)^{T}\right\} \\
\man{P}_{\man{\vector y}\man{\vector y},**}^{k|k-1} & :=\man{\mathcal{E} }\left\{ \left(\log_{\man c}\left[\begin{array}{c}
\man b_{\man{\vector x}}\\
\man{\vector y}
\end{array}\right]-\log_{\man c}\left[\begin{array}{c}
\man b_{\man{\vector x}}\\
\mean{\man{\vector y}}_{k|k-1}
\end{array}\right]\right)\left(\diamond\right)^{T}\right\}
\end{align*}
\begin{multline*}
\man{P}_{\man{\vector x}\man{\vector y},**}^{k|k-1}:=\man{\mathcal{E} }\left\{ \left(\log_{\man c}\left[\begin{array}{c}
\man x\\
\man b_{\man{\vector y}}
\end{array}\right]-\log_{\man c}\left[\begin{array}{c}
\mean{\man{\vector x}}_{k|k-1}\\
\man b_{\man{\vector y}}
\end{array}\right]\right)\right.\\
\times\left.\left(\log_{\man c}\left[\begin{array}{c}
\man b_{\man{\vector x}}\\
\man{\vector y}
\end{array}\right]-\log_{\man c}\left[\begin{array}{c}
\man b_{\man{\vector x}}\\
\mean{\man{\vector y}}_{k|k-1}
\end{array}\right]\right)^{T}\right\} ;
\end{multline*}
thus, the mean and covariance of $\vector x_{k|k,**}^{T_{\man c}\mathcal{N}_{\man{\vector x},\man{\vector y}}}$
are given by
\begin{align}
\man{G}_{k,**}:= & \man{P}_{\man{\vector x}\man{\vector y},**}^{k|k-1}\text{diag}\Big([0]_{n_{\state}\times n_{\state}},\nonumber \\
& \man{\mathcal{E} }\{(\log_{\man c_{\man{\vector y}}}\man{\vector y}-\log_{\man c_{\man{\vector y}}}\mean{\man{\vector y}}_{k|k-1})\left(\diamond\right)^{T}\}^{-1}\Big)\label{eq:UKF-Riemannian-CartesianSolution-3}\\
\mean{\vector x}_{k|k,**}^{T_{\man c}\mathcal{N}_{\man{\vector x},\man{\vector y}}}:= & \log_{\man c}\left[\begin{array}{c}
\mean{\man{\vector x}}_{k|k-1}\\
\man b_{\man{\vector y}}
\end{array}\right]+\man{G}_{k,**}\log_{\man c}\left[\begin{array}{c}
\man b_{\man{\vector x}}\\
\mean{\man{\vector y}}_{k|k-1}
\end{array}\right]\label{eq:UKF-Riemannian-CartesianSolution-4}\\
\man{P}_{\man{\vector x}\man{\vector x},**}^{k|k,T_{\man c}M}:= & \man{P}_{\man{\vector x}\man{\vector x},**}^{k|k-1}-\left(\man{G}_{k,**}\right)\man{P}_{\man{\vector y}\man{\vector y},**}^{k|k-1}\left(\man{G}_{k,**}\right)^{T}.\label{eq:UKF-Riemannian-CartesianSolution-6}
\end{align}
We can choose $\man c$, $\man b_{\man{\vector x}}$ and $\man b_{\man{\vector y}}$
arbitrarily, and a particular choice yields the desired correction
equations.
\begin{thm}[Theorem 9.3 of \cite{Menegaz2016}]
\label{thm:General-to-Hauberg-UKF}Given (\ref{eq:augmented-corrected-state-to-non-augmented}),
(\ref{eq:UKF-Riemannian-CartesianSolution-3}), (\ref{eq:UKF-Riemannian-CartesianSolution-4}),
and (\ref{eq:UKF-Riemannian-CartesianSolution-6}); if $\man c_{\man{\vector x}}=\man b_{\man{\vector x}}=\est{\man{\vector x}}_{k|k-1}$
and $\man c_{\man{\vector y}}=\man b_{\man{\vector y}}=\est{\man{\vector y}}_{k|k-1}$,
then
\begin{equation}
\vector x_{k|k}^{TM}=\man{G}_{k}\log_{\est{\man{\vector y}}_{k|k-1}}\left(\man{\vector y}_{k}\right)\label{eq:corrected-state-tangent-final-equation}
\end{equation}
and
\begin{equation}
\man{P}_{\man{\vector x}\man{\vector x}}^{k|k,\mean{\man{\vector x}}_{k|k-1}}=\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k-1}-\man{G}_{k}\est{\man{P}}_{\man{\vector y}\man{\vector y}}^{k|k-1}\left(\man{G}_{k}\right)^{T},\label{eq:corrected-coov-tangent-xk-k-1-final-equation}
\end{equation}
where
\[
\man{G}_{k}:=\man{P}_{\man{\vector x\vector y}}^{k|k-1}\left(\man{P}_{\man{\vector y\vector y}}^{k|k-1}\right)^{-1}.
\]
\end{thm}
The proof of Theorem \ref{thm:General-to-Hauberg-UKF} is in Appendix
\ref{proof:TheoremRiUKFcorrection}.
According to this theorem, the correction equations—(\ref{eq:Riemannian-kalman-gain}),
(\ref{eq:Riemannian-corrected-mean}), (\ref{eq:Riemannian-corrected-cov-in-tangent-of-xk_k-1}),
(\ref{eq:Riemannian-corrected-mean-1}), and (\ref{eq:Riemannian-corrected-cov})—are
true even when the state and the measurement belong to different manifolds.
Therefore, we do not have to perform calculations on the bigger manifold
$\mathcal{N}_{\man{\vector x},\man{\vector y}}$ to calculate $\vector x_{k|k}^{TM}$
and $\man{P}_{\man{\vector x}\man{\vector x}}^{k|k,\mean{\man{\vector x}}_{k|k-1}}$.
Instead, they can be calculated by (\ref{eq:corrected-state-tangent-final-equation})
and (\ref{eq:corrected-coov-tangent-xk-k-1-final-equation}) even
when $\mathcal{N}_{\man{\vector x}}\neq\mathcal{N}_{\man{\vector y}}$.
\subsection{New Riemannian Unscented Kalman Filters}
\label{subsec:Riemannian-Unscented-Filters}
At this point, we are endowed with the necessary results to provide
Riemannian extensions of UKFs. At every step time, the final estimates
$\est{\man{\vector x}}_{k|k}$ and $\est{\man{P}}_{\man{\vector x\vector x}}^{k|k}$
can be calculated by (\ref{eq:Riemannian-corrected-cov}) and Theorem
\ref{thm:General-to-Hauberg-UKF}. From (\ref{eq:Riemannian-kalman-gain}),
(\ref{eq:Riemannian-corrected-cov}), (\ref{eq:corrected-state-tangent-final-equation}),
and (\ref{eq:corrected-coov-tangent-xk-k-1-final-equation}) these
final estimates require $\est{\man{\vector x}}_{k|k-1}$, $\est{\man{P}}_{\man{\vector x\vector x}}^{k|k-1}$,
$\est{\man{\vector y}}_{k|k-1}$, $\est{\man{P}}_{\man{yy}}^{k|k-1}$,
and $\est{\man{P}}_{\man{\vector x y}}^{k|k-1}$. These last estimates
can be calculated by realizing $\text{RiUT}$s in systems (\ref{eq:Riemannian-general-system})
and (\ref{eq:Riemannian-additive-system}). For instance, from (\ref{eq:Riemannian-additive-system}),
Definition \ref{def:Riemannian-Unscented-Transformation} and Proposition
\ref{prop:Addition of Riemannian random points}, the estimates $\est{\man{\vector x}}_{k|k-1}$,
$\est{\man{P}}_{\man{\vector x\vector x}}^{k|k-1}$ can be calculated
by
\begin{multline*}
\Big(\est{\man{\vector y}}_{k|k-1}^{*},\est{\man{P}}_{\man{yy},*}^{k|k-1},\est{\man{P}}_{\man{\vector x y}}^{k|k-1}\Big):=\\
\text{RiUT}_{2}\left(h_{k},\est{\man{\vector x}}_{k|k-1},\est{\man{P}}_{\man{\vector x\vector x}}^{k|k-1}\right)
\end{multline*}
\begin{align*}
\est{\man{\vector y}}_{k|k-1} & :=\exp_{\est{\man{\vector y}}_{k|k-1}^{*}}\mean{\vartheta}_{k}\\
\est{\man{P}}_{\man{yy}}^{k|k-1} & :=\est{\man{P}}_{\man{yy},*}^{k|k-1}+\man{Pmnoise}_{k}.
\end{align*}
By similar formulas, we can obtain $\est{\man{\vector x}}_{k|k-1}$,
$\est{\man{P}}_{\man{\vector x\vector x}}^{k|k-1}$, $\est{\man{\vector y}}_{k|k-1}$,
$\est{\man{P}}_{\man{yy}}^{k|k-1}$, and $\est{\man{P}}_{\man{\vector x y}}^{k|k-1}$
for both (\ref{eq:Riemannian-general-system}) and (\ref{eq:Riemannian-additive-system}).
Below, we introduce the Riemannian UKFs (RiUKFs): UKFs for the Riemannian
systems (\ref{eq:Riemannian-general-system}) and (\ref{eq:Riemannian-additive-system}).
For the filter of (\ref{eq:Riemannian-general-system}), define the
augmented functions
\begin{align}
f_{k}^{aug}\big([\man{\vector x}_{k-1},\,\man{\varpi}_{k}]^{T}\big) & :=f_{k}\big(\man x_{k-1},\man{\varpi}_{k}\big)\label{eq:Riemannian-augmented-functions}\\
h_{k}^{aug}\big([\man{\vector x}_{k},\,\man{\vartheta}_{k}]^{T}\big) & :=h_{k}\big(\man x_{k},\man{\vartheta}_{k}\big).\nonumber
\end{align}
Consider system (\ref{eq:Riemannian-general-system}) and suppose
that i) the initial state is $\man{\vector x}_{0}\sim\left(\mean{\man{\vector x}}_{0},\man{P}_{\man{\vector x}\man{\vector x}}^{0}\right)_{\mathcal{N}_{\man{\vector x}}},$
and ii) the measurements $\outcome{\man{\vector y}}_{1}$, $\outcome{\man{\vector y}}_{2}$,
..., $\outcome{\man{\vector y}}_{k_{f}}$ are given. Then the \emph{Riemannian
Augmented Unscented Kalman Filter }(RiAuUKF)\emph{ }is given by the
following algorithm:
\begin{lyxalgorithm}[RiAuUKF; Algorithm 19 of \cite{Menegaz2016}]
\label{alg:RiAuUKF}Set the initial estimates $\est{\man{\vector x}}_{0|0}:=\mean{\man{\vector x}}_{0}$
and $\est{\man{P}}_{\vector x\vector x}^{0|0}:=\man{P}_{\vector x\vector x}^{0}$.
For $k=1,...,k_{f}$, perform the following steps:
\begin{enumerate}[labelsep=0.1cm,leftmargin=0.45cm]
\item \label{enu:RiAuUKF-State-prediction}State prediction.\textup{
\begin{align*}
\est{\man{\vector x}}_{k-1|k-1}^{aug} & :=\left[\est{\man{\vector x}}_{k-1|k-1}^{T},\mean{\man{\varpi}}_{k}^{T}\right]^{T}\\
\est{\man{P}}_{\man{\vector x\vector x},aug}^{k-1|k-1} & :=\text{diag}\left(\est{\man{P}}_{\man{\vector x\vector x}}^{k-1|k-1},\man{Ppnoise}_{k}\right)
\end{align*}
\begin{multline}
\Big(\est{\man{\vector x}}_{k|k-1},\est{\man{P}}_{\man{\vector x\vector x}}^{k|k-1}\Big):=\\
\text{RiUT}_{1}\left(f_{k}^{aug},\est{\man{\vector x}}_{k-1|k-1}^{aug},\est{\man{P}}_{\man{\vector x\vector x},aug}^{k-1|k-1}\right).\label{RiAuUKF-RiUT1}
\end{multline}
}
\item \label{enu:RiAuUKF-Measurement-prediction}Measurement prediction.\textup{
\begin{align*}
\est{\man{\vector x}}_{k|k-1}^{aug} & :=\left[\est{\man{\vector x}}_{k|k-1}^{T},\mean{\man{\vartheta}}_{k}^{T}\right]^{T}\\
\est{\man{P}}_{\man{\vector x\vector x},aug}^{k|k-1} & :=\text{diag}\left(\est{\man{P}}_{\man{\vector x\vector x}}^{k|k-1},\man{Pmnoise}_{k}\right).
\end{align*}
\begin{multline}
\Big(\est{\man{\vector y}}_{k|k-1},\est{\man{P}}_{\man{yy}}^{k|k-1},\est{\man{P}}_{\man{\vector x y},aug}^{k|k-1}\Big):=\\
\text{RiUT}_{2}\left(h_{k}^{aug},\est{\man{\vector x}}_{k|k-1}^{aug},\est{\man{P}}_{\man{\vector x\vector x},aug}^{k|k-1}\right)\label{RiAuUKF-RiUT2}
\end{multline}
\[
\est{\man{P}}_{\man{\vector x y}}^{k|k-1}:=\left[\est{\man{P}}_{\man{\vector x y},aug}^{k|k-1}\right]_{\left(1:n_{\state}\right),\left(1:n_{\meas}\right)}.
\]
}
\item \label{enu:RiAuUKF-State-correction}State correction.\textup{
\begin{align}
\man{G}_{k} & :=\est{\man{P}}_{\man{\vector x}\man{\vector y}}^{k|k-1}\Big(\est{\man{P}}_{\man{\vector y}\man{\vector y}}^{k|k-1}\Big)^{-1}\label{eq:RiAuUKF-KalmanGain}\\
\hat{\vector x}_{k|k}^{TM} & :=\man{G}_{k}\log_{\est{\man{\vector y}}_{k|k-1}}\big(\outcome{\man{\vector y}}_{k}\big)\nonumber \\
\est{\man{\vector x}}_{k|k} & :=\exp_{\est{\man{\vector x}}_{k|k-1}}\big(\hat{\vector x}_{k|k}^{TM}\big)\nonumber \\
\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k,\est{\man{\vector x}}_{k|k-1}} & :=\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k-1}-\man{G}_{k}\est{\man{P}}_{\man{\vector y}\man{\vector y}}^{k|k-1}\man{G}_{k}^{T}\nonumber \\
\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k} & :=\mbox{PT}\Big(\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k,\est{\man{\vector x}}_{k|k-1}},\est{\man{\vector x}}_{k|k-1},\est{\man{\vector x}}_{k|k}\Big).\nonumber
\end{align}
}
\end{enumerate}
\end{lyxalgorithm}
Consider the system (\ref{eq:Riemannian-additive-system}) and suppose
that i) the initial state is $\man{\vector x}_{0}\sim\left(\mean{\man{\vector x}}_{0},\man{P}_{\man{\vector x}\man{\vector x}}^{0}\right)_{\mathcal{N}_{\man{\vector x}}},$
and ii) the measurements $\outcome{\man{\vector y}}_{1}$, $\outcome{\man{\vector y}}_{2}$,
..., $\outcome{\man{\vector y}}_{k_{f}}$ are given. Then the \emph{Riemannian
Additive Unscented Kalman Filter }(RiAdUKF) is given by the following
algorithm:
\begin{lyxalgorithm}[RiAdUKF; Algorithm 21 of \cite{Menegaz2016}]
\label{alg:RiAdUKF}Set the initial estimates $\est{\man{\vector x}}_{0|0}:=\mean{\man{\vector x}}_{0}$
and $\est{\man{P}}_{\vector x\vector x}^{0|0}:=\man{P}_{\vector x\vector x}^{0}$.
For $k=1,...,k_{f}$, perform the following steps:
\begin{enumerate}[labelsep=0.1cm,leftmargin=0.45cm]
\item \label{enu:RiAdUKF-State-prediction}State prediction. \textup{
\begin{align}
\Big(\est{\man{\vector x}}_{k|k-1}^{*},\est{\man{P}}_{\man{\vector x\vector x},*}^{k|k-1}\Big) & :=\text{RiUT}_{1}\Big(f_{k},\est{\man{\vector x}}_{k-1|k-1},\est{\man{P}}_{\man{\vector x\vector x}}^{k-1|k-1}\Big)\label{eq:RiAdUKF-RiUT1}\\
\est{\man{\vector x}}_{k|k-1} & :=\exp_{\est{\man{\vector x}}_{k|k-1}^{*}}\mean{\varpi}_{k}\label{eq:RiAdUKF-MeanProcNoise}\\
\est{\man{P}}_{\man{\vector x\vector x}}^{k|k-1} & :=\est{\man{P}}_{\man{\vector x\vector x},*}^{k|k-1}+\man{Ppnoise}_{k}.\label{eq:RiAdUKF-CovProcNoise}
\end{align}
}
\item \label{enu:RiAdUKF-Measurement-prediction}Measurement prediction.
\textup{
\begin{multline}
\Big(\est{\man{\vector y}}_{k|k-1}^{*},\est{\man{P}}_{\man{yy},*}^{k|k-1},\est{\man{P}}_{\man{\vector x y}}^{k|k-1}\Big):=\\
\text{RiUT}_{2}\left(h_{k},\est{\man{\vector x}}_{k|k-1},\est{\man{P}}_{\man{\vector x\vector x}}^{k|k-1}\right)\label{eq:RiAdUKF-RiUT2}
\end{multline}
\begin{align}
\est{\man{\vector y}}_{k|k-1} & :=\exp_{\est{\man{\vector y}}_{k|k-1}^{*}}\mean{\vartheta}_{k}\label{eq:RiAdUKF-MeanMeasNoise}\\
\est{\man{P}}_{\man{yy}}^{k|k-1} & :=\est{\man{P}}_{\man{yy},*}^{k|k-1}+\man{Pmnoise}_{k}.\label{eq:RiAdUKF-CovMeasNoise}
\end{align}
}
\item \label{enu:RiAdUKF-State-correction}State correction.\textup{
\begin{align}
\man{G}_{k} & :=\est{\man{P}}_{\man{\vector x}\man{\vector y}}^{k|k-1}\Big(\est{\man{P}}_{\man{\vector y}\man{\vector y}}^{k|k-1}\Big)^{-1}\label{eq:RiAdUKF-KalmanGain}\\
\hat{\vector x}_{k|k}^{TM} & :=\man{G}_{k}\log_{\est{\man{\vector y}}_{k|k-1}}\big(\outcome{\man{\vector y}}_{k}\big)\label{eq:RiAdUKF-tangent-correct-estimate}\\
\est{\man{\vector x}}_{k|k} & :=\exp_{\est{\man{\vector x}}_{k|k-1}}\big(\hat{\vector x}_{k|k}^{TM}\big)\label{eq:RiAdUKF-corrected-estimate}\\
\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k,\est{\man{\vector x}}_{k|k-1}} & :=\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k-1}-\man{G}_{k}\est{\man{P}}_{\man{\vector y}\man{\vector y}}^{k|k-1}\man{G}_{k}^{T}\nonumber \\
\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k} & :=\mbox{PT}\Big(\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k,\est{\man{\vector x}}_{k|k-1}},\est{\man{\vector x}}_{k|k-1},\est{\man{\vector x}}_{k|k}\Big).\label{eq:RiAdUKF-corrected-covariance}
\end{align}
}
\end{enumerate}
\end{lyxalgorithm}
\emph{All steps of the RiUKFs are justified by and coherent
with the other results of this work}. Among these, the most important
are $\sigma\text{R}$, $\text{RiUT}$ and Riemannian systems.
The notations $\text{RiUT}_{1}$ and $\text{RiUT}_{2}$ {[}in (\ref{RiAuUKF-RiUT1}),
(\ref{RiAuUKF-RiUT2}), (\ref{eq:RiAdUKF-RiUT1}), and (\ref{eq:RiAdUKF-RiUT2}){]}
indicate these $\text{RiUT}$s \emph{can have different forms}. The output
of $\text{RiUT}_{1}$ has only two terms—which is different from the number
of mapped variables in Definition \ref{def:Riemannian-Unscented-Transformation}—meaning
that only the first two variables of the output of Definition \ref{def:Riemannian-Unscented-Transformation}
are needed.
We can consider \emph{not regenerating the independent set of $\text{RiUT}_{2}$}
when $\text{RiUT}_{1}=\text{RiUT}_{2}$. Let $\man{\chi}_{*}^{k|k-1}$ be the
\emph{dependent set of $\text{RiUT}_{1}$} and $\man{\chi}_{i}^{k|k-1}$
the \emph{dependent set of $\text{RiUT}_{2}$}. Because, from (\ref{RiAuUKF-RiUT2})
and (\ref{eq:RiAdUKF-RiUT2}), $\man{\chi}_{i,*}^{k|k-1}$ and $\man{\chi}^{k|k-1}$
are different objects, we say $\man{\chi}_{*}^{k|k-1}$ is regenerated.
Nonetheless, we could set $\man{\chi}_{i,*}^{k|k-1}=\man{\chi}^{k|k-1}$;
consequently, the computational effort of the filter would decrease—calculating
a new $\man{\chi}^{k|k-1}$ can be computationally expensive because it includes
calculating a square-root matrix of $\est{\man{P}}_{\man{\vector x\vector x},aug}^{k|k-1}$
or $\est{\man{P}}_{\man{\vector x\vector x}}^{k|k-1}$. But in this case,
i) the estimation quality of the RiAdUKF would possibly deteriorate—it
has been shown for the Euclidean case (cf. Section 5.1 of \cite{Menegaz2016})—and
ii) the reasoning behind the RiUKFs explained in the second paragraph
of this section would not be true anymore.
After choosing the manifolds' atlases, \emph{all} expressions for
the Riemannian exponentials, logarithms, etc., \emph{must be} \emph{coherent}
with the chosen parameterizations. These transformations, as well
as other elements in these filters such as covariances, have different
expressions \emph{depending on the parameterizations} defining the
manifolds.
We can find $\text{Ri}\sigma\text{R}$s (with $w_{i}^{m}>0$ for every $i=1,\ldots,N$)
\emph{by first finding $\sigma\text{R}$s in tangent spaces} (see the last paragraph
of Section \ref{sec:Riemannian-sigma-representations}). The independent
sets of $\text{RiUT}_{1}$ and $\text{RiUT}_{2}$ can be difficult to find. Fortunately,
closed forms of $\text{Ri}\sigma\text{R}$s (which can be independent sets of $\text{RiUT} s$)
can be found from closed forms of normalized $\sigma\text{R}$s by using Theorem
\ref{thm:Euclidean-to-Riemannian-sigma-rep}.
The method for obtaining the sample means of $\text{RiUT}_{1}$ and $\text{RiUT}_{2}$
affects the computation efforts of the RiUKFs because, following \cite{Pennec2006},
we define these sample means as optimization problems (Section \ref{sec:Statistics-of-weighted}).
Sometimes there exist closed forms, but more often it requires optimization
algorithms. The reader will find efficient options in \cite{Absil2008,Pennec2006,Moakher2002,Pennec1998}
and in the MATLAB and Python toolbox ManOpt \cite{Boumal2014}\footnote{Available for download at \textcolor{blue}{\url{https://www.manopt.org/}}.}.
Computational efforts of the RiUKFs also vary with the underlying
manifolds and their atlases because the expressions for exponentials,
logarithms and parallel transports change with them. The reader can
also refer to the ManOpt toolbox for many efficient implementations
of these operations.
Apart from these three factors, the computational effort depends mainly
on the square-rooting involved in the $\text{Ri}\sigma\text{R}$ calculations
and the $\est{\man{P}}_{\man{\vector y}\man{\vector y}}^{k|k-1}$ inversion
in the Kalman gain calculations. Since we can find $\text{Ri}\sigma\text{R}$s\textit{
}\textit{\emph{by finding $\sigma\text{R}$s in tangent spaces and, to the best
of our knowledge, all known $\sigma\text{R}$s require square-rooting a covariance
matrix (cf. \cite{Menegaz2015}), the computational complexity of
these operations in }}(\ref{RiAuUKF-RiUT1}) is $\mathcal{O}([n_{\state}+n_{\pnoise}]^{3})$,
in (\ref{RiAuUKF-RiUT2}) $\mathcal{O}([n_{\meas}+n_{\mnoise}]^{3})$,\textit{\emph{
in }}(\ref{eq:RiAdUKF-RiUT1}) is $\mathcal{O}(n_{\state}^{3})$, and
in (\ref{eq:RiAdUKF-RiUT2}) $\mathcal{O}(n_{\meas}^{3})$. The \textit{\emph{computational
complexity}} of the $\est{\man{P}}_{\man{\vector y}\man{\vector y}}^{k|k-1}$
inversion is $\mathcal{O}(n_{\meas}^{3})$ in both (\ref{eq:RiAuUKF-KalmanGain})
and (\ref{eq:RiAdUKF-KalmanGain}).
RiUKFs are\emph{ generalizations of UKFs}. Every UKF is a RiUKF, and
every RiUKF for Euclidean state-variables is a UKF. It is easy to
see that, if $\mathcal{N}_{\man{\vector x}}$ and $\mathcal{N}_{\man{\vector y}}$ are
Euclidean spaces, then RiAuUKF is equivalent to AuUKF (Algorithm 7
of \cite{Menegaz2016}), and RiAdUKF to AdUKF (Algorithm 6 of \cite{Menegaz2016}).
Since Cartesian products of Riemannian manifolds are also Riemannian
manifolds (e.g., $\mathbb{S}^{3}\times\ensuremath{\mathbb{R}}^{n}$) \cite{DoCarmo1992},
the proposed RiUKF also estimates systems with state variables belonging
to \emph{Cartesian products} of Riemannian manifolds.
The Kalman gain $\man{G}_{k}$ in (\ref{eq:RiAuUKF-KalmanGain})
and (\ref{eq:RiAdUKF-KalmanGain}) could be defined in a more general
way, as done in (\ref{eq:UKF-Riemannian-CartesianSolution-3}). However,
it would imply more computational effort—the dimension of the sigma
points and matrices would be higher—at the exchange of no advantage,
at least at present; perhaps benefits can be obtained from (\ref{eq:UKF-Riemannian-CartesianSolution-3})
in future works.
The three assumptions cited at the beginning of Section \ref{sec:Riemannian-sigma-representations}
impose some limitations on the RiUKFs. Assumption \ref{enu:assumption1}
limits the RiUKFs to the case of geodesically-complete Riemannian
manifolds: still there are many of these manifolds useful for practical
applications, such as unit spheres, special orthogonal groups, special
Euclidean groups, real projective spaces, special unitary groups,
Grassmann manifolds, among others (cf. \cite{Absil2008} and Section
\ref{subsec:Kalman-filtering-in}). Assumption \ref{enu:assumption2}
imposes careful choice of $\man{P}_{\man{\vector x}\man{\vector x}}^{0}$,
$\man{Ppnoise}_{k}$, $\man{Pmnoise}_{k}$ (or $Q_{k}$ and
$Pmnoise_{k}$ for the RiAdUKF): their values should be consistent
with the logarithms in their definitions {[}or in (\ref{eq:Riemannian-additive-system})
in the case of the RiAdUKF{]}; since these covariances are tuning
parameters and are often set based on intuition, a user could choose
inconsistent (too large) values; this would probably result in either
inconsistent sigma points—because the tangent sigma points would be
outside the tangent cut locus—or in some divergence in the algorithm,
such as a non-positive state covariance matrix. Assumption \ref{enu:assumption6}
will not, in most cases, impose other limitations if the user models
the system equations and parameters consistently.
We can find \emph{particular cases} of RiUKFs by choosing particular
forms of $\text{Ri}\sigma\text{R}$s; Table \ref{table:Riemannian-augmented-minimum-kalman-filters}
shows some cases for $\text{RiUT}_{1}=\text{RiUT}_{2}$—the second and third columns
contain the filters. Each filter is the resulting variant of using
i) the corresponding RiUKF in the \emph{heading row} of its column
(RiAuUKF or RiAdUKF), and ii) the corresponding $\text{Ri}\sigma\text{R}$ written in
the first column of its row. For instance, the Riemannian Minimum
AuUKF (RiMiAuUKF in the first row and second column), is the result
of the RiAuUKF with the RiMi$\sigma$R (Corollary \ref{cor:Riemannian-sr-minimum-numbers}).
All filters in Table \ref{table:Riemannian-augmented-minimum-kalman-filters}
are \emph{new}.
\begin{table}
\caption{RiUKF Variants for some Ri$\sigma$Rs.\label{table:Riemannian-augmented-minimum-kalman-filters}}
\begin{centering}
\begin{tabular}{ccc}
\textbf{$\sigma$R}\footnotemark[1] & \textbf{AuUKF}\footnotemark[1] & \textbf{AdUKF }\footnotemark[1]\tabularnewline
\hline
\textbf{RiMi$\sigma$R} & RiMiAuUKF & RiMiAdUKF\tabularnewline
\textbf{RiRhoMi$\sigma$R} & RiRhoMiAuUKF & RiRhoMiAdUKF\tabularnewline
\textbf{RiMiSy$\sigma$R} & RiMiSyAuUKF & RiMiSyAdUKF\tabularnewline
\textbf{RiHoMiSy$\sigma$R} & RiHoMiSyAuUKF & RiHoMiSyAdUKF\tabularnewline
\end{tabular}
\par\end{centering}
\footnotemark[1]{Ad for Additive, Au for Augmented, Ho for Homogeneous,
Mi for Minimum, Ri stands for Riemannian, $\sigma$R for \textbf{$\sigma$-}Representation\textbf{,}
Sy for Symmetric, UKF for Unscented Kalman Filter. Rho stands for Rho
itself; see also the acronyms list in Appendix \ref{subsec:Notation-and-Acronyms}}
\end{table}
An RiUKF for the partially-additive system (\ref{eq:Riemannian-partiallly-additive-system1})
is given by step \ref{enu:RiAuUKF-State-prediction} of the RiAuUKF
with steps \ref{enu:RiAdUKF-Measurement-prediction} and \ref{enu:RiAdUKF-State-correction}
of the RiAdUKF, and for (\ref{eq:Riemannian-partiallly-additive-system2})
is given by step \ref{enu:RiAdUKF-State-prediction} of the RiAdUKF
with steps \ref{enu:RiAuUKF-Measurement-prediction} and \ref{enu:RiAuUKF-State-correction}
of the RiAuUKF.
For (\ref{eq:Riemannian-additive-system}), (\ref{eq:Riemannian-partiallly-additive-system1})
and (\ref{eq:Riemannian-partiallly-additive-system2}) when either
$f_{k}$ or $h_{k}$ are the identity function, we can simplify their
filters by skipping sigma points calculations; hence saving computation
effort. If, for example, $f_{k}(\man{\vector x})=\man{\vector x}$, then
the following two equations can replace the state prediction (e.g.,
the step \ref{enu:RiAdUKF-State-prediction} of the RiAdUKF):
\begin{align*}
\est{\man{\vector x}}_{k|k-1} & :=\exp_{\est{\man{\vector x}}_{k-1|k-1}}\mean{\varpi}_{k}\\
\est{\man{P}}_{\man{\vector x\vector x}}^{k|k-1} & :=\est{\man{P}}_{\man{\vector x\vector x}}^{k-1|k-1}+Ppnoise_{k}.
\end{align*}
The case $h_{k}(\man{\vector x})=\man{\vector x}$ is similar.
\subsection{Relation with the literature}
\label{subsec:Relation-with-the}
To the best of our knowledge, the \emph{UKF for Riemannian manifolds}
(\emph{UKFRM})\textit{ }\textit{\emph{of}} \textit{\emph{\cite{Hauberg2013}}}
is the only UKF for any geodesically-complete Riemannian manifold
in the literature. Consider system (\ref{eq:Riemannian-general-system})
and define the following functions—cf. (1) and (2) of \cite{Hauberg2013}—:
\begin{equation}
f_{k}^{*}(\man{\vector x}_{k-1}):=f_{k}(\man{\vector x}_{k-1},\man{\varpi}_{k-1}),\,h_{k}^{*}(\man{\vector x}_{k}):=h_{k}(\man{\vector x}_{k},\man{\vartheta}_{k}).\label{eq:system-of-hauberg}
\end{equation}
Suppose that i) the initial state $\man{\vector x}_{0}$ is characterized
by $\man{\vector x}_{0}\sim(\mean{\man{\vector x}}_{0},\man{P}_{\man{\vector x}\man{\vector x}}^{0})_{\mathcal{N}_{\man{\vector x}}},$
and ii) the measurements $\outcome{\man{\vector y}}_{1}$, $\outcome{\man{\vector y}}_{2}$,
..., $\outcome{\man{\vector y}}_{k_{f}}$ are given. Let
\[
\mbox{HoMiSy}\sigma\text{R}:(\mean{X},P_{XX})\mapsto\{\chi_{i},w_{i}\}_{i=1}^{N}
\]
be a function mapping the mean $\mean{X}$ and covariance $P_{XX}$
of a given random vector $X$ to a HoMiSy$\sigma$R (Corollary
3 of \cite{Menegaz2015}). Then the UKFRM of \cite{Hauberg2013} is
given by the following algorithm:
\begin{lyxalgorithm}[UKFRM of \cite{Hauberg2013}]
\label{alg:UKF-Hauberg}Set $N:=2n_{\state}+1$ and the initial
estimates $\est{\man{\vector x}}_{0|0}:=\mean{\man{\vector x}}_{0}$ and
$\est{\man{P}}_{\vector x\vector x}^{0|0}:=\man{P}_{\vector x\vector x}^{0}$.
For $k=1,...,k_{f}$, perform the following steps:
\end{lyxalgorithm}
\begin{enumerate}[labelsep=0.1cm,leftmargin=0.45cm]
\item State prediction.
\begin{align}
& \big\{\chi_{i,k-1|k-1}^{TM},w_{i}\big\}{}_{i=1}^{N}:=\mbox{HoMiSy}\sigma\text{R}\Big([0]_{n_{\state}},\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k-1|k-1}\Big)\label{eq:UKF-Hauberg-tangent-previous-sigma-rep}\\
& \man{\chi}_{i}^{k-1|k-1}:=\exp_{\est{\man{\vector x}}_{k-1|k-1}}\big(\chi_{i,k-1|k-1}^{TM}\big),\,i=1,\ldots,N\label{eq:UKF-Hauberg-previous-sigma-rep}\\
& \man{\chi}_{i,*}^{k|k-1}:=f_{k}^{*}\big(\man{\chi}_{i}^{k-1|k-1}\big),\,i=1,\ldots,N\nonumber \\
& \est{\man{\vector x}}_{k|k-1}:=\arg\min_{\man a\in\mathcal{N}_{\man{\vector x}}}\sum_{i=1}^{N}w_{i}\text{dist}^{2}\big(\man{\chi}_{i,*}^{k|k-1},\man a\big)\nonumber \\
& \est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k-1}:=\sum_{i=1}^{N}w_{i}\Big(\log_{\est{\man{\vector x}}_{k|k-1}}\big(\man{\chi}_{i,*}^{k|k-1}\big)\Big)\Big(\diamond\Big)^{T}.\label{eq:UKF-Hauberg-Predicted-X-COV}
\end{align}
\item Measurement prediction.
\begin{align}
& \Big\{\chi_{i,k|k-1}^{TM},w_{i}\Big\}_{i=1}^{N}:=\mbox{HoMiSy}\sigma\text{R}\Big([0]_{n_{\state}},\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k-1}\Big)\label{eq:UKF-Hauberg-tangent-predicted-sigma-rep}\\
& \man{\chi}_{i}^{k|k-1}:=\exp_{\est{\man{\vector x}}_{k|k-1}}\big(\chi_{i,k|k-1}^{TM}\big),\,i=1,\ldots,N\label{eq:UKF-Hauberg-second-predicted-sigma-rep}\\
& \man{\gamma}_{i}^{k|k-1}:=h_{k}^{*}\big(\man{\chi}_{i}^{k|k-1}\big),\,i=1,\ldots,N\nonumber \\
& \est{\man{\vector y}}_{k|k-1}:=\arg\min_{\man b\in\mathcal{N}_{\man{\vector y}}}\sum_{i=1}^{N}w_{i}\text{dist}^{2}\left(\man{\gamma}_{i}^{k|k-1},\man b\right)\nonumber \\
& \est{\man{P}}_{\man{\vector y}\man{\vector y}}^{k|k-1}:=\sum_{i=1}^{N}w_{i}\Big(\log_{\est{\man{\vector y}}_{k|k-1}}\big(\man{\gamma}_{i}^{k|k-1}\big)\Big)\Big(\diamond\Big)^{T}\label{eq:UKF-Hauberg-Predicted-Y-COV}\\
& \est{\man{P}}_{\man{\vector x}\man{\vector y}}^{k|k-1}:=\sum_{i=1}^{N}w_{i}\Big(\log_{\est{\man{\vector x}}_{k|k-1}}\big(\man{\chi}_{i}^{k|k-1}\big)\Big)\nonumber \\
& \quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\Big(\log_{\est{\man{\vector y}}_{k|k-1}}\big(\man{\gamma}_{i}^{k|k-1}\big)\Big)^{T}.\nonumber
\end{align}
\item State correction.
\begin{align}
\man{G}_{k} & :=\est{\man{P}}_{\man{\vector x}\man{\vector y}}^{k|k-1}\Big(\est{\man{P}}_{\man{\vector y}\man{\vector y}}^{k|k-1}\Big)^{-1}\nonumber \\
\hat{\vector x}_{k|k}^{TM} & :=\hat{\vector x}_{k|k-1}^{TM}+\man{G}_{k}\log_{\est{\man{\vector y}}_{k|k-1}}\big(\outcome{\man{\vector y}}_{k}\big)\label{eq:ukf-hauberg-state-corrected-estimate}\\
\est{\man{\vector x}}_{k|k} & :=\exp_{\est{\man{\vector x}}_{k|k-1}}\big(\hat{\vector x}_{k|k}^{TM}\big)\nonumber \\
\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k,\est{\man{\vector x}}_{k|k-1}} & :=\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k-1}-\man{G}_{k}\est{\man{P}}_{\man{\vector y}\man{\vector y}}^{k|k-1}\man{G}_{k}^{T}\nonumber \\
\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k} & :=\mbox{PT}\Big(\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{k|k,\est{\man{\vector x}}_{k|k-1}},\est{\man{\vector x}}_{k|k-1},\est{\man{\vector x}}_{k|k}\Big).\nonumber
\end{align}
\end{enumerate}
Compared with the UKFRM of \cite{Hauberg2013}, we can point out the
following five improvements of the RiUKFs:
\begin{enumerate}[labelsep=0.1cm,leftmargin=0.45cm]
\item The noises are incorporated into the RiUKFs, but in the UKFRM they
are not. In the RiAuUKF, the noises are incorporated by realizing
the augmented sigma points in the process and measurement functions
{[}equations (\ref{RiAuUKF-RiUT1}) and (\ref{RiAuUKF-RiUT2}){]};
and in the RiAdUKF, by ``adding'' (in the tangent space) their means
and covariances {[}equations (\ref{eq:RiAdUKF-MeanProcNoise}), (\ref{eq:RiAdUKF-CovProcNoise}),
(\ref{eq:RiAdUKF-MeanMeasNoise}), (\ref{eq:RiAdUKF-CovMeasNoise}){]}.\\
However, the UKFRM excludes the noises. Even though the UKFRM of \cite{Hauberg2013}
considers a system with process and measurement noises {[}cf. (\ref{eq:system-of-hauberg}){]},
they do \emph{not} influence any estimate within the UKFRM; these
noises' statistics \emph{do not} \emph{appear at any step }of the
UKFRM—commonly, filters consider these statistics when calculating
the predicted covariances, but this is also not the case for the UKFRM
{[}cf. (\ref{eq:UKF-Hauberg-Predicted-X-COV}) and (\ref{eq:UKF-Hauberg-Predicted-Y-COV}){]}.\\
We can point out at least two consequences of this absence of the
noise elements:
\begin{enumerate}
\item the Euclidean case of the UKFRM is \emph{not equivalent} \emph{to
any} (Euclidean) UKF. This can be seen by considering Euclidean manifolds
in Algorithm \ref{alg:UKF-Hauberg} (cf. the last paragraph of Sections
\ref{subsec:Statistics-of-random} and of \ref{sec:Statistics-of-weighted}).
Besides, to the best of our knowledge, there is no UKF without process
and measurement noises covariance (cf. \cite{Menegaz2015,Menegaz2016}).
\item the UKFRM might \emph{diverge} in situations in which the RiUKFs do
not. This behavior can be seen in the following simple example: consider
(\ref{eq:Riemannian-additive-system}) and (\ref{eq:system-of-hauberg})
with $\mathcal{N}_{\man{\vector x}}^{n_{\state}}=\mathcal{N}_{\man{\vector x}}^{n_{\pnoise}}=\mathcal{N}_{\man{\vector x}}^{n_{\meas}}=\mathcal{N}_{\man{\vector x}}^{n_{\mnoise}}=\ensuremath{\mathbb{R}}$.
Suppose that i) the initial state is $\vector x_{0}\sim(1,1)_{\ensuremath{\mathbb{R}}},$
ii) the noise covariances are $Ppnoise_{k}=Pmnoise_{k}=1$,
iii) the system functions are $f_{k}(\vector x_{k-1})=f_{k}^{*}(\vector x_{k-1})=\vector x_{k-1}$
and $h_{k}(\vector x_{k})=h_{k}^{*}(\vector x_{k})=1-\vector x_{k}$,
and iv) the measurements are $\outcome{\vector y}_{1}=\cdots=\outcome{\vector y}_{k_{f}}=1$.
For this example, we ran the (linear) KF (cf. \cite{Jazwinsky1970}),
the RiAdUKF, and the UKFRM. Both the KF and the RiAdUKF provided the
same estimates, but the UKFRM did not provide consistent results;
the simulation was halted because the corrected covariance ($\est{\man{P}}_{\man{\vector x}\man{\vector x}}^{2|2}$)
lost its positiveness. Similar results occurred in the simulations
of Section \ref{sec:Numerical-Example}.
\end{enumerate}
\item We introduced a consistent definition {[}equation (\ref{eq:Riemannian-additive-system}){]}
for the system associated with the RiAdUKF. To the best of our knowledge,
(\ref{eq:Riemannian-additive-system}) is the first consistent additive-noise
Riemannian stochastic discrete-time dynamic system.
\item To the best of our knowledge, the RiUKFs are the \emph{first} UKFs
for Riemannian state-space systems considering noises with \emph{non-zero
means}. Even for simple manifolds such as the unit sphere, we could
not find a UKF considering this case.
\item All the equations of our RiUKFs are \emph{formally justified}.
These justifications are the following ones:
\begin{enumerate}
\item \emph{The equations of steps }\ref{enu:RiAuUKF-State-prediction}
and \ref{enu:RiAuUKF-Measurement-prediction} of the RiUKFs\emph{
are justified by Definition }\ref{def:Consider-a-Riemannian},\emph{
Theorem }\ref{thm:Euclidean-to-Riemannian-sigma-rep} and Corollary
\ref{sec:Riemannian-Unscented-Transformat}.
\item \emph{Equations }(\ref{eq:RiAuUKF-KalmanGain})\emph{ and }(\ref{eq:RiAdUKF-KalmanGain})\emph{
(the Kalman Gains) are justified in Section \ref{subsec:State-and-measurement=00005B}}.
This form of the Kalman gain $\man{G}_{k}$ in (\ref{eq:RiAuUKF-KalmanGain})
and (\ref{eq:RiAdUKF-KalmanGain}) follows as a particular case of
the Kalman gain of a more general system ($\man{G}_{k,**}$)
where the state and the measurement belong to the product $\mathcal{N}_{\man{\vector x}}\times\mathcal{N}_{\man y}$.
\item \emph{The equations of step }\ref{enu:RiAuUKF-State-correction} of
the RiUKFs\emph{ are justified in Section \ref{subsec:Correction-equations}}.
We showed that they follow from considering i) $\vector x_{k|k-1}^{TM}$
and $\vector y_{k|k-1}^{TM}$ normally-joint distributed {[}equation (\ref{eq:predicted-tangential-statistics-are-normal}){]},
and ii) $\vector x_{k|k}^{TM}$ given by a linear correction of $\vector x_{k|k-1}^{TM}$
by $(\outcome{\vector y}_{k}^{TM}-\vector y_{k|k-1}^{TM})$ {[}equation (\ref{eq:correction-equation-of-xk-tangent}){]}.
\end{enumerate}
\item (Euclidean) UKFs are particular cases of the RiUKFs (cf. Section \ref{subsec:Riemannian-Unscented-Filters}).
\end{enumerate}
Altogether, we can say the RiUKFs have novelties compared with the
UKF for Riemannian state-space systems of the literature.
\section{Example: Satellite Attitude Tracking}
\label{sec:Numerical-Example}
In this section, we apply the developed theory to estimate the attitude
of a satellite in a realistic scenario (cf. \cite{Crassidis2007}).
The set of possible attitudes of a rotating body is not a Euclidean
space, but a three dimensional smooth manifold known as $SO(3)$.
This manifold has many different topological properties from a Euclidean
space: for instance, it is compact whilst Euclidean spaces are not.
Due to this difference, Euclidean UKFs designed over Euclidean spaces
may not work properly: their estimates may not stay within the state-space
manifold, resulting in poor performance and poor accuracy \cite{Crassidis2003}.
Although we could apply an RiUKF for $\SO 3$ in this example, we
prefer to apply an RiUKF for the set of unit quaternions $S^{3}$
because they represent, without singularities \cite{Stuelpnagel1964},
attitudes using the minimal set of parameters. Let $\quat q_{i}=\begin{bmatrix}\eta_{i} & \myvec{\epsilon}_{i}^{T}\end{bmatrix}^{T}\in\mathbb{R}^{4}$,
where $\eta_{i}\in\mathbb{R}$ and $\myvec{\epsilon}_{i}\in\mathbb{R}^{3}$.
It is possible to prove that the three dimensional sphere
\begin{equation}
S^{3}=\{(q_{1},q_{2},q_{3},q_{4})\in\mathbb{R}^{4}:q_{1}^{2}+q_{2}^{2}+q_{3}^{2}+q_{4}^{2}=1\}\label{eq:sphere_manifold}
\end{equation}
is a Riemannian manifold and the product
\[
\quat q_{1}\otimes\quat q_{2}=\begin{bmatrix}\eta_{1}\eta_{2}-\myvec{\epsilon}_{1}^{T}\myvec{\epsilon}_{2}\\
\eta_{1}\myvec{\epsilon}_{2}+\eta_{2}\myvec{\epsilon}_{1}+\myvec{\epsilon}_{1}\times\myvec{\epsilon}_{2}
\end{bmatrix}
\]
is closed. For a rotation of an angle $\theta$ around a unit vector
$\mathbf{n}$, there are two associated unit quaternions $\quat q$ and $\quat q'$
such that
\[
\quat q=\cos\left(\frac{\theta}{2}\right)+\boldsymbol{\imath_{m}}\mathbf{n}\sin\left(\frac{\theta}{2}\right),\quad\quat q'=-\quat q.
\]
Let $\quat q(t)\in S^{3}$ be the attitude of the satellite at the
time instant $t$, and $\omega(t)\in\ensuremath{\mathbb{R}}^{3}$ its angular
velocity. The evolution of $\quat q(t)$ over time can be described
by the following differential equation \cite{Zipfel2007}:
\begin{equation}
\dot{\quat q}\left(t\right)=\frac{1}{2}\quat{\omega}(t)\otimes\quat q\left(t\right),\label{eq:satellite-system}
\end{equation}
where $\quat{\omega}\in\mathbb{R}^{4}$ is given by $\quat{\omega}=\begin{bmatrix}0 & \omega^{T}\end{bmatrix}^{T}$.
We generate synthetic data by a fourth order Runge-Kutta integration
of (\ref{eq:satellite-system}) over the interval $[0\text{s},20\text{s}]$
with angular velocity
\[
\omega\left(t\right)=\left[\begin{array}{c}
0.03\sin\left(\left[\pi t/600\right]\degree\right)\\
0.03\sin\left(\left[\pi t/600\right]\degree-300\degree\right)\\
0.03\sin\left(\left[\pi t/600\right]\degree-600\degree\right)
\end{array}\right]
\]
and initial state $\quat q\left(0\right)=0.96+\boldsymbol{\imath_{m}}[0.13,\,0.19,\,\sqrt{1-0.96^{2}-0.13{}^{2}-0.19{}^{2}}]^{T}$.
For filtering, we consider (\ref{eq:Riemannian-additive-system})
with $\man{\vector x}_{k}=\quat q(k\delta t)$
\begin{align*}
\theta(t) & :=\norm{\omega(t)}\frac{\delta t}{2}\\
f_{k}\big(\man{\vector x}_{k-1}\big) & =\begin{bmatrix}\cos\theta(t) & \frac{\omega^{T}(t)}{\norm{\omega(t)}}\sin\theta(t)\end{bmatrix}^{T}\otimes\quat{\vector x}_{k-1}\\
h_{k}\big(\man{\vector x}_{k}\big) & =\man{\vector x}_{k},
\end{align*}
$\mean{\varpi}_{k}=\mean{\vartheta}_{k}=[0]_{3\times1}$, $\boldsymbol{Ppnoise}_{k}=(0.31236\times10^{-6})^{2}I_{3}$,
and $\boldsymbol{Pmnoise}_{k}=(0.5\pi/180\times10^{-6})^{2}I_{3}$.
These values for $\boldsymbol{Ppnoise}_{k}$ and $\boldsymbol{Pmnoise}_{k}$
were chosen according to \cite{Crassidis2003}.
We performed $1,000$ simulations with the RiUKFs of Table \ref{table:Riemannian-augmented-minimum-kalman-filters}
and the UKFRM of \cite{Hauberg2013}. To calculate Riemannian means,
we used the gradient descent method of \cite{Pennec1998} with a threshold
of $10^{-6}$; and for Riemannian exponentials, Riemannian logarithms,
and parallel transport, we used the MATLAB toolbox ManOpt \cite{Boumal2014}.
For all simulations, the RiUKFs of Table \ref{table:Riemannian-augmented-minimum-kalman-filters}
provided good estimates, with a Root Mean Square Error in the order
of $10^{-6}$ (Table \ref{tab:RMSE-stat-track}). The RiMiAdUKF and
the RiRhoMiAdUKF are the best alternatives for this example because
i) they demand less computational effort than the other filters—they
are additive and are composed of the least number of sigma points (cf.
Corollary \ref{cor:Riemannian-sr-minimum-numbers})— and ii) all RiUKFs
performed almost equally.
\begin{table}
\centering{}\caption{Root Mean Square Error ($\times10^{-6}$) of each RiUKF in Table \ref{table:Riemannian-augmented-minimum-kalman-filters}
considering 1,000 simulations of a satellite attitude tracking example.\label{tab:RMSE-stat-track}}
\begin{tabular}{cccc}
\hline
RiMiAuUKF & RiRhoMiAuUKF & RiMiSyAuUKF & RiHoMiSyAuUKF\tabularnewline
2,612 & 2,614 & 2,614 & 2,614\tabularnewline
\hline
RiMiAdUKF & RiRhoMiAdUKF & RiMiSyAdUKF & RiHoMiSyAdUKF\tabularnewline
2,612 & 2,613 & 2,613 & 2,613\tabularnewline
\hline
\end{tabular}
\end{table}
The UKFRM failed in all the $1,000$ simulations; in every simulation,
the state covariance estimate lost its positiveness. Nonexistence
of noise terms in the UKFRM might explain this problematic behavior
(cf. Section \ref{subsec:Relation-with-the}).
\section{Conclusions}
\label{sec:Conclusions}
In this work, we extend the systematization of the Unscented Kalman
Filtering theory we developed in \cite{Menegaz2015} towards estimating
the state of Riemannian systems. In this systematization, we introduce
the following results\footnote{These results were first presented in Menegaz's PhD thesis \cite{Menegaz2016}.}
(all results are mathematically justified):
\begin{enumerate}
\item A Riemannian extension of the $\sigma$-representation ($\sigma\text{R}$):
the Riemannian $\sigma$-representation ($\text{Ri}\sigma\text{R}$, Section \ref{sec:Riemannian-sigma-representations}).
\item A technique to obtain closed forms of the $\text{Ri}\sigma\text{R}$ by closed forms
of the $\sigma\text{R}$ (Theorem \ref{thm:Euclidean-to-Riemannian-sigma-rep}).
Using this result, we discover (Corollary \ref{cor:Riemannian-sr-minimum-numbers})
\begin{enumerate}
\item the minimum number of sigma points of an $\text{Ri}\sigma\text{R}$,
\item the minimum number of sigma points of a symmetric $\text{Ri}\sigma\text{R}$,
\item closed forms for the minimum $\text{Ri}\sigma\text{R}$, and
\item closed forms for the minimum symmetric $\text{Ri}\sigma\text{R}$.
\end{enumerate}
\item An \textit{\emph{additive-noise}} Riemannian\textit{ }system definition
(Section \ref{subsec:Riemannian-Systems}). We require this definition
to introduce additive-noise Riemannian UKFs.
\item Kalman correction equations on Riemannian manifolds (Section \ref{subsec:Correction-equations}).
\item New discrete-time Riemannian UKFs (RiUKFs), namely the Riemannian
Additive UKF and the Riemannian Augmented UKF (Section \ref{subsec:Riemannian-Unscented-Filters}).
Besides, we
\begin{enumerate}
\item provide a list of particular variants of these filters (Table \ref{table:Riemannian-augmented-minimum-kalman-filters});
all these variants are new. Compared with the literature's UKF for
Riemannian manifolds (in \cite{Hauberg2013}), our RiUKFs are more
consistent, formally-principled, and general.
\item numerically compare all these particular variants with the literature's
UKF on Riemannian manifolds in a satellite attitude tracking scenario.
For all 1,000 simulations, the new variants provided good estimates,
but the literature's filter diverged; in every simulation, the state
covariance estimate lost its positiveness.
\end{enumerate}
\end{enumerate}
With this work, we hope to have expanded the literature's knowledge
on Kalman filtering and provided a tool for the research community
to improve the performance and stability of many UKFs.
Following this study, we recommend the research community searching
for computationally-implementable variants of RiUKFs. Since concepts
of the Riemannian manifold theory can be very abstract, depending
on the underlying manifold, developing RiUKFs variants is not trivial.
This task is even harder without a generalizing base theory: that
is one of the reasons why, in this work, we develop a general consistent
systematized theory of Unscented Kalman Filters for Riemannian State-Space
Systems.
\appendix
\section{Appendix}
\subsection{Results relative to Riemannian manifolds \label{appendix:Riemannian-manifolds}}
In this appendix, we provide some results relative to the theory of
Riemannian manifolds. These definitions are mainly based on \cite{DoCarmo1992}.
\begin{defn}[Differentiable manifold \cite{DoCarmo1992}]
\label{def:differentiable-manifold}A \emph{differentiable manifold}
of dimension $n$ is a pair $\left(\mathcal{N},\mathcal{A}\right)$ where $\mathcal{N}$
is a set, and $\mathcal{A}=\{(U_{a},\ensuremath{\varphi}_{a})\}$, called
atlas, a family of injective mappings (\emph{charts}) $\ensuremath{\varphi}_{a}:U_{a}\subset\ensuremath{\mathbb{R}}^{n}\rightarrow\mathcal{N}$
of open sets $U_{a}$ of $\ensuremath{\mathbb{R}}^{n}$ into $\mathcal{N}$ such
that:
\begin{enumerate}
\item \label{enu:def_dif_man_item1}$\bigcup_{a}\ensuremath{\varphi}_{a}(U_{a})=\mathcal{N}.$
\item \label{enu:def_dif_man_item2}for any pair $a,b$,
with $\ensuremath{\varphi}_{a}(U_{a})\cap\ensuremath{\varphi}_{b}(U_{b})=:W\neq\emptyset,$
the sets $\ensuremath{\varphi}_{a}^{-1}(W)$ and $\ensuremath{\varphi}_{b}^{-1}(W)$
are open sets in $\ensuremath{\mathbb{R}}^{n}$, and the mappings $\ensuremath{\varphi}_{b}^{-1}\circ\ensuremath{\varphi}_{a}$
and $\ensuremath{\varphi}_{a}^{-1}\circ\ensuremath{\varphi}_{b}$ are differentiable.
\item \label{enu:def_dif_man_item3}The family $\mathcal{A}=\{(U_{a},\ensuremath{\varphi}_{a})\}$
is \emph{maximal} relative to the conditions \ref{enu:def_dif_man_item1})
and \ref{enu:def_dif_man_item2}).
\end{enumerate}
A pair $(U_{a},\ensuremath{\varphi}_{a})$ (or the mapping $\ensuremath{\varphi}_{a}$)
with $\man a\in\ensuremath{\varphi}_{a}(U_{a})$ is called a
\emph{parameterization} of $\mathcal{N}$ at $\man a$. For simplicity,
we can denote a differentiable manifold $\left(\mathcal{N},\mathcal{A}\right)$
of dimension $n$ by $\mathcal{N}$ or $\mathcal{N}^{n}$.
\end{defn}
\begin{defn}[Differentiable function\cite{DoCarmo1992}]
\label{def:differentiable-function}Let $\mathcal{N}_{1}^{n}$ and $\mathcal{N}_{2}^{m}$
be differentiable manifolds. A mapping $f:\mathcal{N}_{1}\rightarrow\mathcal{N}_{2}$
is \emph{differentiable at} $\man a\in\mathcal{N}_{1}$ if, given a parameterization
$\ensuremath{\varphi}_{2}:V\subset\ensuremath{\mathbb{R}}^{m}\rightarrow\mathcal{N}_{2}$ at $f(\man a)$,
there exists a parameterization $\ensuremath{\varphi}_{1}:U\subset\ensuremath{\mathbb{R}}^{n}\rightarrow\mathcal{N}_{1}$
at $\man a$ such that $f(\ensuremath{\varphi}_{1}(U))\subset\ensuremath{\varphi}_{2}(V)$
and the mapping
\begin{equation}
\tilde{f}:=\ensuremath{\varphi}_{2}^{-1}\circ f\circ\ensuremath{\varphi}_{1}:U\subset\ensuremath{\mathbb{R}}^{n}\rightarrow\ensuremath{\mathbb{R}}^{m}\label{eq:expressionphi-1}
\end{equation}
is differentiable at $\ensuremath{\varphi}_{1}^{-1}(\man a)$. We say $f$
is differentiable on an open set of $\mathcal{N}_{1}$ if it is differentiable
at all of the points of this open set.
\end{defn}
In this work, \emph{we suppose that all functions are differentiable
unless otherwise stated}.
\begin{defn}[Tangent space \cite{DoCarmo1992}]
\label{def:tangent-space}Let $\mathcal{N}$ be a differentiable manifold.
A differentiable function $\alpha:\mathbf{I}\rightarrow\mathcal{N}$ is called
a (differentiable) \emph{curve }in $\mathcal{N}$. Suppose $\alpha(0)=\man a\in\mathcal{N}$,
and let $\mathscr{D}_{\man a}(\mathcal{N})$ be the set of all functions $f:\mathcal{N}\rightarrow\ensuremath{\mathbb{R}}$
that are differentiable at $\man a$. The \emph{tangent vector to
the curve $\alpha$ }at $t=0$ is a function $\alpha'(0):\mathscr{D}_{\man a}(\mathcal{N})\rightarrow\ensuremath{\mathbb{R}}$
given by
\[
\alpha'(0)f=\left.d(f\circ\alpha)/dt\right|_{t=0},\quad f\in\mathscr{D}_{\man a}(\mathcal{N}).
\]
Note that $\alpha'(0)$ is an operator taking $f\in\mathscr{D}_{\man a}(\mathcal{N})$
to a scalar $\left.d(f\circ\alpha)/dt\right|_{t=0}$. A \emph{tangent
vector at $\man a$ }is a tangent vector of some curve $\alpha:\mathbf{I}\rightarrow\mathcal{N}$
with $\alpha(0)=\man a$ at $t=0$. The set of all tangent vectors
to $\mathcal{N}$ at $\man a$ will be indicated by $T_{\man a}\mathcal{N}$.
\end{defn}
The set $T_{\man a}\mathcal{N}$ forms a vector space of dimension $n$
and is called the \emph{tangent space of $\mathcal{N}$ at $\man a$.}
\begin{defn}[Arc length \cite{Pennec2006}]
\label{def:arc-length}Given an open interval $\mathbf{I}\subset\ensuremath{\mathbb{R}}$,
a differentiable function (Definition \ref{def:differentiable-function})
$\alpha:\mathbf{I}\rightarrow\mathcal{N}$ is called a (differentiable) \emph{curve
}in $\mathcal{N}$. Given a curve $\alpha$ on $\mathcal{N}$, the arc length
of $\alpha$ in the interval $\left[a,b\right]\subset\mathbf{I}$ is
defined by
\[
\mathbb{L}_{a}^{b}(\alpha):=\int_{a}^{b}\norm{\alpha'(t)}_{\alpha(t)}dt.
\]
\end{defn}
\begin{defn}[Differential of a function]
Let $\mathcal{N}_{1}$ and $\mathcal{N}_{2}$ be differentiable manifolds and
$f:\mathcal{N}_{1}\rightarrow\mathcal{N}_{2}$ a differentiable mapping.
For every $\man a\in\mathcal{N}_{1}$ and for each $v\in T_{\man a}\mathcal{N}_{1}$,
choose a differentiable curve $\alpha:\mathbf{I}\rightarrow\mathcal{N}_{1}$
with $\alpha(0)=\man a,$ $\alpha'(0)=v$. Take $\beta=f\circ\alpha$.
Then it can be shown that the operator $df_{\man a}(v)$
defined by
\[
df_{\man a}(v):=\beta'(0)
\]
is a tangent vector of $T_{f(\man a)}\mathcal{N}_{2}$. Moreover, the
mapping
\[
df_{\man a}:T_{\man a}\mathcal{N}_{1}\rightarrow T_{f(\man a)}\mathcal{N}_{2}:v\mapsto\beta'(0)
\]
is linear and does not depend on the choice of $\alpha$ \cite{DoCarmo1992}.
This linear mapping $df_{\man a}$ is called the \emph{differential}
of $f$ at $\man a$.
\end{defn}
\begin{defn}[Vector field \cite{DoCarmo1992,Absil2008}]
\label{def:vector-field}A \emph{vector field} $\ensuremath{\mathcal{X}}$ on a
differentiable manifold $\mathcal{N}$ is a correspondence that associates
to each point $\man a\in\mathcal{N}$ a vector $\ensuremath{\mathcal{X}}(\man a)\in T_{\man a}\mathcal{N}$.
Given a vector field $\ensuremath{\mathcal{X}}$ on $\mathcal{N}$ and a differentiable
real-valued function $f:\mathcal{N}\rightarrow\ensuremath{\mathbb{R}}$, we let $\ensuremath{\mathcal{X}} f$
denote the real-valued function on $\mathcal{N}$ defined by
\begin{eqnarray*}
\left(\ensuremath{\mathcal{X}} f\right): & \mathcal{N} & \rightarrow\ensuremath{\mathbb{R}}\\
 & \man a & \mapsto\ensuremath{\mathcal{X}}(\man a)f.
\end{eqnarray*}
The set of all vector fields of $\mathcal{N}$ is denoted by $\vfieldset{\mathcal{N}}$.
The multiplication of a vector field $\ensuremath{\mathcal{X}}$ by a function $f:\mathcal{N}\rightarrow\ensuremath{\mathbb{R}}$
is defined by $f\ensuremath{\mathcal{X}}$: $\mathcal{N}\rightarrow T_{\man a}\mathcal{N}$:
$\man a\mapsto f(\man a)\ensuremath{\mathcal{X}}(\man a)$;
and the addition of two vector fields $\ensuremath{\mathcal{X}}$ and $\mathcal{Y}$
by $\ensuremath{\mathcal{X}}+\mathcal{Y}:$ $\mathcal{N}\rightarrow T_{\man a}\mathcal{N}:$
$\man a\mapsto\ensuremath{\mathcal{X}}(\man a)+\mathcal{Y}(\man a)$. The \emph{Lie
bracket of vector fields} is defined as the unique vector field $[\ensuremath{\mathcal{X}},\mathcal{Y}]$
satisfying $([\ensuremath{\mathcal{X}},\mathcal{Y}]f)\coloneqq(\ensuremath{\mathcal{X}}(\mathcal{Y} f))-(\mathcal{Y}(\ensuremath{\mathcal{X}} f))$
for all real valued smooth functions $f$ defined on $\mathcal{N}$. A \emph{vector
field $V$ along a curve $\alpha:\mathbf{I}\rightarrow\mathcal{N}$}
is a differentiable mapping that associates to every $t\in\mathbf{I}$
a tangent vector $V(t)\in T_{\alpha(t)}\mathcal{N}$.
\end{defn}
\begin{defn}[Riemannian manifold]
\label{Definition:Riemannian-metric}A \emph{Riemannian metric} $\inprod{}{}$
or $g$ on a differentiable manifold $\mathcal{N}$ is a correspondence
which associates to each point $\man a$ of $\mathcal{N}$ an inner product
$g_{\man a}:=\inprod{}{}_{\man a}$ on a tangent space $T_{\man a}\mathcal{N}$,
with $\inprod{}{}_{\man a}$ varying differentially in the following
sense: if $\ensuremath{\varphi}:U\subset\ensuremath{\mathbb{R}}^{n}\rightarrow\mathcal{N}$ is a system
of coordinates (or chart) around $\man a$, with $\ensuremath{\varphi}(u_{1},u_{2},...,u_{n})=\man a\in\ensuremath{\varphi}(U)$
and $\partial/\partial u_{i}(\man a)=d\ensuremath{\varphi}_{\man a}(0,...,0,1,0,...0),$
then
\[
g_{i,j}\left(u_{1},u_{2},...,u_{n}\right)=\inprod{\frac{\partial}{\partial u_{i}}(\man a)}{\frac{\partial}{\partial u_{j}}(\man a)}_{\man a}
\]
is a differentiable function on $U$ \cite{DoCarmo1992}.\emph{ }We
delete the index $\man a$ in the functions $g_{\man a}$ and $\inprod{}{}_{\man a}$
whenever there is no possibility of confusion.
The pair $(\mathcal{N},g)$ is called a \emph{Riemannian manifold }\cite{Absil2008}.
For simplicity, we can also denote the Riemannian manifold $(\mathcal{N},g)$
by the set $\mathcal{N}$.
\end{defn}
\begin{defn}[Riemannian gradient \cite{DoCarmo1992}]
Let $\mathcal{N}$ be a Riemannian manifold. Given a smooth function $f:\mathcal{N}\rightarrow\mathbb{R}$,
the \emph{Riemannian gradient} of $f$ at $\man x$, denoted by $\text{\ensuremath{\grad}} f(\man x)$,
is defined as the unique element of $T_{\man x}\mathcal{N}$ that satisfies
\[
\left\langle \text{\ensuremath{\grad}} f(\man x),v\right\rangle _{\man x}=df_{\man x}(v),\ \forall v\in T_{\man x}\mathcal{N}.
\]
\end{defn}
\begin{defn}[Critical point \cite{PT:06}]
Let $\mathcal{N}$ and $\mathcal{R}$ be smooth manifolds. If $f:\mathcal{N}\rightarrow\mathcal{R}$
is a smooth map, then a point $\man x\in\mathcal{N}$ is a \emph{critical
point} of $f$ if $df_{\man x}:T_{\man x}\mathcal{N}\rightarrow T_{\man{f(x)}}\mathcal{R}$
is not surjective. In the particular case that $\mathcal{R}=\mathbb{R}$,
then the critical points of $f$ are exactly the points $\man x$
at which $df_{\man x}=0$. Moreover, if $\mathcal{N}$ is a Riemannian manifold,
the critical points are the points $\man x\in\mathcal{N}$ such that $\text{\ensuremath{\grad}} f(\man x)=0$.
\end{defn}
\begin{defn}[Affine connection \cite{DoCarmo1992}]
\label{thm:Affine-connection}An \emph{affine connection $\nabla$
}on a differentiable manifold $\mathcal{N}$ is a mapping $\nabla:\vfieldset{\mathcal{N}}\times\vfieldset{\mathcal{N}}\rightarrow\vfieldset{\mathcal{N}}$
which is denoted by $(\ensuremath{\mathcal{X}},\mathcal{Y})\mapsto\nabla_{\ensuremath{\mathcal{X}}}\mathcal{Y}$
and which satisfies the following properties, for $\ensuremath{\mathcal{X}}$, $\mathcal{Y}$,
$\mathcal{Z}$ $\in$ $\vfieldset{\mathcal{N}}$ and $f$, $g$ $\in$ $\mathscr{D}{\mathcal{N}}$:
\begin{enumerate}
\item $\nabla_{f\ensuremath{\mathcal{X}}+g\mathcal{Y}}\mathcal{Z}=f\nabla_{\ensuremath{\mathcal{X}}}\mathcal{Z}+g\nabla_{\mathcal{Y}}\mathcal{Z}$,
\item $\nabla_{\ensuremath{\mathcal{X}}}(\mathcal{Y}+\mathcal{Z})=\nabla_{\ensuremath{\mathcal{X}}}\mathcal{Y}+\nabla_{\ensuremath{\mathcal{X}}}\mathcal{Z}$,
\item $\nabla_{\ensuremath{\mathcal{X}}}(f\mathcal{Y})=f\nabla_{\ensuremath{\mathcal{X}}}\mathcal{Y}+(\ensuremath{\mathcal{X}} f)\mathcal{Y}$.
\end{enumerate}
If \emph{$\nabla$} satisfies the following additional properties:
\begin{enumerate}
\item $\ensuremath{\mathcal{X}}\left\langle \mathcal{Y},\mathcal{Z}\right\rangle =\left\langle \nabla_{\ensuremath{\mathcal{X}}}\mathcal{Y},\mathcal{Z}\right\rangle +\left\langle \mathcal{Y},\nabla_{\ensuremath{\mathcal{X}}}\mathcal{Z}\right\rangle $,
for all $\ensuremath{\mathcal{X}}$,$\mathcal{Y}$, $\mathcal{Z}\in\vfieldset{\mathcal{N}}$,
\item $\nabla_{\ensuremath{\mathcal{X}}}\mathcal{Y}-\nabla_{\mathcal{Y}}\ensuremath{\mathcal{X}}=[\ensuremath{\mathcal{X}},\mathcal{Y}]$,
for all $\ensuremath{\mathcal{X}}$,$\mathcal{Y}\in\vfieldset{\mathcal{N}}$,
\end{enumerate}
then \emph{$\nabla$} is known as the \emph{Riemannian connection}
of $\mathcal{N}$. The Levi-Civita theorem \cite{DoCarmo1992} says that
any Riemannian manifold has a Riemannian connection and it is unique.
\end{defn}
\begin{thm}[Covariant derivative \cite{DoCarmo1992}]
\label{thm:covariant-derivative}\label{thm:covariant-derivative-1}Let
$\mathcal{N}$ be a differentiable manifold with an affine connection $\nabla$.
There exists a unique correspondence which associates to a vector
field $V$ along the differentiable curve $\alpha:\mathbf{I}\rightarrow\mathcal{N}$
another vector field $DV/dt$ along $\alpha$, called
the covariant derivative of $V$ along $\alpha$, such that:
\end{thm}
\begin{enumerate}
\item $\frac{D}{dt}\left(V+W\right)=\frac{DV}{dt}+\frac{DW}{dt}$;
\item $\frac{D}{dt}\left(fV\right)=\frac{df}{dt}V+f\frac{DV}{dt}$,
where $f$ is a differentiable function on $\mathbf{I}$;
\item if $V$ is induced by a vector field $\mathcal{Z}\in\vfieldset{\mathcal{N}}$,
i.e., $V(t)=\mathcal{Z}(\alpha(t))$, then $DV/dt=\nabla_{\alpha'(t)}\mathcal{Z}$.
\end{enumerate}
\begin{defn}[Parallel Transport \cite{DoCarmo1992}]
\label{def:parallel-transport}Let $\mathcal{N}$ be a differentiable manifold
with an affine connection $\nabla.$ A vector field $V$
along a curve $\alpha:\mathbf{I}\rightarrow\mathcal{N}$ is called \emph{parallel}
when
\[
\frac{DV}{dt}(t)=0,\text{ for all }t\in\mathbf{I}.
\]
Moreover, let $\alpha$ be differentiable and $v_{0}$ a vector
tangent to $\mathcal{N}$ at $\alpha(t_{0})$, $t_{0}\in\mathbf{I}$. Then
there exists a unique parallel vector field $V$ along
$\alpha$, such that $V(t_{0})=v_{0}$; $V(t)$
is called the \emph{parallel transport} of $V(t_{0})$
along $\alpha$.
\end{defn}
\begin{defn}[Geodesic \cite{DoCarmo1992}]
\label{def:geodesics}A parameterized curve $\curve:\mathbf{I}\rightarrow\mathcal{N}$
is a \emph{geodesic at} $t_{0}\in\mathbf{I}$ if
\[
\frac{D}{dt}(\alpha'(t))=0
\]
at the point $t_{0}$; if $\curve$ is a geodesic at $t$, for all
$t\in\mathbf{I}$, we say that $\curve$ is a \emph{geodesic} \cite{DoCarmo1992}.
If the definition domain of all geodesics of $\mathcal{N}$ can be extended
to $\ensuremath{\mathbb{R}}$, then $\mathcal{N}$ is said to be \emph{geodesically-complete}.
\end{defn}
\begin{defn}[Exponential and logarithm mappings \cite{Pennec2006}]
\label{def:exponential-map}Consider a point $\man a\in\mathcal{N}$ and
let $V\subset T_{\man a}\mathcal{N}$ be an open set of $T_{\man a}\mathcal{N}$.
For a given vector $v\in V$ and $1\in\mathbf{I}$, consider the \emph{geodesic}
$\curve:\mathbf{I}\rightarrow\mathcal{N}$ passing through $\man a$ with
initial velocity $\alpha'(0)=v$. Then the mapping $\exp_{\man a}:V\rightarrow\mathcal{N}$
defined by $v\mapsto\alpha(1)$ is well-defined \cite{DoCarmo1992}
and is called the (Riemannian) \emph{exponential mapping} on $V$.
The mapping $\exp_{\man a}$ is differentiable, and there is a neighborhood
$\mathcal{\mathcal{U}}$ of $\man a$ such that the exponential map at $\man a$
is a diffeomorphism from the tangent space to the manifold. For $\mathcal{\mathcal{U}}$
being this neighborhood and $\man a,\man b\in\mathcal{\mathcal{U}}$, $\man b=\exp_{\man a}(v)$,
then the inverse mapping $\log_{\man a}:\mathcal{\mathcal{U}}\rightarrow T_{\man a}\mathcal{N}$
defined by $\man b\mapsto v$ is called the (Riemannian) \emph{logarithm
mapping}. For brevity, we can also write $\logb{\man a\man b}$
in the place of $\log_{\man a}(\man b)$.
\end{defn}
\begin{defn}[Riemannian curvature tensor and sectional curvatures \cite{DoCarmo1992}]
\label{def:Riemannian_curvature} Let $\mathbb{X}(\mathcal{N})$ be the
set of mappings from $\vfieldset{\mathcal{N}}$ to $\vfieldset{\mathcal{N}}$.
The \emph{Riemannian curvature tensor} $R$ of a differentiable manifold
$\mathcal{N}$ is the correspondence $R:\vfieldset{\mathcal{N}}\times\vfieldset{\mathcal{N}}\rightarrow\mathbb{X}(\mathcal{N})$
that associates to each pair of vector fields $\ensuremath{\mathcal{X}},\mathcal{Y}\in\vfieldset{\mathcal{N}}$
the application $R(\ensuremath{\mathcal{X}},\mathcal{Y}):\vfieldset{\mathcal{N}}\rightarrow\vfieldset{\mathcal{N}}$
given by
\[
R(\ensuremath{\mathcal{X}},\mathcal{Y})\mathcal{Z}\coloneqq\nabla_{\mathcal{Y}}\nabla_{\ensuremath{\mathcal{X}}}\mathcal{Z}-\nabla_{\ensuremath{\mathcal{X}}}\nabla_{\mathcal{Y}}\mathcal{Z}+\nabla_{[\ensuremath{\mathcal{X}},\mathcal{Y}]}\mathcal{Z},
\]
where $\nabla$ is the Riemannian connection of $\mathcal{N}$. A notion
closely related to the Riemannian curvature tensor is the sectional
curvatures of $\mathcal{N}$. Given two linearly independent tangent vectors
$u$ and $v$ at the same point, the expression
\[
K(u,v)\coloneqq\frac{\left\langle R(u,v)u,v\right\rangle }{\left\langle u,u\right\rangle \left\langle v,v\right\rangle -\left\langle u,v\right\rangle ^{2}}
\]
does not depend on the choice of $u,v$, but only on the subspace
$\sigma$ spanned by them \cite{DoCarmo1992}. Given a point $\man p\in\mathcal{N}$
and a bidimensional subspace $\sigma$ of $T_{\man p}\mathcal{N}$, the
real number $K(u,v)=K(\sigma)$ where $\{u,v\}$ is any basis of $\sigma$,
is the \emph{sectional curvature} of $\sigma$ in $\man p$.
\end{defn}
\subsection{Proof of Theorem \ref{thm:Euclidean-to-Riemannian-sigma-rep}}
\label{proof:theoremRiSRs}
Suppose $\man{\chi}$ is a Ri$l$th$N\sigma$R of $\man{X}$.
Then, from\emph{ }(\ref{eq:Riemannian-sigma-rep-definition-condition}),
(\ref{eq:sigma-rep-def-weights-condition}) is satisfied. Because
$\man{\chi}$ is a Ri$l$th$N\sigma$R of $\man{X}$, from (\ref{eq:Riemannian-sigma-rep-definition-mean-condition}),
$\man{\mean{X}}$ is a Riemannian sample mean of $\man{\chi}$
and, therefore, from (\ref{eq:Riemannian-sample-mean-definition}),
$\man{\mean{X}}$ minimizes the function
\[
g(\man{\vector x}):=\sum_{i=1}^{N}w_{i}^{m}\text{dist}^{2}\left(\man{\vector x},\exp_{\mean{\man{X}}}\chi_{i}\right).
\]
The function $g\circ\exp_{\mean{\man{X}}}:\Omega(\mean{\man{X}})\subset T_{\mean{\man{X}}}\mathcal{N}\rightarrow[0,\infty)$
is a real valued function defined in a subset of the vector space
$T_{\mean{\man{X}}}\mathcal{N}$. Since $\Omega(\mean{\man{X}})$
is convex by hypothesis and its second derivative is positive, then
$g\circ\exp_{\mean{\man{X}}}$ is a strictly convex function.
Because it is also a differentiable function, $g\circ\exp_{\man a}$
has a unique minimum $\vector x^{*}\in\Omega(\man a)$ and it
is a critical point of $g\circ\exp_{\man a}$. Thus
\begin{equation}
[0]_{n\times1}=\left.\frac{d\big(g\circ\exp_{\mean{\man{X}}}\big)(x)}{dx}\right|_{x=\vector x^{*}}\Leftrightarrow\vector x^{*}=\sum_{i=1}^{N}w_{i}^{m}\chi_{i}.\label{eq:sample-mean-in-tangent-space}
\end{equation}
By Theorem~7.9 of \cite{NB:13}, $\man{\mean{X}}$ is the unique
minimum and critical point of $g$. Thus $\log_{\man a}\man{\mean{X}}$
is a critical point of $g\circ\exp_{\man a}$, and
\begin{equation}
[0]_{n\times1}=\logb{\mean{\man{X}}\man{\mean{X}}}=\vector x^{*}=\sum_{i=1}^{N}w_{i}^{m}\chi_{i}=:\text{\ensuremath{\mu}}_{\chi}.\label{eq:sample-mean-in-tangent-space-1}
\end{equation}
Hence, (\ref{eq:sigma-rep-def-mean-condition}) is satisfied.
Now let us prove the converse for the mean. Suppose all points $\chi_{i}$
belong to the domain of $\exp_{\mean{\man{X}}}$, and that $\chi$
is an $l$th$N\sigma$R of $X:=\logb{\mean{\man{X}}\man{X}}$.
Define the set
\begin{multline}
\man{\chi}:=\{\exp_{\mean{\man{X}}}\chi_{i},w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}|\man{\chi}_{i}\in\mathcal{N};\\
\,w_{i}^{m},w_{i}^{c,j},w_{i}^{cc,j}>0\}{}_{i=1}^{N}\label{eq:sigma-rep-proof}
\end{multline}
Then, from (\ref{eq:sigma-rep-def-weights-condition}) and (\ref{eq:sigma-rep-proof})\emph{,
}(\ref{eq:Riemannian-sigma-rep-definition-condition}) is satisfied.
From (\ref{eq:sample-mean-in-tangent-space}) and (\ref{eq:sample-mean-in-tangent-space-1}),
we have that $\exp_{\mean{\man{X}}}(\text{\ensuremath{\mu}}_{\chi})=\exp_{\mean{\man{X}}}(\logb{\mean{\man{X}}\man{\mean{X}}})=\mean{\man{X}}$
minimizes $g$ and (\ref{eq:Riemannian-sigma-rep-definition-mean-condition})
is satisfied.
For even $j$, we have, from (\ref{eq:sigma-rep-def-mean-condition})
and (\ref{eq:Riemannian-sample-moment-definition}),
\[
\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}}^{j}=\sum_{i=1}^{N}w_{i}^{cc,j}\big[(\chi_{i}-\text{\ensuremath{\mu}}_{\chi})\left(\diamond\right)^{T}\big]^{\otimes\frac{j}{2}}=:\mathcal{\mbox{\ensuremath{\mathcal{M}}} }_{\chi}^{j};
\]
and from (\ref{eq:sigma-rep-def-moments-condition}), it follows $\man{\mbox{\ensuremath{\mathcal{M}}} }_{\man{\chi}}^{j}=\mathcal{\mbox{\ensuremath{\mathcal{M}}} }_{\chi}^{j}=M_{X}^{j};$
for odd $j$, the reasoning is similar. The remaining is straightforward.
\subsection{Proof of Corollary \ref{cor:Riemannian-sr-minimum-numbers}}
\label{proof:Corollary_particularRiSRs}
From Theorem \ref{thm:Euclidean-to-Riemannian-sigma-rep}, $\chi$
is a normalized\emph{ }$l$th$N\sigma$R of\emph{ }$X\sim(\log_{\mean{\man{X}}}(\man{X}),\man{P}_{\man{X}\man{X}})^{n}$.
From Corollary 1 of \cite{Menegaz2015}, it follows that i) $N\geq r+1$;
and ii), if $\man{\chi}$ is symmetric, then $N=2r$. The remaining
of the proof is a direct consequence of Theorem \ref{thm:Euclidean-to-Riemannian-sigma-rep}.
\subsection{Proof of Proposition \ref{prop:Addition of Riemannian random points}}
\label{proof:proposition1}
A Riemannian mean $\mean{\man{X}}$ of $\man{X}$ is such that
it solves (\ref{eq:Riemannian-mean-definition}). Consider the following
optimization problem
\begin{align}
\mbox{minimize } & \tilde{g}(\tilde{c}):=g\circ\exp_{\bar{\man a}}\left(\logb{\mean{\man a}\man c}\right)=\scalar{\sigma}_{\logb{\mean{\man a}\man a}+p}^{2}(\tilde{c})\nonumber \\
\mbox{subject to } & \man c\in\mathcal{N};\label{eq:Riemannian-addition-optimization-problem2}
\end{align}
From a reasoning similar to the proof of Theorem \ref{thm:Euclidean-to-Riemannian-sigma-rep}
(Appendix \ref{proof:theoremRiSRs}), if $\tilde{c}$ solves (\ref{eq:Riemannian-addition-optimization-problem2}),
then $\log_{\mean{\man a}}^{-1}\tilde{c}=\exp_{\bar{\man a}}\tilde{c}$
solves (\ref{eq:Riemannian-mean-definition}), and $\mean{\man{X}}=\exp_{\bar{\man a}}\tilde{c}$.
Since $\scalar{\sigma}_{\logb{\mean{\man a}\man a}+p}^{2}(\tilde{c})$ is the
variance of $\logb{\mean{\man a}\man a}+p$ it follows that $\mathcal{E} _{\logb{\mean{\man a}\man a}+p}\{\logb{\mean{\man a}\man a}+p\}=\mean p$
minimizes $\tilde{g}\left(\man c\right)$; thus $\mean{\man{X}}:=\exp_{\bar{\man a}}\mean p$.
For the covariance part, we have
\[
\man{P}_{\man{X}\man{X}}:=\int_{\mathcal{N}-\mathcal{C}(\mean{\man{X}})}\logb{\mean{\man{X}}\man x}\big(\logb{\mean{\man{X}}\man x}\big)^{T}\man{\ensuremath{\mbox{p}}}_{\man{X}}(\man x)d\mathcal{N}\left(\man x\right)=\man{P}_{\man a\man a}+P_{pp}.
\]
\subsection{Proof of Theorem \ref{thm:General-to-Hauberg-UKF}}
\label{proof:TheoremRiUKFcorrection}
First, by considering $\man c_{\man{\vector x}}=\man b_{\man{\vector x}}=\mean{\man{\vector x}}_{k|k-1}$
and $\man c_{\man{\vector y}}=\man b_{\man{\vector y}}=\mean{\man{\vector y}}_{k|k-1}$
in the definitions of $\man{P}_{\man{\vector x}\man{\vector x},**}^{k|k-1}$,
$\man{P}_{\man{\vector y}\man{\vector y},**}^{k|k-1}$, and $\man{P}_{\man{\vector x}\man{\vector y},**}^{k|k-1}$,
(\ref{eq:UKF-Riemannian-CartesianSolution-3}) yields
\begin{equation}
\man{G}_{k,**}=\left[\begin{array}{cc}
[0]_{n_{\state}\times n_{\state}} & \man{G}_{k}\\{}
[0]_{n_{\meas}\times n_{\state}} & [0]_{n_{\meas}\times n_{\meas}}
\end{array}\right],\label{eq:proof-general-to-Hauberg-UKF-3}
\end{equation}
and substituting $\man c_{\man{\vector x}}=\man b_{\man{\vector x}}=\est{\man{\vector x}}_{k|k-1}$,
$\man c_{\man{\vector y}}=\man b_{\man{\vector y}}=\est{\man{\vector y}}_{k|k-1}$,
and (\ref{eq:proof-general-to-Hauberg-UKF-3}) into (\ref{eq:UKF-Riemannian-CartesianSolution-4})
gives $\mean{\vector x}_{k|k,**}^{T_{\man c}\mathcal{N}_{\man{\vector x},\man{\vector y}}}=\big[\begin{array}{c}
\man{G}_{k}\log_{\mean{\man{\vector y}}_{k|k-1}}(\outcome{\man{\vector y}}_{k}),[0]_{n_{\meas}\times1}\end{array}\big]^{T};$ consequently, from (\ref{eq:augmented-corrected-state-to-non-augmented}),
$x_{k|k}^{TM}:=[x_{k|k,**}^{T_{\man c}\mathcal{N}_{\man{\vector x},\man{\vector y}}}]_{1:n_{\state},1}=\man{G}_{k}\log_{\man{\vector y}_{k|k-1}}\left(\man{\vector y}_{k}\right)$.
Second, considering (\ref{eq:proof-general-to-Hauberg-UKF-3}) into
(\ref{eq:UKF-Riemannian-CartesianSolution-6}) yields
\[
\man{P}_{\man{\vector x}\man{\vector x},**}^{k|k,T_{\man c}M}=\text{diag}\Big(\man{P}_{\man{\vector x}\man{\vector x}}^{k|k-1}-\man{G}_{k}\big(\man{P}_{\man{\vector y}\man{\vector y}}^{k|k-1}\big)^{-1}\man{G}_{k}^{T},[0]_{n_{\meas}\times n_{\meas}}\Big);
\]
and, from (\ref{eq:augmented-corrected-state-to-non-augmented}),
it follows that $\man{P}_{\man{\vector x}\man{\vector x},**}^{k|k,\est{\man{\vector x}}_{k|k-1}}=\man{P}_{\man{\vector x}\man{\vector x}}^{k|k-1}-\man{G}_{k}(\man{P}_{\man{\vector y}\man{\vector y}}^{k|k-1})^{-1}\man{G}_{k}^{T}$.
\subsection{Notation and Acronyms}
\label{subsec:Notation-and-Acronyms}
Throughout this paper, we use the following notations:
\begin{itemize}
\item for a matrix $A$, $(A)\left(\diamond\right)^{T}$ stands for $(A)\left(A\right)^{T}$,
and $\sqrt{A}$ for a square-root matrix of $A$ such that $A=\sqrt{A}\sqrt{A}^{T}$.
\item $\otimes$ stands for the Kronecker product operator, and $A^{\otimes n}:=A\otimes\cdots\otimes A$.
\item {\small{}$[A]_{p\times q}$} stands for a block matrix consisting
of the matrix $A$ being repeated $p$ times in the rows and $q$
times in the columns.
\item {\small{}$[A]_{i_{1}:i_{2},j_{1}:j_{2}}$} stands for a sub-matrix
of the matrix $A$ formed by the rows $i_{1}$ to $i_{2}$ and the
columns $j_{1}$ to $j_{2}$ of $A$.
\item $\mathbf{I}$ stands for an open interval in $\ensuremath{\mathbb{R}}$.
\end{itemize}
Below, we provide a list of acronyms and parts of acronyms—these parts
end with a '-' and are followed by examples—along with their meaning.
There are other acronyms in the text that can be composed by i) concatenating
some items below (e.g., Mi$\sigma$R {[}Mi- with $\sigma$R{]} standing
for Minimum $\sigma$-Representation), and ii) adding Ri- (standing
for Riemannian; e.g., RiMi$\sigma$R\ {[}Ri- with Mi$\sigma$R{]}
standing for RiMi$\sigma$R):
\begin{itemize}
\item \textbf{AdUKF}: Additive Unscented Kalman Filter.
\item \textbf{AuUKF}: Augmented Unscented Kalman Filter.
\item \textbf{EKF}: Extended Kalman Filter.
\item \textbf{HoMiSy-}: Homogeneous Minimum Symmetric- (e.g., HoMiSy$\sigma$R,
RiHoMiSy$\sigma$R, RiHoMiSyAdUKF, RiHoMiSyAuUKF).
\item \textbf{KF}: Kalman Filter.
\item \textbf{$l$th$N\sigma$R}: $l$th order $N$ points $\sigma$-representation.
\item \textbf{$l$UT}: $l$th order UT.
\item \textbf{Mi-}: Minimum- (e.g., RiMi$\sigma$R, RiMiAdUKF, RiMiAuUKF).
\item \textbf{MiSy-}: Minimum Symmetric- (e.g., RiMiSy$\sigma$R, RiMiSyAdUKF,
RiMiSyAuUKF).
\item \textbf{RhoMi-}: Rho Minimum- (e.g., RiRhoMi$\sigma$R, RiRhoMiAdUKF,
RiRhoMiAuUKF).
\item \textbf{$\sigma$R}: $\sigma$-Representation.
\item \textbf{UKF}: Unscented Kalman Filter.
\item \textbf{UT}: Unscented Transformation.
\end{itemize}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{fig_henrique_pb.jpg}}]{Henrique M. T. Menegaz} received the B.S. degree in Electrical Engineering from the Universidade de Brasília (UnB), Brazil, in 2007. He received the M.S. and Ph.D. degrees in Engineering of Electronic Systems and Automation from the UnB in 2011 and 2016 respectively. He is currently an Assistant Professor with Faculdade Gama, UnB. His major field of study is filtering of nonlinear dynamic systems and their applications.
\end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{fig_joao_pb}}]{João Y. Ishihara} received the Ph.D. degree in Electrical Engineering from the University of São Paulo, Brazil, in 1998.
He is currently an Associate Professor at the University of Brasília, Brazil. His research interests include robust filtering and control theory, singular systems, and robotics.
\end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{fig_hugo_pb.jpg}}]{Hugo T. M. Kussaba} received the B.S. degree in control engineering and the M.S. degree in engineering of electronic systems and automation from the University of Brasília (UnB), Brazil, in 2012 and 2014 respectively. Currently, he is a Ph.D. student at the same university. His research interests include control and estimation on Lie groups, robust control and linear matrix inequalities, and hybrid dynamical systems.
\end{IEEEbiography}
\end{document} |
\begin{document}
\title{On a nonhomogeneous Kirchhoff type elliptic system with the singular Trudinger-Moser growth}
\author
{Shengbing Deng\footnote{
E-mail address:\, {\tt shbdeng@swu.edu.cn} (S. Deng), {\tt xltianswumaths@163.com} (X. Tian)} \, and Xingliang Tian\\
\footnotesize School of Mathematics and Statistics, Southwest University,
Chongqing, 400715, P.R. China}
\date{ }
\maketitle
\begin{abstract}
{The aim of this paper is to study the multiplicity of solutions for the following Kirchhoff type elliptic systems
\begin{eqnarray*}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
-m\left(\sum^k_{j=1}\|u_j\|^2\right)\Delta u_i=\frac{f_i(x,u_1,\ldots,u_k)}{|x|^\beta}+\varepsilon h_i(x),\ \ & \mbox{in}\ \ \Omega, \ \ i=1,\ldots,k ,\\[2mm]
u_1=u_2=\cdots=u_k=0,\ \ & \mbox{on}\ \ \partial\Omega,
\end{array}
\right.
\end{eqnarray*}
where $\Omega$ is a bounded domain in $\mathbb{R}^2$ containing the origin with smooth boundary, $\beta\in [0,2)$, $m$ is a Kirchhoff type function, $\|u_j\|^2=\int_\Omega|\nabla u_j|^2dx$, $f_i$ behaves like $e^{\alpha_0 s^2}$ when $|s|\rightarrow \infty$ for some $\alpha_0>0$, and there is a $C^1$ function $F: \Omega\times\mathbb{R}^k\to \mathbb{R}$ such that $\left(\frac{\partial F}{\partial u_1},\ldots,\frac{\partial F}{\partial u_k}\right)=\left(f_1,\ldots,f_k\right)$, $h_i\in \left(\big(H^1_0(\Omega)\big)^*,\|\cdot\|_*\right)$.
We establish sufficient conditions for the multiplicity of solutions of the above system by using
variational methods with a suitable singular Trudinger-Moser inequality when $\varepsilon>0$ is small.}
\emph{\bf Keywords:} Kirchhoff type elliptic systems; multiple solutions; singular Trudinger-Moser inequality.
\emph{\bf 2020 Mathematics Subject Classification:} 35J50, 35J57.
\end{abstract}
\section{{\bfseries Introduction}}
In last decades, a great attention has been focused on the study of problems involving exponential growth nonlinearities, which is related to the famous Trudinger-Moser inequality. Let $\Omega$ be a bounded domain in $\mathbb{R}^2$, and denote with $H_0^{1}(\Omega)$ the standard first order Sobolev space given by
\[
H_0^{1}(\Omega)=cl\Big\{u\in C^\infty_0(\Omega)\ :\ \int_\Omega|\nabla u|^2{\rm d}x<\infty\Big\},\quad\ \ \|u\| =\left(\int_\Omega|\nabla u|^2{\rm d}x\right)^{\frac{1}{2}}.
\]
This space is a limiting case for the Sobolev embedding theorem, which yields $H_0^{1}(\Omega)\hookrightarrow L^p(\Omega)$ for all $1\leq p<\infty$, but one knows by easy examples that $H_0^{1}(\Omega)\not\subseteq L^\infty(\Omega)$, such as $u(x)=\log(1-\log|x|)$ in $B_1(0)$. Hence, one is led to look for a function $g(s):\mathbb{R}\to\mathbb{R}^+$ with maximal growth such that
\[
\sup\limits_{u\in H_0^{1}(\Omega),\|u\| \leq 1}\int_\Omega g(u){\rm d}x<\infty.
\]
It was shown by Trudinger \cite{Trudinger} and Moser \cite{m} that the maximal growth is of exponential type. More precisely, the so-called Trudinger-Moser inequality states that
\[
\exp(\alpha u^{2})\in L^1(\Omega),\quad \forall\ u\in H_0^{1}(\Omega),\ \ \forall\ \alpha>0,
\]
and
\begin{align*}
\sup\limits_{u\in H_0^{1}(\Omega),\|u\| \leq 1}\int_\Omega \exp(\alpha u^{2}){\rm d}x< \infty,\quad \mbox{if}\ \alpha\leq 4\pi,
\end{align*}
where $4\pi$ is the sharp constant in the sense that the supremum in the left is $\infty$ if $\alpha >4\pi$.
In order to treat the system problems, here we give some definitions. For all $1\leq p<\infty$, we define $L^p(\Omega,\mathbb{R}^k)$ as
\[
L^p(\Omega,\mathbb{R}^k):=\underbrace{L^p(\Omega)\times\cdots\times L^p(\Omega)}_{k},
\]
where $L^p(\Omega)$ is the standard $L^p$-space, and since
\begin{equation}\label{eqk}
\frac{1}{k}\left(\sum^k_{i=1}|u_i|^p\right)\leq\left(\sum^k_{i=1}|u_i|^2\right)^{\frac{p}{2}}\leq
k^p\left(\sum^k_{i=1}|u_i|^p\right),
\end{equation}
we can know that $L^p(\Omega,\mathbb{R}^k)$ is well defined and for $U\in L^p(\Omega,\mathbb{R}^k)$, we define $\|U\|_p=\left(\int_\Omega |U|^p dx\right)^{1/p}$ where $|U|=(\sum^k_{i=1}|u_i|^2)^{1/2}$. Moreover we denote
\[
H^1_0(\Omega,\mathbb{R}^k):=\underbrace{H_0^{1}(\Omega)\times\cdots\times H_0^{1}(\Omega)}_{k},
\]
be the Sobolev space modeled in $L^2(\Omega,\mathbb{R}^k)$ with the scalar product
\begin{equation*}
\langle U,V\rangle=\sum^k_{i=1}\int_{\Omega}\nabla u_i\nabla v_idx,
\end{equation*}
where $U, V\in L^2(\Omega,\mathbb{R}^k)$, to which corresponds the norm $\|U\|=\langle U,U\rangle^{1/2}=(\sum^k_{i=1}\|u_i\|^2)^{1/2}$, then $H^1_0(\Omega,\mathbb{R}^k)$ is well defined and also is a Hilbert space. For all $1\leq p<\infty$,
by the inequality (\ref{eqk}) and the Sobolev embedding theorem, we can know that the embedding $H^1_0(\Omega,\mathbb{R}^k)\hookrightarrow L^p(\Omega,\mathbb{R}^k)$ is compact and $H^1_0(\Omega,\mathbb{R}^k)\nsubseteq L^\infty(\Omega,\mathbb{R}^k)$, where $L^\infty(\Omega,\mathbb{R}^k):=L^\infty(\Omega)\times\cdots\times L^\infty(\Omega)$. In Section \ref{sec preliminaries}, we will establish the Trudinger-Moser type inequality for $H^1_0(\Omega,\mathbb{R}^k)$.
Now, we begin to state our problem. Let $\Omega$ be a bounded domain in $\mathbb{R}^2$ containing the origin with smooth boundary, we study the multiplicity of solutions for the following Kirchhoff type systems
\begin{eqnarray}\label{P}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
-m\left(\sum^k_{j=1}\|u_j\|^2\right)\Delta u_i=\frac{f_i(x,u_1,\ldots,u_k)}{|x|^\beta}+\varepsilon h_i(x),\ \ & \mbox{in}\ \ \Omega, \ \ i=1,\ldots,k ,\\[2mm]
u_1=u_2=\cdots=u_k=0,\ \ & \mbox{on}\ \ \partial\Omega,
\end{array}
\right.
\end{eqnarray}
where $\beta\in [0,2)$, $m$ is a continuous Kirchhoff type function, $h_i\in \big(\big(H^1_0(\Omega)\big)^*,\|\cdot\|_*\big)\backslash\{0\}$ for some $i\in\{1,\ldots,k\}$, $\varepsilon$ is a small positive parameter, and $f_i$ has the maximal growth which allows treating (\ref{P}) variationally in the Sobolev space $H^1_0(\Omega,\mathbb{R}^k)$. We shall consider the variational situation in which
\begin{equation*}
(f_1(x,U),\ldots,f_k(x,U))=\nabla F(x,U)
\end{equation*}
for some function $F:\Omega \times \mathbb{R}^k \rightarrow \mathbb{R}$ of class $C^1$, where $\nabla F$ stands for the gradient of $F$ in the variables $U=(u_1,\ldots,u_k)\in \mathbb{R}^k$.
We then rewrite ($\ref{P}$) in the matrix form as
\begin{equation}\label{Pb1}
-m\left(\|U\|^2\right)\Delta U=\frac{\nabla F(x,U)}{|x|^\beta}+\varepsilon H(x),
\end{equation}
where $\Delta U=(\Delta u_1,\ldots,\Delta u_k)$, $\frac{\nabla F(x,U)}{|x|^\beta}=\Big(\frac{f_1(x,U)}{|x|^\beta},\ldots,\frac{f_k(x,U)}{|x|^\beta}\Big)$ and $H(x)=\big(h_1(x),\ldots,h_k(x)\big)$.
System (\ref{P}) is called nonlocal because of the term $m\left(\sum^k_{j=1}\|u_j\|^2\right)$ which implies that the equation in (\ref{P}) is no longer a pointwise identity. As we will see later the presence of the term $m\left(\sum^k_{j=1}\|u_j\|^2\right)$ provokes some mathematical difficulties which makes the study of such a class of problems particularly interesting. Moreover, System (\ref{P}) with $k=1$ has a physical appeal which is generalization of a model introduced in 1883 by Kirchhoff \cite{k}.
There are many results about the existence and multiplicity of solutions for Kirchhoff problems by many mathematicians, we refer to \cite{acm,chen,cy2,c2,fs,fiscellaValdinoci,hezou,hezou2,nt} and the references therein.
When $k=1$, $\beta=0$ and $\varepsilon=0$, system (\ref{P}) become the following Kirchhoff type problem
\begin{eqnarray}\label{P1}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
-m\Big(\int_\Omega|\nabla u|^2dx\Big)\Delta u =f(x,u),\ \ & \mbox{in}\ \ \Omega,\\[2mm]
u=0,\ \ & \mbox{on}\ \ \partial\Omega,
\end{array}
\right.
\end{eqnarray}
where the Kirchhoff function $m : \mathbb{R}_{+} \rightarrow \mathbb{R}_{+}$ satisfies
$(\overline{M}_1)$ there exists $m_0>0$ such that $m(t)\geq m_0$ for all $t\geq0$ and $M(t+s)\geq M(t)+M(s)$ for all $s,t\geq0$, where $M(t)=\int^t_0 m(\tau)d\tau$ is the primitive of $m$.
$(\overline{M}_2)$ there exist constants $a_1,a_2>0$ and $t_0>0$ such that for some $\sigma\in\mathbb{R}$, $m(t) \leq a_{1}+a_{2} t^{\sigma}, \forall t \geq t_{0}$.
$(\overline{M}_3)$ $\frac{m(t)}{t}$ is nonincreasing for $t>0$.
\noindent Moreover, the nonlinearity $f:\Omega\times\mathbb{R}\to\mathbb{R}$ is continuous and satisfies
$(\overline{F}_1)$ there exist constants $s_0,K_0>0$ such that
$F(x, s) \leq K_{0} f(x, s), \ \forall(x, s) \in \Omega \times\left[s_{0},+\infty\right)$.
$(\overline{F}_2)$ for each $x \in \Omega, \frac{f(x, s)}{s^{3}}$ is increasing for $s>0$.
$(\overline{F}_3)$ there exists $\beta_{0}>\frac{2}{\alpha_{0} d^{2}} m\left(4 \pi / \alpha_{0}\right)$ such that
$\lim _{s \rightarrow+\infty} \frac{s f(x, s)}{\exp \left(\alpha_{0} s^{2}\right)} \geq \beta_{0}$ uniformly in $x \in \Omega$.\\
Under these assumptions, by using minimax techniques with the Trudinger-Moser inequality, Figueiredo and Severo \cite{fs} obtained
the existence of ground state solution of (\ref{P1}).
We note that hypothesis $(\overline{F}_2)$ is necessary to obtain precise information about
the minimax level of the energy functional associated to problem (\ref{P1}), they show
the existence of the least energy solution.
Recently, Naimen and Tarsi \cite{nt} studied the existence and multiplicity of solutions for problem (\ref{P1}) with $m(t)=1+\alpha t$ under some weaker assumptions than those in \cite{fs}.
On the other hand, we mention that the existence of solutions for elliptic equations involving critical exponential nonlinearities and a small nonhomogeneous term was considered by many authors, see \cite{adiyang,am,doms,lamlu4,y2012} and the references therein. In the whole Euclidean space $\mathbb{R}^N$, for $N$-Laplacian problems in \cite{doms},
for $N$-Laplacian problem with the nonlinear term involving critical Hardy exponential growth and the nonhomogeneous term in \cite{adiyang,y2012}. What's more, Lam and Lu \cite{lamlu4} established the existence and multiplicity of nontrivial solutions for the nonuniformly elliptic equations of $N$-Laplacian type.
Moreover, Manasses de Souza \cite{ds} has studied the existence of solutions for a singular class of elliptic systems involving critical exponential growth in a bounded domain of $\mathbb{R}^2$. To the best of our knowledge, there are no results for (\ref{P}) with Kirchhoff function and exponential growth nonlinearity.
The main purpose of the present paper is to consider the multiplicity of solutions of system (\ref{P}) and overcome the lack of compactness due to the presence of exponential growth terms as well as the degenerate nature of the Kirchhoff coefficient.
Let us introduce the precise assumptions under which our problem is studied. For this, we define $M(t)=\int^ t_0 m(\tau)d\tau$, the primitive of $m$ so that $M(0)=0$. The hypotheses on Kirchhoff function $m:\mathbb{R}^+ \rightarrow\mathbb{R}^+$ are the following:
($M_1$)
there exists $m_0>0$ such that $m(t)\geq m_0$ for all $t\geq 0$;
($M_2$)
$m(t)$ is nondecreasing for $t\geq 0$;
($M_3$)
there exists \ $\theta>1$ such that $\theta M(t)-m(t)t$ is nondecreasing for $t\geq 0$.
\begin{remark}\label{rem1}\rm
By $(M_1)$, we can get that $M(t)$ is increasing for $t\geq 0$.
\end{remark}
\begin{remark}\label{rem3}\rm
From $(M_3)$, we have that
\begin{equation}\label{1.4}
\theta M(t)-m(t)t\geq 0,\ \ \forall t\geq 0.
\end{equation}
\end{remark}
\begin{remark}\label{rem2}\rm
A typical example of a function $m$ satisfying the conditions $(M_1)-(M_3)$ is given by $m(t)=m_0+at^{\theta-1}$ with $\theta>1, m_0>0$ and $a\geq 0$. Another example is $m(t)=1+\ln(1+t)$.
\end{remark}
\begin{remark}\label{remcmm}\rm
Here, we compare assumptions $(\overline{M}_1)-(\overline{M}_3)$ in \cite{fs} as shown before with our present assumptions $(M_1)-(M_3)$.
From $(\overline{M}_3)$, we can obtain $(M_3)$ with $\theta=2$. Indeed, for any $0<t_1< t_2$,
\begin{equation*}
\begin{split}
2M(t_1)-m(t_1)t_1&=2M(t_2)-2\int^{t_2}_{t_1}m(s)ds-\frac{m(t_1)t^2_1}{t_1} \\
&\leq 2M(t_2)-\frac{m(t_2)(t^2_2-t^2_1)}{t_2}-\frac{m(t_2)t^2_1}{t_2} \\
&=2 M(t_2)-m(t_2)t_2,
\end{split}
\end{equation*}
thus $2M(t)-m(t)t$ is nondecreasing for $t\geq 0$.
From $(M_1)-(M_2)$, we can obtain $(\overline{M}_1)$. Indeed, by $m(t)$ is nondecreasing for $t\geq 0$, we have $\int^{t+s}_t m(\tau)d\tau\geq \int^{s}_0 m(\tau)d\tau$ for all $s,t\geq0$, then it holds that $\int^{t}_0 m(\tau)d\tau+\int^{t+s}_t m(\tau)d\tau\geq \int^{t}_0 m(\tau)d\tau+\int^{s}_0 m(\tau)d\tau$, i.e. $M(t+s)\geq M(t)+M(s)$.
Then from (\ref{1.4}), we can get $M(t)\geq M(1)t^\theta$ for $t\leq 1$, and $M(t)\leq M(1)t^\theta$ for $t\geq 1$, thus $M(t)\leq C_1t^\theta+C_2$ for some $C_1,C_2>0$.
\end{remark}
Motivated by pioneer works of Adimurthi \cite{ad}, de Figueiredo et al. \cite{dmr1} and J.M. do \'{O} \cite{do}, we treat the so-called subcritical case and also the critical case. We say that a function $f:\Omega\times\mathbb{R}\to\mathbb{R}$ has subcritical growth on $\Omega\subset \mathbb{R}^2$ if
\[
\lim _{|u| \rightarrow \infty} \frac{|f(x, u)|}{\exp \left(\alpha u^{2}\right)}=0, \text { uniformly on } \Omega,\ \forall \alpha>0,
\]
and $f$ has critical growth on $\Omega$ if there exists $\alpha_0>0$ such that
\[
\lim _{|u| \rightarrow \infty} \frac{|f(x, u)|}{\exp \left(\alpha u^{2}\right)}=0, \text { uniformly on } \Omega,\ \forall \alpha>\alpha_0,
\]
and
\[
\lim _{|u| \rightarrow \infty} \frac{|f(x, u)|}{\exp \left(\alpha u^{2}\right)}=\infty, \text { uniformly on } \Omega,\ \forall \alpha<\alpha_0.
\]
Throughout this paper, we assume the following hypotheses on the function $f_i:\Omega\times\mathbb{R}^k\rightarrow\mathbb{R}$ and $F$:
($F_0$) $f_i$ is continuous and $f_i(x,0,\ldots,0)=0$, $F(x,0,\ldots,0)=0$ uniformly on $x\in \Omega$.
($F_1$)
$\lim \sup_{|U|\rightarrow 0} \frac {2F(x,U)}{|U|^2}<\lambda_1 m_0$ uniformly on $\Omega$, where
$
\lambda_1=\inf_{U\in H^1_0(\Omega,\mathbb{R}^k)\setminus\{0\}} \frac {\|U\|^2}{\int_{\Omega}|U|^2/|x|^\beta dx}>0;
$
($F_2$)
there exist constants $S_0,M_0>0$ such that
$0<F(x,U)\leq M_0|\nabla F(x,U)|$, \ for all \ $|U|\geq S_0$ uniformly on $\Omega$;
($F_3$)
there exists \ $\mu>2\theta$ such that
$0<\mu F(x,U)\leq U\cdot\nabla F(x,U)$, \ for all \ $(x,U)\in \Omega\times\mathbb{R}^k\setminus\{\mathbf{0}\}$;
We say that $U\in H^1_0(\Omega,\mathbb{R}^k)$ is a weak solution of problem (\ref{P}) if it holds
\begin{equation*}
m(\|U\|^2)\int_{\Omega}\nabla U\cdot\nabla \Phi dx=\int_{\Omega}\frac{\Phi\cdot \nabla F(x,U)}{|x|^\beta} dx+\varepsilon\int_{\Omega}\Phi\cdot H dx, \ \ \forall \ \Phi\in H^1_0(\Omega,\mathbb{R}^k).
\end{equation*}
Since $f_i(x,0,\ldots,0)=0$,\ $U\equiv \mathbf{0}$ is the trivial solution of problem (\ref{P}). Thus, our aim is to obtain nontrivial solutions. Now, the main results of this work can state as follows.
\begin{theorem}\label{thm1.2}
Assume $f_i$ has subcritical growth at $\infty$, that is,
\begin{equation}\label{1.2}
\lim_{|U|\to \infty}\frac{|f_i(x,U)|}{e^{\alpha |U|^2}}=0,
\ \ \text{uniformly on }x\in\Omega,\ \ \forall\alpha >0.
\end{equation}
Moreover, assume $(M_1)$,\ $(M_3)$ and $(F_1)-(F_3)$, then there exists $\varepsilon_{sc}>0$ such that for each $0<\varepsilon<\varepsilon_{sc}$, problem (\ref{P}) has at least two nontrivial weak solutions. One of them with positive energy, while the other one with negative energy.
\end{theorem}
\begin{theorem}\label{thm1.3}
Assume $f_i$ has critical growth at $\infty$, that is, if there exists $\alpha_0 >0$ such that
\begin{equation}\label{1.3}
\lim_{|u_i|\to\infty}\frac{|f_i(x,U)|}{e^{\alpha |U|^2}}
= \begin{cases}
0,\ \ &\forall\alpha >\alpha_0,\\[3pt]
+\infty,\ \ &\forall\alpha <\alpha_0,
\end{cases}
\end{equation}
uniformly on $x\in\Omega$ and $u_j$ where $j\in\{1,\ldots,k\}\backslash\{i\}$. Moreover, suppose $(M_1)-(M_3)$, $(F_1)-(F_3)$ hold and
$(F_4)$ if for some $i\in \{1,\ldots,k\}$, there exists $\eta_0$ such that
$$
\liminf_{|u_i|\rightarrow\infty}\frac {u_i f_i(x,0,\ldots,0,u_i,0,\ldots,0)}{e^{\alpha_0 |u_i|^2}}\geq \eta_0>\frac { (2-\beta)^2m\left(\frac {2\pi(2-\beta)}{\alpha_0}\right)}{\alpha_0 d^{2-\beta} e },
$$
uniformly on $\Omega$,
where $d$ is the radius of the largest open ball contained in $\Omega$ centered at the origin.
Then there exists $\varepsilon_c>0$ such that for each $0<\varepsilon<\varepsilon_c$, problem (\ref{P}) has at least two nontrivial weak solutions. One of them with positive energy, while the other one with negative energy.
\end{theorem}
\begin{remark}\rm
If $\beta=\varepsilon=0,\ k=1$, for $(F_4)$, in \cite{fs}, the author replaced $e$ with 2, therefore, in order to get this improvement on the growth of the nonlinearity $f_i$ at $\infty$, it is crucial in our argument to use a new sequence in \cite{ddr}.
\end{remark}
\begin{remark}\rm
When $m\equiv 1$, $k=1$, $\beta=\varepsilon=0$, problems with critical growth involving the Laplace operator in bounded domains of $\mathbb{R}^2$ have been investigated in \cite{asy,ay,am,dmr1}, quasilinear elliptic problems with critical growth for $N$-Laplacian in bounded domains of $\mathbb{R}^N$ have been studied in \cite{ad,do}. Moreover, for the problems with critical growth in bounded domains in $\mathbb{R}^2$ and $f$ satisfied (see examples in \cite{ad,dmr1,do}) the asymptotic hypothesis
\begin{align}\label{f41}
\liminf_{|u|\rightarrow\infty}\frac {u f(x,u)}{e^{\alpha_0 u^2}}\geq \eta_0'>\frac {2}{\alpha_0 d^2},
\end{align}
and for Kirchhoff problem, in \cite{fs}
\begin{align}\label{f42}
\liminf_{|u|\rightarrow\infty}\frac {u f(x,u)}{e^{\alpha_0 u^2}}\geq \eta_0''>\frac {2 m\big(\frac {4\pi}{\alpha_0}\big)}{\alpha_0 d^2}.
\end{align}
What's more, when $m\equiv 1$, de Souza studied this problem in \cite{ds} and he assumed the hypothesis
\begin{align}\label{f43}
\liminf_{|U|\rightarrow\infty}\frac {u_i f_i(x,U)}{e^{2^{k-1}\alpha_0 |U|^2}}\geq \eta_0'''>\frac {(2-\beta)^2}{2^{k-1}\alpha_0 d^{2-\beta} e },
\end{align}
for some $i\in\{1,2,\ldots,k\}$.
Motivated by \cite{as} and \cite{ra}, where they proved a version of Trudinger-Moser inequality with singular weight and studied the existence of positive weak solutions for the following semilinear and homogeneous elliptic problem
\begin{eqnarray*}
\left\{
\begin{array}{ll}
-\Delta u=\frac{f(x,u)}{|x|^\beta},\ \ & \mbox{in}\ \ \Omega,
\\[2mm]
u=0,\ \ & \mbox{on}\ \ \partial\Omega.
\end{array}
\right.
\end{eqnarray*}
In the present paper, we improve and complement some of the results cited above for the singular and nonhomogeneous case and extend the results to systems. Moreover, since de Figueiredo, do \'{O} and Ruf \cite{ddr} have constructed a suitable sequence, the hypotheses (\ref{f41}) and (\ref{f42}) can be improved to $(F_4)$ in Theorem \ref{thm1.3}. Furthermore, using the improvement of Young's inequality, which will be introduced in Lemma \ref{yi}, the hypothesis (\ref{f43}) can be improved further to $(F_4)$.
\end{remark}
\begin{remark}\rm
On the basis of assumption $(F_0)$, if we further assume that for any $u_j\leq 0$ where $j\in \{1,\ldots,k\}$, $f_i(x,u_1,\ldots,u_k)\equiv 0$ for all $i\in \{1,\ldots,k\}$, uniformly in $x\in\Omega$, and $h_i\geq 0$ for all $i\in \{1,\ldots,k\}$, and $(F_4)$ changes to
$$
\liminf_{u_1,\ldots,u_k\rightarrow +\infty}\frac {U\cdot\nabla F(x,U)}{e^{\alpha_0 |U|^2}}\geq \eta_0>\frac { (2-\beta)^2m\left(\frac {2\pi(2-\beta)}{\alpha_0}\right)}{\alpha_0 d^{2-\beta} e },
$$
where $U=(u_1,\ldots,u_k)$, then by using the Maximum principle, we can prove that the solutions obtained in Theorem \ref{thm1.3} are entirely positive, i.e. each of the components is positive. A typical example is $F(x,U)=|U|^\mu\exp(\alpha_0|U|^2)\prod^{k}_{i=1}{\rm sign}\, u_i$, where ${\rm sign}\, t=0$ if $t\leq 0$ and ${\rm sign}\, t=1$ if $t>0$.
\end{remark}
This paper is organized as follows: Section \ref{sec preliminaries} contains some technical results. In Section \ref{vf}, we present the variational setting in which our problem will be treated. Section \ref{ps} is devoted to showing some properties of the Palais-Smale sequences. Finally, we split Section \ref{main} into two subsections for the subcritical and critical cases, and we complete the proofs of our main results. Hereafter, $C,C_0,C_1,C_2,\ldots$ will denote positive (possibly different) constants.
\section{{\bfseries Some preliminary results}}\label{sec preliminaries}
Now, we introduce some famous inequalities as follows, and inspired by those inequalities, we conclude some similar forms of inequalities. In this paper, we shall use the following version of the Trudinger-Moser inequality with a singular weight due to Adimurthi-Sandeep \cite{as}:
\begin{lemma}\label{lemtm1}
Let $\Omega$ be a bounded domain in $\mathbb{R}^2$ containing the origin and $u\in H^1_0(\Omega)$. Then for every $\alpha >0$, and $\beta \in [0,2)$,
\begin{align*}
\int_{\Omega} \frac{e^{\alpha |u|^2}}{|x|^\beta}<\infty.
\end{align*}
Moreover, there exists constant $C(\Omega)$ depending only on $\Omega$ such that
\begin{align*}
\sup_{||\nabla u||_2\leq 1}\int_{\Omega} \frac{e^{\alpha |u|^2}}{|x|^\beta}\leq C(\Omega),
\end{align*}
if and only if $\frac{\alpha}{4\pi}+\frac{\beta}{2} \leq 1$.
\end{lemma}
Then, we give two useful algebraic inequalities that will be used systematically in the rest of the paper as the following:
\begin{lemma}\label{yi}
({\bfseries Improvement of the Young's inequality})
Let $a_1,\ldots,a_k>0$, $p_1,\ldots,p_k>1$ with $\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_k}=1$, then
\begin{align}\label{yic}
a_1 a_2 \cdots a_k\leq \frac{a^{p_1}_1}{p_1}+\frac{a^{p_2}_2}{p_2}+\cdots+\frac{a^{p_k}_k}{p_k}.
\end{align}
\end{lemma}
\begin{proof}
We will use mathematical induction to prove this. When $k=2$, the conclusion follows from Young's inequality. Suppose that the conclusion is correct when $k=s-1$; we are going to show that it is still correct when $k=s$. Let
\begin{align*}
\frac{1}{q}=\sum^{s-1}_{i=1}\frac{1}{p_i},\ \ \frac{1}{q}+\frac{1}{p_s}=1,\ \ \mbox{then}\ \ \sum^{s-1}_{i=1}\frac{1}{p_i/q}=1.
\end{align*}
Thus,
\begin{align*}
\prod^s_{i=1} a_i=\Big(\prod^{s-1}_{i=1}a_i\Big)a_s\leq \frac{1}{q}\Big(\prod^{s-1}_{i=1}a_i\Big)^q+\frac{1}{p_s}a^{p_s}_s,
\end{align*}
by the mathematical induction, we can get that
\begin{align*}
\frac{1}{q}\Big(\prod^{s-1}_{i=1}a_i\Big)^q=\frac{1}{q}\Big(\prod^{s-1}_{i=1}a^{q}_i\Big)\leq \frac{1}{q}\sum^{s-1}_{i=1}\Big[\frac{1}{p_i/q}\big(a^q_i\big)^{p_i/q}\Big]=\sum^{s-1}_{i=1}\frac{1}{p_i}a^{p_i}_i.
\end{align*}
Therefore
\begin{align*}
\prod^s_{i=1} a_i\leq \sum^{s-1}_{i=1}\frac{1}{p_i}a^{p_i}_i+\frac{1}{p_s}a^{p_s}_s=\sum^{s}_{i=1}\frac{1}{p_i}a^{p_i}_i.
\end{align*}
This lemma is proved. If we take $p_1=p_2=\cdots=p_k=k$, we can get that
\begin{align}\label{yiy}
a_1 a_2 \cdots a_k\leq \frac{1}{k}\sum^{k}_{i=1}a^{k}_i\leq \sum^{k}_{i=1}a^{k}_i.
\end{align}
\end{proof}
\begin{lemma}\label{yib}
Suppose $a_1, a_2, \ldots,a_k\geq0$ with $a_1+a_2+\cdots+a_k<1$, then there exist $p_1,\ldots,p_k>1$ satisfying $\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_k}=1$, such that
\begin{align}\label{yibc}
p_ia_i<1, \quad \mbox{for all}\ \ i=1,2,\ldots,k.
\end{align}
Moreover, if $a_1, a_2, \ldots,a_k\geq0$ satisfying $a_1+a_2+\cdots+a_k=1$, then we can take $p_i=\frac{1}{a_i}$ such that $\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_k}=1$ and
\begin{align}\label{yibcd}
p_ia_i=1, \quad \mbox{for all}\ \ i=1,2,\ldots,k.
\end{align}
\end{lemma}
\begin{proof}
For the case $a_1, a_2, \ldots,a_k\geq0$ with $a_1+a_2+\cdots+a_k<1$, we again argue by mathematical induction. When $k=2$, $a_1, a_2\geq0$ with $a_1+a_2<1$. If $a_2=0$ (or $a_1=0$), then taking $p_1=\left(\frac{1}{2}+\frac{1}{2a_1}\right)>1$ (or $p_2=\left(\frac{1}{2}+\frac{1}{2a_2}\right)>1$), we can obtain $p_1a_1<1,\ p_2a_2<1$, where $p_2=\frac{p_1}{p_1-1}$. If $a_1, a_2>0$, then taking $p_1=\left(\frac{1}{2(1-a_1)}+\frac{1}{2a_2}\right)>1$, we can obtain $p_1a_1<1,\ p_2a_2<1$, where $p_2=\frac{p_1}{p_1-1}$. Suppose that the conclusion is correct when $k=s-1$; if we show that it is still correct when $k=s$, then the lemma follows. Let
\begin{align*}
(a_1+a_2+\cdots+a_{s-1})+a_s<1,
\end{align*}
then there exist $q_1, q_2>1$ satisfying $\frac{1}{q_1}+\frac{1}{q_2}=1$ such that
\begin{align*}
q_1(a_1+a_2+\cdots+a_{s-1})<1, \ \ q_2a_s<1.
\end{align*}
Then by the assumption, there exist $q_3, q_4,\ldots, q_{s+1}>1$ satisfying $\frac{1}{q_3}+\frac{1}{q_4}+\cdots+\frac{1}{q_{s+1}}=1$ such that
\begin{align*}
q_3(q_1a_1)<1,\ q_4(q_1a_2)<1, \ldots,\ q_{s+1}(q_1a_{s-1})<1,
\end{align*}
i.e.
\begin{align*}
(q_3q_1)a_1<1,\ (q_4q_1)a_2<1, \ldots,\ (q_{s+1}q_1)a_{s-1}<1.
\end{align*}
Taking $p_1=q_1q_3,\ p_2=q_1q_4,\ldots,\ p_{s-1}=q_1q_{s+1},\ p_s=q_2$, then it holds that
\begin{align*}
\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_s}=\frac{1}{q_1}\left(\frac{1}{q_3}+\frac{1}{q_4}+\cdots+\frac{1}{q_{s+1}}\right)+\frac{1}{q_2}
=\frac{1}{q_1}+\frac{1}{q_2}=1,
\end{align*}
and
\begin{align*}
p_1a_1<1,\ p_2a_2<1, \ldots,\ p_sa_s<1.
\end{align*}
For the case $a_1, a_2, \ldots,a_k\geq0$ satisfying $a_1+a_2+\cdots+a_k=1$: if $a_i>0$ for all $i\in\{1,\ldots,k\}$, then we can take $p_i=\frac{1}{a_i}$ so that $\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_k}=1$ and (\ref{yibcd}) holds. If $a_i=0$ for some $i\in\{1,\ldots,k\}$, then the problem reduces to the case of $k-1$ numbers, which has already been treated.
The proof is complete.
\end{proof}
By the above inequalities, we begin to establish the singular Trudinger-Moser type inequalities in $H^1_0(\Omega,\mathbb{R}^k)$:
\begin{lemma}\label{lemtm2}
({\bfseries Improvement of the Trudinger-Moser inequality})
Let $\Omega$ be a bounded domain in $\mathbb{R}^2$ containing the origin and $U=(u_1,\ldots,u_k)\in H^1_0(\Omega,\mathbb{R}^k)$. Then for every $\alpha >0$, and $\beta\in [0,2)$,
\begin{align}\label{tmit}
\int_{\Omega} \frac{e^{\alpha |U|^2}}{|x|^\beta}<\infty.
\end{align}
Moreover, it holds that
\begin{align}\label{tmiht}
\sup_{U\in H^1_0(\Omega,\mathbb{R}^k),\ \|U\|\leq 1}\int_{\Omega} \frac{e^{\alpha |U|^2}}{|x|^\beta}\leq C(\Omega),
\end{align}
if and only if $\frac{\alpha}{4\pi}+\frac{\beta}{2} \leq 1$, where $C(\Omega)$ is given in Lemma \ref{lemtm1}.
\end{lemma}
\begin{proof}
Because $U=(u_1,\ldots,u_k)$, we can get $|U|^2=\sum^k_{i=1}|u_i|^2$. Thus, by using (\ref{yiy}) and Lemma \ref{lemtm1}, we have
\begin{align*}
\int_{\Omega} \frac{e^{\alpha |U|^2}}{|x|^\beta}=\int_{\Omega} \frac{e^{\alpha |u_1|^2}\cdots e^{\alpha |u_k|^2}}{|x|^\beta} \leq \sum^k_{i=1}\int_{\Omega} \frac{e^{k\alpha |u_i|^2}}{|x|^{\beta}}<\infty.
\end{align*}
Then we begin to prove (\ref{tmiht}). For each $U\in H^1_0(\Omega,\mathbb{R}^k)$ satisfying $\|U\|\leq 1$, we have $\|U\|^2=\sum^k_{i=1}\|u_i\|^2\leq 1$, and Lemma \ref{yib} shows that there exist $p_1,\ldots,p_k>1$ satisfying $\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_k}=1$ such that
$p_i\|u_i\|^2\leq 1$ holds, for all $i=1,2,\ldots,k$. If $\frac{\alpha}{4\pi}+\frac{\beta}{2} \leq 1$, then it also holds that $\frac{p_i\|u_i\|^2\alpha}{4\pi}+\frac{\beta}{2} \leq 1$, for all $i=1,2,\ldots,k$. Then by using Lemma \ref{yi}, i.e. the improvement of the Young's inequality, and from Lemma \ref{lemtm1} we have
\begin{align*}
\begin{split}
\int_{\Omega} \frac{e^{\alpha |U|^2}}{|x|^\beta} =\int_{\Omega} \frac{e^{\alpha |u_1|^2}\cdots e^{\alpha |u_k|^2}}{|x|^\beta} \leq \sum^k_{i=1}\int_{\Omega} \frac{e^{p_i\alpha |u_i|^2}}{p_i|x|^{\beta}} =\sum^k_{i=1}\int_{\Omega} \frac{e^{p_i\alpha\|u_i\|^2 (\frac{u_i}{\|u_i\|})^2}}{p_i|x|^{\beta}}
\leq \sum^k_{i=1}\frac{C(\Omega)}{p_i}=C(\Omega),
\end{split}
\end{align*}
then (\ref{tmiht}) follows.
If $\frac{\alpha}{4\pi}+\frac{\beta}{2} >1$, we take $U=(u,0,\ldots,0)$, then Lemma \ref{lemtm1} shows that the supremum for the integral in (\ref{tmiht}) is infinite. Thus the proof is complete.
\end{proof}
\begin{lemma}\label{lemtm3}
Let $\{U_n\}$ be a sequence of functions in $H^1_0(\Omega,\mathbb{R}^k)$ with $\|U_n\|=1$ such that $U_n\rightharpoonup U\neq0$ weakly in $H^1_0(\Omega,\mathbb{R}^k)$. Then for any $0<p<\frac{2\pi(2-\beta)}{{(1-\|U\|^2)}}$ and $\beta\in [0,2)$, we have
$$
\sup_n \int_{\Omega}\frac{e^{p|U_n|^2}}{|x|^\beta}<\infty.
$$
\end{lemma}
\begin{proof}
Since $U_n\rightharpoonup U\neq0$ and $\|\nabla U_n\|_2=1$, we conclude that
\begin{align*}
\|U_n-U\|^2=1-2\langle U_n,U\rangle+\|U\|^2\rightarrow 1-\|U\|^2<\frac {2\pi(2-\beta)}{p}.
\end{align*}
Thus, for large $n$ we have
\begin{align*}
\frac{p\|U_n-U\|^2}{4\pi}+\frac{\beta}{2}<1.
\end{align*}
Now we can choose $q>1$ close to 1 and $\epsilon>0$ such that
\begin{align*}
\frac{qp(1+\epsilon^2)\|U_n-U\|^2}{4\pi}+\frac{q\beta}{2}\leq 1.
\end{align*}
Lemma \ref{lemtm2} shows that
\begin{align*}
\int_{\Omega}\frac{e^{qp(1+\epsilon^2)|U_n-U|^2}}{|x|^{q\beta}}\leq C(\Omega).
\end{align*}
Moreover, since
\begin{align*}
p|U_n|^2 \leq p (1+ \epsilon ^2)|U_n-U|^2 + p(1+1/\epsilon ^2)|U|^2,
\end{align*}
which can be proved by Young inequality, then it follows that
\begin{align*}
e^{p|U_n|^2 }\leq e^{p (1+ \epsilon ^2)|U_n-U|^2} e^{ p(1+1/\epsilon ^2)|U|^2}.
\end{align*}
Consequently, by H\"{o}lder inequality,
\begin{align*}
\begin{split}
\int_{\Omega}\frac{e^{p|U_n|^2}}{|x|^\beta}&\leq \left (\int_{\Omega}\frac{e^{qp(1+\epsilon^2)|U_n-U|^2}}{|x|^{q\beta}}\right)^{1/q}\left(\int_{\Omega}e^{rp(1+1/\epsilon^2)|U|^2}\right)^{1/r}\leq C\left (\int_{\Omega}e^{rp(1+1/\epsilon^2)|U|^2}\right),
\end{split}
\end{align*}
for large $n$, where $r=\frac{q}{q-1}$. By Lemma \ref{lemtm2}, we know the second term in the last inequality is bounded, and this lemma is proved.
\end{proof}
\begin{remark}\label{remccp1}\rm
Lemma \ref{lemtm3} is in fact a version of the concentration-compactness principle for a singular Trudinger-Moser inequality, which is better than (\cite{ds}, Lemma 2.3). However, it is still not clear whether $\frac{2\pi(2-\beta)}{1-\|U\|^2}$ is sharp or not. This is still an open question.
\end{remark}
\begin{lemma}\label{lemtm4}
If $V\in H^1_0(\Omega,\mathbb{R}^k),\ \alpha>0,\ q>0,\ \beta\in [0,2)$ and $\|V\|\leq N$ with $\frac{\alpha N^2}{4\pi}+\frac{\beta}{2}<1$, then there exists $C=C(\alpha,N,q)>0$ such that
\begin{align}\label{asd1}
\int_{\Omega} |V|^q \frac{e^{\alpha |V|^2}}{|x|^\beta}\leq C\|V\|^q.
\end{align}
\end{lemma}
\begin{proof}
We consider $r> 1$ close to 1 such that $\frac{r \alpha N^2}{4\pi}+\frac{r\beta}{2} \leq 1$ and $sq\geq 1$, where $s=\frac {r}{r-1}$. By using H\"{o}lder inequality and Lemma \ref{lemtm2}, we have
\begin{align*}
\begin{split}
\int_{\Omega} |V|^q \frac{e^{\alpha |V|^2}}{|x|^\beta} &\leq \left (\int_{\Omega}\frac{e^{r\alpha |V|^2}}{|x|^{r\beta}}\right)^{1/r}\|V\|^q_{qs}
\leq C(\Omega)\|V\|^q_{qs}.
\end{split}
\end{align*}
Finally, using the continuous embedding $H^1_0(\Omega,\mathbb{R}^k)\hookrightarrow L^{sq}(\Omega,\mathbb{R}^k)$, we conclude that
\begin{align*}
\int_{\Omega} |V|^q \frac{e^{\alpha |V|^2}}{|x|^\beta}\leq C\|V\|^q.
\end{align*}
\end{proof}
\section{{\bfseries The variational framework}}\label{vf}
We now consider the functional $I$ given by
\begin{equation*}
I_\varepsilon(U)=\frac {1}{2}M(\|U\|^2)-\int_{\Omega}\frac{F(x,U)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U\cdot H(x)dx.
\end{equation*}
\begin{lemma}\label{ic1}
Under our assumptions we have that $I_\varepsilon$ is well defined and $C^1$ on $H^1_0(\Omega,\mathbb{R}^k)$. Moreover,
\begin{equation*}
\langle I_\varepsilon'(U),\Phi\rangle_*=m(\|U\|^2)\langle U,\Phi\rangle-\int_{\Omega}\frac{\Phi\cdot\nabla {F(x,U)}}{|x|^\beta} dx-\varepsilon\int_{\Omega}\Phi\cdot H dx,
\end{equation*}
where $\Phi\in H^1_0(\Omega,\mathbb{R}^k)$, here $\langle \cdot,\cdot \rangle_*$ simply denotes the dual pairing between $H^1_0(\Omega,\mathbb{R}^k)$ and its dual space $\left(H^1_0(\Omega,\mathbb{R}^k)\right)^*$.
\end{lemma}
\begin{proof}
We have that $f_i$ is continuous and has subcritical (or critical) growth at $\infty$, as defined in (\ref{1.2}) (or (\ref{1.3})). Thus, given $\alpha>0$ (or $\alpha>\alpha_0$), there exists $C>0$ such that $|f_i(x,U)|\leq Ce^{\alpha |U|^2}$ for all $(x,U)\in\Omega \times \mathbb{R}^k$. Then,
\begin{equation} \label{3.1}
|\nabla F(x,U)|\leq \sum^k_{i=1}|f_i(x,U)|\leq C_1 e^{\alpha |U|^2},
\ \ \text{for all }(x,U)\in\Omega \times \mathbb{R}^k.\\[3pt]
\end{equation}
By $(F_1)$, given $\epsilon>0$ there exists $\delta>0$ such that
\begin{equation}\label{3.2}
|F(x,U)|\leq \frac {\lambda_1 m_0-\epsilon}{2} |U|^2
\ \ \text{whenever }|U|<\delta.\\[3pt]
\end{equation}
Thus, using (\ref{3.1}), (\ref{3.2}) and $(F_3)$, we have
\begin{align*}
\int_{\Omega}\frac{|F(x,U)|}{|x|^\beta}dx \leq \frac {\lambda_1 m_0-\epsilon}{2}\int_{\Omega}\frac{|U|^2}{|x|^\beta} dx + C_1\int_{\Omega}\frac{|U|e^{\alpha |U|^2}}{|x|^\beta}dx.
\end{align*}
Considering the continuous embedding $H^1_0(\Omega,\mathbb{R}^k)\hookrightarrow L^s(\Omega,\mathbb{R}^k)$ for $s\geq 1$ and using Lemma \ref{lemtm4}, it follows that $\frac{F(x,U)}{|x|^\beta}\in L^1(\Omega)$, which implies that $I_\varepsilon$ is well defined. Moreover, we can see that $I_\varepsilon\in C^1(H^1_0(\Omega,\mathbb{R}^k),\mathbb R)$ with
\begin{equation*}
\langle I_\varepsilon'(U),\Phi\rangle_*=m(||U||^2)\langle U,\Phi\rangle-\int_{\Omega}\frac{\Phi\cdot\nabla {F(x,U)}}{|x|^\beta} dx-\varepsilon\int_{\Omega}\Phi\cdot H dx,
\end{equation*}
for all $U, \Phi\in H^1_0(\Omega,\mathbb{R}^k)$.
\end{proof}
From Lemma \ref{ic1}, we have that critical points of the functional $I_\varepsilon$ are precisely weak solutions of problem (\ref{P}). In the next three lemmas we check that the functional $I_\varepsilon$ satisfies the geometric conditions of the Mountain-pass theorem.
\begin{lemma}\label{lemgc1}
Suppose that $(M_1)$ and $(F_1),\ (F_3)$ hold and the function $f_i$ has subcritical (or critical) growth at $\infty$. Then for small $\varepsilon$, there exist positive number $\rho_\varepsilon$ and $\varsigma$ such that
\begin{align*}
I_\varepsilon(U)\geq \varsigma,\ \ \forall U\in H^1_0(\Omega,\mathbb{R}^k)\ \ \mbox{with}\ \ \|U\|=\rho_\varepsilon.
\end{align*}
Moreover, $\rho_\varepsilon$ can be chosen such that $\rho_\varepsilon\to0$ as $\varepsilon\to0$.
\end{lemma}
\begin{proof}
By $(F_1)$, given $\kappa>0$, there exists $\delta>0$ such that
\begin{align*}
|\nabla F(x,U)|\leq (\lambda_1 m_0-\kappa)|U|
\end{align*}
whenever $|U|<\delta$. On the other hand, for $\alpha>0$ (subcritical case) or $\alpha>\alpha_0$ (critical case), we have that there exists $C_1>0$ such that $|f_i (x,U)|\leq C_1|U|^{q-1} e^{\alpha |U|^2}$ for all $|U|\geq \delta$ with $q>2$. Thus,
\begin{align}\label{3.3}
|\nabla F(x,U)|\leq \sum^k_{i=1} |f_i (x,U)|\leq (\lambda_1 m_0-\kappa)|U|+C_2|U|^{q-1} e^{\alpha |U|^2},\ \ \forall (x,U)\in \Omega\times \mathbb{R}^k.
\end{align}
Thus, by $(M_1),\ (F_3)$ and (\ref{3.3}),
\begin{align*}
\begin{split}
I_\varepsilon(U)=&\frac {1}{2}M(\|U\|^2)-\int_{\Omega}\frac{F(x,U)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U\cdot H(x)dx \\
\geq &\frac{1}{2}m_0\|U\|^2-\int_{\Omega}\frac{F(x,U)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U\cdot H(x)dx \\
\geq &\frac{1}{2}m_0\|U\|^2-\frac {1}{\mu}\int_{\Omega}\frac{U\cdot \nabla F(x,U)}{|x|^\beta}dx-\varepsilon\|U\|\|H\|_* \\
\geq &\frac{1}{2}m_0\|U\|^2-\frac {\lambda_1 m_0-\kappa}{\mu}\int_{\Omega}\frac{|U|^2}{|x|^\beta}dx-C_2\int_{\Omega}|U|^q \frac{e^{\alpha |U|^2}}{|x|^\beta}dx-\varepsilon\|U\|\|H\|_* \\
\geq &\Big(\frac {m_0}{2}-\frac {m_0}{\mu}+\frac {\kappa}{\mu \lambda_1}\Big)\|U\|^2-C_2\int_{\Omega}|U|^q \frac{e^{\alpha |U|^2}}{|x|^\beta}dx-\varepsilon\|U\|\|H\|_*,
\end{split}
\end{align*}
By Lemma \ref{lemtm4}, there exists $N>0$ such that $\alpha N^2/4\pi+\beta/2<1$ and we take $\|U\|\leq N$, there exists $C>0$ such that $\int_{\Omega}|U|^q \frac{e^{\alpha |U|^2}}{|x|^\beta}dx\leq C\|U\|^q$. Therefore,
\begin{align*}
I_\varepsilon(U)\geq \Big(\frac {m_0}{2}-\frac {m_0}{\mu}+\frac {\kappa}{\mu \lambda_1}\Big)\|U\|^2-C_3\|U\|^q-\varepsilon\|U\|\|H\|_*,
\end{align*}
Since $m_0>0,\ \mu>2\theta>2,\ q>2$, then for small $\varepsilon$, there exists $\xi_\varepsilon>0$ such that $\Big(\frac {m_0}{2}-\frac {m_0}{\mu}+\frac {\kappa}{\mu \lambda_1}\Big)\xi_\varepsilon^2-C_3\xi_\varepsilon^q-\varepsilon \xi_\varepsilon\|H\|_*>0$.
Consequently, taking $\rho_\varepsilon=\min\{N,\xi_\varepsilon\}>0$, we can get that $I_\varepsilon(U)\geq\varsigma$ whenever $\|U\|=\rho_\varepsilon$ where $\varsigma:=\Big(\frac {m_0}{2}-\frac {m_0}{\mu}+\frac {\kappa}{\mu \lambda_1}\Big)\rho_\varepsilon^2-C_3\rho_\varepsilon^q-\varepsilon \rho_\varepsilon\|H\|_*>0$. And it is worth noting that, $\rho_\varepsilon\rightarrow 0$ as $\varepsilon\rightarrow 0$.
\end{proof}
\begin{lemma}\label{lemgc2}
Assume that $(M_1),\ (M_3),\ (F_3)$ hold and the function $f_i$ has subcritical (or critical) growth at $\infty$. Then there exists $E\in H^1_0(\Omega,\mathbb{R}^k)$ with $\|E\|>\rho_\varepsilon$ such that
\begin{align*}
I_\varepsilon(E)<\inf_{\|U\|=\rho_\varepsilon}I_\varepsilon(U).
\end{align*}
\end{lemma}
\begin{proof}
We shall make use of the polar coordinate representation
\begin{align*}
U=(\nu,\phi)=(\nu,\phi_1,\ldots,\phi_{k-1}),
\end{align*}
where $\nu\geq 1,\ -\pi\leq\phi_1\leq\pi,\ 0\leq\phi_2,\ldots,\phi_{k-1}\leq\pi$ and
\begin{align*}
\begin{split}
u_1&=\nu \sin(\phi_1)\sin(\phi_2)\cdots\sin(\phi_{k-1}),\\
u_2&=\nu \cos(\phi_1)\sin(\phi_2)\cdots\sin(\phi_{k-1}),\\
u_3&=\nu \cos(\phi_2)\cdots\sin(\phi_{k-1}),\\
\vdots \\
u_k&=\nu \cos(\phi_{k-1}).
\end{split}
\end{align*}
Substituting in $(F_3)$, we get $0<\mu F(x,U)\leq \nu F_{\nu}(x,U)$ and hence
\begin{align*}
F(x,U)\geq\left(\min_{|W|=1}F(x,W)\right)|U|^\mu,\ \ \mbox{for all}\ \ x\in \Omega \ \ \mbox{and}\ \ |U|\geq 1.
\end{align*}
Hence, for all $U\in H^1_0(\Omega,\mathbb{R}^k)\setminus\{0\}$ with $|U|\geq 1$, we have that
\begin{align*}
F(x,U)\geq C|U|^\mu.
\end{align*}
From (\ref{1.4}), we have that $0<M(t)\leq M(1) t^\theta$ for all $ t\geq 1$. Thus we have
\begin{align*}
I_\varepsilon(tU)\leq C_1t^{2\theta}\|U\|^{2\theta}-C_2 t^\mu \int_{K}\frac{|U|^\mu}{|x|^\beta} dx-t\varepsilon\int_{\Omega}U\cdot H dx,
\end{align*}
for $t$ large enough, where $C_1,\ C_2>0$, $\mu>2\theta>2$, and $K$ is a compact subset of $\Omega$; this yields $I_\varepsilon(tU)\rightarrow -\infty$ as $t\rightarrow+\infty$. Setting $E=tU$ with $t$ large enough, we have $I_\varepsilon(E)<0$ with $\|E\|>\rho_\varepsilon$. Thus, the proof is finished.
\end{proof}
\begin{lemma}\label{lemgc3}
If $f_i$ has subcritical (or critical) growth at $\infty$, there exists $\eta_\varepsilon>0$ and $V\in H^1_0(\Omega,\mathbb{R}^k)\backslash \{\mathbf{0}\}$ such that $I_\varepsilon(tV)<0$ for all $0<t<\eta_\varepsilon$. In particular,
\begin{align*}
\inf_{\|U\|\leq \eta_\varepsilon}I_\varepsilon(U)<0.
\end{align*}
\end{lemma}
\begin{proof}
Since $h_i\in \big(\big(H^1_0(\Omega)\big)^*,\|\cdot\|_*\big)\backslash\{0\}$ for some $i\in\{1,\ldots,k\}$, then by the Riesz representation theorem, the problem
\begin{align*}
-\Delta v_i=h_i,\ \ x\in\Omega;\ \ v_i=0\ \ \mbox{on}\ \ \partial\Omega,
\end{align*}
has a unique nontrivial weak solution $v_i$ in $H^1_0(\Omega)$. Thus,
\begin{align*}
\int_{\Omega} h_i v_i=\|v_i\|^2>0.
\end{align*}
Since $f_i(x,0,\ldots,0)=0$, by continuity, $(F_3)$ and (\ref{1.4}), $\theta>1$, it follows that there exists $\eta_\varepsilon>0$ such that for all $0<t<\eta_\varepsilon$,
\begin{align*}
\begin{split}
\frac{d}{dt}[I_\varepsilon((0,\ldots,tv_i,\ldots,0))]=&m(t^2\|v_i\|^2)t\|v_i\|^2-\int_{\Omega}\frac{v_if_i(x,(0,\ldots,tv_i,\ldots,0))}{|x|^\beta}
-\varepsilon\int_{\Omega} h_i v_i \\
\leq &C\|v_i\|^{2}t-\varepsilon\|v_i\|^2-\int_{\Omega}\frac{v_if_i(x,(0,\ldots,tv_i,\ldots,0))}{|x|^\beta}
<0.
\end{split}
\end{align*}
Using that $I_\varepsilon(\mathbf{0})=0$, it must hold that $I_\varepsilon(tV)<0$ for all $0<t<\eta_\varepsilon$ where $V=(0,\ldots,v_i,\ldots,0)$.
\end{proof}
\section{{\bfseries On Palais-Smale sequences}}\label{ps}
To prove that a Palais-Smale sequence converges to a weak solution of problem (\ref{P}), we need to establish the following lemma.
\begin{lemma}\label{lem4.1}
Assume that $(M_1),\ (M_3),\ (F_3)$ hold and $f_i$ has subcritical (or critical) growth at $\infty$. Let $\{U_n\}\subset H^1_0(\Omega,\mathbb{R}^k)$ be a Palais-Smale sequence for the functional $I_\varepsilon$ at a finite level. Then there exists $C>0$ such that
\begin{align*}
\|U_n\|\leq C,\ \ \int_{\Omega}\frac{\left|U_n\cdot \nabla F(x,U_n)\right|}{|x|^\beta}dx\leq C\ \ \mbox{and}\ \ \int_{\Omega}\frac{F(x,U_n)}{|x|^\beta}dx\leq C.
\end{align*}
\end{lemma}
\begin{proof}
Let $\{U_n\} \subset H^1_0(\Omega,\mathbb{R}^k)$ be a sequence such that $I_\varepsilon(U_n)\rightarrow c$ and $I_\varepsilon'(U_n)\rightarrow 0$, where $|c|<\infty$, then we can take this as follows:
\begin{equation}\label{4.1}
I_\varepsilon(U_n)=\frac{1}{2}M(\|U_n\|^2)-\int_{\Omega}\frac{F(x,U_n)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U_n\cdot H dx=c+\delta_n,
\end{equation}
where $\delta_n\rightarrow 0$ as $n\rightarrow \infty$, and
\begin{equation}\label{4.2}
\langle I_\varepsilon'(U_n),U_n\rangle_*=m(\|U_n\|^2)\|U_n\|^2-\int_{\Omega}\frac{U_n\cdot \nabla F(x,U_n)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U_n\cdot H dx=o(\|U_n\|).
\end{equation}
Then for $n$ large enough, by $(M_1),\ (F_3)$ and (\ref{1.4}), it holds that
\begin{align*}
\begin{split}
C+\|U_n\|\geq& I_\varepsilon(U_n)-\frac {1}{\mu} \langle I'_\varepsilon(U_n),U_n\rangle_* \\
=&\frac {1}{2}M(\|U_n\|^2)-\frac {1}{\mu}m(\|U_n\|^2)\|U_n\|^2+\frac{1}{\mu} \int_{\Omega}\frac{\left (U_n\cdot \nabla F(x,U_n) -\mu F(x,U_n)\right )}{|x|^\beta}dx \\
&-\frac{\mu-1}{\mu}\varepsilon\int_{\Omega}U_n\cdot H dx \\
\geq& \frac {1}{2\theta}\Big[\theta M(\|U_n\|^2)-m(\|U_n\|^2)\|U_n\|^2\Big]+\Big(\frac {1}{2\theta}-\frac {1}{\mu}\Big)m(\|U_n\|^2)\|U_n\|^2 \\
&-\frac{\mu-1}{\mu}\varepsilon \|U_n\| \|H\|_* \\
\geq& \Big(\frac {1}{2\theta}-\frac {1}{\mu}\Big)m(\|U_n\|^2)\|U_n\|^2-\frac{\mu-1}{\mu}\varepsilon \|U_n\| \|H\|_* \\
\geq& \frac {\mu-2\theta}{2\theta\mu}m_0\|U_n\|^2-\frac{\mu-1}{\mu}\varepsilon \|U_n\| \|H\|_*,
\end{split}
\end{align*}
for some $C>0$. Since $\mu>2\theta>2,\ m_0>0,\ \varepsilon>0$, we obtain $\|U_n\|$ is bounded. From (\ref{4.1}) and (\ref{4.2}), it can be concluded directly that there exist $C>0$ such that $\int_{\Omega}\frac{U_n\cdot \nabla F(x,U_n)}{|x|^\beta}dx\leq C$ and $\int_{\Omega}\frac{F(x,U_n)}{|x|^\beta}dx\leq C$. Condition $(F_3)$ implies $U_n\cdot \nabla F(x,U_n)\geq 0$ for all $x\in\Omega$, thus we have $\int_{\Omega}\frac{\left|U_n\cdot \nabla F(x,U_n)\right|}{|x|^\beta}dx\leq C$.
\end{proof}
In order to show that the limit of a sequence in $H^1_0(\Omega,\mathbb{R}^k)$ is a weak solution of problem (\ref{P}), we will use the following convergence result due to de Figueiredo-do \'{O}-Ruf \cite{ddr2} and the dominated convergence result due to do \'{O}-Medeiros-Severo \cite{dms}.
\begin{lemma}\label{lemcr}
\cite{ddr2} Let $\Omega \subset \mathbb{R}^2$ be a bounded domain and $f: \Omega\times \mathbb{R} \rightarrow \mathbb{R}$ be a continuous function, $\beta \in [0,2)$. Then for any sequence $\{u_n\}$ in $L^1(\Omega)$ such that
\begin{align*}
u_n\rightarrow u\ \ in\ \ L^1(\Omega),\ \ \frac{f(x,u_n)}{|x|^\beta} \in L^1(\Omega)\ \ \mbox{and}\ \ \int_{\Omega}\frac{|f(x,u_n)u_n|}{|x|^\beta}dx\leq C,
\end{align*}
up to a subsequence we have
\begin{align*}
\frac{f(x,u_n)}{|x|^\beta}\rightarrow \frac{f(x,u)}{|x|^\beta} \ \ \mbox{in}\ \ L^1(\Omega).
\end{align*}
\end{lemma}
\begin{lemma}\label{lemdr}
\cite{dms} Let $\{u_n\}$ be a sequence of functions in $H^1_0(\Omega)$ strongly convergent. Then there exists a subsequence $\{u_{n_k}\}$ of $\{u_n\}$ and $g\in H^1_0(\Omega)$ such that $|u_{n_k}(x)|\leq g(x)$ almost everywhere in $\Omega$.
\end{lemma}
\begin{lemma}\label{lem4.4}
Assume $(M_1),\ (M_3),\ (F_2),\ (F_3)$ hold and $f_i$ has subcritical (or critical) growth at $\infty$. Let $\{U_n\}\subset H^1_0(\Omega,\mathbb{R}^k)$ be the Palais-Smale sequence for functional $I_\varepsilon$ at finite level, then there exists $U\in H^1_0(\Omega,\mathbb{R}^k)$ such that
\begin{equation}\label{dcfi}
\frac{f_i(x,U_n)}{|x|^\beta}\rightarrow \frac{f_i(x,U)}{|x|^\beta}\ \ \mbox{in}\ \ L^1(\Omega), \ \ \mbox{for all}\ \ i=1,\ldots,k.
\end{equation}
and
\begin{equation}\label{dch}
U_n\cdot H\rightarrow U\cdot H\ \ \mbox{in}\ \ L^1(\Omega).
\end{equation}
Moreover,
\begin{equation}\label{4.5}
\frac{F(x,U_n)}{|x|^\beta}\rightarrow \frac{F(x,U)}{|x|^\beta}\ \ \mbox{in}\ \ L^1(\Omega).
\end{equation}
\end{lemma}
\begin{proof}
According to Lemma \ref{lem4.1}, we know that $\{U_n\}$ is bounded in $H^1_0(\Omega,\mathbb{R}^k)$, then up to a subsequence, for some $U \in H^1_0(\Omega,\mathbb{R}^k)$ such that $U_n\rightharpoonup U$ weakly in $H^1_0(\Omega,\mathbb{R}^k)$,\ $U_n\rightarrow U$ in $L^p(\Omega,\mathbb{R}^k)$ for all $p\geq 1$ and $U_n(x)\rightarrow U(x)$ almost everywhere in $\Omega$. Consequently, by Lemmas \ref{lem4.1} and \ref{lemcr}, we have
\begin{equation*}
\frac{f_i(x,U_n)}{|x|^\beta}\rightarrow \frac{f_i(x,U)}{|x|^\beta}\ \ \mbox{in}\ \ L^1(\Omega), \ \ \mbox{for all}\ \ i=1,\ldots,k.
\end{equation*}
Since
\begin{align*}
\int_{\Omega}|U_n\cdot H-U\cdot H|dx\leq \int_{\Omega}|H| |U_n -U|dx\leq \|H\|_2\|U_n-U\|_2 \rightarrow 0,
\end{align*}
we can conclude that
\begin{equation*}
U_n\cdot H\rightarrow U\cdot H\ \ \mbox{in}\ \ L^1(\Omega).
\end{equation*}
Then by Lemma \ref{lemdr}, there exists $g_i\in L^1(\Omega)$ such that $\frac{|f_i(x,U_n)|}{|x|^\beta}\leq g_i$ almost everywhere in $\Omega$. From $(F_2)$ we can conclude that
\begin{align*}
|F(x,U_n)|\leq \sup_{\Omega \times [-S_0,S_0]} |F(x,U_n(x))|+M_0|\nabla F(x,U_n)| \ \ \mbox{a.e. in}\ \ \Omega.
\end{align*}
Thus, by the generalized Lebesgue dominated convergence theorem, we get
\begin{equation*}
\frac{F(x,U_n)}{|x|^\beta}\rightarrow \frac{F(x,U)}{|x|^\beta}\ \ \mbox{in}\ \ L^1(\Omega).
\end{equation*}
\end{proof}
\section{{\bfseries Proof of the main results}}\label{main}
In order to obtain a weak solution with positive energy, according to Lemmas \ref{lemgc1} and \ref{lemgc2}, let
\begin{equation}\label{defmpl}
c_{M,\varepsilon}=\inf_{\gamma\in\Upsilon}\max_{t\in [0,1]}I_\varepsilon(\gamma(t))>0,
\end{equation}
be the minimax level of $I_\varepsilon$, where $\Upsilon=\{\gamma\in C\big([0,1],H^1_0(\Omega,\mathbb{R}^k)\big):\gamma(0)=\mathbf{0}, I_\varepsilon(\gamma(1))<0\}$. Therefore, using the Mountain-pass theorem, there exists a sequence $\{U_n\} \subset H^1_0(\Omega,\mathbb{R}^k)$ satisfying
\begin{equation}\label{5.1}
I_\varepsilon(U_n)\rightarrow c_{M,\varepsilon}\ \ \mbox{and}\ \ I'_\varepsilon(U_n)\rightarrow 0.
\end{equation}
And in order to obtain another weak solution with negative energy, by Lemmas \ref{lemgc1} and \ref{lemgc3}, we take $\eta_\varepsilon\leq \rho_\varepsilon$ and so we have that
\begin{equation}\label{defc0}
-\infty<c_{0,\varepsilon}:=\inf_{\|V\|\leq \rho_\varepsilon}I_\varepsilon(V)<0,
\end{equation}
where $\rho_\varepsilon$ is given as in Lemma \ref{lemgc1}. Since $\overline{B}_{\rho_\varepsilon}$ is a complete metric space with the metric given by the norm of $H^1_0(\Omega,\mathbb{R}^k)$, convex and the functional $I_\varepsilon$ is of class $C^1$ and bounded below on $\overline{B}_{\rho_\varepsilon}$, by the Ekeland variational principle, there exists a sequence $\{V_n\}$ in $\overline{B}_{\rho_\varepsilon}$ such that
\begin{equation}\label{5.2}
I_\varepsilon(V_n)\rightarrow c_{0,\varepsilon}\ \ \mbox{and}\ \ I'_\varepsilon(V_n)\rightarrow 0.
\end{equation}
\subsection{Subcritical case: Proof of Theorem \ref{thm1.2}}\label{ssectpfthmsc}
In this subsection, we assume that $f_i$ has subcritical growth at $\infty$ satisfying (\ref{1.2}) and prove Theorem \ref{thm1.2}.
\begin{lemma}\label{lem5.1}
The functional $I_\varepsilon$ satisfies the Palais-Smale condition at any finite level $c$.
\end{lemma}
\begin{proof}
Let $\{U_n\} \subset H^1_0(\Omega,\mathbb{R}^k)$ be a sequence such that $I_\varepsilon(U_n)\rightarrow c$ and $I_\varepsilon'(U_n)\rightarrow 0$. Lemma \ref{lem4.1} shows that $\{U_n\}$ is bounded in $H^1_0(\Omega,\mathbb{R}^k)$, then we can get a subsequence still labeled by $\{U_n\}$, for some $U\in H^1_0(\Omega,\mathbb{R}^k)$ such that
\begin{align*}
U_n\rightharpoonup U\ \ \mbox{in}\ \ H^1_0(\Omega,\mathbb{R}^k);\quad U_n \rightarrow U\ \ \mbox{in}\ \ L^q(\Omega,\mathbb{R}^k) \ \mbox{for all}\ \ q\geq 1.
\end{align*}
Since
\begin{equation}\label{5.3}
\begin{split}
\langle I_\varepsilon'(U_n),U_n-U\rangle_*=&m(\|U_n\|^2)\langle U_n,U_n-U\rangle-\int_{\Omega}\frac{(U_n-U)\cdot\nabla F(x,U_n)}{|x|^\beta}dx
-\varepsilon\int_{\Omega}(U_n-U)\cdot H dx.
\end{split}
\end{equation}
From $I_\varepsilon'(U_n)\rightarrow 0$ in $\big(H^1_0(\Omega,\mathbb{R}^k)\big)_*$, we have $\langle I'_\varepsilon(U_n),U_n-U\rangle_* \rightarrow 0$.
Meanwhile, Lemma \ref{lem4.1} shows that $\|U_n\|$ is bounded, i.e. $\|U_n\|^2\leq C_0$ for some $C_0>0$; then by the subcritical condition, the H\"{o}lder inequality and Lemma \ref{yi}, it follows that
\begin{align*}
\begin{split}
\left|\int_{\Omega}\frac{(U_n-U)\cdot\nabla F(x,U_n)}{|x|^\beta}dx \right| & \leq \int_{\Omega}\frac{|U_n-U| |\nabla F(x,U_n)|}{|x|^\beta}dx \\
&\leq C_1\int_{\Omega}|U_n-U|\frac{e^{\alpha |U_n|^2}}{|x|^\beta}dx \\
&\leq C_2\|U_n-U\|_{\frac {r}{r-1}}\left(\int_{\Omega}\frac{e^{r\alpha |U_n|^2}}{|x|^{r\beta}}dx\right)^\frac {1}{r} \\
&\leq C_2\|U_n-U\|_{\frac {r}{r-1}}\left(\sum^k_{i=1}\int_{\Omega}\frac{e^{kr\alpha\|U_n\|^2\left(\frac{u^i_n}{\|U_n\|}\right)^2}}{|x|^{r\beta}}dx\right)^\frac {1}{r}
\end{split}
\end{align*}
for some $C_1,C_2>0$, where $\alpha=\frac{4\pi(1-r\beta/2)}{krC_0}$ and $r>1$ sufficiently close to 1 such that $r\beta<2$. Since $\frac{kr\alpha\|U_n\|^2}{4\pi}+\frac{r\beta}{2}\leq 1$, then by Lemma \ref{lemtm1} and $H^1_0(\Omega,\mathbb{R}^k)\hookrightarrow L^s(\Omega,\mathbb{R}^k)$ is compact for $s\geq 1$, the fourth term in the last inequality converges to zero.
Thus from (\ref{dch}), in (\ref{5.3}), it must be that
\begin{align*}
m(\|U_n\|^2)\langle U_n,U_n-U\rangle \rightarrow 0\ \ \mbox{as}\ \ n\rightarrow+\infty.
\end{align*}
Because $(M_1)$: $m(t)\geq m_0>0$ for $t\geq 0$ and $U_n\rightharpoonup U$ in $H^1_0(\Omega,\mathbb{R}^k)$, it must be that
\begin{align*}
\langle U_n,U_n-U\rangle\rightarrow 0,
\end{align*}
which means $\|U_n\|^2\rightarrow \|U\|^2$. By Radon's Theorem, $U_n\rightarrow U$ strongly in $H^1_0(\Omega,\mathbb{R}^k)$. The proof is complete.
\end{proof}
\noindent{\bfseries Proof of Theorem \ref{thm1.2}.}
By (\ref{5.1}), (\ref{5.2}) and Lemma \ref{lem5.1}, there exists $\varepsilon_{sc}>0$ such that for each $0<\varepsilon<\varepsilon_{sc}$, using the Minimax principle, there exist critical points $U_{M,\varepsilon}$ for $I_\varepsilon$ at level $c_{M,\varepsilon}$ and $V_{0,\varepsilon}$ for $I_\varepsilon$ at level $c_{0,\varepsilon}$. We claim that $U_{M,\varepsilon}\neq \mathbf{0}$. In fact, suppose by contradiction that $U_{M,\varepsilon}\equiv \mathbf{0}$. Then $0<c_{M,\varepsilon}=\lim_{n\rightarrow\infty}I_\varepsilon(U_n)=I_\varepsilon(U_{M,\varepsilon})=I_\varepsilon(\mathbf{0})=0$, which is absurd. Similarly, we have $V_{0,\varepsilon}\neq \mathbf{0}$. In the end, we claim $U_{M,\varepsilon}\neq V_{0,\varepsilon}$. Suppose by contradiction that $U_{M,\varepsilon}\equiv V_{0,\varepsilon}$, then $0>c_{0,\varepsilon}=\lim_{n\rightarrow\infty}I_\varepsilon(V_n)=I_\varepsilon(V_{0,\varepsilon})=I_\varepsilon(U_{M,\varepsilon})=\lim_{n\rightarrow\infty}I_\varepsilon(U_n)=c_{M,\varepsilon}>0$, which is absurd. Thus, the proof of Theorem \ref{thm1.2} is complete.
\qed
\subsection{Critical case: Proof of Theorem \ref{thm1.3}}\label{ssectpfthmc}
In this subsection, we assume that $f_i$ has critical growth at $\infty$ satisfying (\ref{1.3}) and give the proof of Theorem \ref{thm1.3}.
Firstly, we show that the functional $I_\varepsilon$ satisfies the Palais-Smale condition whenever the Palais-Smale sequence lies below an appropriate level:
\begin{lemma}\label{ms1}
If $\{V_n\}$ is a Palais-Smale sequence for $I_\varepsilon$ at any finite level with
\begin{align}\label{msVx}
\liminf_{n\rightarrow\infty}\|V_n\|^2<\frac{2\pi(2-\beta)}{\alpha_0},
\end{align}
then $\{V_n\}$ possesses a strongly convergent subsequence.
\end{lemma}
\begin{proof}
Let $\{V_n\}\subset H^1_0(\Omega,\mathbb{R}^k)$ such that $I_\varepsilon(V_n)\rightarrow c$ and $I'_\varepsilon(V_n)\rightarrow 0$ in $\big(H^1_0(\Omega,\mathbb{R}^k)\big)^*$. By Lemma \ref{lem4.1}, $\|V_n\|\leq C$ for some $C>0$, thus, up to a sequence, for some $V\in H^1_0(\Omega,\mathbb{R}^k)$
\begin{align*}
\begin{split}
&V_n\rightharpoonup V\ \ \mbox{in}\ \ H^1_0(\Omega,\mathbb{R}^k).
\end{split}
\end{align*}
Taking $V_n=V+W_n$, it follows that $W_n\rightharpoonup 0$ in $H^1_0(\Omega,\mathbb{R}^k)$ and by the Br\'{e}zis-Lieb Lemma (see \cite{bl2}), we get
\begin{align}\label{ws1}
\begin{split}
\|V_n\|^2=\|V\|^2+\|W_n\|^2+o_n(1).
\end{split}
\end{align}
By $V_n\rightharpoonup V$ which means $\langle V_n,V \rangle\rightarrow \langle V,V \rangle=\|V\|^2$. Therefore, (\ref{ws1}) can be replaced by
\begin{align}\label{ws2}
\begin{split}
\|V_n\|^2=\langle V_n,V\rangle+\|W_n\|^2+o_n(1).
\end{split}
\end{align}
By $I_\varepsilon'(V_n)\rightarrow 0$ in $\big(H^1_0(\Omega,\mathbb{R}^k)\big)^*$ and (\ref{dch}), (\ref{ws2}), we can get
\begin{align*}
\begin{split}
\langle I_\varepsilon'(V_n),V_n-V\rangle_* &=m(\|V_n\|^2)\langle V_n,V_n-V \rangle-\int_{\Omega}\frac{(V_n-V)\cdot\nabla F(x,V_n)}{|x|^\beta}+o_n(1) \\
&=m(\|V_n\|^2)\|W_n\|^2-\int_{\Omega}\frac{W_n\cdot\nabla F(x,V_n)}{|x|^\beta}+o_n(1),
\end{split}
\end{align*}
that is,
\begin{align}\label{ws3}
\begin{split}
m(\|V_n\|^2)\|W_n\|^2=\int_{\Omega}\frac{W_n\cdot\nabla F(x,V_n)}{|x|^\beta}+o_n(1).
\end{split}
\end{align}
From (\ref{msVx}), there exists $\zeta>0$ such that $\alpha_0\|V_n\|^2<\zeta<2\pi(2-\beta)$ for $n$ sufficiently large and also, there exist $\alpha>\alpha_0$ close to $\alpha_0$ and $q>1$ close to~$1$ such that $q\alpha \|V_n\|^2<\zeta<2\pi(2-q\beta)$ for $n$ sufficiently large. Then by (\ref{3.1}), we have
\begin{align*}
\begin{split}
\left|\int_{\Omega}\frac{W_n\cdot\nabla F(x,V_n)}{|x|^\beta}\right|\leq C_1\int_{\Omega}|W_n|\frac{e^{\alpha|V_n|^2}}{|x|^\beta},
\end{split}
\end{align*}
and by the H\"{o}lder's inequality and Lemma \ref{lemtm2}, we can get that
\begin{align*}
\begin{split}
\int_{\Omega}|W_n|\frac{e^{\alpha|V_n|^2}}{|x|^\beta}\leq C_1\|W_n\|_s \bigg(\int_{\Omega}\frac{e^{q\alpha|V_n|^2}}{|x|^{q\beta}}\bigg)^{1/q}\leq C_2\|W_n\|_s,
\end{split}
\end{align*}
where $s=\frac{q}{q-1}$. By the compact embedding $H^1_0(\Omega,\mathbb{R}^k)\hookrightarrow L^s(\Omega,\mathbb{R}^k)$ for $s\geq 1$, we conclude that
\begin{align*}
\begin{split}
\int_{\Omega}\frac{W_n\cdot\nabla F(x,V_n)}{|x|^\beta}\rightarrow 0.
\end{split}
\end{align*}
Thus, this together with (\ref{ws3}) and $(M_1)$, we get that $\|W_n\|\rightarrow 0$ and the result follows.
\end{proof}
Then, in order to get a more precise information about the minimax level $c_{M,\varepsilon}$, let us consider the following sequence which was introduced in \cite{ddr}: for $n\in \mathbb{N}$ set $\delta_n=\frac{2\log n}{n}$, and let
\begin{eqnarray*}
y_n(t)=\frac {1}{\sqrt{2\pi}}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
\frac {t}{n^{1/2}}(1-\delta_n)^{1/2},\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ &{\rm if}\ \ 0\leq t\leq n,\\[2mm]
\frac {1}{\big [n(1-\delta_n)\big ]^{1/2}}\log\frac{A_n+1}{A_n+e^{-(t-n)}}+\big [n(1-\delta_n)\big ]^{1/2},\ \ \ &{\rm if}\ \ \ t\geq n,\\[2mm]
\end{array}
\right.
\end{eqnarray*}
where $A_n$ is defined as $A_n=\frac{1}{en^2}+O(\frac{1}{n^4})$.
The sequence of function $\{y_n\}$ satisfies the following properties:
\begin{eqnarray*}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
\{y_n\}\subset C\big([0,+\infty)\big),\ \mbox{piecewise differentiable, with}\ y_n(0)=0 \ \mbox{and}\ y'_n(t)\geq 0;\\[2mm]
\int^{+\infty}_0 |y'_n(t)|^2=1;\\[2mm]
\lim_{n\rightarrow \infty}\int^{+\infty}_0 e^{y^2_n(t)-t}dt=1+e.
\end{array}
\right.
\end{eqnarray*}
Now, let $y_n(t)=2\sqrt{\pi}\widehat{G}_n(e^{-t/2})$ with $|x|=e^{-t/2}$, define a function $\widehat{G}_n(x)=\widehat{G}_n(|x|)$ on $\overline{B_1(0)}$, which is nonnegative and radially symmetric. Moreover, we have
\begin{align*}
\int_{B_1(0)}|\nabla \widehat{G}_n(x)|^2dx=\int^{+\infty}_0|y'_n(t)|^2=1.
\end{align*}
Therefore $\|\widehat{G}_n\|=1$. Let $\tau=\frac{2-\beta}{2}$, then $\widehat{G}_n$ defines another function nonnegative and radially symmetric $\tilde{G}_n$ as follows:
\begin{align*}
\widehat{G}_n(\varrho)=\tau^{1/2}\tilde{G}_n(\varrho^{1/\tau})\ \ \mbox{for}\ \ \varrho\in [0,1].
\end{align*}
Note that
\begin{align*}
\int^1_0|\widehat{G}'_n(\varrho)|^2\varrho d\varrho=\int^1_0|\tilde{G}'_n(\varrho)|^2\varrho d\varrho.
\end{align*}
Therefore, $\|\widehat{G}_n\|=\|\tilde{G}_n\|$. The open ball $B_d(0)$ is contained in $\Omega$, where $d$ was given in $(F_4)$. Considering
\begin{align}\label{defgnd}
\mathscr{G}_{n,d}(x):=\big(G_{n,d}(x),0,\ldots,0\big),\ \ \ \mbox{where}\ \ \ G_{n,d}(x):=\tilde{G}_n\left(\frac{x}{d}\right),
\end{align}
then $\mathscr{G}_{n,d}(x)$ belongs to $H^1_0(\Omega,\mathbb{R}^k)$ with $\|\mathscr{G}_{n,d}\|=1$, and the support of $\mathscr{G}_{n,d}$ is contained in $B_d(0)$.
\begin{remark}\label{rem5.2} \rm
If condition $(F_4)$ holds, we define $\mathscr{G}'_{n,d}(x)$ whose $i$-th component is $G_{n,d}(x)$ and whose remaining components are $0$, i.e., $\mathscr{G}'_{n,d}(x)=(0,\ldots,0,G_{n,d}(x),0,\ldots,0)$. Then, given $\delta>0$ there exists $s_\delta>0$ such that
\begin{align*}
\begin{split}
\mathscr{G}'_{n,d}\cdot\nabla F(x,\mathscr{G}'_{n,d})
= G_{n,d} f_i(x,\mathscr{G}'_{n,d})
\geq (\eta_0 -\delta)\exp\left(\alpha_0|\mathscr{G}'_{n,d}|^2\right)
= (\eta_0 -\delta)\exp\left(\alpha_0|G_{n,d}|^2\right),
\end{split}
\end{align*}
$\forall x\in \Omega,\ \ |\mathscr{G}'_{n,d}|=|G_{n,d}|\geq s_\delta$. This is of the same type as (\ref{5.7}) below;
therefore, without loss of generality, we may assume that $i=1$ in $(F_4)$.
\end{remark}
\begin{lemma}\label{leest}
For any $0<\epsilon<1$, we have that for $x\in B_\frac{d}{\varpi(n)}(0)$ with $\varpi(n)=\exp\left\{\frac{n^{(1+\epsilon)/2}}{2}\right\}$,
\begin{align}\label{psbc}
|\mathscr{G}_{n,d}(x)|\geq\frac{1}{2\sqrt{\pi}} n^{\frac{\epsilon}{2}}\left(1-\frac{2\log n}{n}\right)^{\frac{1}{2}},
\end{align}
where $\mathscr{G}_{n,d}$ is given in (\ref{defgnd}).
\end{lemma}
\begin{proof}
For $x\in B_\frac{d}{\varpi(n)}(0)$,
\begin{align*}
\left|\mathscr{G}_{n,d}(x)\right|=\left|G_{n,d}(x)\right|=\left|\tilde{G}_n\left(\frac{x}{d}\right)\right|=\left|\tilde{G}_n(y)\right|,
\end{align*}
where $y=\frac{x}{d}\in B_\frac{1}{\varpi(n)}(0)$. Moreover,
\begin{align*}
\left|\tilde{G}_n(y)\right|=\left|\tilde{G}_n(|y|)\right|=\frac{1}{2\sqrt{\pi}}y_n(-2\log(|y|))=\frac{1}{2\sqrt{\pi}}y_n(t),
\end{align*}
where $t=-2\log(|y|)\in (n^{\frac{1+\epsilon}{2}},+\infty)$. Noticing that, $y_n(t)\geq\big[n(1-\delta_n)\big]^{\frac{1}{2}}=n^{\frac{1}{2}}\left(1-\frac{2\log n}{n}\right)^{\frac{1}{2}}$ if $t\geq n$. Moreover, in $(n^{\frac{1+\epsilon}{2}},n)$,
\begin{align*}
\begin{split}
y_n(t)\geq \frac {n^{\frac{1+\epsilon}{2}}}{n^{\frac{1}{2}}}(1-\delta_n)^{\frac{1}{2}}=n^{\frac{\epsilon}{2}}\left(1-\frac{2\log n}{n}\right)^{\frac{1}{2}}.
\end{split}
\end{align*}
The proof is complete.
\end{proof}
\begin{lemma}\label{nl}
If conditions $(M_1),\ (M_3)$ and $(F_3),\ (F_4)$ hold, then
\begin{align*}
\max_{t\geq 0}\left[\frac{1}{2}M(t^2)-\int_{\Omega}\frac{F(x,t\mathscr{G}_{n,d})}{|x|^\beta}\right]
<\frac{1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right).
\end{align*}
\end{lemma}
\begin{proof}
Suppose by contradiction, that for all $n\in \mathbb{N}$, we have
\begin{align*}
\max_{t\geq 0}\left[\frac{1}{2}M(t^2)-\int_{\Omega}\frac{F(x,t\mathscr{G}_{n,d})}{|x|^\beta}\right]
\geq \frac{1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right).
\end{align*}
By Lemmas \ref{lemgc1} and \ref{lemgc2}, for each $n$ there exists $t_n>0$ such that
\begin{align*}
\frac{1}{2}M(t^2_n)-\int_{\Omega}\frac{F(x,t_n\mathscr{G}_{n,d})}{|x|^\beta}=\max_{t\geq 0}\left[\frac{1}{2}M(t^2)-\int_{\Omega}\frac{F(x,t\mathscr{G}_{n,d})}{|x|^\beta}\right].
\end{align*}
From this and $(F_3)$, one has $M(t^2_n)\geq M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)$. By $(M_1)$, $M:[0,+\infty)\rightarrow[0,+\infty)$ is an increasing bijection, and so
\begin{align}\label{5.5}
t^2_n\geq \frac{2\pi(2-\beta)}{\alpha_0}.
\end{align}
On the other hand,
\begin{align*}
\frac{d}{dt}\left[\frac{1}{2}M(t^2)-\int_{\Omega}\frac{F(x,t\mathscr{G}_{n,d})}{|x|^\beta}\right]\bigg|_{t=t_n}=0,
\end{align*}
from which we obtain
\begin{align}\label{5.6}
m(t^2_n)t^2_n&=\int_{\Omega}\frac{t_n\mathscr{G}_{n,d}\cdot\nabla F(x,t_n\mathscr{G}_{n,d})}{|x|^\beta}dx
= \int_{\Omega}\frac{t_nG_{n,d}f_1(x,t_n\mathscr{G}_{n,d})}{|x|^\beta}dx.
\end{align}
By Remark \ref{rem5.2} and $(F_4)$, given $\delta>0$ there exists $s_\delta>0$ such that
\begin{align}\label{5.7}
u_1 f_1(x,u_1,0,\ldots,0)\geq (\eta_0 -\delta)e^{\alpha_0|u_1|^2},\ \ \ \forall\ x\in \Omega,\ \ |u_1|\geq s_\delta.
\end{align}
Lemma \ref{leest} shows that for any $0<\epsilon<1$, $t_n|\mathscr{G}_{n,d}|\geq s_\delta$ in $B_\frac{d}{\varpi(n)}(0)\subset\Omega$ for $n$ sufficiently large, where
$
\varpi(n)=\exp\left\{\frac{n^{(1+\epsilon)/2}}{2}\right\}.
$
Thus, by (\ref{5.6}) and (\ref{5.7}), we have
\begin{align*}
\begin{split}
m(t^2_n)t^2_n\geq (\eta_0-\delta)\int_{B_\frac{d}{\varpi(n)}(0)} \frac{e^{\alpha_0|t_n G_{n,d}|^2}}{|x|^\beta}dx
&=(\eta_0-\delta)\Big(\frac{d}{\varpi(n)}\Big)^{2-\beta}\int_{B_1(0)} \frac{e^{\alpha_0|t_n \tilde{G}_n|^2}}{|x|^\beta}dx \\
&=2\pi(\eta_0-\delta)\Big(\frac{d}{\varpi(n)}\Big)^{2-\beta}\int^1_0 e^{\alpha_0|t_n \tilde{G}_n(\sigma)|^2}\sigma^{1-\beta} d\sigma.
\end{split}
\end{align*}
By performing the change of variable $\sigma=\tau^{\frac{2}{2-\beta}}$, we get
\begin{align*}
m(t^2_n)t^2_n\geq \frac{4\pi}{2-\beta}(\eta_0-\delta)\Big(\frac{d}{\varpi(n)}\Big)^{2-\beta}\int^{1}_0 e^{\frac{2\alpha_0|t_n \tilde{G}_n(\tau)|^2}{2-\beta}}\tau d\tau.
\end{align*}
Meanwhile, setting $\tau=e^{-t/2}$, we obtain
\begin{align*}
\begin{split}
m(t^2_n)t^2_n\geq \frac{2\pi}{2-\beta}(\eta_0-\delta)\Big(\frac{d}{\varpi(n)}\Big)^{2-\beta}\int^{+\infty}_0 e^{\frac{\alpha_0|t_n y_n(t)|^2}{2\pi(2-\beta)}}e^{-t}dt.
\end{split}
\end{align*}
Consequently,
\begin{align}\label{5.8}
\begin{split}
m(t^2_n)t^2_n&\geq \frac{2\pi}{2-\beta}(\eta_0-\delta)\Big(\frac{d}{\varpi(n)}\Big)^{2-\beta}\int^{+\infty}_n e^{\frac{\alpha_0t_n^2(n-2\log n)}{2\pi(2-\beta)}}e^{-t}dt \\
&=\frac{2\pi}{2-\beta}(\eta_0-\delta)d^{2-\beta}\exp\left\{\frac{\alpha_0t_n^2(n-2\log n)}{2\pi(2-\beta)}-(2-\beta)\log \varpi(n)-n\right\} \\
&=\frac{2\pi}{2-\beta}(\eta_0-\delta)d^{2-\beta}\exp\left\{\frac{\alpha_0t_n^2(n-2\log n)}{2\pi(2-\beta)}-\frac{(2-\beta)n^{\frac{1+\epsilon}{2}}}{2}-n\right\} \\
&=\frac{2\pi}{2-\beta}(\eta_0-\delta)d^{2-\beta}\exp\left\{\left[\frac{\alpha_0t_n^2}{2\pi(2-\beta)}-1\right]n-\frac{(2-\beta)n^{\frac{1+\epsilon}{2}}}{2}
-\frac{\alpha_0t_n^2}{\pi(2-\beta)}\log n\right\}.
\end{split}
\end{align}
From this
\begin{align}\label{5.9}
\begin{split}
1\geq \frac{2\pi}{2-\beta}(\eta_0-\delta)d^{2-\beta}\exp\left\{t^2_n n\left[\frac{\alpha_0\big(1-\frac{2\log n}{ n}\big)}{2\pi(2-\beta)}-\frac{(2-\beta)n^{\frac{1+\epsilon}{2}}+2n}{2t^2_n n}-\frac{\log{\big (m(t^2_n)t^2_n\big)}}{t^2_n n} \right]\right\},
\end{split}
\end{align}
thus, $\{t_n\}$ is bounded. Otherwise, noting that, from (\ref{1.4}), making use of the property of $M$ and $m$, we would have that
\begin{align*}
\begin{split}
t^2_n n\left[\frac{\alpha_0\big(1-\frac{2\log n}{n}\big)}{2\pi(2-\beta)}-\frac{(2-\beta)n^{\frac{1+\epsilon}{2}}+2n}{2t^2_n n}-\frac{\log{\big (m(t^2_n)t^2_n\big)}}{t^2_n n} \right]\rightarrow+\infty,
\end{split}
\end{align*}
which is a contradiction with (\ref{5.9}). Therefore $\{t_n\}$ has a convergent subsequence; by (\ref{5.5}), $t_n\rightarrow t_0$ for some $t_0$ with $t_0^2\geq \frac{2\pi(2-\beta)}{\alpha_0}$. Moreover, using (\ref{5.8}), we must have $\frac{\alpha_0 t^2_0}{2\pi(2-\beta)}-1\leq 0$ and therefore,
\begin{align}\label{5.10}
t^2_n\rightarrow\frac{2\pi(2-\beta)}{\alpha_0}.
\end{align}
At this point, following arguments as in \cite{dmr1}, we are going to estimate (\ref{5.6}) more exactly. For this, in view of (\ref{5.7}), for $0<\delta<\eta_0$ and $n\in \mathbb{N}$ we set
\begin{equation*}
D_{n,\delta}:=\{x\in B_d(0):t_n G_{n,d}\geq s_\delta\}\ \ \mbox{and}\ \ E_{n,\delta}:=B_d(0)\backslash D_{n,\delta}.
\end{equation*}
Thus, by splitting the integral (\ref{5.6}) on $D_{n,\delta}\ \mbox{and}\ E_{n,\delta}$, and using (\ref{5.7}), it follows that
\begin{align}\label{5.11}
\begin{split}
m(t^2_n)t^2_n\geq &(\eta_0-\delta)\int_{B_d(0)}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx-(\eta_0-\delta)\int_{E_{n,\delta}}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx \\
&+\int_{E_{n,\delta}}\frac{t_n G_{n,d}f_1(x,t_n \mathscr{G}_{n,d})}{|x|^\beta}dx.
\end{split}
\end{align}
Since $G_{n,d}(x)\rightarrow 0$ for almost everywhere $x\in B_d(0)$, we have that the characteristic functions $\chi_{E_{n,\delta}}$ satisfy
\begin{equation*}
\chi_{E_{n,\delta}}\rightarrow 1\ \ \mbox{a.e. in}\ \ B_d(0)\ \ \mbox{as}\ \ n\rightarrow+\infty.
\end{equation*}
Moreover, $t_n G_{n,d}<s_\delta$ in $E_{n,\delta}$. Thus, invoking the Lebesgue dominated convergence theorem, we obtain
\begin{equation*}
\int_{E_{n,\delta}}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx\rightarrow\frac{2\pi}{2-\beta}d^{2-\beta} \ \ \mbox{and}\ \ \int_{E_{n,\delta}}\frac{t_n G_{n,d}f_1(x,t_n \mathscr{G}_{n,d})}{|x|^\beta}dx\rightarrow 0,\ \ \mbox{as}\ \ n\rightarrow+\infty.
\end{equation*}
Noting that
\begin{equation*}
\begin{split}
\int_{B_d(0)}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx=d^{2-\beta}\int_{B_1(0)}\frac{e^{\alpha_0 t^2_n \tilde{G}^2_n}}{|x|^\beta}dx=2\pi d^{2-\beta}\int^1_0{e^{\alpha_0 t^2_n \tilde{G}^2_n(\sigma)}}\sigma^{1-\beta}d\sigma.
\end{split}
\end{equation*}
By performing the change of variable $\sigma=\tau^{\frac{2}{2-\beta}}$, we get
\begin{align*}
\int_{B_d(0)}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx=\frac{4\pi}{2-\beta}d^{2-\beta}\int^{1}_0 e^{\frac{2\alpha_0|t_n \tilde{G}_n(\tau)|^2}{2-\beta}}\tau d\tau.
\end{align*}
Meanwhile, setting $\tau=e^{-t/2}$ and using (\ref{5.5}), we obtain
\begin{align*}
\begin{split}
\int_{B_d(0)}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx=&\frac{2\pi}{2-\beta}d^{2-\beta}\int^{+\infty}_0 e^{\frac{\alpha_0|t_n y_n(t)|^2}{2\pi(2-\beta)}}e^{-t}dt \\
\geq& \frac{2\pi}{2-\beta}d^{2-\beta}\int^{+\infty}_0 e^{y^2_n(t)-t}dt.
\end{split}
\end{align*}
Passing to limit in (\ref{5.11}), we obtain that
\begin{align}\label{5.12}
m\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\frac{2\pi(2-\beta)}{\alpha_0}\geq (\eta_0-\delta)\left[\frac{2\pi}{2-\beta} d^{2-\beta}(1+e)-\frac{2\pi}{2-\beta} d^{2-\beta}\right]=(\eta_0-\delta)\frac{2\pi e}{2-\beta} d^{2-\beta},
\end{align}
and doing $\delta\rightarrow 0^+$, we get $\eta_0\leq \frac { (2-\beta)^2m\big(\frac {2\pi(2-\beta)}{\alpha_0}\big)}{\alpha_0 d^{2-\beta} e }$, which contradicts $(F_4)$. Thus, this lemma is proved.
\end{proof}
Now, we establish an estimate for the minimax level.
\begin{lemma}\label{lem5.3}
If conditions $(M_1),\ (M_3)$ and $(F_3)-(F_4)$ hold, then for small $\varepsilon$, it holds that
\begin{align*}
c_{M,\varepsilon}<\frac{1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right),
\end{align*}
where $c_{M,\varepsilon}$ is given as in (\ref{defmpl}).
\end{lemma}
\begin{proof}
Since $\|\mathscr{G}_{n,d}\|=1$, as in the proof of Lemma \ref{lemgc2}, we have that $I_\varepsilon(t\mathscr{G}_{n,d})\rightarrow -\infty$ as $t\rightarrow +\infty$. Consequently, $c_{M,\varepsilon}\leq\max_{t\geq 0}I_\varepsilon(t\mathscr{G}_{n,d}),\ \forall \ n\in \mathbb{N}$. Thus, from Lemma \ref{nl}, taking $\varepsilon$ sufficiently small, we can get what we desired.
\end{proof}
\begin{lemma}\label{lemms}
If $f_i$ has critical growth at $\infty$ and conditions $(M_1)-(M_3)$ and $(F_1)-(F_4)$ hold, then for small $\varepsilon$, problem (\ref{P}) has one nontrivial mountain-pass type solution $U_{M,\varepsilon}$ at level $c_{M,\varepsilon}$, where $c_{M,\varepsilon}$ is given as in (\ref{defmpl}).
\end{lemma}
\begin{proof}
From (\ref{5.1}) and Lemma \ref{lem4.1}, there exists a bounded Palais-Smale sequence $\{U_n\}$ for $I_\varepsilon$ at level $c_{M,\varepsilon}$. Up to a subsequence, for some $U\in H^1_0(\Omega,\mathbb{R}^k)$, one has
\begin{align}\label{5.14}
U_n\rightharpoonup U\ \ \mbox{in}\ \ H^1_0(\Omega,\mathbb{R}^k); \qquad
U_n \rightarrow U\ \ \mbox{in}\ \ L^s(\Omega,\mathbb{R}^k) \ \mbox{for all}\ \ s\geq 1.
\end{align}
By Lemma \ref{lem4.1}, $\|U_n\|\leq C$ for some $C>0$, and $\|U\|\leq \liminf_{n\rightarrow\infty} \|U_n\|\leq C$. By $(F_3)$, we have that for small $\varepsilon$, it holds
\begin{align}\label{ms0}
\begin{split}
&\frac {1}{2\theta}\int_{\Omega}\bigg[\frac{U\cdot\nabla F(x,U)-2\theta F(x,U)}{|x|^\beta}-\varepsilon(2\theta-1) U\cdot H\bigg]dx \\
&\geq\frac {1}{2\theta}\int_{\Omega}\frac{U\cdot\nabla F(x,U)-2\theta F(x,U)}{|x|^\beta}dx-\frac {\varepsilon (2\theta-1)\|U\| \|H\|_*}{2\theta}
\geq 0.
\end{split}
\end{align}
Next, we will make some claims as follows.
\noindent{\bfseries Claim 1.} \ \ \ $U\neq \mathbf{0}$.
\begin{proof}
Suppose by contradiction that $U\equiv \mathbf{0}$. Then Lemma \ref{lem4.4} shows that $\int_{\Omega}U_n\cdot H dx\rightarrow 0$ and $\int_{\Omega}\frac{F(x,U_n)}{|x|^\beta}dx\rightarrow 0$, thus
\begin{align*}
\frac{1}{2}M(\|U_n\|^2)\rightarrow c_{M,\varepsilon}<\frac{1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right),
\end{align*}
then condition $(M_1)$ implies
\begin{align*}
\liminf_{n\rightarrow\infty}\|U_n\|^2<\frac{2\pi(2-\beta)}{\alpha_0}.
\end{align*}
By Lemma \ref{ms1}, we have $\|U_n\|^2\rightarrow 0$ and therefore $I_\varepsilon(U_n)\rightarrow 0$, which contradicts $c_{M,\varepsilon}>0$. Hence we must have $U\neq \mathbf{0}$.
\end{proof}
\noindent{\bfseries Claim 2.} \ \ \ Let $A:=\lim_{n\rightarrow \infty}\|U_n\|^2$, then $U$ is a weak solution of
\begin{eqnarray*}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
-m(A)\Delta U=\frac{\nabla F(x,U)}{|x|^\beta}+\varepsilon H,\ \ &\mbox{in}\ \ \Omega,\\[2mm]
U=0,\ \ &\mbox{on}\ \ \partial\Omega.
\end{array}
\right.
\end{eqnarray*}
\begin{proof}
We define $C^\infty_0(\Omega,\mathbb{R}^k):=C^\infty_0(\Omega)\times \cdots\times C^\infty_0(\Omega)$. By $I_\varepsilon'(U_n)\rightarrow 0$ and Lemma \ref{lem4.4}, we see that
\begin{align*}
\begin{split}
m(A)\int_{\Omega}\nabla U\cdot\nabla \Phi dx-\int_{\Omega}\frac{\Phi\cdot\nabla F(x,U)}{|x|^\beta}dx-\varepsilon\int_{\Omega}\Phi\cdot Hdx=0,\quad \forall\Phi\in C^\infty_0(\Omega,\mathbb{R}^k).
\end{split}
\end{align*}
Since $C^\infty_0(\Omega)$ is dense in $H^1_0(\Omega)$, then $C^\infty_0(\Omega,\mathbb{R}^k)$ is also dense in $H^1_0(\Omega,\mathbb{R}^k)$, and we conclude this claim.
\end{proof}
\noindent{\bfseries Claim 3.} \ \ \ $A:=\lim_{n\rightarrow \infty}\|U_n\|^2<\|U\|^2+\frac{2\pi(2-\beta)}{\alpha_0}$.
\begin{proof}
Suppose by contradiction that $A\geq \|U\|^2+\frac{2\pi(2-\beta)}{\alpha_0}\geq \frac{2\pi(2-\beta)}{\alpha_0}$. Therefore, from (\ref{5.1}) and Lemma \ref{lem4.4}, we obtain
\begin{align*}
\begin{split}
c_{M,\varepsilon}=&\lim_{n\rightarrow\infty}\left[I_\varepsilon(U_n)-\frac {1}{2\theta}\langle I'_\varepsilon(U_n),U_n\rangle_*\right] \\
=&\frac {1}{2\theta}\lim_{n\rightarrow\infty}\Big[\theta M(\|U_n\|^2)-m(\|U_n\|^2)\|U_n\|^2\Big] \\
&+\frac {1}{2\theta}\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
&+\frac {1}{2\theta}\int_{\Omega}\bigg[\frac{U\cdot\nabla F(x,U)-2\theta F(x,U)}{|x|^\beta}-\varepsilon(2\theta-1)U\cdot H\bigg]dx, \\
\end{split}
\end{align*}
thus, by $(M_3)$ and (\ref{ms0})
\begin{align*}
\begin{split}
c_{M,\varepsilon}\geq&\frac {1}{2\theta}\lim_{n\rightarrow\infty}\Big[\theta M(\|U_n\|^2)-m(\|U_n\|^2)\|U_n\|^2\Big] \\
&+\frac {1}{2\theta}\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
=&\frac {1}{2\theta}\Big[\theta M(A)-m(A)A\Big]+\frac {1}{2\theta}\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
\geq&\frac {1}{2\theta}\bigg[\theta M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)-m\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\frac{2\pi(2-\beta)}{\alpha_0}\bigg] \\
&+\frac {1}{2\theta}\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
=&\frac {1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right) \\
&-\frac {1}{2\theta}\bigg[m\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\frac{2\pi(2-\beta)}{\alpha_0}-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx\bigg]. \\
\end{split}
\end{align*}
Here, we assert
\begin{align*}
0\geq m\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\frac{2\pi(2-\beta)}{\alpha_0}-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx.
\end{align*}
Indeed, $\langle I'_\varepsilon(U_n),U_n\rangle_*\rightarrow 0$, (\ref{dch}) and Claim 2 indicate that
\begin{align*}
\begin{split}
0=&m(A)A-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U\cdot H dx, \\
0=&m(A)\|U\|^2-\int_{\Omega}\frac{U\cdot\nabla F(x,U)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U\cdot H dx.
\end{split}
\end{align*}
Subtracting the second equality from the first one, by $(M_2)$ which implies $m(t)$ and $m(t)t$ are nondecreasing for $t\geq 0$, we get
\begin{align*}
\begin{split}
0&=m(A)(A-\|U\|^2)-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
&\geq m(A-\|U\|^2)(A-\|U\|^2)-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
&\geq m\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\frac{2\pi(2-\beta)}{\alpha_0}-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx.
\end{split}
\end{align*}
This shows the assertion. Noting Lemma \ref{lem5.3}, we conclude
\begin{align*}
\frac {1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\leq c_{M,\varepsilon}<\frac {1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right),
\end{align*}
which is absurd. Thus this claim is proved.
\end{proof}
\noindent{\bfseries Claim 4.} \ \ \ $A:=\lim_{n\rightarrow \infty}\|U_n\|^2=\|U\|^2$.
\begin{proof}
By using semicontinuity of norm, we have $\|U\|^2\leq A$. We are going to show that the case $\|U\|^2< A$ can not occur. Indeed, if $\|U\|^2< A$, defining $Z_n=\frac{U_n}{\|U_n\|}$ and $Z_0=\frac{U}{A^{1/2}}$, we have $Z_n\rightharpoonup Z_0\ \mbox{in} \ H^1_0(\Omega,\mathbb{R}^k)$ and $\|Z_0\|<1$. Thus, by Lemma \ref{lemtm4}
\begin{align}\label{5.16}
\sup_n \int_{\Omega}\frac{e^{p|Z_n|^2}}{|x|^\beta}dx<\infty,\ \ \forall p<\frac{2\pi(2-\beta)} {1-\|Z_0\|^2}.
\end{align}
Since $A=\frac{A-\|U\|^2}{1-\|Z_0\|^2}$, it follows from Claim 3 that $A<\frac{2\pi(2-\beta)}{\alpha_0(1-\|Z_0\|^2)}$.
Thus, there exists $\zeta>0$ such that $\alpha_0\|U_n\|^2<\zeta<\frac{2\pi(2-\beta)}{1-\|Z_0\|^2}$ for $n$ sufficiently large. For $q>1$ close to 1 and $\alpha>\alpha_0$ close to $\alpha_0$ we still have $q\alpha\|U_n\|^2< \zeta<\frac{2\pi(2-q\beta)}{1-\|Z_0\|^2}$ with $q\beta<2$, and provoking (\ref{5.16}), for some $C>0$ and $n$ large enough, we conclude that
\begin{align*}
\int_{\Omega}\frac{e^{q\alpha|U_n|^2}}{|x|^{q\beta}}dx\leq \int_{\Omega}\frac{e^{\zeta|Z_n|^2}}{|x|^{q\beta}}dx\leq C.
\end{align*}
Hence, using (\ref{3.1}), (\ref{5.14}) and the H\"{o}lder's inequality, we get
\begin{align*}
\begin{split}
\left|\int_{\Omega} \frac{(U_n-U)\cdot\nabla F(x,U_n)}{|x|^\beta} dx\right|&\leq C_1\int_{\Omega}|U_n-U|\frac{e^{\alpha|U_n|^2}}{|x|^\beta} dx \\
&\leq C_1\|U_n-U\|_{\frac{q}{q-1}}\left(\int_{\Omega}\frac{e^{q\alpha|U_n|^2}}{|x|^{q\beta}} dx\right)^{1/q} \\
&\leq C_2\|U_n-U\|_{\frac{q}{q-1}}\rightarrow 0,
\end{split}
\end{align*}
as $n\rightarrow\infty.$ Since $\langle I'_\varepsilon(U_n),U_n-U\rangle_*\rightarrow 0$, by (\ref{dch}), it follows that $m(\|U_n\|^2)\langle U_n,U_n-U\rangle\rightarrow 0$. On the other hand,
\begin{align*}
\begin{split}
m(\|U_n\|^2)\langle U_n,U_n-U\rangle&=m(\|U_n\|^2)\|U_n\|^2-m(\|U_n\|^2)\int_{\Omega}\nabla U_n\cdot\nabla Udx \\
&\rightarrow m(A)A-m(A)\|U\|^2,
\end{split}
\end{align*}
which implies that $A=\|U\|^2$, which is absurd. Thus, this claim is proved.
\end{proof}
\noindent{\bfseries Finalizing the proof of Lemma \ref{lemms}}: Since $H^1_0(\Omega,\mathbb{R}^k)$ is a uniformly convex Banach space, by (\ref{5.14}), Claim 4 and Radon's Theorem, $U_n\rightarrow U$ in $H^1_0(\Omega,\mathbb{R}^k)$. Hence, by (\ref{5.1}), $I_\varepsilon'(U_n)\rightarrow 0$ and Lemma \ref{lem4.4}, we have
\begin{equation*}
m(\|U\|^2)\int_{\Omega}\nabla U \cdot\nabla \Phi=\int_{\Omega}\frac{\Phi\cdot\nabla {F(x,U)}}{|x|^\beta} dx+\varepsilon\int_{\Omega}H \cdot \Phi dx,\ \ \forall \Phi\in C^\infty_0(\Omega,\mathbb{R}^k).
\end{equation*}
Since $C^\infty_0(\Omega,\mathbb{R}^k)$ is dense in $H^1_0(\Omega,\mathbb{R}^k)$, we conclude that $U_{M,\varepsilon}:=U$ is a Mountain-pass type solution for problem (\ref{P}) with $I_\varepsilon(U_{M,\varepsilon})=c_{M,\varepsilon}>0$ and according to Claim 1, the proof is complete.
\end{proof}
Finally, let us to find out a minimum type solution $V_{0,\varepsilon}$ with $I_\varepsilon(V_{0,\varepsilon})=c_{0,\varepsilon}<0$, where $c_{0,\varepsilon}$ is given as in (\ref{defc0}).
\begin{lemma}\label{lemms2}
For small $\varepsilon$, problem (\ref{P}) has a nontrivial minimum type solution $V_{0,\varepsilon}$ with $I_\varepsilon(V_{0,\varepsilon})=c_{0,\varepsilon}<0$.
\end{lemma}
\begin{proof}
Let $\rho_\varepsilon$ be as in Lemma \ref{lemgc1}. Then we can choose $\varepsilon$ sufficiently small such that
\begin{align*}
\rho_\varepsilon<\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)^{1/2}.
\end{align*}
Since $\overline{B}_{\rho_\varepsilon}$ is a complete metric space with the metric given by the norm of $H^1_0(\Omega,\mathbb{R}^k)$, convex and the functional $I_\varepsilon$ is of class $C^1$ and bounded below on $\overline{B}_{\rho_\varepsilon}$, by the Ekeland variational principle, there exists a sequence $\{V_n\}$ in $\overline{B}_{\rho_\varepsilon}$ such that
\begin{equation*}
I_\varepsilon(V_n)\rightarrow c_{0,\varepsilon}=\inf_{\|V\|\leq \rho_\varepsilon}I_\varepsilon(V)<0\ \ \mbox{and}\ \ I'_\varepsilon(V_n)\rightarrow 0.
\end{equation*}
Observing that
\begin{align*}
\|V_n\|^2\leq \rho_\varepsilon^2<\frac{2\pi(2-\beta)}{\alpha_0},
\end{align*}
by Lemma \ref{ms1}, there exists a strongly convergent subsequence and therefore, for some $V_{0,\varepsilon}$, $V_n\rightarrow V_{0,\varepsilon}$ strongly in $H^1_0(\Omega,\mathbb{R}^k)$. Consequently, $V_{0,\varepsilon}$ is a minimum type solution of problem (\ref{P}) with $I_\varepsilon(V_{0,\varepsilon})=c_{0,\varepsilon}<0$. We claim $V_{0,\varepsilon}\neq \mathbf{0}$. Indeed, suppose by contradiction that $V_{0,\varepsilon}= \mathbf{0}$; then $0>c_{0,\varepsilon}=I_\varepsilon(V_{0,\varepsilon})=I_\varepsilon(\mathbf{0})=0$, which is absurd, and this lemma is proved.
\end{proof}
\noindent{\bfseries Proof of Theorem \ref{thm1.3}.}
By Lemmas \ref{lemms} and \ref{lemms2}, there exists $\varepsilon_c>0$ such that for each $0<\varepsilon<\varepsilon_c$, there exist nontrivial critical points $U_{M,\varepsilon}$ for $I_\varepsilon$ at level $c_{M,\varepsilon}$ and $V_{0,\varepsilon}$ for $I_\varepsilon$ at level $c_{0,\varepsilon}$. In the end, we claim $U_{M,\varepsilon}\neq V_{0,\varepsilon}$. Suppose by contradiction that $U_{M,\varepsilon}\equiv V_{0,\varepsilon}$, then $0>c_{0,\varepsilon}=\lim_{n\rightarrow\infty}I_\varepsilon(V_n)=I_\varepsilon(V_{0,\varepsilon})
=I_\varepsilon(U_{M,\varepsilon})=\lim_{n\rightarrow\infty}I_\varepsilon(U_n)=c_{M,\varepsilon}>0$, which is absurd. Thus, the proof of Theorem \ref{thm1.3} is complete.
\qed
\noindent{\bfseries Acknowledgements.}
The authors have been supported by NSFC 11971392, Natural Science Foundation of Chongqing, China cstc2019jcyjjqX0022 and Fundamental Research
Funds for the Central Universities XDJK2019TY001.
\end{document} |
\begin{document}
\vspace*{1cm}
\title{Ancilla Assisted Quantum State Tomography in Many-Qubit Registers}
\author{Abhishek Shukla$^1$, K. Rama Koteswara Rao$^2$, and T. S. Mahesh$^1$}
\email{mahesh.ts@iiserpune.ac.in}
\affiliation{$ ^1 $Department of Physics and NMR Research Center,\\
Indian Institute of Science Education and Research, Pune 411008, India \\
$ ^2 $Department of Physics and NMR Research Centre,
Indian Institute of Science, Bangalore, India}
\begin{abstract}
{
The standard method of Quantum State Tomography (QST) relies on the
measurement of a set of noncommuting observables, realized in
a series of independent experiments. Ancilla Assisted QST (AAQST) proposed
by Nieuwenhuizen and co-workers
(Phys. Rev. Lett., {\bf 92}, 120402 (2004))
greatly reduces the number of independent
measurements by exploiting an ancilla register in a known initial state.
In suitable conditions AAQST allows mapping out
density matrix of an input register in a single experiment.
Here we describe methods for explicit construction of
AAQST experiments in multi-qubit registers.
We also report nuclear magnetic resonance studies on
AAQST of (i) a two-qubit input register using a one-qubit ancilla
in an isotropic liquid-state system and (ii) a three-qubit input register
using a two-qubit ancilla register
in a partially oriented system. The experimental results confirm the
effectiveness of AAQST in such many-qubit registers.
}
\end{abstract}
\keywords{state tomography, ancilla register, density matrix tomography}
\pacs{03.67.Lx, 03.67.Ac, 03.65.Wj, 03.65.Ta}
\maketitle
\section{Introduction}
Quantum computers have the potential to carry out certain computational tasks
with an efficiency that is beyond the reach of their classical
counterparts \cite{chuangbook}. In practice however, harnessing the computational
power of a quantum system has been an enormously challenging task
\cite{exptqipbook}.
The difficulties include imperfect control on the quantum dynamics
and omnipresent interactions between the quantum system and its
environment leading to an irreversible loss of quantum coherence.
In order to optimize the control fields and to understand the effects
of environmental noise, it is often necessary to completely characterize the quantum state.
In experimental quantum information studies,
Quantum State Tomography (QST) is an important tool that is routinely used
to characterize an instantaneous quantum state \cite{chuangbook}.
QST on an initial state is usually carried out to confirm the efficiency of
initialization process. Though QST of the final state is usually not
part of a quantum algorithm, it allows one
to measure the fidelity of the output state.
QSTs in intermediate stages often help experimentalists
to tune-up the control fields better.
QST can be performed by a series of measurements of
noncommuting observables which together enable one to reconstruct
the complete complex density matrix. In the standard method,
the required number of independent experiments grows
exponentially with the number of input qubits \cite{ChuangPRSL98,ChuangPRA99}.
Anil Kumar and co-workers have illustrated QST using a single
two-dimensional NMR spectrum \cite{Aniltomo}. They showed that a two-dimensional
NMR experiment consisting of a series of identical measurements with systematic
increments in evolution time, can be used to quantitatively estimate all
the elements of the density matrix.
Later Nieuwenhuizen and co-workers have shown that
it is possible to reduce the number of independent experiments
in the presence of an ancilla register initialized to a known
state \cite{Nieuwenhuizen}.
They pointed out that in suitable situations, it is possible to
carry-out QST with a single measurement of a set of factorized
observables. We refer to this method as Ancilla Assisted QST (AAQST).
This method was experimentally illustrated by Suter
and co-workers using a single input qubit and a single ancilla
qubit \cite{sutertomo}. Recently Peng and coworkers have studied the effectiveness
of the method for qutrit-like systems using numerical simulations
\cite{pengtomo}.
Single shot mapping of density matrix by AAQST method not only
reduces the experimental time, but also alleviates the need to
prepare the target state several times. Often slow variations
in system Hamiltonian may result in systematic errors in
repeating the state preparation. Further, environmental noises
lead to random errors in multiple preparations.
These errors play important roles in the quality of the
reconstruction of the target state. Therefore AAQST has the
potential to provide a more reliable way of tomography.
In this article we first revisit the theory of QST and AAQST and provide
methods for explicit construction of the constraint matrices,
which will allow extending the tomography procedure for large registers.
An important feature of the method described here is that
it requires only global rotations and short evolutions under the collective
internal Hamiltonian.
We also describe nuclear magnetic resonance (NMR) demonstrations
of AAQST on two different types of systems:
(i) a two-qubit input register using a one-qubit ancilla in an isotropic liquid-state system
and (ii) a three-qubit input register using a two-qubit ancilla register
in a partially oriented system.
In the following section we briefly describe the theory of QST and AAQST.
In section III we describe experimental demonstrations and finally we conclude in section IV.
\section{Theory}
\subsection{Quantum State Tomography}
We consider an $n$-qubit register
formed by a system of $n$ mutually interacting spin-1/2 nuclei
with distinct resonance frequencies $\omega_i$ and mutual
interaction frequencies $2\pi J_{ij}$.
The Hamiltonian under weak-interaction limit ($2 \pi J_{ij}\ll \vert \omega_i-\omega_j \vert$)
consists of the Zeeman part and spin-spin interaction part, i.e.,
\begin{eqnarray}
{\cal H} = -\sum\limits_{i=1}^{n}\omega_i \sigma_z^i /2 +
\sum\limits_{i=1}^{n}\sum\limits_{j=i+1}^{n} 2\pi J_{ij} \sigma_z^i \sigma_z^j /4
\label{ham}
\end{eqnarray}
respectively, where $\sigma_{z}^i$ and $\sigma_{z}^j$ are the $z$-components of Pauli operators
of $i$th and $j$th qubits \cite{cavanagh}.
The set of $N=2^n$ eigenvectors $\{ \vert m_1 m_2 \cdots m_n \rangle \}$
of the Zeeman Hamiltonian form a complete orthonormal computational basis.
We can order the eigenvectors based on the decimal value $m$
of the binary string $(m_1 \cdots m_n)$, i.e., $m = m_1 2^{n-1}+\cdots+m_n 2^0$.
The general density matrix can be decomposed as
$\mathbbm{1}/N+\epsilon \rho$ where the identity part is known as the background,
the trace-less part $\rho$ is known as the \textit{deviation density matrix},
and the dimensionless constant $\epsilon$ is the purity factor
\cite{corypps}.
In this context, QST refers to complete characterization of the deviation density
matrix, which can be expanded in terms of $N^2-1$ real unknowns:
\begin{eqnarray}
\rho &=&
\sum\limits_{m=0}^{N-2} \rho_{mm}(\vert m \rangle \langle m \vert -\vert N-1 \rangle \langle N-1 \vert) \nonumber \\
&&+ \sum_{m=0}^{N-2}\sum_{m'=m+1}^{N-1} \{
R_{mm'}(\vert m \rangle \langle m' \vert+\vert m' \rangle \langle m \vert)\nonumber \\
&&~~~~~~~~~~~~~~~~+ iS_{mm'}(\vert m \rangle \langle m' \vert-\vert m' \rangle \langle m \vert)
\}.
\label{dmm}
\end{eqnarray}
Here first part consists of $N-1$ diagonal unknowns $\rho_{mm}$ with
the last diagonal element $\rho_{N-1,N-1}$ being constrained by the trace-less condition.
$R$ and $S$
each consisting of $(N^2-N)/2$ unknowns correspond to real and imaginary
parts of the off-diagonal elements respectively.
Thus a total of $N^2-1$ real unknowns needs to be determined.
Usually an experimental technique allows a particular set of observables
to be measured directly.
To explain the NMR case, we introduce $n$-bit binary strings,
$j_\nu = \nu_1 \nu_2 \cdots \nu_{j-1} 0 \nu_{j} \cdots \nu_{n-1}$ and
$j'_{\nu} = \nu_1 \nu_2 \cdots \nu_{j-1} 1 \nu_{j} \cdots \nu_{n-1}$
differed only by the flip of the $j$th bit.
Here $\nu = \nu_1 2^{n-2} + \nu_2 2^{n-3} + \cdots + \nu_{n-1} 2^0$ is the
value of the $n-1$ bit binary string $(\nu_1,\nu_2,\cdots,\nu_{n-1})$
and $\nu$ can take a value between $0$ and $\gamma = N/2-1$.
The real and imaginary parts of an NMR signal recorded in a
quadrature mode corresponds to the expectation values of
transverse magnetization observables $\sum\limits_{j=1}^{n}\sigma_{jx}$ and
$\sum\limits_{j=1}^{n}\sigma_{jy}$ respectively \cite{cavanagh}.
The background part of the density matrix neither evolves under
unitaries nor gives rise to any signal, and therefore we ignore it.
Under suitable conditions (when all the transitions are resolved),
a single spectrum directly yields $nN$
matrix elements $\{R_{j_\nu,j_\nu'},S_{j_\nu,j_\nu'}\}$
as complex intensities of spectral lines. These matrix elements
are often referred to as single quantum elements since they
connect eigenvectors related by the flip of a single qubit.
We refer the single-quantum terms $R_{j_\nu,j_\nu'}$
and $S_{j_\nu,j_\nu'}$ respectively
as the real and imaginary parts of $\nu$th spectral line of $j$th qubit.
Thus a single spectrum of an $n$-qubit system in an
arbitrary density matrix can yield $n N$ real unknowns.
In order to quantify the remaining elements,
one relies on multiple experiments all starting from the same initial state
$\rho$. The $k$th experiment consists of applying a unitary $U_k$
to the state $\rho$, leading to $\rho^{(k)} = U_k \rho U_k^\dagger$, and measuring
the single-quantum spectrum $\{R_{j_\nu,j_\nu'}^{(k)},S_{j_\nu,j_\nu'}^{(k)}\}$.
From eqn. (\ref{dmm}) we obtain
\begin{eqnarray}
&&R^{(k)}_{j_\nu,j_\nu'}
= \sum\limits_{m} a_{j\nu}^{(k)}(m) \rho_{mm} +
\nonumber \\
&& ~~~~~~~ \sum\limits_{m,m'>m}c_{j\nu}^{(k)}(m,m') R_{mm'} + e_{j\nu}^{(k)}(m,m') S_{mm'},
\nonumber \\
&&S^{(k)}_{j_\nu,j_\nu'}
= \sum\limits_{m} b_{j\nu}^{(k)}(m) \rho_{mm} +
\nonumber \\
&& ~~~~~~~ \sum\limits_{m,m'>m} d_{j\nu}^{(k)}(m,m') R_{mm'} + f_{j\nu}^{(k)}(m,m') S_{mm'},
\label{leq}
\end{eqnarray}
in terms of the unknowns $\rho_{mm'}$ and the known real constants $\{a, \cdots, f\}$:
\begin{eqnarray}
a_{j\nu}^{(k)}(m,m) + ib_{j\nu}^{(k)}(m,m) &=&
\langle j_\nu \vert U_k \vert m \rangle \langle m \vert U_k^\dagger \vert j'_\nu\rangle-
\nonumber \\
&&\langle j_\nu \vert U_k \vert N-1 \rangle \langle N-1 \vert U_k^\dagger \vert j'_\nu\rangle,
\nonumber \\
c_{j\nu}^{(k)}(m,m') +i d_{j\nu}^{(k)}(m,m')&=&
\langle j_\nu \vert U_k \vert m \rangle \langle m' \vert U_k^\dagger \vert j'_\nu\rangle +
\nonumber \\
&&\langle j_\nu \vert U_k \vert m' \rangle \langle m \vert U_k^\dagger \vert j'_\nu\rangle,
\nonumber \\
e_{j\nu}^{(k)}(m,m')+if_{j\nu}^{(k)}(m,m') &=&
i\langle j_\nu \vert U_k \vert m \rangle \langle m' \vert U_k^\dagger \vert j'_\nu\rangle -
\nonumber \\
&&i\langle j_\nu \vert U_k \vert m' \rangle \langle m \vert U_k^\dagger \vert j'_\nu\rangle
\end{eqnarray}
\cite{maheshtomo}.
After $K$ experiments, we can setup the matrix equation
\begin{eqnarray}
M
\left[
\begin{array}{c}
\rho_{0,0} \\
\cdots \\
\rho_{N-2,N-2} \\
-------- \\
R_{0,1} \\
\cdots \\
R_{0,N-1} \\
\cdots \\
R_{m,m'>m} \\
\cdots \\
R_{N-2,N-1} \\
-------- \\
S_{0,1} \\
\cdots \\
S_{0,N-1} \\
\cdots \\
S_{m,m'>m} \\
\cdots \\
S_{N-2,N-1} \\
\end{array}
\right]
=
\left[
\begin{array}{c}
R^{(1)}_{1_0,1_0'} \\
\cdots \\
R^{(1)}_{1_\gamma,1_\gamma'} \\
R^{(1)}_{2_0,2_0'} \\
\cdots \\
\cdots \\
R^{(K)}_{n_\gamma,n_\gamma'} \\
-------\\
S^{(1)}_{1_0,1_0'} \\
\cdots \\
S^{(1)}_{1_\gamma,1_\gamma'} \\
S^{(1)}_{2_0,2_0'} \\
\cdots \\
\cdots \\
S^{(K)}_{n_\gamma,n_\gamma'} \\
\end{array}
\right].
\label{meq}
\end{eqnarray}
Here the left column vector is formed by the $N^2-1$ unknowns of $\rho$:
diagonal elements in the top, real off-diagonals in the middle,
and imaginary off-diagonals in the bottom.
The right column vector is formed by $KnN$ numbers -
the real and imaginary parts of the experimentally obtained
spectral intensities ordered according to the value of the
binary string $\nu$, the qubit number $j$, and the experiment number $k$.
The $KnN\times(N^2-1)$ dimensional constraint matrix is of the form
\begin{eqnarray}
&&M = \nonumber \\
&&\left[
\begin{array}{c c|c c|c c}
a_{1,0}^{(1)}(0,0) & \cdots & c_{1,0}^{(1)}(m,m') & \cdots & e_{1,0}^{(1)}(m,m') & \cdots \\
\cdots &\cdots &\cdots &\cdots &\cdots &\cdots \\
a_{1,\gamma}^{(1)}(0,0) & \cdots & c_{1,\gamma}^{(1)}(m,m') & \cdots & e_{1,\gamma}^{(1)}(m,m') & \cdots \\
\cdots &\cdots &\cdots &\cdots &\cdots &\cdots \\
a_{n,0}^{(1)}(0,0) & \cdots & c_{n,0}^{(1)}(m,m') & \cdots & e_{n,0}^{(1)}(m,m') & \cdots \\
\cdots &\cdots &\cdots &\cdots &\cdots &\cdots \\
\cdots &\cdots &\cdots &\cdots &\cdots &\cdots \\
a_{n\gamma}^{(K)}(0,0) & \cdots & c_{n\gamma}^{(K)}(m,m') & \cdots & e_{n\gamma}^{(K)}(m,m') & \cdots \\
\hline
b_{1,0}^{(1)}(0,0) & \cdots & d_{1,0}^{(1)}(m,m') & \cdots & f_{1,0}^{(1)}(m,m') & \cdots \\
\cdots &\cdots &\cdots &\cdots &\cdots &\cdots \\
b_{1,\gamma}^{(1)}(0,0) & \cdots & d_{1,\gamma}^{(1)}(m,m') & \cdots & f_{1,\gamma}^{(1)}(m,m') & \cdots \\
\cdots &\cdots &\cdots &\cdots &\cdots &\cdots \\
b_{n,0}^{(1)}(0,0) & \cdots & d_{n,0}^{(1)}(m,m') & \cdots & f_{n,0}^{(1)}(m,m') & \cdots \\
\cdots &\cdots &\cdots &\cdots &\cdots &\cdots \\
\cdots &\cdots &\cdots &\cdots &\cdots &\cdots \\
b_{n\gamma}^{(K)}(0,0) & \cdots & d_{n\gamma}^{(K)}(m,m') & \cdots & f_{n\gamma}^{(K)}(m,m') & \cdots \\
\end{array}
\right]\nonumber .\\
\label{mmat}
\end{eqnarray}
Note that each column of the constraint matrix corresponds to
contribution of a particular unknown element of $\rho$ to
the various spectral intensities.
\begin{center}
\begin{figure}
\caption{Minimum number of independent experiments required for QST
(with zero ancilla) and AAQST.
}
\label{exptscaling}
\end{figure}
\end{center}
By choosing the unitaries $\{U_k\}$ such that
$\mathrm{rank} (M) \ge N^2-1$ (the number of unknowns),
eqn. (\ref{meq}) can be solved either by singular value
decomposition or by Gaussian elimination method
\cite{maheshtomo}.
Fig. \ref{exptscaling} illustrates the minimum number ($K$)
of experiments required for QST. As anticipated, $K$ increases
rapidly as $O(N/n)$ with the number of input qubits.
In the following we describe how it is possible to speed-up QST,
in the presence of an ancilla register, with fewer experiments.
\subsection{Ancilla Assisted QST (AAQST)}
Suppose the input register of $n$-qubits is associated with an
ancilla register consisting of $\hat{n}$ qubits.
The dimension of the combined system of $\tilde{n} = n+\hat{n}$ qubits
is $\tilde{N} = N\hat{N}$, where $\hat{N} = 2^{\hat{n}}$.
For simplicity we assume that each qubit interacts sufficiently with all other qubits so as to obtain a completely resolved spectrum yielding $\tilde{n}\tilde{N}$ real parameters.
The following method is applicable even if there are spectral overlaps,
albeit with lower efficiency (i.e., with higher number $(K)$ of minimum
experiments). Further for simplicity,
we assume that the ancilla register
begins with the maximally mixed initial state, with no contribution
to the spectral lines from it.
Otherwise, we need to add the contribution of the ancilla to
the final spectrum and the eqn. (\ref{meq}) will become inhomogeneous.
As explained later in the experimental
section, initialization of maximally mixed state can be achieved with high precision.
Thus the deviation density matrix of the combined system is $\tilde{\rho} = \rho \otimes \mathbbm{1}/\hat{N}$.
Now applying only local unitaries neither leads to ancilla coherences
nor transfers any of the input coherences to ancilla.
Therefore we consider applying a non-local unitary
exploiting the input-ancilla interaction,
\begin{eqnarray}
\tilde{U}_k = V \sum\limits_{a=0}^{\hat{N}-1} U_{ka} \otimes \vert a \rangle \langle a \vert,
\end{eqnarray}
where $U_{ka}$ is the $k$th unitary on the input register dependent
on the ancilla state $\vert a \rangle$ and $V$ is the local
unitary on the ancilla.
The combined state evolves to
\begin{eqnarray}
&& \tilde{\rho}^{(k)} = \tilde{U}_k \tilde{\rho} \tilde{U}_k^\dagger \nonumber \\
&& = \frac{1}{\hat{N}}\sum\limits_{m,m',a} \rho_{mm'} U_{ka} \vert m \rangle \langle m' \vert U_{ka}^\dagger
\otimes V \vert a \rangle \langle a \vert V^\dagger.
\end{eqnarray}
We now record the spectrum of the combined system corresponding
to the observable $\sum\limits_{j=1}^{\tilde{n}} \sigma_{jx}+i\sigma_{jy}$.
Each spectral line can again be expressed in terms of the unknown
elements of the input-register density matrix in the form given in eqn. (\ref{leq}).
The spectrum of the combined system yields $\tilde{n}\tilde{N}$ linear
equations. The minimum number of independent experiments
needed is now $O(N^2/(\tilde{n}\tilde{N}))$.
Since we can choose $\tilde{N} \gg N$, AAQST needs
fewer than O($N/n$) experiments required in the standard QST.
In particular, when $\tilde{n}\tilde{N} \ge N^2$, a single
optimized unitary suffices for QST.
Fig. \ref{exptscaling} illustrates the minimum number ($K$)
of experiments required for various sizes of input and ancilla
registers. As illustrated, QST can be achieved with only one
experiment, if an ancilla of sufficient size is provided.
\subsection{Building the constraint matrix}
The major numerical procedure in AAQST is obtaining the constraint matrix $M$.
For calculating the constraint coefficients $c_{rj}^{(k)}$,
one may utilize an elaborate decomposition of $U_k$
using numerical or analytical methods.
Alternatively, as described below, we can use a simple algorithmic approach to
construct the constraint matrix.
First imagine a diagonal state $\rho$ for the input register
(eqn. (\ref{dmm})) with $\rho_{00}=1$ and $\rho_{mm}=0$ for all
other $1 \le m \le N-2$, $R_{mm'}=S_{mm'}=0$.
Applying the unitary $U_k$ on the composite deviation density matrix
$\tilde{\rho} = \rho \otimes \mathbbm{1}/\hat{N}$, we obtain all the
spectral intensities (using eqn. (\ref{leq}))
\begin{eqnarray}
a^{k}_{j\nu}(0,0) = R^{(k)}_{j\nu,j\nu'}, ~
b^{k}_{j\nu}(0,0) = S^{(k)}_{j\nu,j\nu'}.
\end{eqnarray}
Thus the spectral lines indicate the contributions only from $\rho_{00}$
(and $\rho_{N-1,N-1}$).
Repeating
the process with all the unitaries $\{U_k\}$ yields
the first column in $M$ matrix (eqn. (\ref{mmat})) corresponding to the unknown $\rho_{00}$.
The same procedure can be used for all the diagonal elements $\rho_{mm}$ with
$0 \le m \le N-2$.
To determine $M$ matrix column corresponding to a
real off-diagonal unknown $R_{mm'}$,
we start with an input-register density matrix
$R_{mm'} = 1$ and all other elements
set to zero. Again by applying the unitary
$U_k$ on the composite density matrix, and
using eqn. (\ref{leq}) we obtain
\begin{eqnarray}
c^{k}_{j\nu}(m,m') = R^{(k)}_{j\nu,j\nu'}, ~
d^{k}_{j\nu}(m,m') = S^{(k)}_{j\nu,j\nu'}.
\end{eqnarray}
Repeating the process with all unitaries $\{U_k\}$
determines the column of $M$ corresponding to the unknown $R_{mm'}$.
To determine $M$ matrix column corresponding to
an imaginary off-diagonal unknown $S_{mm'}$,
we set $S_{mm'} = 1$ and all other elements to zero,
and apply $U_k$ on the composite state to obtain
\begin{eqnarray}
e^{k}_{j\nu}(m,m') = R^{(k)}_{j\nu,j\nu'}, ~
f^{k}_{j\nu}(m,m') = S^{(k)}_{j\nu,j\nu'}.
\end{eqnarray}
Proceeding this way, by selectively setting the unknowns
one by one, the complete constraint matrix can be built easily.
\subsection{Optimization of Unitaries}
Solving the matrix equation (\ref{meq}) requires that
$\mathrm{rank}(M) \ge N^2-1$, the number of unknowns.
But having the correct rank is not sufficient.
The matrix $M$ must be well conditioned in order to ensure that small
errors in the observed intensities $\{R^{(k)}_{j\nu,j\nu'},S^{(k)}_{j\nu,j\nu'}\}$
do not contribute to large errors in the values of the elements $\rho_{mm'}$.
The quality of
the constraint matrix can be measured by a scalar quantity called condition number
$C(M)$ defined as the ratio of the largest singular
value of $M$ to the smallest \cite{bau}. The smaller the value of $C(M)$, the better
conditioned the constraint matrix $M$ is for solving the unknowns. Thus the condition
number provides a convenient scalar quantity to optimize the set $\{U_k\}$
of unitaries to be selected for QST. As explained in the experimental
section, we used a simple unitary model $U_1(\tau_1,\tau_2)$ as an initial
guess and used genetic algorithm to minimize the condition number and
optimize the parameters $(\tau_1,\tau_2)$.
The necessary number ($K$) of independent experiments
is decided by the rank of the constraint matrix and the desired precision.
The rank condition requires that $KnN \ge N^2-1$. Introducing
additional experiments renders the problem over-determined, thus
reducing the condition number and increasing the precision.
In the following section
we describe the experimental results of AAQST for registers with (i) $n=2,\hat{n}=1,\tilde{n}=3$ and
(ii) $n=3,\hat{n}=2,\tilde{n}=5$ respectively.
\section{Experiments}
We report experimental demonstrations of AAQST on two
spin-systems of different sizes and environments.
In each case, we have chosen two density matrices for tomography.
All the experiments described below are carried out on a Bruker 500 MHz
spectrometer at an ambient temperature of 300 K
using high-resolution nuclear magnetic resonance techniques.
\begin{center}
\begin{figure}
\caption{Molecular structure of iodotrifluoroethylene, and
the table of Hamiltonian parameters in Hz:
chemical shifts (diagonal elements) and J-coupling constants
(off-diagonal elements).
}
\label{fffmol}
\end{figure}
\end{center}
\subsection{Two-qubit input, One-qubit ancilla}
Here we use three spin-1/2 $^{19}$F nuclei of
iodotrifluoroethylene (C$_2$F$_3$I) dissolved in acetone-D$_6$
as a 3-qubit system.
The molecular structure and the Hamiltonian parameters are shown
in Fig. \ref{fffmol}.
As can be seen in Fig.~\ref{fffres}, all the 12 transitions
of this system are clearly resolved.
We have chosen $F_1$ as the ancilla
qubit and $F_2$ and $F_3$ as the input qubits.
QST was performed for two different density matrices
(i) thermal equilibrium state, i.e.,
$\rho_1 = \frac{1}{2} \left( \sigma_z^2+\sigma_z^3 \right)$,
and
(ii) state after a $(\pi/4)_{\pi/4}$ pulse applied
to the thermal equilibrium state, i.e.,
$
\rho_2 =
\frac{1}{2}\left(\sigma_x^2+\sigma_x^3\right)
-\frac{1}{2}\left(\sigma_y^2+\sigma_y^3\right)
+\frac{1}{\sqrt{2}}\left(\sigma_z^2+\sigma_z^3\right)
$.
In both the cases, the first qubit was initialized
into a maximally mixed state by applying a selective $(\pi/2)_y$
pulse on $F_1$ and followed by a strong pulsed-field-gradient (PFG)
in the $z$-direction.
The selective pulse was realized by GRAPE technique \cite{Khaneja}.
\begin{center}
\begin{figure}
\caption{
AAQST results for
thermal equilibrium state $\rho_1$ (left column), and that of state $\rho_2$
(right column), described in the text.
The reference spectrum is in the top trace.
The spectra
corresponding to the real part ($R_{j\nu,j\nu'}$) and the imaginary part ($S_{j\nu,j\nu'}$), obtained in a single-shot AAQST experiment, are shown in the lower traces.
\label{fffres}
\end{figure}
\end{center}
AAQST of each of the above density matrices required just one
unitary evolution followed by the measurement of complex NMR signal.
We modelled the AAQST unitary as follows:
$U_1 = \left(\frac{\pi}{2}\right)_y U_\mathrm{int}(\tau_2)
\left(\frac{\pi}{2}\right)_x U_\mathrm{int}(\tau_1)$,
where $U_\mathrm{int}(\tau) = \exp\left(-i{\cal H}\tau\right)$ is
the unitary operator for evolution under the internal Hamiltonian
${\cal H}$ (see eqn. (\ref{ham})) for a time $\tau$, and $\left(\frac{\pi}{2}\right)$ rotations
are realized by non selective radio frequency pulses applied to all the spins
along the directions indicated by the subscripts.
The constraint matrix $M$ had 15 columns corresponding
to the unknowns and 24 rows corresponding to the real and imaginary
parts of the 12 spectral lines.
Only the durations
$\left\{\tau_1,\tau_2 \right\}$ needed to be optimized to minimize the condition
number $C(M)$. We used a genetic algorithm for the optimization and obtained
$C(M) = 17.3$ for $\tau_1 = 6.7783$ ms and $\tau_2 = 8.0182$ ms.
The real and imaginary parts of the single shot experimental AAQST spectrum,
along with the reference spectrum,
are shown in the top part of Fig. \ref{fffres}. The intensities
$\{R_{j\nu,j\nu'}^{(1)},S_{j\nu,j\nu'}^{(1)}\}$
were obtained by simple curve-fit routines, and the matrix eqn. (\ref{meq})
was solved to obtain all the unknowns. The reconstructed density matrices
along with the theoretically expected ones are shown below the spectra in
Fig. \ref{fffres}.
The fidelities of experimental states with
the theoretically expected states ($\rho_1$ and $\rho_2$)
are respectively 0.998 and 0.990. The high fidelities indicated successful
AAQST of the prepared states.
\begin{center}
\begin{figure}
\caption{Molecular structure of 1-bromo-2,4,5-trifluorobenzene, and
the table of Hamiltonian parameters in Hz:
chemical shifts (diagonal elements) and effective
coupling constants (J+2D)(off-diagonal elements).
}
\label{btfbzmol}
\end{figure}
\end{center}
\subsection{Three-qubit input, Two-qubit ancilla}
We use three $^{19}$F nuclei and two $^1$H nuclei
of 1-bromo-2,4,5-trifluorobenzene partially oriented
in a liquid crystal namely, N-(4-methoxybenzaldehyde)-4-
butylaniline (MBBA). Due to the partial orientational
order, the direct spin-spin interaction (dipolar interaction)
does not get fully averaged out, but gets scaled down
by the order parameter \cite{dongbook}. The chemical shifts and the
strengths of the effective couplings are shown in
Fig. \ref{btfbzmol}. As is evident, the partially oriented
system can display stronger and longer-range coupling
network leading to a larger register. Here we choose
the three $^{19}$F nuclei forming the input register
and two $^1$H nuclei forming the ancilla register.
The Hamiltonian for the heteronuclear dipolar interaction
(between $^1$H and $^{19}$F)
has an identical form as that of J-interaction \cite{dongbook}.
The homonuclear dipolar couplings
(among $^{19}$F, as well as among $^{1}$H nuclei) were small
compared to their chemical shift differences enabling us to approximate
the Hamiltonian in the form of eqn. (\ref{ham}).
\begin{center}
\begin{figure}
\caption{
AAQST results for
thermal equilibrium state, i.e., $(\sigma_z^1+\sigma_z^2+\sigma_z^3)/2$.
The reference spectrum is in the top trace. The spectra
corresponding to the real part ($R_{j\nu,j\nu'}$) and the imaginary part ($S_{j\nu,j\nu'}$), obtained in a single-shot AAQST experiment, are shown in the lower traces.
\label{btfbzres1}
\end{figure}
\end{center}
\begin{center}
\begin{figure}
\caption{
AAQST results for the state $\rho_2$ described in the text.
The reference spectrum is in the top trace. The
real (middle trace) and the imaginary spectra (bottom trace)
are obtained in a single shot AAQST experiment.
The bar plots correspond to theoretically expected
states (top row) and those obtained from AAQST
experiments (bottom row). Fidelity of the AAQST state is 0.95.
}
\label{btfbzres2}
\end{figure}
\end{center}
The partially oriented spin-system yields all the 80 transitions
sufficiently resolved.
Again we use just one experiment for the complete AAQST of the 3-qubit
input register.
We modelled the AAQST unitary in a similar way as before:
$U_1 = \left(\frac{\pi}{2}\right)_x U_\mathrm{int}(\tau_2)
\left(\frac{\pi}{2}\right)_x U_\mathrm{int}(\tau_1)$
where $U_\mathrm{int}(\tau) = \exp\left(-i{\cal H}\tau\right)$ is
the unitary operator for evolution under the internal Hamiltonian
${\cal H}$ (see eqn. (\ref{ham})) for a time $\tau$, and $\left(\frac{\pi}{2}\right)_x$ are
global x-rotations. The constraint matrix $M$ had 63 columns corresponding
to the unknowns and 160 rows corresponding to the real and imaginary
parts of 80 spectral lines.
After optimizing the durations by minimizing the condition number
using a genetic algorithm, we obtained $C(M) = 14.6$
for $\tau_1 = 431.2 ~\upmu$s and $\tau_2 = 511.5 ~\upmu$s.
Again we study AAQST on two states:
(i) Thermal equilibrium of the $^{19}$F spins: $\rho_1 = (\sigma_z^1+\sigma_z^2+\sigma_z^3)/2$,
and
(ii) a random density matrix $\rho_2$ obtained by applying unitary
$
U_0 = \left(\frac{\pi}{2}\right)_x^{F} \tau_0 (\pi)_x^{H} \tau_0 \left(\frac{\pi}{2}\right)_y^{F_1},
$ with $\tau_0 = 2.5$ ms, on thermal equilibrium state, i.e., $\rho_2 = U_0 \rho_1 U_0^\dagger$.
In both the cases, we initialize the ancilla i.e., the $^1$H qubits on to
a maximally mixed state by first applying a $(\pi/2)^{H}$ pulse followed
by a strong PFG in the $z$-direction.
The real and imaginary parts of the single shot AAQST spectra,
along with the reference spectra, are shown in
Figs. \ref{btfbzres1} and \ref{btfbzres2} respectively.
Again the line intensities
$\{R^{(1)}_{j\nu,j\nu'},S^{(1)}_{j\nu,j\nu'}\}$ are obtained by curve-fitting, and all the 63
unknowns of the 3-qubit deviation density matrix are obtained by solving the
matrix eqn. (\ref{meq}). The reconstructed density matrices
along with the theoretically expected states ($\rho_1$ and $\rho_2$)
are shown below the spectra
in Figs. \ref{btfbzres1} and \ref{btfbzres2}.
The fidelities of experimental states with
the theoretically expected states ($\rho_1$ and $\rho_2$)
are respectively 0.98 and 0.95. The lower fidelity in the latter case
is mainly due to the imperfections in the preparation of the target state
$\rho_2$. The overall poorer performance in the liquid crystal system
is due to the lower fidelities of the QST pulses, spatial and temporal
variations of solute order-parameter, and stronger decoherence rates
compared to the isotropic case. In spite of these difficulties,
the three-qubit density matrix with 63 unknowns could be
estimated quantitatively through a single NMR experiment.
\section{Conclusions}
Quantum state tomography is an important part of experimental
studies in quantum information processing. The standard method
involves a large number of independent measurements to reconstruct
a density matrix. The ancilla-assisted quantum state tomography
introduced by Nieuwenhuizen and co-workers allows complete reconstruction
of complex density matrix with fewer experiments by letting the
unknown state of the input register to interact with an ancilla
register initialized in a known state.
Ancilla registers are essential in many of the quantum algorithms.
Usually, at the end of the quantum algorithms,
ancilla is brought to a state which is separable with the input
register. The same ancilla register which is used for computation
can be utilized for tomography after the computation.
The ancilla register can be prepared into a maximally mixed state
by dephasing all the coherences and equalizing the populations.
We provided methods for
explicit construction of tomography matrices in large registers.
We also discussed the optimization of tomography
experiments based on minimization of the condition number of the
constraint matrix. Finally, we demonstrated the experimental
ancilla-assisted quantum state tomography in two systems:
(i) a system with two input qubits and one ancilla qubit in an isotropic medium
and (ii) a system with three input qubits and two ancilla qubits
in a partially oriented medium. In both the cases, we
successfully reconstructed the target density matrices
with a single quadrature detection of transverse magnetization.
The methods introduced in this work should be useful for extending
the range of quantum state tomography to larger registers.
\end{document} |
\begin{document}
\title{
{Limited-control metrology approaching the Heisenberg limit without entanglement preparation}}
\author{Benedikt Tratzmiller}
\affiliation{Institut f\"ur Theoretische Physik and IQST, Albert-Einstein-Allee 11, Universit\"at Ulm, D-89081 Ulm, Germany}
\author {Qiong Chen}
\affiliation{Institut f\"ur Theoretische Physik and IQST, Albert-Einstein-Allee 11, Universit\"at Ulm, D-89081 Ulm, Germany}
\affiliation{College of Physics and Electronics, Hunan Normal University, Changsha 410081, China}
\author {Ilai Schwartz}
\affiliation{Institut f\"ur Theoretische Physik and IQST, Albert-Einstein-Allee 11, Universit\"at Ulm, D-89081 Ulm, Germany}
\affiliation{NVision Imaging Technologies GmbH, Albert-Einstein-Allee 11, Universit\"at Ulm, D-89081 Ulm, Germany}
\author {Susana F. Huelga}
\affiliation{Institut f\"ur Theoretische Physik and IQST, Albert-Einstein-Allee 11, Universit\"at Ulm, D-89081 Ulm, Germany}
\author {Martin B. Plenio}
\affiliation{Institut f\"ur Theoretische Physik and IQST, Albert-Einstein-Allee 11, Universit\"at Ulm, D-89081 Ulm, Germany}
\date{\today}
\begin{abstract}
{Current metrological bounds typically assume full control over all particles that are involved in the protocol.
Relaxing this assumption we study metrological performance when} only limited control is available. As an example,
{we measure} a static magnetic field when a fully controlled quantum sensor is supplemented by particles
over which only global control is possible. We show that even {for a noisy quantum sensor, a protocol that maps the magnetic field to a precession frequency can achieve transient super-Heisenberg scaling} {in measurement time
and Heisenberg scaling in the particle number. This leads to} an {estimation uncertainty} that approaches
that {achievable under} full control to within a factor independent of the particle number {for a given
total time}. {Applications to hybrid sensing devices and the crucial role of the quantum character of the
sensor are discussed.}
\end{abstract}
\maketitle
\section{Introduction} The use of quantum resources in sensing and metrology has a longstanding
history which originated with the use of single-mode squeezed states \cite{Caves81}
and multi-particle spin-squeezing \cite{WinelandBI+92,WinelandBI+94}, i.e., entanglement,
{to enhance precision in interferometry and atomic spectroscopy.}
The {goal of quantum metrology} is the optimisation of the scaling of metrological
precision with the available physical resources {\cite{Giovannetti, Braun}}. Notably,
in a noiseless setting, independent preparation and measurement of {$M$ particles in
parallel results} in a {$1/\sqrt{M}$} scaling of the uncertainty{, the so called
standard quantum limit (SQL)}, while the collective preparation of the particles in
an entangled state leads to a {$1/M$}-scaling, commonly referred to as Heisenberg
scaling (HS) \cite{WinelandBI+92,WinelandBI+94} {(see \cite{hyllus2012fisher, toth2013extremal}
for more general upper bounds obtained via the quantum Fisher information). The use of
entangled states is necessary to achieve the optimal precision and exact
HS but sequences of probe states with an asymptotically vanishing amount
of entanglement can reach a scaling arbitrarily close to the Heisenberg limit (HL)
\cite{augusiak2016asymptotic}.} Environmental noise is known to have a non-trivial
impact on metrology \cite{HuelgaMP+97} and a meaningful comparison of different
schemes needs to specify carefully the conditions under which the metrological protocol
is carried out, such as the number of particles or the total amount of time available
\cite{HuelgaMP+97, EscherMD11}. A wide variety of settings has been analysed \cite{HuelgaMP+97,EscherMD11,DemkowiczKG12,haase2018fundamental} and noise models have
been found to result in metrological scaling intermediate between SQL and the
HL \cite{ChinHP12, matsuzaki2011magnetic, macieszczak2015zeno, smirne2016ultimate, haase2018fundamental}.
However, these results depend on access to perfect and {arbitrarily} fast control and feedback
operations \cite{SekatskiSK+17, DD2014}.
In practice, however, only limited control is possible over experimental resources and the
asymptotic regime {of large} numbers of fully controlled particles is not accessible.
{What can be achieved in metrology for systems where, for example, particles cannot be addressed individually, multi-particle
quantum gates are not available or the rate of measurements, feedback and the number of accessible particles
is limited?}
In order to initiate investigations of this type in a concrete setting, we allow ourselves to
be motivated by the recently developed concept of quantum-hybrid sensors \cite{CaiJP14,WrachtrupF14}.
These are devices that integrate at least two components, one being {a fully controlled} quantum
sensor and another, typically an assembly of quantum particles, mutually interacting or
not, that are coupled to the quantum sensor but over which there is no individual control.
This second component acts as a transducer of a signal to a form that is then detected
by the quantum sensor.
{An example is a device composed of a piezo-magnetic material deposited on a diamond surface
that translates a force into a stray magnetic field which is then detected by a shallowly implanted
nitrogen-vacancy (NV) center in diamond~\cite{GruberDT+97,MullerKC+14,WuJP+16}.
Another possibility, motivated by recently realised nanoscale NMR measurements
\cite{SchmittGS+2017,BossCZ+17,GlennBL+18}, consists of an ensemble of $M$ nuclear
spins and an NV center. Here an applied magnetic field can be measured as it induces
a nuclear Larmor precession which can be monitored by an NV center. Similarly, atoms
in microfabricated vapour cells \cite{ShahKS+07} could allow for observation of their Larmor
precession by an NV center rather than a classical laser field.
We assume that the quantum sensor is subject to noise,
and cannot exert individual control over the noise-free auxiliary spins.}
While we are motivated by concrete settings, our analysis yields more compact expressions,
without affecting the scaling properties, by assuming that all involved spins have the same magnetic
moment. Furthermore, all the following considerations will neglect direct interactions between the
auxiliary spins.
By means of analyzing the scaling of the Fisher information with respect to particle number $M$ and measurement time $T$ we will show that, despite the limitations of partial control, it is possible
to get close to the Heisenberg limit, we will discuss the origin of this scaling and compare our scheme to a single quantum sensor without auxiliary spins.
\begin{figure}
\caption{
The proposed measurement scheme uses a control sequence on the sensor spin (blue) to weakly measure the auxiliary spins (black) without the need of further control after initialisation in either the pure state $\ket{+++...}
\label{Fig0}
\end{figure}
\section{Approaching the Heisenberg limit without entanglement preparation}
{Before stating the main results, we briefly recapitulate the achievable uncertainties under unconstrained metrology to make it available for later comparison with our schemes.}
\subsection{The ideal case of full quantum control} {Consider} a quantum sensor and $M$ auxiliary
spins, all with the same magnetic moment {${\mu_{n}}$, over which we can exert arbitrary and fast control.
Then the optimal uncertainty for the estimation of the magnetic field in a time $T$ in the absence of
noise is obtained via Ramsey spectroscopy using the $M+1$ accessible particles prepared in a {highly
entangled} state of the form $(|0\ldots 0\rangle + |1\ldots 1\rangle)/\sqrt{2}$ and is given by} \cite{HuelgaMP+97}
\begin{equation}
\Delta B = \frac{\hbar}{{\mu_{n}} (M+1) T}.
\label{Heisenberg}
\end{equation}
We observe a linear decrease in the uncertainty, i.e. HS, both in the total measurement time $T$ and the number of spins $M$.
\subsection{ {The case} of limited control setting} We consider a perfectly controlled quantum
sensor supplemented by $M$ auxiliary spins all having the same magnetic moment. These $M$ auxiliary
spins can be controlled by a global field and interact weakly with the quantum sensor, i.e. the product
of sensor-auxiliary spin interaction strength and interaction time is much smaller than unity.
For simplicity we assume that each auxiliary particle is interacting with the same strength and phase
with the quantum sensor, however the basic findings remain the same in the more general case.
{We measure a static external magnetic field $B$ with this hybrid sensor and determine
the achievable uncertainty $\Delta B$.} To this end we initialise the {auxiliary spins}
in a fully polarised state \cite{FN1} and then subject {them} to a $\pi/2$-pulse.
We determine the resulting rate of precession of these $M$ spins by periodically
measuring the time-dependent magnetic field generated by the precessing {spins}
in regular time intervals using the quantum sensor {and a dynamical decoupling {(DD)} sequence to weakly
entangle it with the auxiliary spins}.
This allows for comparison of the precession frequency
to that of a local oscillator \cite{SchmittGS+2017,BossCZ+17} as sketched in figure \ref{Fig0}.
{This DD sequence does not only cancel static noise on the sensor spin, but also allows to accumulate signal over several auxiliary spin Larmor periods by creating an effective $\sigma_z \otimes \sigma_x$ interaction. In order to see this, we
start with a Hamiltonian of the form
\begin{align}
H = \frac{\Omega(t)}{2} \sigma_x &+ \sum_{m=1}^{M} \frac{\omega_L}{2} \sigma_z^{(m)}
\\& + A_\perp^{(m)} \sigma_{z} \otimes \left(\sigma_x^{(m)} \cos({\phi_{0m}})
+ \sigma_y^{(m)} \sin({\phi_{0m}}) \right)\nonumber
\end{align}
that describes an NV center coupled to $M$ nuclear spins in the interaction picture
where $\Omega(t)$ is the Rabi frequency with $\Omega(t)=0$ during the free evolution, $\omega_L$ is the nuclear Larmor frequency, $A_\perp^{(m)}$ is the perpendicular coupling of the nuclear spins and $\phi_{0m}$ the corresponding phase.
The operators $\sigma_i$ act on the electron spin (NV center), $\sigma_i^{(m)}$ act on the $m$-th nuclear spin.
When we fulfil $\tau = \pi/\omega_L$ in the DD (e.g. XY-8) sequence, it
produces a modulation function
{that modulates the NV $\sigma_z$ operator to $\sigma_z f(t)$ where}
\begin{equation}
f(t) = \frac{4}{\pi} \cos(\omega_L t) + \mathrm{rot.}
\end{equation}
and rot. denotes terms that vanish after the rotating-wave approximation in a frame rotating with the nuclear Larmor frequencies.
Using
\begin{equation}
\cos^2(\omega_L t) = \frac{1}{2}\left(1+\cos(2\omega_L t)\right),
\end{equation}
we obtain the effective interaction Hamiltonian
\begin{equation}\label{Heff}
H_\mathrm{eff} = \sum \limits_m \frac{2A_\perp^{(m)}}{\pi} \sigma_z \otimes \left(\sigma_x^{(m)} \cos({\phi_{0m}}) + \sigma_y^{(m)} \sin({\phi_{0m}}) \right)
\end{equation}
that will be the starting point for the following discussion.
}
We use ${k_0}={\mu_{n}} B_{s}/\hbar$ where $B_{s}$ is the
field generated by one of the $M$ auxiliary spins at the position of the NV center.
\subsection{Transient super-Heisenberg scaling in measurement time}
In a first step we analyse the Fisher information scaling {with the} measurement time.
For this purpose we derive the probability $p_n$ of finding
the internal state of a spin-$1/2$ quantum sensor in the spin-down state in the $n$-th measurement in leading order,
assuming that the length $T_s$ of each instance of a magnetic field measurement is short and
that these measurements are applied every $\tau_m$.
{
We simplify {to the case} that all couplings $A_\perp^{(m)} = k$ and all phases $\phi_{0m}$ are equal, see the \BTF{appendix} for a more general discussion.
Here $k_0={\mu_{n}} B_{s}/\hbar$ where $B_{s}$ is the field generated by one of the $M$ auxiliary spins at the position of the NV center.
In the described protocol the auxiliary spins gain a phase $\phi = \delta \tau_m $ in each step, where $\delta=2\pi(\nu-\nu_{loc})$ is the difference of precession frequency $\nu$ and local oscillator frequency $\nu_{loc}$.
For a nuclear spin with an already accumulated phase from $n$ cycles {equalling} $n \phi$
the readout probability is calculated for the NV measurement in basis $Y${, i.e. the eigenbasis of the Pauli $\sigma_y$ operator,} and NV preparation in $\ket{+}$.
\begin{align}
p_n&-\frac{1}{2} = \mathrm{Tr} \left[\hat O_\mathrm{measurement} U \rho_n U^\dagger \right]\nonumber
\\&= \mathrm{Tr} \left[\left(\frac{\sigma_y}{2} \otimes \mathbbm{1}^{\otimes M} \right) U\nonumber
\right.\\&\left.
\left(\frac{\mathbbm{1}+\sigma_{x}}{2} \otimes
\left(\frac{\mathbbm{1} + \cos(\phi n) \sigma_{x} + \sin(\phi n) \sigma_y}{2}\right) \right)^{\otimes M} U^\dagger \right] \nonumber
\\&= \frac{i}{4} \left[
\left( \cos \frac{4 k_0 T_s}{\pi} - i \sin \frac{4 k_0 T_s}{\pi} \cos(\phi n) \right)^{M} \nonumber
\right.\\&\left.
- \left( \cos \frac{4 k_0 T_s}{\pi} + i \sin \frac{4 k_0 T_s}{\pi} \cos(\phi n) \right)^{M} \right].\label{noapprox}
\end{align}
For $M k_0 T_s \ll 1$ we can approximate
\begin{align}
&\left( \cos \frac{4 k_0 T_s}{\pi} \pm i \sin \frac{4 k_0 T_s}{\pi} \cos(\phi n) \right)^M \nonumber
\\
&\cong \exp\left( \pm i M \sin \frac{4k_0 T_s}{\pi} \cos(\phi n) \right)
\end{align}
to derive the signal
\begin{equation}
p_n = \cos^2\left(\frac{2M {k_0} T_s}{\pi}\cos\left(\phi n\right)-\frac{\pi}{4}\right).
\label{signal}
\end{equation}
Imperfect polarisation $P$ of the auxiliary spins can be incorporated via $k_0=\mu_{n} P
B_{s}/\hbar$.
}
By eq. (\ref{signal}) we estimate the frequency $2\pi\nu = {\mu_{n}}B/\hbar$ and hence the
magnitude of the magnetic field $B$. For $N$ measurements, the achievable uncertainty in the
estimate of $\nu$ is obtained via the classical Fisher information
\begin{equation}
{I_N = \sum_{n=1}^N \frac{1}{p_n(1-p_n)}\left(\frac{\partial p_n}{\partial \phi}\right)^2 \left(\frac{\partial \phi}{\partial \delta}\right)^2.}
\end{equation}
{We can describe decoherence processes in terms of a decay rate $\gamma = \gamma_2+\gamma_b$
(see \BTF{appendix}), where $\gamma_2$ refers to $T_2$ processes on the nuclear spins and
$\gamma_b$ refers to measurement backaction from the quantum sensor. Then} the effective
coupling after $n$ measurements is $k_n = k_0 e^{-\gamma n}$ and we find
\begin{eqnarray}
I_N(\gamma) = \label{exact} \sum_{n=1}^N \left(\frac{4 \tau_m M k_n T_s n}{\pi}\right)^2
\sin^2 \phi n.
\end{eqnarray}
Under the assumptions $\max[\gamma,\frac{1}{N}]\ll 2\pi\phi$ and $M k_0 T_s\ll 1$, i.e. when we
sample at least one full oscillation of the signal of frequency $\delta$, eq. (\ref{exact}) is well
approximated by
\begin{eqnarray}
I_N(\gamma) &\cong& \frac{16 M^2 \tau_m^2 k_0^2 T_s^2}{\pi^2}\sum_{n=1}^N \frac{n^2}{2} e^{-2\gamma
n}\left(1-\cos 2\phi n\right)\nonumber\\
&\cong& \frac{2M^2 \tau_m^2 k_0^2 T_s^2}{\pi^2}\frac{1-e^{-2\gamma N}(1+2\gamma N(1+\gamma
N))}{\gamma^3}.
\label{approx}
\end{eqnarray}
For {$\gamma N \ll1$ (for $\gamma N < 0.6$ errors are smaller than 3\%)}
\begin{eqnarray}
I_N(\gamma)
\cong \frac{2 M^2 \tau_m^2 k_0^2 T_s^2}{\pi^2}(\frac{4N^3}{3}-2\gamma N^4 + \frac{6\gamma^2 N^5}{5})
\end{eqnarray}
and hence
\begin{eqnarray}
\Delta B &\le& \sqrt{\frac{3\pi^2\hbar^2}{8{\mu_{n}}^2 M^2 \tau_m^2 k_0^2 T_s^2} \frac{1}{N^3}}.
\end{eqnarray}
As a result, for small $\gamma N$ our limited control procedure exhibits a scaling in the number
of measurements $N$ or, equivalently, the total measurement time $T = N \tau_m$ that exceeds the
standard HS of eq. (\ref{Heisenberg}) while the scaling in the number of particles $M$ achieves
the HL. {Note that unlike the case of interaction-based quantum metrology \cite{Boixo2007} this
super-Heisenberg scaling is not due to interactions between the auxiliary spins.} {Intuitively
the scaling can be understood to emerge due to the fact that, without noise, the last measurement
of the measurement record alone would already give quadratic Fisher information scaling, and the
linear number of intermediate measurements leads to a cubic scaling in total.}
\subsection{Asymptotic SQL scaling in measurement time}
However, {the super-Heisenberg scaling identified in the previous subsection} has to be transient and cannot persist for arbitrarily long times
as this would {be in} violation of the fundamental limit of sensitivity that is
imposed by the full control scheme in the absence of any noise.
For $\gamma_2=0$, the remaining contribution to the decay rate $\gamma$ is due to the
measurement backaction of the quantum sensor on the auxiliary spins which is negligible only for
$\gamma N\ll 1$. {Our measurement scheme then yields (see \BTF{appendix} for a derivation)}
\begin{equation}
\gamma_b = \frac{4k_0^2T_s^2}{\pi^2}.
\label{disturbance}
\end{equation}
Due to the measurement backaction, the signal weakens with increasing number of measurements $N$ and the rate of increase of the Fisher information slows. When determining the scaling in this
regime, a note of caution is in order as the calculation of the measurement backaction in
eq. (\ref{approx}) determines the Fisher information of the averaged density matrix of the auxiliary
spins. However, as we have access to and use all the intermediate measurements, the {correct Fisher information
of the protocol is obtained by weighted averaging over measurement trajectories}. For $\gamma N\gg 1$
this results in a scaling linear in $N$ (see also \cite{Pfender, Beige, cohen2019achieving}), as indicated analytically in
the \BTF{appendix} and numerically in figure \ref{Fig1}. For $M=100$ nuclei in an
initial product state (red data) the transient super-Heisenberg $I_N \propto N^3 \propto T^{3}$ scaling
evolves into the shot noise scaling {(SQL)} of $I_N \propto N \propto T$ (blue asymptote) while eq. (\ref{approx}) {was calculated with the average density matrix and therefore}
would yield a constant. Remarkably, this linear scaling is independent of the initial state
and we can achieve the same scaling using a completely mixed initial state (orange triangles) \cite{Footnote3}. In the limit of small
interaction strength $k_0 T_s$ and decay, the asymptotic {($\gamma N \gg 1$)} value of the Fisher information can be estimated
to be
\begin{equation}\label{asymptote}
I_N = \frac{\sin^4(4 k_0 T_s/\pi)}{16\left(\gamma_b + \gamma_2\right)^3} \frac{M^2}{2} \tau_m^2 N
\end{equation}
\begin{figure}
\caption{Upper graph: Numerical Fisher information scaling (red: initial product state/ orange: initial mixed state), the analytical approximation for small $N$ eq. (\ref{approx}
\label{Fig1}
\end{figure}
\begin{figure}
\caption{
For the same parameters as in figure \ref{Fig1}
\label{Fig2}
\end{figure}
\subsection{Heisenberg scaling in particle number and relation to the Heisenberg limit}
{While the quadratic scaling with the number of nuclei $M$ is obvious for $\gamma N \ll 1$,
it also persists for $\gamma N \gg 1$ as confirmed in equation (\ref{asymptote}) and} in Fig.
\ref{Fig2} (see {Fig. \ref{Fig_Dec}} and the related discussion for details).
{In the full control scenario such a quadratic scaling can be traced back to the preparation
of a macroscopically entangled resource state of the form $(|0...0\rangle + |1...1\rangle)/\sqrt{2}$
including the quantum sensor and the auxiliary spins which contain one ebit of entanglement
\cite{Vedral98,Plenio05}. This entanglement is destroyed with the final measurement and represents
the resource that is required to achieve HS.} {This is in sharp contrast with our scenario,
which starts from an initial product state.} {Here the quadratic scaling {arises because
the auxiliary spins are interrogated by a quantum sensor which results in a} readout operator that
is not particle local, i.e. cannot be represented by a product of single particle operators {as
would be the case when using a classical readout device}.}
{Indeed, every measurement applies a CPTP (completely positive and
trace-preserving) map to the auxiliary spins that is represented by the Kraus operators (see
also \cite{Pfender} for the case $M=1$ and our \BTF{appendix})}
\begin{equation}
U_\pm = \bra{\pm_y} U \ket{+} = \frac{e^{-i \frac{2 k_0 T_s}{\pi} \sum \sigma_x^{(m)}} \mp i e^{i \frac{2 k_0 T_s}{\pi} \sum \sigma_x^{(m)}}}{2}
\end{equation}
{Crucially, this operator is diagonal in a basis different than the eigenbasis of the free
evolution operator - the latter being a tensor product of unitary evolutions on every auxiliary
spin. If the same held true for the readout operator, it is trivial to see that every spin would
be measured independently and therefore SQL scaling would apply. The readout operator here, however,
combines all particle states in a nontrivial, nonlocal, manner. This is similar to the metrological
advantage obtained for indistinguishable particles \cite{Braun} (section III).}
{In our work, this advantage is achieved by the sensor spin that, unlike a classical sensor,
allows to apply the same, particle non-local, CPTP map in every measurement.}
{We would like to stress that the entanglement build-up due to the backaction via a non-local
measurement operator does not contribute to the quadratic scaling with the number of auxiliary spins,
nor does it support the transient $N^3$ super-Heisenberg scaling.}
In the limit of large nuclear coherence
times $0 \approx \gamma_2 \ll \gamma_b$ the Fisher information approaches the HL
achievable under full control for $N_{opt}\approx \left({2 k_0 T_s}/{\pi}\right)^{-2}=1/
\gamma_b$ measurements after which the $N^3$-scaling {turns} into a scaling $\propto N$.
{{Note that the fundamental Heisenberg limit} is not violated as with decreasing interaction
$k_0 T_s$ {both the backaction and the information gain per measurement decrease at the same rate thus compensating each other.}}
Remarkably, at this point the ratio of ultimate sensitivity under global control and the
limited control scheme used here only depends on the interaction strength and therefore can be tuned
to approach the HL {for $N_{opt}$ measurements to within {a Fisher minimal distance of} a factor independent of $T$, see figure \ref{Fig1}.
Furthermore it is natural to assume that this holds independently of $M$ as the Fisher information
of both the HL and the asymptote for $\gamma N\gg 1$ exhibit the same quadratic in $M$
scaling. This was confirmed numerically (see \BTF{appendix}).
\section{Discussion} Metrology {assisted} by environmental spins has been considered
before, see e.g. \cite{GoldsteinCM+11,CappellaroGH+12}. There, however, the emphasis was placed
on spins that are strongly interacting with the quantum sensor and the measurement protocol creates
a joint entangled state of the quantum sensor and the auxiliary spins which then evolves
for some time followed by an inversion of the entangling operation and the subsequent measurement of
the state of the quantum sensor. In this approach the HS is achieved in the number of
{\em strongly} coupled auxiliary spins while we assume no such spins in our set-up. Furthermore, this protocol suffers from the drawback that it is fundamentally limited by the coherence
time of the quantum sensor and hence does not take full advantage of the long coherence time of the
auxiliary spins. In contrast, the measurements in our protocol can be made shorter than the
coherence time without adversely affecting the achievable sensitivity.
Besides the theoretical interest in the novel scaling regimes, we stress that the proposed
scheme employing auxiliary spins under limited control provides enhanced sensitivity as compared to the quantum sensor alone. This advantage
is the result of two processes. First, the transduction of the static magnetic field to a time-dependent
Larmor precession which is then detected by the quantum sensor facilitates the use of dynamical decoupling
schemes to filter out noise without adversely affecting the signal. Secondly, as each auxiliary spin contributes
to the signal, the overall signal strength scales with the number of spins and hence leads to a considerable
signal enhancement.
{Remarkably, magnetometry schemes such as atoms in gas cells which are probed independently
by a classical field lead to a $M^{-1}$ scaling of the variance with the particle number $M$. In
sharp contrast, it is the transduction of the signal to a quantum sensor, e.g. an NV center, which
results in a particle non-local measurement which causes the $M^{-2}$ Heisenberg scaling. This suggests
a practical route for enhancing the measurement capacity of gas cell magnetometers.}
Furthermore, for an NV center as quantum sensor, even when considering nuclei with their
small magnetic moment as auxiliary spins, we may obtain an increased sensitivity. To this end, let us consider
the $|m=0\rangle \leftrightarrow|m=+1\rangle$ transition of an NV center in an external magnetic field $B$
and assume that the NV center is dominated by pure dephasing which results in a coherence time {$T_2^{(NV)}$}.
For perfect readout efficiency, the optimal interrogation scheme yields
\begin{equation}
{\Delta B = \sqrt{\frac{2e\hbar^2}{\mu_e^2T_2^{(NV)}T}}
= \sqrt{\frac{4e\hbar^2}{\mu_e^2 N{T_2^{(NV)}}^2}}}
\end{equation}
\cite{HuelgaMP+97} where {$N=2T/T_2^{(NV)}$}.
{The maximum of equation (\ref{asymptote}) that takes the form $\gamma_b^2/(\gamma_b+\gamma_2)^3$ is obtained by choosing
$\gamma_b = 2\gamma_2$.
This allows to compare}
with our indirect measurement scheme using $M$ hydrogen nuclear spins and assuming
$\left(\frac{\mu_e}{{\mu_{n}}}\right)^2 \frac{T_2^{(NV)}}{T_2^{n}} \approx 10^3$ we find that for
\begin{equation}
{M > \sqrt{\frac{27}{4e}\frac{\mu_e^2 T_2^{(NV)} }{\mu_n^2 T_2^{(n) }}} \approx 50}
\end{equation}
the auxiliary spin assisted sensor outperforms the bare NV center.
{Nuclear spins couple more weakly to both noise and signal due to their smaller gyromagnetic ratio, resulting in typically larger coherence times $\frac{\mu_e}{{\mu_{n}}} \approx \frac{T_2^{(n)}}{T_2^{(NV)}} $, so similar results apply for other systems.}
While our protocol makes use of a far
smaller magnetic moment compared to even a single electron spin $M \mu_n \ll \mu_e$, this is compensated
by the longer coherence time and the possibility to measure during the signal accumulation.
{Furthermore the obtained expression also highlights the advantage of the $M^{-2}$ Heisenberg scaling over the SQL that individual measurements on the nuclear spins give, providing higher Fisher information for $M>\frac{27}{4e}\approx 2.5$.}
Finally, we note that our analysis also covers the case $M=1$ corresponding to
the detection of the Larmor frequency of a single nuclear spin via an NV center. Super-Heisenberg scaling
applies for as long as the measurement backaction is weak. This applies for distant nuclear spins or
for measurements that are designed to be weak, i.e. not obtaining a full bit of information in each
single measurement.
{\em Conclusions --} We have examined metrology in a realistic setting of limited control and found transient
super-Heisenberg scaling in the total measurement time and a metrological precision approaching that
of the same number of particles under full experimental control. This is despite the absence of initial
entanglement in the system. In fact, in this scheme entanglement emerges only with increasing number
of measurements and adversely affects the metrological scaling. Furthermore, the proposed set-up, which employs
auxiliary spins under limited control, also represents a hybrid sensor that may outperform a bare quantum
sensor thus providing new design principles for quantum sensors.
{\em Acknowledgements --} The authors thank Liam McGuinness and Jan F. Haase for discussions
and comments on the manuscript. This work was supported by the ERC
Synergy Grant BioQ, the EU projects AsteriQs, HYPERDIAMOND, the BMBF projects NanoSpin and DiaPol,
the DFG CRC 1279 and the DFG project 414061038. The authors acknowledge support by the state of
Baden-W{\"u}rttemberg through bwHPC and the German Research Foundation (DFG) through grant no
INST 40/467-1 FUGG (JUSTUS cluster). The first results of this work have been presented at the
Workshop On Quantum Metrology, 22nd - 23rd June 2017 in Ulm, Germany.
\begin{thebibliography}{38}
\providecommand{\natexlab}[1]{#1}
\providecommand{\url}[1]{\texttt{#1}}
\expandafter\ifx\csname urlstyle\endcsname\relax
\providecommand{\doi}[1]{doi: #1}\else
\providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi
\bibitem[Caves(1981)]{Caves81}
C.~M. Caves.
\newblock Quantum-mechanical noise in an interferometer.
\newblock \emph{Physical Review D}, 23\penalty0 (8):\penalty0 1693, 1981.
\bibitem[Wineland et~al.(1992)Wineland, Bollinger, Itano, Moore, and
Heinzen]{WinelandBI+92}
D.~J. Wineland, J.~J. Bollinger, W.~M. Itano, F.~Moore, and D.~Heinzen.
\newblock Spin squeezing and reduced quantum noise in spectroscopy.
\newblock \emph{Physical Review A}, 46\penalty0 (11):\penalty0 R6797, 1992.
\bibitem[Wineland et~al.(1994)Wineland, Bollinger, Itano, and
Heinzen]{WinelandBI+94}
D.~J. Wineland, J.~J. Bollinger, W.~M. Itano, and D.~Heinzen.
\newblock Squeezed atomic states and projection noise in spectroscopy.
\newblock \emph{Physical Review A}, 50\penalty0 (1):\penalty0 67, 1994.
\bibitem[Giovannetti et~al.(2004)Giovannetti, Lloyd, and Maccone]{Giovannetti}
V.~Giovannetti, S.~Lloyd, and L.~Maccone.
\newblock Quantum-enhanced measurements: beating the standard quantum limit.
\newblock \emph{Science}, 306\penalty0 (5700):\penalty0 1330--1336, 2004.
\bibitem[Braun et~al.(2018)Braun, Adesso, Benatti, Floreanini, Marzolino,
Mitchell, and Pirandola]{Braun}
D.~Braun, G.~Adesso, F.~Benatti, R.~Floreanini, U.~Marzolino, M.~W. Mitchell,
and S.~Pirandola.
\newblock Quantum-enhanced measurements without entanglement.
\newblock \emph{Reviews of Modern Physics}, 90\penalty0 (3):\penalty0 035006,
2018.
\bibitem[Hyllus et~al.(2012)Hyllus, Laskowski, Krischek, Schwemmer, Wieczorek,
Weinfurter, Pezz{\'e}, and Smerzi]{hyllus2012fisher}
P.~Hyllus, W.~Laskowski, R.~Krischek, C.~Schwemmer, W.~Wieczorek,
H.~Weinfurter, L.~Pezz{\'e}, and A.~Smerzi.
\newblock Fisher information and multiparticle entanglement.
\newblock \emph{Physical Review A}, 85\penalty0 (2):\penalty0 022321, 2012.
\bibitem[T{\'o}th and Petz(2013)]{toth2013extremal}
G.~T{\'o}th and D.~Petz.
\newblock Extremal properties of the variance and the quantum fisher
information.
\newblock \emph{Physical Review A}, 87\penalty0 (3):\penalty0 032324, 2013.
\bibitem[Augusiak et~al.(2016)Augusiak, Ko{\l}ody{\'n}ski, Streltsov, Bera,
Acin, and Lewenstein]{augusiak2016asymptotic}
R.~Augusiak, J.~Ko{\l}ody{\'n}ski, A.~Streltsov, M.~N. Bera, A.~Acin, and
M.~Lewenstein.
\newblock Asymptotic role of entanglement in quantum metrology.
\newblock \emph{Physical Review A}, 94\penalty0 (1):\penalty0 012339, 2016.
\bibitem[Huelga et~al.(1997)Huelga, Macchiavello, Pellizzari, Ekert, Plenio,
and Cirac]{HuelgaMP+97}
S.~F. Huelga, C.~Macchiavello, T.~Pellizzari, A.~K. Ekert, M.~B. Plenio, and
J.~I. Cirac.
\newblock Improvement of frequency standards with quantum entanglement.
\newblock \emph{Physical Review Letters}, 79\penalty0 (20):\penalty0 3865,
1997.
\bibitem[Escher et~al.(2011)Escher, de~Matos~Filho, and Davidovich]{EscherMD11}
B.~M. Escher, R.~L. de~Matos~Filho, and L.~Davidovich.
\newblock General framework for estimating the ultimate precision limit in
noisy quantum-enhanced metrology.
\newblock \emph{Nature Physics}, 7\penalty0 (5):\penalty0 406, 2011.
\bibitem[Demkowicz-Dobrza{\'n}ski et~al.(2012)Demkowicz-Dobrza{\'n}ski,
Ko{\l}ody{\'n}ski, and Gu{\c{t}}{\u{a}}]{DemkowiczKG12}
R.~Demkowicz-Dobrza{\'n}ski, J.~Ko{\l}ody{\'n}ski, and M.~Gu{\c{t}}{\u{a}}.
\newblock The elusive heisenberg limit in quantum-enhanced metrology.
\newblock \emph{Nature Communications}, 3:\penalty0 1063, 2012.
\bibitem[Haase et~al.(2018)Haase, Smirne, Ko{\l}ody{\'n}ski,
Demkowicz-Dobrza{\'n}ski, and Huelga]{haase2018fundamental}
J.~F. Haase, A.~Smirne, J.~Ko{\l}ody{\'n}ski, R.~Demkowicz-Dobrza{\'n}ski, and
S.~F. Huelga.
\newblock Fundamental limits to frequency estimation: a comprehensive
microscopic perspective.
\newblock \emph{New Journal of Physics}, 20\penalty0 (5):\penalty0 053009,
2018.
\bibitem[Chin et~al.(2012)Chin, Huelga, and Plenio]{ChinHP12}
A.~W. Chin, S.~F. Huelga, and M.~B. Plenio.
\newblock Quantum metrology in non-markovian environments.
\newblock \emph{Physical Review Letters}, 109\penalty0 (23):\penalty0 233601,
2012.
\bibitem[Matsuzaki et~al.(2011)Matsuzaki, Benjamin, and
Fitzsimons]{matsuzaki2011magnetic}
Y.~Matsuzaki, S.~C. Benjamin, and J.~Fitzsimons.
\newblock Magnetic field sensing beyond the standard quantum limit under the
effect of decoherence.
\newblock \emph{Physical Review A}, 84\penalty0 (1):\penalty0 012103, 2011.
\bibitem[Macieszczak(2015)]{macieszczak2015zeno}
K.~Macieszczak.
\newblock Zeno limit in frequency estimation with non-markovian environments.
\newblock \emph{Physical Review A}, 92\penalty0 (1):\penalty0 010102, 2015.
\bibitem[Smirne et~al.(2016)Smirne, Ko{\l}ody{\'n}ski, Huelga, and
Demkowicz-Dobrza{\'n}ski]{smirne2016ultimate}
A.~Smirne, J.~Ko{\l}ody{\'n}ski, S.~F. Huelga, and R.~Demkowicz-Dobrza{\'n}ski.
\newblock Ultimate precision limits for noisy frequency estimation.
\newblock \emph{Physical Review Letters}, 116\penalty0 (12):\penalty0 120801,
2016.
\bibitem[Sekatski et~al.(2017)Sekatski, Skotiniotis, Ko{\l}ody{\'n}ski, and
D{\"u}r]{SekatskiSK+17}
P.~Sekatski, M.~Skotiniotis, J.~Ko{\l}ody{\'n}ski, and W.~D{\"u}r.
\newblock Quantum metrology with full and fast quantum control.
\newblock \emph{Quantum}, 1:\penalty0 27, 2017.
\bibitem[Demkowicz-Dobrza{\'n}ski and Maccone(2014)]{DD2014}
R.~Demkowicz-Dobrza{\'n}ski and L.~Maccone.
\newblock Using entanglement against noise in quantum metrology.
\newblock \emph{Physical Review Letters}, 113\penalty0 (25):\penalty0 250801,
2014.
\bibitem[Cai et~al.(2014)Cai, Jelezko, and Plenio]{CaiJP14}
J.~Cai, F.~Jelezko, and M.~B. Plenio.
\newblock Hybrid sensors based on colour centres in diamond and piezoactive
layers.
\newblock \emph{Nature Communications}, 5:\penalty0 4065, 2014.
\bibitem[Wrachtrup and Finkler(2014)]{WrachtrupF14}
J.~Wrachtrup and A.~Finkler.
\newblock Applied physics: Hybrid sensors ring the changes.
\newblock \emph{Nature}, 512\penalty0 (7515):\penalty0 380, 2014.
\bibitem[Gruber et~al.(1997)Gruber, Dr{\"a}benstedt, Tietz, Fleury, Wrachtrup,
and Von~Borczyskowski]{GruberDT+97}
A.~Gruber, A.~Dr{\"a}benstedt, C.~Tietz, L.~Fleury, J.~Wrachtrup, and
C.~Von~Borczyskowski.
\newblock Scanning confocal optical microscopy and magnetic resonance on single
defect centers.
\newblock \emph{Science}, 276\penalty0 (5321):\penalty0 2012--2014, 1997.
\bibitem[M{\"u}ller et~al.(2014)M{\"u}ller, Kong, Cai, Melentijevi{\'c},
Stacey, Markham, Twitchen, Isoya, Pezzagna, Meijer, et~al.]{MullerKC+14}
C.~M{\"u}ller, X.~Kong, J.-M. Cai, K.~Melentijevi{\'c}, A.~Stacey, M.~Markham,
D.~Twitchen, J.~Isoya, S.~Pezzagna, J.~Meijer, et~al.
\newblock Nuclear magnetic resonance spectroscopy with single spin sensitivity.
\newblock \emph{Nature Communications}, 5:\penalty0 4703, 2014.
\bibitem[Wu et~al.(2016)Wu, Jelezko, Plenio, and Weil]{WuJP+16}
Y.~Wu, F.~Jelezko, M.~B. Plenio, and T.~Weil.
\newblock Diamond quantum devices in biology.
\newblock \emph{Angewandte Chemie International Edition}, 55\penalty0
(23):\penalty0 6586--6598, 2016.
\bibitem[Schmitt et~al.(2017)Schmitt, Gefen, St{\"u}rner, Unden, Wolff,
M{\"u}ller, Scheuer, Naydenov, Markham, Pezzagna, et~al.]{SchmittGS+2017}
S.~Schmitt, T.~Gefen, F.~M. St{\"u}rner, T.~Unden, G.~Wolff, C.~M{\"u}ller,
J.~Scheuer, B.~Naydenov, M.~Markham, S.~Pezzagna, et~al.
\newblock Submillihertz magnetic spectroscopy performed with a nanoscale
quantum sensor.
\newblock \emph{Science}, 356\penalty0 (6340):\penalty0 832--837, 2017.
\bibitem[Boss et~al.(2017)Boss, Cujia, Zopes, and Degen]{BossCZ+17}
J.~M. Boss, K.~S. Cujia, J.~Zopes, and C.~L. Degen.
\newblock Quantum sensing with arbitrary frequency resolution.
\newblock \emph{Science}, 356\penalty0 (6340):\penalty0 837--840, 2017.
\bibitem[Glenn et~al.(2018)Glenn, Bucher, Lee, Lukin, Park, and
Walsworth]{GlennBL+18}
D.~R. Glenn, D.~B. Bucher, J.~Lee, M.~D. Lukin, H.~Park, and R.~L. Walsworth.
\newblock High-resolution magnetic resonance spectroscopy using a solid-state
spin sensor.
\newblock \emph{Nature}, 555\penalty0 (7696):\penalty0 351, 2018.
\bibitem[Shah et~al.(2007)Shah, Knappe, Schwindt, and Kitching]{ShahKS+07}
V.~Shah, S.~Knappe, P.~D.~D. Schwindt, and J.~Kitching.
\newblock Subpicotesla atomic magnetometry with a microfabricated vapour cell.
\newblock \emph{Nature Photonics}, 1:\penalty0 649 -- 652, 2007.
\bibitem[FN1()]{FN1}
Our considerations are easily extended to the case of an arbitrary polarisation
as the signal and hence the sensitivity is directly proportional to the
polarisation level.
\bibitem[Boixo et~al.(2007)Boixo, Flammia, Caves, and Geremia]{Boixo2007}
S.~Boixo, S.~T. Flammia, C.~M. Caves, and J.~M. Geremia.
\newblock Generalized limits for single-parameter quantum estimation.
\newblock \emph{Physical Review Letters}, 98\penalty0 (9):\penalty0 090401,
2007.
\bibitem[Pfender et~al.(2019)Pfender, Wang, Sumiya, Onoda, Yang, Dasari,
Neumann, Pan, Isoya, Liu, et~al.]{Pfender}
M.~Pfender, P.~Wang, H.~Sumiya, S.~Onoda, W.~Yang, D.~B.~R. Dasari, P.~Neumann,
X.-Y. Pan, J.~Isoya, R.-B. Liu, et~al.
\newblock High-resolution spectroscopy of single nuclear spins via sequential
weak measurements.
\newblock \emph{Nature Communications}, 10\penalty0 (1):\penalty0 594, 2019.
\bibitem[Clark et~al.(2019)Clark, Stokes, and Beige]{Beige}
L.~A. Clark, A.~Stokes, and A.~Beige.
\newblock Quantum jump metrology.
\newblock \emph{Physical Review A}, 99\penalty0 (2):\penalty0 022102, 2019.
\bibitem[Cohen et~al.(2019)Cohen, Gefen, Ortiz, and
Retzker]{cohen2019achieving}
D.~Cohen, T.~Gefen, L.~Ortiz, and A.~Retzker.
\newblock Achieving the ultimate precision limit in quantum nmr spectroscopy.
\newblock \emph{
arXiv:1912.09062}, 2019.
\bibitem[Foo()]{Footnote3}
See also \cite{GoreckaPL+18} for the related observation that for metrology in
non-markovian environments the zeno-scaling can be achieved with a mixed
initial state.
\bibitem[Vedral and Plenio(1998)]{Vedral98}
V.~Vedral and M.~B. Plenio.
\newblock Entanglement measures and purification procedures.
\newblock \emph{Physical Review A}, 57\penalty0 (3):\penalty0 1619, 1998.
\bibitem[Plenio(2005)]{Plenio05}
M.~B. Plenio.
\newblock Logarithmic negativity: a full entanglement monotone that is not
convex.
\newblock \emph{Physical Review Letters}, 95\penalty0 (9):\penalty0 090503,
2005.
\bibitem[Goldstein et~al.(2011)Goldstein, Cappellaro, Maze, Hodges, Jiang,
S{\o}rensen, and Lukin]{GoldsteinCM+11}
G.~Goldstein, P.~Cappellaro, J.~R. Maze, J.~S. Hodges, L.~Jiang, A.~S.
S{\o}rensen, and M.~D. Lukin.
\newblock Environment-assisted precision measurement.
\newblock \emph{Physical Review Letters}, 106\penalty0 (14):\penalty0 140502,
2011.
\bibitem[Cappellaro et~al.(2012)Cappellaro, Goldstein, Hodges, Jiang, Maze,
S{\o}rensen, and Lukin]{CappellaroGH+12}
P.~Cappellaro, G.~Goldstein, J.~Hodges, L.~Jiang, J.~Maze, A.~S{\o}rensen, and
M.~D. Lukin.
\newblock Environment-assisted metrology with spin qubits.
\newblock \emph{Physical Review A}, 85\penalty0 (3):\penalty0 032336, 2012.
\bibitem[G{\'o}recka et~al.(2018)G{\'o}recka, Pollock, Liuzzo-Scorpo, Nichols,
Adesso, and Modi]{GoreckaPL+18}
A.~G{\'o}recka, F.~A. Pollock, P.~Liuzzo-Scorpo, R.~Nichols, G.~Adesso, and
K.~Modi.
\newblock Noisy frequency estimation with noisy probes.
\newblock \emph{New Journal of Physics}, 20:\penalty0 083008, 2018.
\end{thebibliography}
\widetext
\appendix
{\Large Appendix: Limited-control metrology approaching the Heisenberg limit without entanglement preparation}
\section{Derivation of the signal for few measurements}
{This section presents a more general calculation of equations (6-8) of the main text.}
For a nuclear spin (initial state described by polarisation P) with an already accumulated phase from $n$ cycles $\phi_{1m} = \delta \tau_m n + \phi_0 $ the readout probability is
(NV measurement in basis $X c_\alpha + Y s_\alpha$ and NV preparation in $X$)
\begin{align}
p_n-\frac{1}{2} &= \mathrm{Tr} \left[\hat O_\mathrm{measurement} U \rho_n U^\dagger \right]
\\&= \mathrm{Tr} \left[\left(\frac{\sigma_x \cos\alpha + \sigma_y \sin\alpha}{2} \otimes \mathbbm{1}^{\otimes M} \right) U \left(\frac{\mathbbm{1}+\sigma_{x}}{2} \otimes
\prod\limits_{m=1}^M \left(\frac{\mathbbm{1} +P \cos\phi_{1m} \sigma_{x}^{(m)} + P \sin\phi_{1m} \sigma_y^{(m)}}{2}\right) \right) U^\dagger \right]
\\&= \frac{1}{4} \left[ \cos\alpha \left(\prod\limits_{m=1}^M\left( \cos \frac{4 A_\perp^{(m)} T_s}{\pi} - i \sin \frac{4 A_\perp^{(m)} T_s}{\pi} P \cos\phi_{m} \right) + \prod\limits_{m=1}^M\left( \cos \frac{4 A_\perp^{(m)} T_s}{\pi} + i \sin \frac{4 A_\perp^{(m)} T_s}{\pi} P \cos\phi_{m} \right)\right)
\right.\\&\left.
+i \sin \alpha \left(\prod\limits_{m=1}^M\left( \cos \frac{4 A_\perp^{(m)} T_s}{\pi} - i \sin \frac{4 A_\perp^{(m)} T_s}{\pi} P \cos\phi_{m} \right) - \prod\limits_{m=1}^M\left( \cos \frac{4 A_\perp^{(m)} T_s}{\pi} + i \sin \frac{4 A_\perp^{(m)} T_s}{\pi} P \cos\phi_{m} \right)\right) \right]\label{noapprox}
\end{align}
where $\phi_m = \phi_{1m}-\phi_{0m}$.
For $\sum\limits_{m=1}^M 4 A_\perp^{(m)} T_s/\pi \ll 1$ we can approximate
\begin{align}
\prod\limits_{m=1}^M\left( \cos \frac{4 A_\perp^{(m)} T_s}{\pi} \pm i \sin \frac{4 A_\perp^{(m)} T_s}{\pi} P \cos\phi_{m} \right)
&=\prod\limits_{m=1}^M \exp\left( \pm i \sin \frac{4 A_\perp^{(m)} T_s}{\pi} P \cos\phi_{m} +O\left(\frac{4 A_\perp^{(m)} T_s}{\pi}\right)^2 \right)
\\
&\cong \exp\left( \pm i \sum\limits_{m=1}^M \sin \frac{4 A_\perp^{(m)} T_s}{\pi} P \cos\phi_{m} \right)
\end{align}
to derive the signal
\begin{align}
p_n
&= \frac{1}{2} + \frac{1}{2}\cos\left( \sum\limits_{m=1}^M \sin \frac{4 A_\perp^{(m)} T_s}{\pi} P \cos\phi_{m} - \alpha \right) = \cos^2 \left( \sum\limits_{m=1}^M \sin \frac{2 A_\perp^{(m)} T_s}{\pi} P \cos\phi_{m} - \frac{\alpha}{2} \right).
\end{align}
{The generalisation of this case described in the main text to different couplings $A_\perp^{(m_1)} \ne A_\perp^{(m_2)}$ can be described by an effective coupling.
Different phases $\cos\phi_{m_1} \ne \cos\phi_{m_2}$ can produce additional features, but in the asymptotic case these effects are irrelevant as the initial state becomes less important, see next section.}
\section{Derivation of the Fisher Information from the probabilities for a full measurement record}
The outcome of the $k$th measurement is denoted by $X_k \in \{0=+,1=-\}$; $X^k$ is a measurement record of the form $\{1,0,1,1,0,1,\dots\}$ with $k$ components, $X_l$ is the $l$th component of it, and $\beta \equiv 2 A_x T_s/\pi$ is the coupling achieved by the XY-sequence.
Furthermore $U_\phi = \exp\left(-i \phi \sum \sigma_z^{(m)}/2\right)$ and
\begin{equation}
U_\pm = \bra{\pm_y} U \ket{+} = \frac{e^{-i \beta \sum \sigma_x^{(m)}} \mp i e^{i \beta \sum \sigma_x^{(m)}}}{2}
\end{equation}
with the coupling from the first section $U = \exp(-i \beta \sigma_z^{NV} \sum \sigma_x^{(m)})$.
Here we assumed all coupling constants to be equal, however different $\beta_m$ don't change the structure of the result.
Each measurement probability can be described by
\begin{equation}
p_\pm = \mathrm{Tr} \left[ (\ket{\pm_y}\bra{\pm_y}\otimes \mathbbm{1}) U U_\phi (\ket{\pm}\bra{\pm}\otimes\rho_{N-1})U_\phi^\dagger U^\dagger\right] = \mathrm{Tr}\left[U_{X_1}U_\phi \rho_0 U_\phi^\dagger U_{X_1}^\dagger\right]
\end{equation}
and evolves the nuclear state to
\begin{equation}
\rho_{\pm} = \frac{1}{p_\pm}\mathrm{Tr_{NV}} \left[U_{X_1}U_\phi \rho_0 U_\phi^\dagger U_{X_1}^\dagger\right].
\end{equation}
The probability for a measurement record $X^k$ can be described as
\begin{align}
{p_{X^k}} &= p_{X_1} p_{X_2|X_1} ... p_{X_{N-1}|X_{N-2}...X_1} p_{X_N|X_{N-1}...X_1}
\\&= \mathrm{Tr}\left[U_{X_1}U_\phi \rho_0 U_\phi^\dagger U_{X_1}^\dagger\right] \frac{\mathrm{Tr}\left[U_{X_2}U_\phi U_{X_1}U_\phi \rho_0 U_\phi^\dagger U_{X_1}^\dagger U_\phi^\dagger U_{X_2}^\dagger\right]}{\mathrm{Tr}\left[U_{X_1}U_\phi \rho_0 U_\phi^\dagger U_{X_1}^\dagger\right]}...
\\&= \mathrm{Tr}\left[ \prod\limits_{k=1}^{N} \left(U_{X_k}U_\phi\right) \rho_0 \left(\prod\limits_{k=1}^{N} \left(U_{X_k}U_\phi\right)\right)^\dagger \right].
\end{align}
Each part of the sum contributes roughly $2^{-N}$, and the sum over all contributions is 1. To analyse the effect of these operators, we apply them to a permutation-invariant product state
\begin{equation}
U_\phi \left(\frac{a \mathbbm{1} + b \sigma_x + c \sigma_y + d \sigma_z}{2}\right)^{\otimes M} U_\phi^\dagger
= \left(\frac{a \mathbbm{1} + \left(b c_\phi - c s_\phi \right) \sigma_x + \left(c c_\phi + b s_\phi \right) \sigma_y + d \sigma_z}{2}\right)^{\otimes M}
\end{equation}
\begin{align}
4 U_\pm \left(\frac{a \mathbbm{1} + b \sigma_x + c \sigma_y + d \sigma_z}{2}\right)^{\otimes M} U_\pm^\dagger
&= \left(\frac{a \mathbbm{1} + b \sigma_x + \left(c c_{2\beta} + d s_{2\beta} \right) \sigma_y + \left(d c_{2\beta} - c s_{2\beta} \right) \sigma_z}{2}\right)^{\otimes M} \label{eq9}
\\&+
\left(\frac{a \mathbbm{1} + b \sigma_x + \left(c c_{2\beta} - d s_{2\beta} \right) \sigma_y + \left(d c_{2\beta} + c s_{2\beta} \right) \sigma_z}{2}\right)^{\otimes M}\label{eq10}
\\&\pm i \left[ \left(\frac{\left(a c_{2\beta} +i b s_{2\beta} \right) \mathbbm{1} + \left(b c_{2\beta} +i a s_{2\beta} \right) \sigma_x + c \sigma_y + d \sigma_z}{2}\right)^{\otimes M}
\right.
\\&\left. \hspace{.5 cm} -\left(\frac{\left(a c_{2\beta} -i b s_{2\beta} \right) \mathbbm{1} + \left(b c_{2\beta} -i a s_{2\beta} \right) \sigma_x + c \sigma_y + d \sigma_z}{2}\right)^{\otimes M}
\right].
\end{align}
It is very difficult to calculate the full expression because every measurement multiplies the number of terms by 4.
So we want to find the relevant terms for the Fisher Information for different limits.
For a single nucleus $M=1$, only two terms are created in every measurement and $d \sigma_z$ can be neglected. Therefore we simplify to
\begin{align}
4 U_\pm \left(\frac{a \mathbbm{1} + b \sigma_x + c \sigma_y}{2}\right) U_\pm^\dagger
&= 2\left(\frac{a \mathbbm{1} + b \sigma_x + c c_{2\beta} \sigma_y}{2}\right)
\pm 2\left(\frac{- b s_{2\beta}\mathbbm{1} - a s_{2\beta} \sigma_x + c \sigma_y}{2}\right)
\\&= 2\mathcal{A} [\rho] + 2\mathcal{B} [\rho].
\end{align}
The $\cos(2\beta)$ for the $c$ coefficient produces the backaction-induced decay $\gamma_b$. We can approximate
\begin{equation}
\mathcal{A} [\rho] = \left(\frac{a \mathbbm{1} + b \sigma_x + c c_{2\beta} \sigma_y}{2}\right) \approx
\left(\frac{a \mathbbm{1} + (1+c_{2\beta})/2 \left(b \sigma_x + c \sigma_y\right)}{2}\right).
\end{equation}
which reduces the Bloch vector according to
\begin{equation}
\left(\frac{1+c_{2\beta}}{2}\right)^k = \exp\left(k \log \left(\frac{1+c_{2\beta}}{2}\right)\right)
\approx \exp\left(k \log \left(1 - \beta^2 \right)\right)
\approx \exp\left(-k \beta^2 \right).
\end{equation}
This is valid because the higher orders will be negligible in the further calculation.
$T_2$ processes have a similar effect, which is why we define the decay of the population in the $x$-$y$-plane as
\begin{equation}
\gamma = -\log\frac{1+c_{2\beta}}{2} + \frac{\tau_m}{T_2^{(nuc)}} \approx \beta^2 + \frac{\tau_m}{T_2^{(nuc)}}
\end{equation}
where $\tau_m$ is the time for each of the N repetitions.
When starting with $\rho_0 = (\mathbbm{1}+\sigma_x)/2$ we can expand the probability for an $N$-measurement record $X^N$ as
\begin{align}
2^N p_{X^N} &\approx 1 + s_{2\beta} \sum\limits_{l=1}^N (-1)^{X_l} \exp(- \gamma l) \cos(l \phi)
\\&+ s_{2\beta}^2 \sum\limits_{1 \leqslant l_1 < l_2 \leqslant N} (-1)^{X_{l_1}}(-1)^{X_{l_2}} \exp(- \gamma (l_2-l_1)) \cos((l_2-l_1) \phi)
\\&+ s_{2\beta}^3 \sum\limits_{1 \leqslant l_1 < l_2 < l_3 \leqslant N} (-1)^{X_{l_1}}(-1)^{X_{l_2}}(-1)^{X_{l_3}} \exp(- \gamma (l_3-l_2+l_1)) \cos((l_3-l_2+l_1) \phi)
\\&+...
\end{align}
In a first step we
calculate the Fisher Information
\begin{align}
I_N &= \sum\limits_{X^N} \frac{1}{p_{X^N}} \left(\frac{\partial p_{X^N}}{\partial \phi}\right)^2 \left(\frac{\partial \phi}{\partial \delta}\right)^2
\end{align}
in the limit $\gamma N \ll 1$
using a geometric series
\begin{align}
\frac{1}{\tau_m^2} I_N &= 2^{-N} \sum\limits_{X^N} \sum\limits_{k=0}^\infty \left(-s_{2\beta} \sum\limits_{l=1}^N (-1)^{X_l} \exp(- \gamma l) \cos(l \phi)- s_{2\beta}^2... \right)^k
\left(-s_{2\beta} \sum\limits_{l=1}^N (-1)^{X_l} \exp(- \gamma l) l\sin(l \phi) - ... \right)^2.
\end{align}
As we average over all $(-1)^{X_l} = \pm 1$, only terms with an even number of all $(-1)^{X_l}$ contribute.
The first order ($\gamma N \ll 1$, $k=0$) results in the $N^3$ scaling that is discussed in the main text.
\begin{align}
\frac{1}{\tau_m^2} I_N &= s_{2\beta}^2 \sum\limits_{l=1}^N \exp(-2 \gamma l) l^2\sin^2(l \phi)
\\&\approx \frac{s_{2\beta}^2}{2} \int\limits_{0}^N \mathrm{d}l \exp(-2 \gamma l) l^2 (1-\cos(2 l \phi))
\\&\approx \frac{s_{2\beta}^2}{2} \int\limits_{0}^N \mathrm{d}l \exp(-2 \gamma l) l^2
\\&= \frac{s_{2\beta}^2}{2}\frac{e^{-2 \gamma N}(-2\gamma N(\gamma N+1)-1)+1}{4\gamma^3} \approx \frac{s_{2\beta}^2}{2} \left[\frac{N^3}{3} - \frac{\gamma N^4}{2}\right].
\end{align}
For $\gamma N > 1$ the
geometric series is not valid anymore and many higher orders in $l$ need to be considered. To show that terms linear in $N$ exist, we consider the approximated second order ($1/p_{X^N}\approx 2^N$)
\begin{align}
\frac{1}{\tau_m^2} I_N &= s_{2\beta}^4 \sum\limits_{1 \leqslant l_1 < l_2 \leqslant N} \exp(-2 \gamma (l_2-l_1)) \sin^2((l_2-l_1) \phi) (l_2-l_1)^2
\\&\overset{l=l_2-l_1}{=} s_{2\beta}^4 \sum\limits_{1 \leqslant l \leqslant N} (N-l) \exp(-2 \gamma l) \sin^2(l \phi) l^2
\\&\approx \frac{s_{2\beta}^4}{2} \int\limits_{0}^N \mathrm{d}l (N-l) \exp(-2 \gamma l) l^2 (1-\cos(2 l \phi))
\\&\approx \frac{s_{2\beta}^4}{2} \int\limits_{0}^N \mathrm{d}l (N-l) \exp(-2 \gamma l) l^2
\\&= \frac{s_{2\beta}^4}{2}\frac{e^{-2 \gamma N}(2\gamma N(\gamma N+2)+3) + 2\gamma N - 3}{8\gamma^4} \approx \frac{s_{2\beta}^4}{2} \frac{N}{4\gamma^3}.
\end{align}
The numerically obtained prefactor from the main text is smaller by a factor 2 in case of $M=1$ and a factor $\left(c_{2\beta}^{M-1}\right)^2 M^2/4$ for $M>1$.
While the first factor 2 is likely to originate from higher order contributions (the dominant order in $l$ depends on $N$), the difference for higher $M$ can be explained by the additional terms that arise in the calculation.
Many terms like in
\begin{align}
2^N p_{X^N} &\approx 1 + \frac{i}{2}\sum\limits_{l=1}^N (-1)^{X_l} \left[\left(c_{2\beta} - i s_{2\beta} \exp(- \gamma l) \cos(l \phi)\right)^M
- \left(c_{2\beta} + i s_{2\beta} \exp(- \gamma l) \cos(l \phi)\right)^M\right]
\\&+\frac{i^2}{4} \sum\limits_{1 \leqslant l_1 < l_2 \leqslant N} (-1)^{X_{l_1}}(-1)^{X_{l_2}}\left[\left(c_{2\beta}^2 - s_{2\beta}^2 \exp(- \gamma (l_2-l_1)) \cos((l_2-l_1) \phi) + i\alpha \right)^M +... \right]
\\&+...
\end{align}
will produce roughly the same Fisher information, in particular the first order scales as expected. The derivative gives a factor $M^2$, and the leading order has a factor $\left(c_{2\beta}^{M-1}\right)^2$. The additional factor 1/2 will arise from averaging random phases $\alpha$.
{The scaling $\propto \gamma^{-3}$ was tested numerically in figure \ref{Fig_Dec}. The simulation results seem to deviate in a regime $\gamma_2\approx \gamma_b$ by a factor of 2. This can be explained by higher order terms being affected more by $\gamma_2$. As a result, lower order terms with the expected scaling dominate.}
{Remarkably these asymptotic results are independent of the initial state of the nuclei. Therefore different phases $\cos\phi_{m_1} \ne \cos\phi_{m_2}$ can be transformed to a basis with equal phases and a different initial state, which yields the same result as for $\cos\phi_{m_1} = \cos\phi_{m_2}$ $\forall m_1,m_2$.}
\begin{figure}
\caption{
{For M=10 nuclei: Fisher information after $2^{24}
\label{Fig_Dec}
\end{figure}
\section{Relation to the Heisenberg limit}
We numerically investigated the minimum difference between our protocol and the Heisenberg limit, which can only be achieved given full control over the nuclei in the absence of decoherence, $\gamma_2 = 0$.
{
Figure \ref{Fig2SI} shows the maximum ratio between the Fisher Information of the investigated protocol and the Heisenberg limit. For different coupling strength the ratio is independent of the number of nuclei.
Note that the peaks are due to the monte carlo simulation. This is confirmed by the curves on the right hand side of figure \ref{Fig2SI}, where the maxima are found at $N \approx \beta^{-2}$ as expected. }
\begin{figure}
\caption{
{Left: Maximum of the ratio between the Fisher information of our protocol and the Heisenberg limit
{for different coupling strength. The results do not depend on the number of nuclei $M$, but are dominated by variations originating from the 9600 averages in the monte carlo simulation. Right: Curves for $M=100$ nuclei: The highest value is reached at $N \approx \beta^{-2}
\label{Fig2SI}
\end{figure}
\section{Simulation}\label{invsim}
The normal simulation (without making use of the permutation invariance) repeats the following steps (after initializing the nuclear spins to $\ket{\psi_0}=\ket{+}^{\otimes M}$, $\rho_0=\ket{\psi_0}\bra{\psi_0}$)
\begin{enumerate}
\item Simulate nuclear spin evolution with the operator
\begin{equation}
U_\mathrm{free} = \exp \left(-i \delta \tau_m \sum_m \sigma_{z}^{(m)}/2\right)
\end{equation}
where $\tau_m$ is the {time between two measurements.}
\item Determine probability to measure the NV in $\ket{+_y}$ after preparing it in $\ket{+}$ and evolving it with the nuclear spins according to (\ref{Heff}) by
\begin{align}
p &= \mathrm{Tr}\left[ \ket{+_y}\bra{+_y}\otimes \mathbbm{1}^{\otimes M} U \ket{+}\bra{+}\otimes \rho_n U^\dagger \right]
= \mathrm{Tr}\left[ U_+ \rho_n U_+^\dagger \right]
\\&= \bra{\psi_n} U_+^\dagger U_+ \ket{\psi_n} \text{(for pure states)}\label{eq6}
\end{align}
where $U_+ = \bra{+_y}U\otimes \mathbbm{1}^{\otimes M} \ket{+}$
\item Probabilistically choose result according to $p$, save result and evolve accordingly including normalisation
$\rho_{n+1}= \mathcal{N} U_{+/-} \rho_n U_{+/-}^\dagger$.
\end{enumerate}
By using the subspace resulting from the symmetry in the case of many spins with equal coupling strength, many spins can be simulated efficiently, as this subspace has dimension $M+1$ instead of $2^M$.
The Fisher Information
\begin{equation}
I_N = \sum\limits_{X^N} p_X\frac{1}{p_X^2} \left(\frac{\partial p_X}{\partial \delta}\right)^2
\end{equation}
was calculated numerically for many different runs evolving $\rho$ following the recipe above to determine $p_X$.
Evolving $\rho_{2/3}$ according to the same measurement outcomes as $\rho$, but with a different evolution parameter $\delta \pm \mathrm{d}\delta$ allows to determine
\begin{equation}
\left(\frac{\partial p_X}{\partial \delta}\right) = \frac{p_X(\delta + \mathrm{d}\delta)- p_X(\delta -
\mathrm{d}\delta)}{2\mathrm{d}\delta}
\end{equation}
for many measurement records. After calculating the Fisher Information for every measurement record, the average
and standard deviation can be obtained.
The accuracy is limited by the Fisher Information due to the {Cram\'er--Rao bound}
\begin{equation}
\delta \omega_N \ge \frac{1}{\sqrt{I_N}}.
\end{equation}
For pure states, the Logarithmic negativity can be simplified to an expression depending on the Schmidt coefficients $\alpha_i$:
\begin{align}
LN(\ket{\Psi}\bra{\Psi}) = 2 \log \left(\sum\limits_i \alpha_i\right),
\end{align}
which can be calculated for considerably larger systems than the partial trace.
{
In order to obtain some insights into the entanglement buildup and its potential role as a
resource in the metrology scheme, we use the logarithmic negativity \cite{Plenio05} as a
quantifier of the entanglement between one of the auxiliary spins with the remaining $M-1$
spins and between equal bi-partitions of the auxiliary spins. While the entanglement between
the nuclei builds up to a steady state after a time $\sim 1/\gamma_b$,
it does not contribute to the quadratic
scaling with the number of auxiliary spins as this effect is related to the readout process, nor does it support the $N^3$ super-Heisenberg scaling.
However, destroying the entanglement after every measurement would lead to a lower prefactor in the asymptotic SQL scaling as it inevitably leads to destruction of information.
Figure \ref{Fig3SI} shows this buildup of entanglement of a scale of $N_{opt}\approx \left({2 k_0 T_s}/{\pi}\right)^{-2}=1/
\gamma_b$.
}
\begin{figure}
\caption{
{Logarithmic negativity for $M$ spins, each spin coupled with $ \beta = 0.01 \times 2/\pi$ with negligible decay $\gamma_2 =0$. The left graph shows the entanglement of one of the
auxiliary spins with the remaining M-1 spins and the right graph shows entanglement in an equal bi-partition
of the auxiliary spins.
The results are averaged over 2000 runs.}
\label{Fig3SI}
\end{figure}
\end{document} |
\begin{document}
\title{Parity proofs of the Kochen-Specker theorem based on the Lie algebra E8}
\begin{abstract}
The 240 root vectors of the Lie algebra E8 lead to a system of 120 rays in a real 8-dimensional Hilbert space that contains a large number of parity proofs of the Kochen-Specker theorem. After introducing the rays in a triacontagonal representation due to Coxeter, we present their Kochen-Specker diagram in the form of a ``basis table'' showing all 2025 bases (i.e., sets of eight mutually orthogonal rays) formed by the rays. Only a few of the bases are actually listed, but simple rules are given, based on the symmetries of E8, for obtaining all the other bases from the ones shown. The basis table is an object of great interest because all the parity proofs of E8 can be exhibited as subsets of it. We show how the triacontagonal representation of E8 facilitates
the identification of substructures that are more easily searched for their parity proofs. We have found hundreds of different types of parity proofs, ranging from 9 bases (or contexts) at the low end to 35 bases at the high end, and involving projectors of various ranks and multiplicities. After giving an overview of the proofs we found, we present a few concrete examples of the proofs that illustrate both their generic features as well as some of their more unusual properties. In particular, we present a proof involving 34 rays and 9 bases that appears to provide the most compact proof of the KS theorem found to date in 8 dimensions.
\end{abstract}
\section{\label{sec:Intro}Introduction}
The exceptional Lie algebra E8 plays a role in a number of physical theories such as supergravity and heterotic string theory\cite{e8refs}. Here we show that its system of root vectors can be used to exhibit a large number of ``parity proofs'' of the Kochen-Specker (KS) theorem\cite{KS1967} ruling out the existence of noncontextual hidden variables theories as viable alternatives to quantum mechanics. The fact that the root vectors of E8 could be made to serve this end was pointed out, in different ways, by Lisonek et. al.\cite{lisonek1} and by Ruuge and van Oystaeyen\cite{ruuge}. However the proof in \cite{ruuge} is unrelated to parity proofs, while \cite{lisonek1}, though proving that $2^{1940}$ parity proofs exist, does not list even a single example of such a proof. The purpose of this paper is to supplement the observations in \cite{lisonek1} and \cite{ruuge} by (a) presenting a general framework (namely, the ``basis table'') within which the parity proofs can be exhibited, (b) showing how the symmetries of E8 can be exploited to simplify the search for its parity proofs, (c) providing an overview of the parity proofs found by our search and, finally, (d) presenting a few concrete examples of the proofs in order to convey some feeling for their variety and intricacy.\\
Parity proofs of the KS theorem are appealing because they take no more than simple counting to verify. What a parity proof is, and how it accomplishes its goals, are matters that will be explained later in this paper.\\
We make a few remarks about E8, to shed light on the way it is used in this paper. For us E8 is simply a collection of 240 vectors (namely, its roots) in a real 8-dimensional Euclidean space. These vectors define the vertices of a semiregular polytope discovered by Thorold Gosset\cite{coxeter} and sometimes described by the symbol $4_{21}$. The vectors have eight real coordinates that can be chosen in a variety of ways. A particularly judicious choice, for our purposes, is the ``triacontagonal representation'' of Coxeter and Shepard\cite{leonardo}. In this representation the coordinates of the vectors are chosen in such a way that if only the first two coordinates are retained (which amounts to projecting the vectors orthogonally from eight dimensions down to two), the tips of the vectors lie at the vertices of eight regular triacontagons lying on concentric circles. Such a projection is shown in Fig.10 of \cite{leonardo}. The eight rings of thirty points are easily picked out in the figure, while the dense network of lines connecting pairs of points are projections of the 6720 edges of Gosset's polytope.\\
Two slight modifications of this figure will convert it into the Kochen-Specker diagram of E8: the first is that only one member of each pair of diametrically opposite vertices should be retained, since one is concerned with rays rather than vectors in a proof of the KS theorem; and the second is that the line segments corresponding to the edges of Gosset's polytope should be replaced by new segments that connect only pairs of vertices that correspond to orthogonal rays. Rather than construct such a diagram, we later present a ``basis table'' that conveys essentially the same information in a more useful form. The basis table is simply a listing of all the bases (i.e., sets of eight mutually orthogonal rays) in the system. Because there are 2025 bases, and this is too large a number to display explicitly, we list just a few bases and give a simple set of rules (based on the symmetries of E8) for generating all the bases from the ones shown. It should be pointed out that the entire basis table can in fact be generated from any one of its elements by applying products of all possible powers of three basic symmetry operations of E8. The basis table is of central importance in this paper because all the parity proofs of E8 can be exhibited as subsets of it.\\
We say a few words about the broader area in which this work is set, to provide some perspective. After their initial discovery in two-qubit systems\cite{Peres1991,Kernaghan1994,Cabello1996,Pavicic2010,Waegell2011c}, parity proofs were discovered in three-\cite{KP1995,Waegell2012a} and higher-qubit\cite{DiVin,Waegell2013} systems, in systems of rays derived from the four-dimensional regular polytopes \cite{Waegell2011a,Waegell2011b,Waegell2014} and the root systems of exceptional Lie algebras\cite{ruugeE6} and, very recently, in a remarkably compact six-dimensional system of complex rays\cite{Lisonek2013} in connection with which an experiment has also been reported\cite{canas2014a}. Aside from revealing the many guises in which quantum contextuality can arise in spaces of different dimensionality, parity proofs are interesting because they have a variety of applications: they can be used to derive state-independent inequalities for ruling out noncontextuality\cite{Cabello2008, badziag,kirchmair,bartosik,amselem ,moussa} and Bell inequalities for identifying fully nonlocal correlations\cite{Aolita}; they have applications to quantum games\cite{Ambrosio}, quantum zero-error communication\cite{Cubitt}, quantum error correction\cite{Error,raussendorf} and the design of relational databases\cite{Abramsky2012}; they can be used to witness the dimension of quantum systems\cite{guhne2014}; and they underlie surprising phenomena such as the quantum pigeonhole effect\cite{aharonov2014,yu2014,rae2014}. Although the KS theorem is theoretically compelling, it has been argued\cite{mayer,kent,barrett} that the finite precision of real measurements nullifies practical attempts at verifying it. There is some debate about this matter\cite{mermin}, but it should be mentioned that methods of establishing contextuality that are not open to this objection have been proposed\cite{Klyachko,Liang,Bengtsson,YuOh}. 
Spekkens\cite{spekkens} has recently expanded upon the conditions that must be satisfied by realistic experiments that claim to rule out noncontextual ontological models. It has been argued in \cite{emerson} that contextuality is the source of the speedup in many quantum information protocols. This brief survey is far from complete, but it serves to show that the KS theorem and quantum contextuality are at the heart of many current research efforts.\\
The plan of this paper is as follows. Section \ref{sec:2} introduces the triacontagonal representation of the 120 rays derived from the root vectors of E8, and shows how their symmetries can be exploited to give an efficient construction of their basis table. Section \ref{sec:3} points out some interesting substructures within E8 that have been shown in the past to give rise to proofs of the KS theorem. Section \ref{sec:4} reviews the notion of a parity proof and shows how the triacontagonal representation of E8 facilitates the identification of subsets of its bases, of distinct symmetry types, that each house a large number of parity proofs. The significance of these smaller subsets is that they are far more easily searched for parity proofs than the entire system. After giving an overview of the parity proofs we found among the different subsets of bases, we present a few examples of the parity proofs that illustrate their important features. Section \ref{sec:5} concludes with a discussion of our results.
\section{\label{sec:2} The E8 system: rays and bases}
The 240 root vectors of E8 come in 120 pairs, with the members of each pair being the negatives of each other. Choosing just one member from each pair yields the 120 rays associated with E8. Each ray has eight real coordinates, which may be chosen in a variety of ways. We use a coordinatization due to Richter\cite{richter}, which differs from the one introduced earlier by Coxeter and Shephard\cite{leonardo}. Let $\omega = \exp(\frac{i\pi}{30})$ and let $\tau = \frac{1+\surd 5}{2}$ be the golden ratio. Define $a,b,c$ and $d$ as the positive numbers satisfying the equations \\
\hspace{5mm} $2a^{2}=1+3^{-1/2}5^{-1/4}\tau^{3/2} , \hspace{5mm} 2b^{2}=1+3^{-1/2}5^{-1/4}\tau^{-3/2}$ \\
\indent
\hspace{5mm} $2c^{2}=1-3^{-1/2}5^{-1/4}\tau^{-3/2} , \hspace{3mm} 2d^{2}=1-3^{-1/2}5^{-1/4}\tau^{3/2}$ .\\
\noindent
For any integer $n$, let $c_{n}=\omega^{n}+\omega^{-n}=2\cos(\frac{n\pi}{30})$ and define the quantities \\
\hspace{2mm} $r_{1}=a/c_{9}$ , \hspace{2mm} $r_{2}=b/c_{9}$ , \hspace{2mm} $r_{3}=c/c_{9}$ , \hspace{2mm} $r_{4}=d/c_{9}$ , \\
\indent
\hspace{2mm} $r_{5}=a/c_{3}$ , \hspace{2mm} $r_{6}=b/c_{3}$ , \hspace{2mm} $r_{7}=c/c_{3}$ , \hspace{2mm} $r_{8}=d/c_{3}$ . \\
\noindent
The 120 rays $|i\rangle$ ($i=1,\cdots, 120$) are then defined as\\
\noindent
$|n+1\rangle = (r_{1}\omega^{2n},r_{4}\omega^{22n},r_{6}\omega^{14n+1},r_{7}\omega^{26n+1})$ \hspace{23 mm} for $0\leq n \leq 14$\\
$|n+16\rangle = (r_{4}\omega^{2n},-r_{1}\omega^{22n},r_{7}\omega^{14n+1},-r_{6}\omega^{26n+1})$ \hspace{16 mm} for $0\leq n \leq 14$\\
$|n+23\rangle = (r_{7}\omega^{29+2n},-r_{6}\omega^{19+22n},-r_{1}\omega^{24+14n},r_{4}\omega^{18+26n})$ \hspace{3 mm} for $8\leq n \leq 14$\\
$|n+38\rangle = (r_{7}\omega^{29+2n},-r_{6}\omega^{19+22n},-r_{1}\omega^{24+14n},r_{4}\omega^{18+26n})$ \hspace{3 mm} for $0\leq n \leq 7$\\
$|n+38\rangle = (r_{6}\omega^{29+2n},r_{7}\omega^{19+22n},r_{4}\omega^{24+14n},r_{1}\omega^{18+26n})$ \hspace{9 mm} for $8\leq n \leq 14$\\
$|n+53\rangle = (r_{6}\omega^{29+2n},r_{7}\omega^{19+22n},r_{4}\omega^{24+14n},r_{1}\omega^{18+26n})$ \hspace{9 mm} for $0\leq n \leq 7$\\
$|n+61\rangle = (r_{8}\omega^{2n},-r_{5}\omega^{22n},-r_{3}\omega^{14n+1},r_{2}\omega^{26n+1})$ \hspace{16 mm} for $0\leq n \leq 14$\\
$|n+76\rangle = (r_{5}\omega^{2n},r_{8}\omega^{22n},-r_{2}\omega^{14n+1},-r_{3}\omega^{26n+1})$ \hspace{16 mm} for $0\leq n \leq 14$\\
$|n+83\rangle = (r_{2}\omega^{29+2n},r_{3}\omega^{19+22n},-r_{8}\omega^{24+14n},-r_{5}\omega^{18+26n})$ \hspace{3 mm} for $8\leq n \leq 14$\\
$|n+98\rangle = (r_{2}\omega^{29+2n},r_{3}\omega^{19+22n},-r_{8}\omega^{24+14n},-r_{5}\omega^{18+26n})$ \hspace{3 mm} for $0\leq n \leq 7$\\
$|n+98\rangle = (r_{3}\omega^{29+2n},-r_{2}\omega^{19+22n},r_{5}\omega^{24+14n},-r_{8}\omega^{18+26n})$ \hspace{3 mm} for $8\leq n \leq 14$\\
$|n+113\rangle = (r_{3}\omega^{29+2n},-r_{2}\omega^{19+22n},r_{5}\omega^{24+14n},-r_{8}\omega^{18+26n})$ \hspace{2 mm} for $0\leq n \leq 7$ \hspace{1 mm} ,\\
\noindent
with each ray being an 8-component column vector whose (real) components are given by the real and imaginary parts of the four complex numbers listed for it\footnote{For example, the components of the column vector $|3\rangle$, in the proper order, are $r_{1}\cos(\frac{2\pi}{15}),r_{1}\sin(\frac{2\pi}{15}),r_{4}\cos(\frac{22\pi}{15}),r_{4}\sin(\frac{22\pi}{15}),
r_{6}\cos(\frac{29\pi}{30}),r_{6}\sin(\frac{29\pi}{30}),r_{7}\cos(\frac{53\pi}{30}),r_{7}\sin(\frac{53\pi}{30}).$}. We will use $\langle i |$ to denote the 8-component row vector that is the transpose of $|i\rangle$.\\
Let us denote by the letters A,...,H each consecutive set of 15 rays (thus A denotes rays 1-15, B rays 16-30, etc.). If we add to each group of 15 rays all their negatives, we get groups of 30 vectors whose first two coordinates define the vertices of regular triacontagons in the plane, with the triacontagons corresponding to the eight letter groups being concentric to one another. This is just the triacontagonal representation of the roots of E8 (or of the vertices of Gosset's polytope $4_{21}$) mentioned in the introduction. Although the coordinates we have introduced for the rays are identical to those of Richter\cite{richter}, our numbering of the rays is a bit different from his (in essence, we have swapped some of his triacontagons and rotated some of them relative to the others for convenience in the presentation of some of our results).\\
A straightforward calculation shows that each of the 120 rays is orthogonal to 63 others and that the rays form 2025 bases. Each ray occurs in 135 bases and its only companions in these bases are the 63 other rays it is orthogonal to. We will denote this system of rays and bases by the symbol $120_{135}$-$2025_{8}$, with the subscript on the left indicating the multiplicity of each of the rays (i.e., the number of bases it occurs in) and that on the right the number of rays in each basis. The product of the numbers in the left half of the symbol equals the product on the right, as it should. The basis table of E8 (i.e., the complete list of all its bases) is \textit{saturated}, by which we mean that all the orthogonalities between its rays are represented in it. Because of this, the basis table is completely equivalent to the Kochen-Specker diagram of its rays\footnote{This is a graph whose vertices correspond to the rays and whose edges join vertices corresponding to orthogonal rays}. However it has the great advantage over the Kochen-Specker diagram that it is easy to interpret and work with.\\
Later we will encounter other systems of rays and bases having a high degree of symmetry, and the notation we have introduced above is easily modified to deal with such cases. For example, a system of 45 rays that forms 15 bases, with 30 of the rays being of multiplicity 2 and the other 15 of multiplicity 4 can be represented by the symbol $30_{2}15_{4}$-$15_{8}$ (again the sum of the products of each number on the left with its subscript equals the product of the number and its subscript on the right). The parity proofs we will present later, which are subsets of the basis table, can also be described by symbols of this kind.\\
We now present the basis table of E8. Figure 1 shows 15 bases that contain all 120 rays once each. The entire basis table can be derived from these 15 bases by permuting the rays in them in the manner we now describe. Let $V$ be the permutation of order 9 with the cycle decomposition\footnote{By the cycle (1 5 9 13 ... 11), we mean the permutation in which 1 goes to 5, 5 to 9, 9 to 13 ... and 11 to 1.} $V$ = (1 5 9 13 53 40 82 105 11)(2 91 90 55 28 42 54 119 49)(3 47 38 66 31 30 41 103 12)(4 10 51 89 117 106 87 27 36)(6 93 97 101 86 71 48 69 113)(7 14 79 67 33 29 64 32 100)(8 95 99 45 44 92 112 63 78)(15 104 34 46 109 77 118 107 85)(16 120 98 60 61 75 18 35 68)(17 73 20 24 59 96 58 57 94)(19 23 76 52 84 56 21 25 37)(22 74 116 108 115 72 62 50 70)(65 80 88 102 83 110 114 81 111)(26 43 39) and $W$ the permutation of order 15 with the cycle decomposition $W$ = (1 $\cdots$ 15)(16 $\cdots$ 30)(31 $\cdots$ 45)(46 $\cdots$ 60)(61 $\cdots$ 75)(76 $\cdots$ 90)(91 $\cdots$ 105)(106 $\cdots$ 120), where the dots signify all the integers between the two extremes. Let each basis in Figure 1 be assigned the label $(0,0,l)$, where the first two labels are fixed and the third varies, in integer steps, from $0$ to $14$. Then any other member of the basis table, which is assigned the label $(n,m,l)$ with $0\leq l,n \leq 14$ and $0 \leq m \leq 8$, can be generated by applying suitable powers of $W$ and $V$ to one of the bases in Fig.1, as described by the equation $(n,m,l) = W^{n}V^{m}(0,0,l)$. The number of bases that can be generated in this way is $15\cdot 9 \cdot 15 = 2025$, which is the entire basis table.\\
\begin{figure}
\caption{Fifteen bases of the E8 system, involving the rays 1-120 once each. The first column shows the three-index label of each basis.}
\label{tab1}
\end{figure}
In Figure \ref{tab2} we show the 9 blocks of bases obtained by applying all powers of $V$ to the block of Figure \ref{tab1}. The remaining blocks of bases can be obtained by applying powers of $W$ to these nine blocks. Since an application of $W$ amounts, for the most part, to increasing the ray numbers by one, these other blocks are easily written down.\\
\begin{figure}
\caption{The nine blocks of bases obtained by applying all powers of $V$, from 0 to 8, to the block of Figure \ref{tab1}
\label{tab2}
\end{figure}
The construction we have given of the basis table can be compressed even further by introducing a permutation of order 15, which we will term $U$, whose cycle decomposition is given by the eight columns of numbers obtained by aligning the bases in Fig.\ref{tab1} and reading down them vertically\footnote{To be explicit, $U$ =(1 29 64 $\cdots$ 83)(7 115 34 $\cdots$ 8) $\cdots$ (111 52 94 $\cdots$ 68), where there are 8 cycles and each consists of 15 numbers.}. Then all the bases can be generated from the first basis of Fig.\ref{tab1} by applying powers of the permutations $U,V$ and $W$ to it, as described by the equation
\begin{equation}
(n,m,l) = W^{n}V^{m}U^{l}(0,0,0) \hspace{2mm} ,
\label{eq1}
\end{equation}
\noindent
with $0 \leq l \leq 14, 0 \leq m \leq 8$ and $0 \leq n \leq 14$. This procedure works even if an arbitrary basis is substituted for $(0,0,0)$ as the seed basis. The three-index label $(n,m,l)$ serves as a convenient shorthand for the basis if one wishes to avoid listing all its rays. \\
Instead of describing $U,V$ and $W$ by the permutations they perform on the rays, one can represent them by the $8 \times 8$ orthogonal matrices
\begin{equation}
U = -|29\rangle \langle 1|-|115\rangle \langle 7|-|33\rangle \langle 62|+|11\rangle \langle 66|+|74\rangle \langle 70|+|61\rangle \langle 73|-|5\rangle \langle 107|-|52\rangle \langle 111|
\label{eq2a}
\end{equation}
\begin{equation}
V = |5\rangle \langle 1|+|14\rangle \langle 7|-|50\rangle \langle 62|-|31\rangle \langle 66|-|22\rangle \langle 70|-|20\rangle \langle 73|-|85\rangle \langle 107|+|65\rangle \langle 111|
\label{eq2b}
\end{equation}
and
\begin{equation}
W = |2\rangle \langle 1|+|8\rangle \langle 7|+|63\rangle \langle 62|+|67\rangle \langle 66|+|71\rangle \langle 70|+|74\rangle \langle 73|+|108\rangle \langle 107|+|112\rangle \langle 111| \hspace{2mm} ,
\label{eq2c}
\end{equation}
as one can easily check by applying them to the column vectors representing the rays and verifying that they produce the desired permutations.\\
We can construct all the symmetries of E8 by looking at the mappings of a fixed basis on to all the bases of the system. Let $x_{1} x_{2} \cdots x_{8}$ and $y_{1} y_{2} \cdots y_{8}$ be two bases and let $y'_{1} y'_{2} \cdots y'_{8}$ be some permutation of the numbers in the latter. Consider the orthogonal transformation
\begin{equation}
T= (-1)^{n_{1}}|y'_{1}\rangle \langle x_{1}|+(-1)^{n_{2}}|y'_{2} \rangle \langle x_{2}|+\cdots+(-1)^{n_{8}}|y'_{8}\rangle \langle x_{8}| \hspace{2mm} ,
\label{eq3}
\end{equation}
where $n_{i}\in \{0,1\}$ for $1 \leq i \leq 8$. Keeping the basis $x_{1} x_{2} \cdots x_{8}$ fixed and letting $y_{1} y_{2} \cdots y_{8}$ vary, the number of transformations of the form (\ref{eq3}) that one can construct is the number of possibilities for the variable basis (2025) times the number of permutations of the variable basis labels ($8!$) times the number of possibilities for the signs of the terms ($2^{8}$). However an investigation shows that only $1/30$ of the $8!$ permutations of the basis labels lead to symmetries of E8, so the total number of its symmetries is $2025\cdot\frac{8!}{30}\cdot2^{8}=696729600$, which equals the number $192\cdot 10!$ given by Coxeter\cite{coxeter} as the order of the symmetry group of Gosset's polytope $4_{21}$.
\section{\label{sec:3}Substructures within the E8 system}
The E8 system contains a number of interesting substructures that yield proofs of the KS theorem. These substructures have all been studied in the past, and we discuss each of them briefly. \\
Two interesting substructures are the Lie algebras E7 and E6, whose roots/rays can be exhibited as subsets of the roots/rays of E8. The rays of E7 are simply the 63 rays orthogonal to any ray of E8; these rays lie in a 7-dimensional space, and if one adjoins to them all their negatives, one gets the 126 roots of E7. The 63 rays of E7 form 135 bases, with each ray occurring in 15 bases. This system can be described by the symbol $63_{15}$-$135_{7}$, and it is saturated. The basis table of E7 is easily extracted from that of E8 by picking out the 135 bases involving a particular ray and then dropping that ray from these bases. This construction shows that the symmetry group of E7 is a subgroup of index 240 of that of E8; thus its order is $192 \cdot 10!/240$, which agrees with the figure of $8\cdot 9!$ given by Coxeter for the order of the symmetry group of the associated 7-dimensional polytope $3_{21}$. The rays of E7 can be used to give a proof of the KS theorem. Because the system is saturated, the proof requires showing that it is impossible to assign noncontextual $0/1$ values to the rays in such a way that each of the 135 bases has just a single ray assigned the value 1 in it, and this is easily done using a ``proof-tree'' argument\footnote {The argument is a \textit{reductio ad absurdum} one: one assumes that a noncontextual value assignment exists and then shows that it leads to a contradiction. Since E7 has a symmetry group that is transitive on its rays, one can begin, without loss of generality, by assigning an arbitrary ray the value 1. This forces all rays orthogonal to that ray to have the value 0, and one then finds that a basis with three rays having the value 0 appears. Assigning one of the remaining rays in this basis the value 1 forces a basis with five rays having the value 0 to appear. However assigning either of the remaining rays in this basis the value 1 forces a basis with all its rays assigned the value 0 to appear, which is not allowed. 
To avoid this conflict, one must proceed backwards along the chain and make alternative choices for the rays assigned the value 1 at every earlier step of the argument and see if any of these alternative possibilities leads to a valid value assignment. One then finds that all the alternatives lead to a situation in which at least one basis has all its rays assigned the value 0, showing that a valid value assignment does not exist and proving the KS theorem. The ``proof-tree'' leading to this contradiction has eight branches, with each branch leading to a contradiction at the fourth step.}. An alternative proof of the KS theorem based on the rays of E7 has been given by Ruuge\cite{ruugeE6}.\\
If one picks the 36 rays orthogonal to any two nonorthogonal rays of E8, one gets the rays of the E6 system, and the roots of E6 are these 36 rays along with their negatives. The rays of E6 do not form even a single basis (i.e., a set of six mutually orthogonal rays) and so do not yield a proof of the KS theorem. This was pointed out by Ruuge\cite{ruugeE6}, who discussed how two rotated copies of E6 could be superposed to give a system of rays that yields a proof of the KS theorem.\\
Another interesting subsystem of E8 is what we will term a Kernaghan-Peres (KP) set\cite{KP1995}. Such a set consists of 40 rays that form 25 bases, with each ray occurring in five bases, so that its symbol is $40_{5}$-$25_{8}$. Kernaghan and Peres\cite{KP1995} constructed such a set as the simultaneous eigenstates of five sets of mutually commuting observables of a system of three qubits. The caption to Fig.\ref{tab3} explains how KP sets can be extracted from the bases of E8 and the figure gives an example of a set constructed using this procedure. The caption to Fig.\ref{tab3a} gives a simple procedure for obtaining all the parity proofs in a KP set\cite{Waegell2012a} and the figure gives one example of each of the three types of parity proofs contained in the KP set of Fig.\ref{tab3} (see the beginning of Sec. \ref{sec:4} for an explanation of the notion of a parity proof). Finally, Fig.\ref{tab3b} gives an example of a ``pseudo'' KP set that closely resembles a KP set but is not one, and in fact yields no proofs of the KS theorem.
\begin{figure}
\caption{A Kernaghan-Peres (KP) set can be extracted from the bases of E8 by choosing any five (``seed'') bases from one of the blocks in Fig.\ref{tab2}
\label{tab3}
\end{figure}
\begin{figure}
\caption{Parity proofs can be extracted from any KP set by picking one member from each of the 10 pairs of complementary bases and supplementing them with the needed seed bases. There are three types of parity proofs that can be constructed in this way, and they involve the addition of one, three or five seed bases. These proofs have the symbols $28_{2}
\label{tab3a}
\end{figure}
\begin{figure}
\caption{The construction that gave rise to the KP set of Fig.\ref{tab3}
\label{tab3b}
\end{figure}
\section{\label{sec:4}Parity proofs in the E8 system}
We will say that a set of projectors in a Hilbert space of even dimension furnishes a parity proof of the KS theorem if the projectors form an odd number of bases in such a way that each projector occurs in an even number of the bases (a basis is any set of mutually orthogonal projectors that sums to the identity, and we will allow for the possibility that the projectors are not all of the same rank). Such a set of projectors proves the KS theorem because it is impossible to assign noncontextual $0/1$ values to them in such a way that the sum of the values assigned to the projectors in any basis is always 1. Because an even-odd conflict makes this assignment impossible, we refer to this type of proof as a parity proof. In \cite{lisonek1} it was pointed out that the E8 system has $2^{1940}$ parity proofs in it, but no examples of such proofs were given. In this section we would like to describe a straightforward method we used to discover a large number of these proofs, and then present a few examples of them. These proofs are far more numerous and varied than those in the KP sets we know to be contained in the E8 system.\\
We will discuss only \textit{critical} parity proofs, where by a critical proof we mean one that ceases to provide a proof of the KS theorem if even a single basis is dropped from it\footnote{Dropping a single basis from a parity proof leaves an even number of bases, which can never provide a parity proof of the KS theorem. However the reduced system may not admit a valid noncontextual value assignment to its rays, and so provide a proof of the KS theorem. If this happens, the original parity proof would not be deemed critical.}. We restrict ourselves to critical proofs to avoid redundancy, since many noncritical proofs can often be reduced to the same critical proof. We present just a few of the more striking critical proofs we found from among the staggeringly large number that exist.\\
In Sec. \ref{sec:2} we introduced the letters A to H for each consecutive set of 15 rays of E8. These letters can be used to attach an 8-letter label to each basis. For example, the basis 1 7 62 66 70 73 107 111 would have the label AAEEEEHH. Viewed in terms of their labels (which specify how the rays of a basis are distributed over the triacontagons of E8), the bases fall into 33 families with distinct triacontagon profiles. We made the important discovery that if one looks at only the bases of a particular family, the parity proofs housed by them could be unearthed with relatively little effort. In the following subsections we discuss the parity proofs we found in a few of the families.
{\bf \subsection{\label{subsec:type1} Type 1 Bases and their parity proofs }}
Let us term bases with the profile AAEEEEHH, BBFFFFGG, CCEEGGGG or DDFFHHHH as Type 1 bases. The 15 bases with each of these profiles give a parity proof that can be characterized by the symbol $15_{4}30_{2}$-$15_{8}$. Figure \ref{tab4} shows the proof given by the bases with profile AAEEEEHH (the proofs given by the other three profiles are very similar).\\
The Type 1 bases give rise to two other types of parity proofs if bases of different profiles can be combined. These proofs are characterized by the symbols $15_{4}70_{2}$-$25_{8}$ and $45_{4}50_{2}$-$35_{8}$ and there are 12 versions of each (all structurally identical, but involving different rays). The properties of the three different types of parity proofs made up only of Type 1 bases are summarized in the first row of Figure \ref{tab5}. \\
\begin{figure}
\caption{A $15_{4}
\label{tab4}
\end{figure}
\begin{figure}
\caption{Parity proofs in E8. For each of the classes of bases in the first column, the second column lists the symbols of the parity proofs that exist and the third column the number of versions of each of the proofs. The listings for Type 1 and Type 2 bases are complete. For Type 3 bases, the listing in the second column is complete but only the total count of all the proofs has been included in the third column. For Type 4 proofs, only nine of the over hundreds of different types of proofs we found are listed. The left halves of the proof symbols in the second column indicate the numbers of rays of different multiplicities present in the proofs, with no attempt being made to associate the rays with projectors of different ranks. Figs. \ref{tab6}
\label{tab5}
\end{figure}
{\bf \subsection{\label{subsec:type2} Type 2 Bases and their parity proofs }}
We will term bases with the profile AABBEEFF or CCDDGGHH as Type 2 bases. There are 30 bases with each of these profiles, and therefore 60 Type 2 bases in all. These bases contain just the two types of parity proofs shown in the second row of Figure \ref{tab5}.\\
Figure \ref{tab6} shows two $36_{2}$-$9_{8}$ proofs of this class that seem very similar at first sight, but are subtly different from one another. While both proofs involve 36 rays that occur two times each over 9 bases, the proof on the left always has the following pairs of rays occur together over the bases: (1,66), (7,62), (16,86), (22,87), (9,84), (19,64), (21,76), (14,89), (17,82), (2,72), (11,61), (27,77), (29,74), (26,81), (4,79), (12,67), (24,69) and (6,71). Thus each of these pairs of rays can be regarded as defining a two-dimensional subspace, or a rank-2 projector, and the proof can be reinterpreted as involving 18 rank-2 projectors that each occur twice over nine bases; this situation can be captured in the symbol $18^{2}_{2}$-$9_{4}$, where the superscript on the left indicates the rank of the projectors and the subscript their multiplicity, and the subscript on the right that each of the 9 bases is made up of four rank-2 projectors. For the proof on the right, 18 of the rays can be paired into the rank-2 projectors (1,16), (62,87), (75,80), (70,90), (82,72), (65,85), (67,77), (11,26) and (6,21), while the remaining 18 rays are associated with rank-1 projectors; thus the symbol of this proof can be written as $9^{2}_{2}18^{1}_{2}$-$9_{6}$, with the superscripts and subscripts on the left having the same meaning as before and the subscript on the right indicating that each basis is made up of six projectors (four of rank-1 and two of rank-2).\\
\begin{figure}
\caption{Two parity proofs made up exclusively of Type 2 bases with the profile AABBEEFF or CCDDGGHH. The proof on the left involves only rank-2 projectors and is characterized by the symbol $18^{2}
\label{tab6}
\end{figure}
{\bf \subsection{\label{subsec:type3} Type 3 Bases and their parity proofs }}
Type 3 bases are those with the profile EEFFGGHH. There are 45 such bases involving 60 rays, and they form a $60_{6}$-$45_{8}$ system. Despite their small number, the bases of this system are a fecund lot and give rise to 20 different types of parity proofs, each of which can come in hundreds to thousands of versions. The symbols of all the possible proofs are shown in the third row of Figure \ref{tab5}. When the different versions of each of the proofs are taken into account, the total number of distinct proofs is 700,326. Figure \ref{tab7} shows a $6_{6}12_{4}42_{2}$-$21_{8}$ proof of this class involving rays of multiplicity 6,4 and 2.\\
\begin{figure}
\caption{A $6_{6}
\label{tab7}
\end{figure}
{\bf \subsection{\label{subsec:type4} Type 4 Bases and their parity proofs }}
Type 4 bases have the profile AABBCCDD. There are 75 such bases involving 60 rays, and they form a $60_{10}$-$75_{8}$ system. We have found over 400 different types of parity proofs in this system, with each coming in anywhere from scores to thousands of versions. We show just nine of these proofs in the last row of Figure \ref{tab5}. There are no critical proofs with more than 27 bases in this class. The number of proofs in this class greatly exceeds those in the previous three classes combined. Figure \ref{tab8} shows a $36_{2}$-$9_{8}$ proof of this class and Fig. \ref{tab8a} shows a rather unusual proof consisting entirely of rank-2 projectors.\\
\begin{figure}
\caption{A $36_{2}
\label{tab8}
\end{figure}
\begin{figure}
\caption{A parity proof made up of 11 Type 4 bases with the profile AABBCCDD. It consists of 44 rays that each occur twice over the 11 bases, so its symbol is $44_{2}
\label{tab8a}
\end{figure}
We end by presenting a proof of this class, in Fig.\ref{tab9}, that involves 34 rays (2 of multiplicity four and 32 of multiplicity two) that occur over 9 bases. This proof is more economical than the best proofs found earlier in 8 dimensions, which involve 36 rays occurring an even number of times over 11 bases\cite{KP1995} or 81 bases\cite{cabello2005} or 9 bases\cite{Waegell2012a}. As explained in the caption to Fig.\ref{tab9}, this proof can also be interpreted as involving 26 rank-1 projectors and 4 rank-2 projectors that occur over 9 bases.\\
\begin{figure}
\caption{A $32_{2}
\label{tab9}
\end{figure}
\section{\label{sec:5}Discussion}
We pointed out at the beginning of Sec. \ref{sec:4} that the bases of E8 have 33 different triacontagon profiles. Our survey of parity proofs in Sec. \ref{sec:4} covered just eight of these profiles (grouped into four types of bases), so it is clear that we have left the vast majority of the proofs untouched. The basis table of E8 presented in this paper serves as a convenient template for displaying all the proofs in this gargantuan system.\\
It is interesting that the triacontagonal representations of both the 600-cell and Gosset's polytope lead to some of the simplest parity proofs contained in them. In the case of the 600-cell, the vertices project into four triacontagons, with two of the triacontagons uniting to yield a parity proof of 15 bases and the other two triacontagons yielding the complementary proof (i.e, one involving all the rays not present in the earlier proof). In the case of Gosset's polytope, the vertices project into eight triacontagons, and one can construct a parity proof (actually four different proofs) by picking out 15 bases that each span all the triacontagons in the same way. The great virtue of the triacontagonal representation for Gosset's polytope (or E8) is, of course, that it allows the bases to be organized into smaller families that are more easily searched for their parity proofs. Although we have unearthed only a tiny fraction of the parity proofs present in E8, their variety and intricacy seems to exceed that in any of the other systems we have studied to date. This is doubtless due to the large basis table of E8 (at 2025 bases, a record) and its huge symmetry group (of over $10^{8}$ elements).\\
A comment should be made about the experimental measurements needed to realize the bases of E8, on which all the parity proofs of this paper depend. It might be asked if the projectors corresponding to some of the bases can be realized as simultaneous eigenstates of commuting three-qubit observables that are tensor products of Pauli operators of the individual qubits. While this is true of some of the bases, such as the ones we have identified as the Kernaghan-Peres sets, it is not true of the bases in general. The simplest way of generating an arbitrary basis from the computational basis is by following Eq.(\ref{eq1}) and applying a product of the appropriate powers of the three unitary operators $U,V$ and $W$. Designing efficient quantum gates for these operators is an interesting problem that we will not take up here. However it seems worth pursuing because a recent experiment\cite{canas2014b} has successfully generated several KS sets in a three-qubit system and holds out the possibility of eventually generating the more complex sorts of KS sets considered here.\\
It was pointed out in \cite{badziag} that any KS proof can be converted into an inequality that is satisfied by any noncontextual hidden variables theory but violated in measurements carried out on an arbitrary quantum state. It might be asked what the extent of the violation is for the parity proofs discussed in this paper. The answer to this question has already been given in an earlier work of ours\cite{Waegell2011b}. We showed there that for any basis-critical parity proof (i.e., one which fails if even a single basis is omitted from it), the upper bound of the inequality for any noncontextual hidden variable theory is $B-2$ (where $B$ is the number of bases in the proof) whereas quantum mechanics predicts the value of $B$. This gap of 2 between the values predicted by hidden variable theories and quantum mechanics is a universal feature of all basis critical parity proofs. Thus the present proofs do not offer any particular advantage, from this point of view, over the many similar proofs\cite{Waegell2011c,Waegell2012a,Waegell2013,Waegell2011b,Waegell2014} we found earlier.\\
Gosset's polytope is the real representative of a complex polytope known as Witting's polytope\cite{leonardo}. Coxeter\cite{coxeter2} has carried out a systematic study of a large number of complex polytopes. It is possible that the ray systems derived from some of them might yield new proofs of the KS theorem. Whether this is true, and of what use it might be, are matters that remain to be explored.\\
{\bf Acknowledgements.} One of us (PKA) would like to thank David Richter for stimulating his interest in E8 and supplying him with a copy of Ref.\cite{richter}, which planted the seed for this work. \\
\end{document} |
\begin{document}
\title{Coproducts in brane topology}
\begin{abstract}
We extend the loop product and the loop coproduct
to the mapping space from the $k$-dimensional sphere, or more generally from any $k$-manifold,
to a $k$-connected space with finite dimensional rational homotopy groups, $k\geq 1$.
The key to extending the loop coproduct is the fact that
the embedding $M\rightarrow M^{S^{k-1}}$ is of ``finite codimension''
in the sense of Gorenstein spaces.
Moreover, we prove their associativity, commutativity, and Frobenius compatibility.
\end{abstract}
\section{Introduction}
Chas and Sullivan \cite{chas-sullivan} introduced the loop product
on the homology $\mathrm{Hom}ol{\loopsp{M}}$ of the free loop space $\loopsp{M}=\map(S^1, M)$ of a manifold.
Cohen and Godin \cite{cohen-godin} extended this product to other string operations,
including the loop coproduct.
Generalizing these constructions,
F\'elix and Thomas \cite{felix-thomas09} defined the loop product and coproduct
in the case $M$ is a Gorenstein space.
A Gorenstein space is a generalization of a manifold from the point of view of Poincar\'e duality,
including the classifying space of a connected Lie group
and the Borel construction of a connected oriented closed manifold and a connected Lie group.
But these operations tend to be trivial in many cases.
Let $\mathbb K$ be a field of characteristic zero.
For example,
the loop coproduct is trivial for a manifold with the Euler characteristic zero \cite[Corollary 3.2]{tamanoi},
the composition of the loop coproduct followed by the loop product is trivial for any manifold \cite[Theorem A]{tamanoi},
and the loop product over $\mathbb K$ is trivial for the classifying space of a connected Lie group \cite[Theorem 14]{felix-thomas09}.
No space for which the composition of the loop coproduct and the loop product is nontrivial has been found.
On the other hand,
Sullivan and Voronov
\todo{article([SV05] in \cite{cohen-hess-voronov}) not found}
generalized the loop product to the sphere space $\spheresp[k]{M}=\map(S^k, M)$\todo{Voronov's notation} for $k\geq 1$.
This product is called the \textit{brane product}.
See \cite[Part I, Chapter 5]{cohen-hess-voronov}.
In this article,
we will generalize the loop coproduct to sphere spaces,
to construct nontrivial and interesting operations.
We call this coproduct the \textit{brane coproduct}.
Here, we review briefly the construction of the loop product and the brane product.
For simplicity, we assume $M$ is a connected oriented closed manifold of dimension $m$.
The loop product is constructed as a mixture of
the Pontrjagin product $\mathrm{Hom}ol{\Omega M \times \Omega M} \rightarrow \mathrm{Hom}ol{\Omega M}$
defined by the composition of based loops
and the intersection product $\mathrm{Hom}ol{M\times M} \rightarrow \mathrm{Hom}ol[*-m]{M}$.
More precisely, we use the following diagram
\begin{equation}
\label{equation:loopProdDiagram}
\xymatrix{
\loopsp{M}\times\loopsp{M} \ar_{\mathrm{ev}_1\times\mathrm{ev}_1}[d] & \loopsp{M}\times_M\loopsp{M} \ar_-\mathrm{incl}[l]\ar[d]\ar^-\mathrm{comp}[r] & \loopsp{M}\\
M\times M & M. \ar_-\Delta[l]
}
\end{equation}
Here,
the square is a pullback diagram by
the diagonal map $\Delta$ and
the evaluation map $\mathrm{ev}_1$ at $1$, identifying $S^1$ with the unit circle $\{z\in \mathbb C\mid \abs{z}=1\}$,
and $\mathrm{comp}$ is the map defined by the composition of loops.
Since the diagonal map $\Delta\colon M\rightarrow M\times M$ is an embedding of finite codimension,
we have the shriek map $\shriekhomol{\Delta}\colon \mathrm{Hom}ol{M\times M}\rightarrow\mathrm{Hom}ol[*-m]{M}$,
which is called the intersection product.
Using the pullback diagram, we can ``lift'' $\shriekhomol{\Delta}$ to
$\shriekhomol{\mathrm{incl}}\colon \mathrm{Hom}ol{\loopsp{M}\times \loopsp{M}}\rightarrow\mathrm{Hom}ol[*-m]{\loopsp{M}\times_M \loopsp{M}}$.
Then, we define the loop product to be the composition
$\mathrm{comp}_*\circ\shriekhomol{\mathrm{incl}}\colon\mathrm{Hom}ol{\loopsp{M}\times \loopsp{M}}\rightarrow\mathrm{Hom}ol[*-m]{\loopsp{M}}.$
The brane product can be defined in a similar way.
Let $k$ be a positive integer.
We use the diagram
\begin{equation*}
\xymatrix{
\spheresp[k]{M}\times\spheresp[k]{M} \ar[d] & \spheresp[k]{M}\times_M\spheresp[k]{M} \ar_\mathrm{incl}[l]\ar[d]\ar^-\mathrm{comp}[r] & \spheresp[k]{M}\\
M\times M & M. \ar_-\Delta[l]
}
\end{equation*}
Since the base map of the pullback diagram is the diagonal map $\Delta$,
which is the same as that for the loop product,
we can use the same method to define the shriek map
$\shriekhomol{\mathrm{incl}}\colon \mathrm{Hom}ol{\spheresp[k]{M}\times \spheresp[k]{M}}\rightarrow\mathrm{Hom}ol[*-m]{\spheresp[k]{M}\times_M \spheresp[k]{M}}$.
Hence we define the brane product to be the composition
$\mathrm{comp}_*\circ\shriekhomol{\mathrm{incl}}\colon\mathrm{Hom}ol{\spheresp[k]{M}\times \spheresp[k]{M}}\rightarrow\mathrm{Hom}ol[*-m]{\spheresp[k]{M}}.$
Next, we review the loop coproduct.
Using the diagram
\begin{equation}
\label{equation:loopCopDiagram}
\xymatrix{
\loopsp{M} \ar_{\mathrm{ev}_1\times\mathrm{ev}_{-1}}[d] & \loopsp{M}\times_M\loopsp{M} \ar_-\mathrm{comp}[l]\ar[d]\ar^-\mathrm{incl}[r] & \loopsp{M}\times\loopsp{M}\\
M\times M & M, \ar_-\Delta[l]
}
\end{equation}
we define the loop coproduct to be the composition
$\mathrm{incl}_*\circ\shriekhomol{\mathrm{comp}}\colon \mathrm{Hom}ol{\loopsp{M}}\rightarrow\mathrm{Hom}ol[*-m]{\loopsp{M}\times\loopsp{M}}$.
But the brane coproduct cannot be defined in this way.
To construct the brane coproduct,
we have to use the diagram
\begin{equation*}
\xymatrix{
\spheresp[k]{M} \ar_\mathrm{res}[d] & \spheresp[k]{M}\times_M\spheresp[k]{M} \ar_-\mathrm{comp}[l]\ar[d]\ar^-\mathrm{incl}[r] & \spheresp[k]{M}\times\spheresp[k]{M}\\
\spheresp[k-1]{M} & M. \ar_-\mathrm{incl}const[l]
}
\end{equation*}
Here, $\mathrm{incl}const\colon M\rightarrow\spheresp[k-1]{M}$ is the embedding by constant maps
and $\mathrm{res}\colon\spheresp[k]{M}\rightarrow\spheresp[k-1]{M}$ is the restriction map to $S^{k-1}$,
which is embedded to $S^k$ as the equator.
In a usual sense, the base map $\mathrm{incl}const$ is not an embedding of finite codimension.
But using the algebraic method of F\'elix and Thomas \cite{felix-thomas09},
we can consider this map as an embedding of codimension $\bar{m}=\dim\Omega M$,
which is the dimension as a $\mathbb K$-Gorenstein space and is finite when $\pi_*(M)\otimes\mathbb K$ is of finite dimension.
Hence, under this assumption, we have the shriek map
$\shriekhomol{\mathrm{incl}const}\colon\mathrm{Hom}ol{\spheresp[k-1]{M}}\rightarrow\mathrm{Hom}ol[*-\bar{m}]{M}$
and the lift
$\shriekhomol{\mathrm{comp}}\colon\mathrm{Hom}ol{\spheresp[k]{M}}\rightarrow\mathrm{Hom}ol[*-\bar{m}]{\spheresp[k]{M}\times_M\spheresp[k]{M}}$.
This enables us to define the brane coproduct to be the composition
$\mathrm{incl}_*\circ\shriekhomol{\mathrm{comp}}\colon\mathrm{Hom}ol{\spheresp[k]{M}}\rightarrow\mathrm{Hom}ol[*-\bar{m}]{\spheresp[k]{M}\times\spheresp[k]{M}}$.
More generally,
using connected sums,
we define the product and coproduct for mapping spaces from manifolds.
Let $S$ and $T$ be manifolds of dimension $k$.
Let $M$ be a $k$-connected $\mathbb K$-Gorenstein space of finite type.\todo{$k$-conn?}
Denote $m = \dim M$.
Then we define the \textit{$(S,T)$-brane product}
\begin{equation*}
\mu_{ST}\colon \mathrm{Hom}ol{M^S\times M^T} \rightarrow \mathrm{Hom}ol[*-m]{M^{S\#T}}
\end{equation*}
using the diagram
\begin{equation}
\label{equation:STProdDiagram}
\xymatrix{
M^S\times M^T \ar[d] & M^S\times_MM^T \ar_-\mathrm{incl}[l]\ar[d]\ar^-\mathrm{comp}[r] & M^{S\#T}\\
M\times M & M. \ar_-\Delta[l]
}
\end{equation}
Assume that $M$ is $k$-connected and
$\pi_*(M)\otimes\mathbb K = \bigoplus_n\pi_n(M)\otimes\mathbb K$ is of finite dimension.
Then the iterated based loop space $\Omega^{k-1} M$ is a Gorenstein space,
and denote $\bar{m} = \dim\Omega^{k-1} M$.
Then we define the \textit{$(S,T)$-brane coproduct}
\begin{equation*}
\delta_{ST}\colon \mathrm{Hom}ol{M^{S\#T}} \rightarrow \mathrm{Hom}ol[*-\bar{m}]{M^S\times M^T}
\end{equation*}
using the diagram
\begin{equation}
\label{equation:STCopDiagram}
\xymatrix{
M^{S\#T}\ar[d] & M^S\times_MM^T \ar_-\mathrm{comp}[l]\ar[d]\ar^-\mathrm{incl}[r] & M^S\times M^T\\
\spheresp[k-1]{M} & M. \ar_-\mathrm{incl}const[l]
}
\end{equation}
Note that, if we take $S=T=S^{k}$,
then $\mu_{ST}$ and $\delta_{ST}$ are the brane product and coproduct, respectively.
Next, we study some fundamental properties of the brane product and coproduct.
For the loop product and coproduct on Gorenstein spaces,
Naito \cite{naito13} showed their associativity and the Frobenius compatibility.
In this article,
we generalize them to the case of the brane product and coproduct.
Moreover, we show the commutativity of the brane product and coproduct,
which was not known even for the case of the loop product and coproduct on Gorenstein spaces.
\begin{theorem}
\label{theorem:associativeFrobenius}
Let $M$ be a $k$-connected space with $\dim\pi_*(M)\otimes\mathbb K < \infty$.
Then the above product and coproduct satisfy the following properties.
\begin{enumerate}[label={\rm(\arabic*)}]
\item \label{item:assocProd} The product is associative and commutative.
\item \label{item:assocCop} The coproduct is associative and commutative.
\item \label{item:Frob} The product and coproduct satisfy the Frobenius compatibility.
\end{enumerate}
In particular,
if we take $S=T=S^k$,
the shifted homology
$\mathrm{Hom}olshift{\spheresp[k]{M}} = \mathrm{Hom}ol[*+m]{\spheresp[k]{M}}$
is a non-unital and non-counital Frobenius algebra,
where $m$ is the dimension of $M$ as a Gorenstein space.
\end{theorem}
\todo{Explain Frobenius algebra}
\todo{commutativity,unitality}
Note that $M$ is a Gorenstein space by the assumption $\dim\pi_*(M)\otimes\mathbb K < \infty$
(see \cref{proposition:FinDimImplyGorenstein}).
The associativity of the product holds even
if we assume that $M$ is a Gorenstein space instead of assuming $\dim\pi_*(M)\otimes\mathbb K < \infty$.
But we need the assumption to prove the commutativity of the \textit{product}.
A non-unital and non-counital Frobenius algebra corresponds to a ``positive boundary'' TQFT,
in the sense that TQFT operations are defined
only when each component of the cobordism surfaces has
a \textit{positive} number of incoming and outgoing boundary components \cite{cohen-godin}.
See \cref{section:proofOfAssocAndFrob} for the precise statement and the proof of the associativity, the commutativity and the Frobenius compatibility.
It is interesting that
the proof of the commutativity of the loop coproduct (i.e., $k=1$) is easier
than that of the brane coproduct with $k\geq 2$.
In fact,
we prove the commutativity of the loop coproduct
using the explicit description of the loop coproduct constructed in \cite{wakatsuki16}.
On the other hand,
we prove the commutativity of the brane coproduct with $k\geq 2$ directly from the definition.
Moreover, we compute an example of the brane product and coproduct.
Here, we consider the shifted homology $\mathrm{Hom}olshift[*]{\spheresp[k]{M}} = \mathrm{Hom}ol[*+m]{\spheresp[k]{M}}$.
We also have the shifts of the brane product and coproduct on $\mathrm{Hom}olshift{\spheresp[k]{M}}$
with the sign determined by the Koszul sign convention.
\begin{theorem}
\label{theorem:braneOperationsOfSphere}
\newcommand{S^{2n+1}}{S^{2n+1}}
The shifted homology $\mathrm{Hom}olshift{\spheresp[2]S^{2n+1}}$, $n\geq 1$, equipped with the brane product $\mu$
is isomorphic to the exterior algebra
$\wedge(y,z)$ with $\deg{y}=-2n-1$ and $\deg{z}=2n-1$.
The brane coproduct $\delta$ is described as follows.
\begin{align*}
\delta(1) &= 1\otimes yz - y\otimes z + z\otimes y + yz\otimes 1\\
\delta(y) &= y\otimes yz + yz\otimes y\\
\delta(z) &= z\otimes yz + yz\otimes z\\
\delta(yz) &= - yz\otimes yz
\end{align*}
\end{theorem}
Note that both the brane product and the brane coproduct are non-trivial.
Moreover, $(\delta\otimes 1)\circ\delta\neq 0$
in contrast with the case of the loop coproduct,
in which the similar composition is always trivial \cite[Theorem A]{tamanoi}.
On the other hand,
the brane coproduct is trivial in some cases.
\begin{theorem}
\label{theorem:coprodTrivForPure}
If the minimal Sullivan model $(\wedge V, d)$ of $M$ is pure
and satisfies $\dim V^{\mathrm{even}}>0$,
then the brane coproduct on $\mathrm{Hom}ol{\spheresp[2]{M}}$ is trivial.
\end{theorem}
See \cref{definition:pureSullivanAlgebra} for the definition of a pure Sullivan algebra.
\begin{remark}
\label{remark:generalizedConnectedSum}
\todo{orientation?}
If we fix embeddings of disks $D^k\hookrightarrow S$ and $D^k\hookrightarrow T$
instead of assuming $S$ and $T$ being manifolds,
we can define the product and coproduct using ``connected sums'' defined by these embedded disks.
Moreover, if we have two disjoint embeddings $i,j\colon D^k\hookrightarrow S$ to the same space $S$,
we can define the ``connected sum'' along $i$ and $j$,
and hence we can define the product and coproduct using this.
We call these $(S,i,j)$-brane product and coproduct,
and give definitions in \cref{section:definitionOfSijBraneOperations}.
\end{remark}
\todo{outline}
\cref{section:constructionFelixThomas} contains brief background material on string topology on Gorenstein spaces.
We define the $(S,T)$-brane product and coproduct in \cref{section:definitionOfBraneOperations}
and $(S,i,j)$-brane product and coproduct in \cref{section:definitionOfSijBraneOperations}.
Here, we defer the proof of \cref{corollary:extSphereSpace}
to \cref{section:constructModel}.
In \cref{section:computeExample},
we compute examples and prove \cref{theorem:braneOperationsOfSphere} and \cref{theorem:coprodTrivForPure}.
\cref{section:proofOfAssocAndFrob} is devoted to the proof of \cref{theorem:associativeFrobenius},
where we defer the determination of some signs
to \cref{section:determineSign} and \cref{section:proofOfExtAlgebraic}.
\tableofcontents
\section{Construction by F\'elix and Thomas}
\label{section:constructionFelixThomas}
In this section,
we recall the construction of the loop product and coproduct by F\'elix and Thomas \cite{felix-thomas09}.
Since the cochain models are good for fibrations,
the duals of the loop product and coproduct are defined at first,
and then we define the loop product and coproduct as the duals of them.
Moreover we focus on the case when the characteristic of the coefficient $\mathbb K$ is zero.
So we make full use of rational homotopy theory.
For the basic definitions and theorems on homological algebra and rational homotopy theory,
we refer the reader to \cite{felix-halperin-thomas01}.
\begin{definition}
[{\cite{felix-halperin-thomas88}}]
Let $m\in\mathbb Z$ be an integer.
\begin{enumerate}
\item An augmented dga (differential graded algebra) $(A,d)$ is called a \textit{($\mathbb K$-)Gorenstein algebra of dimension} $m$ if
\begin{equation*}
\dim \mathrm{Ext}_A^l(\mathbb K, A) =
\begin{cases}
1 & \mbox{ (if $l = m$)} \\
0 & \mbox{ (otherwise),}
\end{cases}
\end{equation*}
where the field $\mathbb K$ and the dga $(A,d)$ are $(A,d)$-modules via the augmentation map and the identity map, respectively.
\item A path-connected topological space $M$ is called a \textit{($\mathbb K$-)Gorenstein space of dimension} $m$
if the singular cochain algebra $\cochain{M}$ of $M$ is a Gorenstein algebra of dimension $m$.
\end{enumerate}
\end{definition}
Here, $\mathrm{Ext}_A(M, N)$ is defined using a semifree resolution of $(M,d)$ over $(A,d)$,
for a dga $(A,d)$ and $(A,d)$-modules $(M,d)$ and $(N,d)$.
$\mathrm{Tor}_A(M,N)$ is defined similarly.
See \cite[Section 1]{felix-thomas09} for details of semifree resolutions.
An important example of a Gorenstein space is given by the following \lcnamecref{proposition:FinDimImplyGorenstein}.
\begin{proposition}
[{\cite[Proposition 3.4]{felix-halperin-thomas88}}]
\label{proposition:FinDimImplyGorenstein}
A 1-connected topological space $M$ is a $\mathbb K$-Gorenstein space if $\pi_*(M)\otimes\mathbb K$ is finite dimensional.
Similarly, a Sullivan algebra $(\wedge V, d)$ is a Gorenstein algebra if $V$ is finite dimensional.
\end{proposition}
Note that this \lcnamecref{proposition:FinDimImplyGorenstein} is stated
only for $\mathbb Q$-Gorenstein spaces in \cite{felix-halperin-thomas88},
but the proof can be applied for any $\mathbb K$ and Sullivan algebras.
Let $M$ be a 1-connected $\mathbb K$-Gorenstein space of dimension $m$ whose cohomology $\cohom{M}$ is of finite type.
As a preparation to define the loop product and coproduct, F\'elix and Thomas proved the following theorem.
\begin{theorem}
[{\cite[Theorem 12]{felix-thomas09}}]
\label{theorem:ExtDiagonal}
The diagonal map $\Delta\colon M \rightarrow M^2$ makes $\cochain{M}$ into a $\cochain{M^2}$-module.
We have an isomorphism
\[
\mathrm{Ext}_{\cochain{M^2}}^*(\cochain{M}, \cochain{M^2}) \cong \cohom[*-m]{M}.
\]
\end{theorem}
By \cref{theorem:ExtDiagonal}, we have $\mathrm{Ext}_{\cochain{M^2}}^m(\cochain{M}, \cochain{M^2})\cong\cohom[0]{M}\cong\mathbb K$, hence the generator
\[
\shriek\Delta \in \mathrm{Ext}_{\cochain{M^2}}^m(\cochain{M}, \cochain{M^2})
\]
is well-defined up to the multiplication by a non-zero scalar.
We call this element the \textit{shriek map} for $\Delta$.
Using the map $\shriek\Delta$, we can define the duals of the loop product and coproduct.
Then, using the diagram \cref{equation:loopProdDiagram},
we define the dual of the loop product to be the composition
\begin{equation*}
\shriek\mathrm{incl}\circ\mathrm{comp}^*\colon\cohom{\loopsp{M}}
\xrightarrow{\mathrm{comp}^*}\cohom{\loopsp{M}\times_M\loopsp{M}}
\xrightarrow{\shriek\mathrm{incl}}\cohom[*+m]{\loopsp{M}\times\loopsp{M}}.
\end{equation*}
Here, the map $\shriek\mathrm{incl}$ is defined by the composition
\begin{equation*}
\begin{array}{l}
\cohom{\loopsp{M}\times_M \loopsp{M}}
\xleftarrow[\cong]{\mathrm{EM}} \mathrm{Tor}^*_{\cochain{M^2}}(\cochain{M},\cochain{\loopsp{M}\times \loopsp{M}}) \\
\xrightarrow{\mathrm{Tor}_{\rm id}(\shriek\Delta, {\rm id})}\mathrm{Tor}^{*+m}_{\cochain{M^2}}(\cochain{M^2},\cochain{\loopsp{M}\times \loopsp{M}})
\xrightarrow[\cong]{} \cohom[*+m]{\loopsp{M}\times \loopsp{M}},
\end{array}
\end{equation*}
where the map $\mathrm{EM}$ is the Eilenberg-Moore map, which is an isomorphism (see \cite[Theorem 7.5]{felix-halperin-thomas01} for details).
Similarly, using the diagram \cref{equation:loopCopDiagram},
we define the dual of the loop coproduct to be the composition
\begin{equation*}
\shriek\mathrm{comp}\circ\mathrm{incl}^*\colon\cohom{\loopsp{M}\times\loopsp{M}}
\xrightarrow{\mathrm{incl}^*}\cohom{\loopsp{M}\times_M\loopsp{M}}
\xrightarrow{\shriek\mathrm{comp}}\cohom{\loopsp{M}}.
\end{equation*}
Here, the map $\shriek\mathrm{comp}$ is defined by the composition
\begin{equation*}
\begin{array}{l}
\cohom{\loopsp{M}\times_M \loopsp{M}}
\xleftarrow[\cong]{\mathrm{EM}} \mathrm{Tor}^*_{\cochain{M^2}}(\cochain{M}, \cochain{\loopsp{M}})\\
\xrightarrow{\mathrm{Tor}_{\rm id}(\shriek\Delta, {\rm id})} \mathrm{Tor}^{*+m}_{\cochain{M^2}}(\cochain{M^2}, \cochain{\loopsp{M}})
\xrightarrow[\cong]{} \cohom[*+m]{\loopsp{M}}.
\end{array}
\end{equation*}
\section{Definition of $(S,T)$-brane coproduct}
\label{section:definitionOfBraneOperations}
Let $\mathbb K$ be a field of characteristic zero,
$S$ and $T$ manifolds of dimension $k$,\todo{connected? connected sum at where}
and $M$ a $k$-connected Gorenstein space of finite type.
As in the construction by F\'elix and Thomas,
which we reviewed in \cref{section:constructionFelixThomas},
we construct the duals
\begin{align*}
\mu^\vee_{ST}\colon& \cohom{M^{S\#T}} \rightarrow \cohom[*+\dim M]{M^S\times M^T}\\
\delta^\vee_{ST}\colon& \cohom{M^S\times M^T} \rightarrow \cohom[*+\dim\Omega^{k-1} M]{M^{S\#T}}
\end{align*}
of the $(S,T)$-brane product and the $(S,T)$-brane coproduct.
The $(S,T)$-brane product is defined in a way similar to that of F\'elix and Thomas.
Using the diagram \cref{equation:STProdDiagram},
we define $\mu^\vee_{ST}$ to be the composition
\begin{equation*}
\shriek\mathrm{incl}\circ\mathrm{comp}^*\colon\cohom{M^{S\#T}}
\xrightarrow{\mathrm{comp}^*}\cohom{M^S\times_MM^T}
\xrightarrow{\shriek\mathrm{incl}}\cohom[*+m]{M^S\times M^T}.
\end{equation*}
Here, the map $\shriek\mathrm{incl}$ is defined by the composition
\begin{equation*}
\begin{array}{l}
\cohom{M^S\times_M M^T}
\xleftarrow[\cong]{\mathrm{EM}} \mathrm{Tor}^*_{\cochain{M^2}}(\cochain{M},\cochain{M^S\times M^T}) \\
\xrightarrow{\mathrm{Tor}_{\rm id}(\shriek\Delta, {\rm id})}\mathrm{Tor}^{*+m}_{\cochain{M^2}}(\cochain{M^2},\cochain{M^S\times M^T})
\xrightarrow[\cong]{} \cohom[*+m]{M^S\times M^T}.
\end{array}
\end{equation*}
Next,\todo{,?} we begin the definition of the $(S,T)$-brane coproduct.
But \cref{theorem:ExtDiagonal} cannot be applied to this case
since the base map of the pullback is $\mathrm{incl}const\colon M\rightarrow\spheresp[k-1]{M}$.
Instead of \cref{theorem:ExtDiagonal},
we use the following theorem to define the $(S,T)$-brane coproduct.
A graded algebra $A$ is \textit{connected} if $A^0=\mathbb K$ and $A^i=0$ for any $i<0$.
A dga $(A,d)$ is \textit{connected} if $A$ is connected.
\newcommand{\bar{m}}{\bar{m}}
\begin{theorem}
\label{theorem:extAlgebraic}
\todo{characteristic can be nonzero}
\todo{Gor dim $m$? $\bar{m}$?}
Let $(A\otimes B, d)$ be a dga such that
$A$ and $B$ are connected commutative graded algebras,
$(A,d)$ is a sub dga of finite type,
and $(A\otimes B, d)$ is semifree over $(A,d)$.
Let $\eta\colon(A\otimes B, d) \rightarrow (A,d)$ be a dga homomorphism.
Assume that the following conditions hold.
\begin{enumerate}[label={\rm(\alph{enumi})}]
\item \label{item:assumpResId} The restriction of $\eta$ to $A$ is the identity map of $A$.
\item \label{item:assumpGorenstein} The dga $(B,\bar{d})=\mathbb K\otimes_A(A\otimes B, d)$ is a Gorenstein algebra of dimension $\bar{m}$.
\item \label{item:assump1conn} For any $b\in B$, the element $db-\bar{d}b$ lies in $A^{\geq 2}\otimes B$.
\end{enumerate}
Then we have an isomorphism
\begin{equation*}
\mathrm{Ext}^*_{A\otimes B}(A, A\otimes B) \cong \cohom[*-\bar{m}]{A}.
\end{equation*}
\end{theorem}
This can be proved by a similar method to \cref{theorem:ExtDiagonal} \cite[Theorem 12]{felix-thomas09}.
The proof is given in \cref{section:proofOfExtAlgebraic}.
Applying to sphere spaces,
we have the following corollary.
\begin{corollary}
\label{corollary:extSphereSpace}
Let $M$ be a $(k-1)$-connected (and 1-connected) space with $\pi_*(M)\otimes\mathbb K$ of finite dimension.
Then we have an isomorphism
\begin{equation*}
\mathrm{Ext}^*_{\cochain{\spheresp[k-1]{M}}}(\cochain{M}, \cochain{\spheresp[k-1]{M}}) \cong \cohom[*-\bar{m}]{M},
\end{equation*}
where $\bar{m}$ is the dimension of $\Omega^{k-1}M$ as a Gorenstein space.
\end{corollary}
To prove the corollary,
we need to construct models of sphere spaces satisfying the conditions of \cref{theorem:extAlgebraic}.
This will be done in \cref{section:constructModel}.
Note that, since $\spheresp[0]M = M\times M$, this is a generalization of \cref{theorem:ExtDiagonal}
(in the case that the characteristic of $\mathbb K$ is zero).
Assume that $M$ is a $k$-connected space with $\pi_*(M)\otimes\mathbb K$ of finite dimension.
Then we have
$\mathrm{Ext}^{\bar{m}}_{\cochain{\spheresp[k-1]{M}}}(\cochain{M}, \cochain{\spheresp[k-1]{M}}) \cong \cohom[0]{M}\cong\mathbb K$,
hence the shriek map for $\mathrm{incl}const\colon M\rightarrow\spheresp[k-1]{M}$ is defined to be the generator
\begin{equation*}
\shriek\mathrm{incl}const\in
\mathrm{Ext}^{\bar{m}}_{\cochain{\spheresp[k-1]{M}}}(\cochain{M}, \cochain{\spheresp[k-1]{M}}),
\end{equation*}
which is well-defined up to the multiplication by a non-zero scalar.
Using $\shriek\mathrm{incl}const$ with the diagram \cref{equation:STCopDiagram},
we define the dual $\delta^\vee_{ST}$ of the $(S,T)$-brane coproduct to be the composition
\begin{equation*}
\shriek\mathrm{comp}\circ\mathrm{incl}^*\colon\cohom{M^S\times M^T}
\xrightarrow{\mathrm{incl}^*}\cohom{M^S\times_MM^T}
\xrightarrow{\shriek\mathrm{comp}}\cohom{M^{S\#T}}.
\end{equation*}
Here, the map $\shriek\mathrm{comp}$ is defined by the composition
\begin{equation*}
\begin{array}{l}
\cohom{M^S\times_M M^T}
\xleftarrow[\cong]{\mathrm{EM}} \mathrm{Tor}^*_{\cochain{\spheresp[k-1]{M}}}(\cochain{M}, \cochain{M^{S\#T}})\\
\xrightarrow{\mathrm{Tor}_{\rm id}(\shriek\mathrm{incl}const, {\rm id})} \mathrm{Tor}^{*+\bar{m}}_{\cochain{\spheresp[k-1]{M}}}(\cochain{\spheresp[k-1]{M}}, \cochain{M^{S\#T}})
\xrightarrow[\cong]{} \cohom[*+\bar{m}]{M^{S\#T}}.
\end{array}
\end{equation*}
Note that the Eilenberg-Moore isomorphism can be applied since $\spheresp[k-1]{M}$ is 1-connected.
\section{Definition of $(S,i,j)$-brane product and coproduct}
\label{section:definitionOfSijBraneOperations}
\newcommand{\#}{\#}
\newcommand{\bigvee}{\bigvee}
\newcommand{Q}{Q}
\newcommand{D}{D}
\newcommand{\smalld^\circ}{D^\circ}
\newcommand{\partial \smalld}{\partial D}
In this section, we give a definition of $(S,i,j)$-brane product and coproduct.
Let $S$ be a topological space, and $i$ and $j$ embeddings $D^k\rightarrow S$.
Fix a small $k$-disk $D\subset D^k$ and denote its interior by $\smalld^\circ$ and its boundary by $\partial \smalld$.
Then we define three spaces $\#(S,i,j)$, $Q(S,i,j)$, and $\bigvee(S,i,j)$ as follows.
The space $\#(S,i,j)$ is obtained from $S\setminus(i(\smalld^\circ)\cup j(\smalld^\circ))$
by gluing $i(\partial \smalld)$ and $j(\partial \smalld)$ by an orientation reversing homeomorphism.
We obtain $Q(S,i,j)$ from $S$ by collapsing the two disks $i(D)$ and $j(D)$ to two points, respectively.
$\bigvee(S,i,j)$ is defined as the quotient space of $Q(S,i,j)$ identifying the two points.
Then, since the quotient space $D^k/D$ is homeomorphic to the disk $D^k$,
we identify $Q(S,i,j)$ with $S$ itself.
By the above definitions, we have the maps
$\#(S,i,j)\rightarrow\bigvee(S,i,j)$ and $S=Q(S,i,j)\rightarrow\bigvee(S,i,j)$.
For a space $M$, these maps induce the maps
$\mathrm{comp}\colon M^{\bigvee(S,i,j)}\rightarrow M^{\#(S,i,j)}$
and $\mathrm{incl}\colon M^{\bigvee(S,i,j)}\rightarrow M^S$.
Moreover, we have diagrams
\begin{equation*}
\xymatrix{
M^S \ar[d] & M^{\bigvee(S,i,j)} \ar_-\mathrm{incl}[l]\ar[d]\ar^-\mathrm{comp}[r] & M^{\#(S,i,j)}\\
M\times M & M \ar_-\Delta[l]
}
\end{equation*}
and
\begin{equation*}
\xymatrix{
M^{\#(S,i,j)}\ar[d] & M^{\bigvee(S,i,j)} \ar_-\mathrm{comp}[l]\ar[d]\ar^-\mathrm{incl}[r] & M^S\\
\spheresp[k-1]{M} & M \ar_-\mathrm{incl}const[l]
}
\end{equation*}
in which the squares are pullback diagrams.
\todo{remove label?}
If $M$ is a $k$-connected space with $\pi_*(M)\otimes\mathbb K$ of finite dimension,
we define the $(S,i,j)$-brane product and coproduct by a similar method to \cref{section:definitionOfBraneOperations},
using these diagrams instead of the diagrams \cref{equation:STProdDiagram} and \cref{equation:STCopDiagram}.
Note that this generalizes $(S,T)$-brane product and coproduct defined in \cref{section:definitionOfBraneOperations}.
\section{Construction of models and proof of \cref{corollary:extSphereSpace}}
\label{section:constructModel}
In this section, we give a proof of \cref{corollary:extSphereSpace},
constructing a Sullivan model of the dga homomorphism
$\mathrm{incl}const^*\colon \cochain{\spheresp[k-1]{M}}\rightarrow\cochain{M}$
satisfying the assumptions of \cref{theorem:extAlgebraic}.
\newcommand{s^{(k-1)}}{s^{(k-1)}}
\newcommand{s^{(k)}}{s^{(k)}}
\newcommand{\diffsphere}[1][k-1]{\bar{d}^{(#1)}}
\newcommand{\diffdisk}[1][k]{d^{(#1)}}
First, we construct models algebraically.
Let $(\wedge V, d)$ be a Sullivan algebra.
For an integer $l\in \mathbb Z$,
let $\susp[l]V$ be a graded module defined by $(\susp[l]V)^n=V^{n+l}$
and $\susp[l]v$ denotes the element in $\susp[l]V$ corresponding to the element $v \in V$.
Define two derivations $s^{(k-1)}$ and $\diffsphere$
on the graded algebra $\wedge V\otimes \wedge \susp[k-1]V$ by
\begin{align*}
&s^{(k-1)}(v)=\susp[k-1]v,\quad s^{(k-1)}(\susp[k-1]v)=0, \\
&\diffsphere(v)=dv,\ \mbox{and}\quad\diffsphere(\susp[k-1]v)=(-1)^{k-1}s^{(k-1)} dv.
\end{align*}
Then it is easy to see that
$\diffsphere\circ\diffsphere=0$ and hence
$(\wedge V\otimes \wedge\susp[k-1]V, \diffsphere)$ is a dga.
Similarly, define derivations $s^{(k)}$ and $\diffdisk$
on the graded algebra $\wedge V\otimes \wedge\susp[k-1]V \otimes\wedge\susp[k]V$ by
\begin{align*}
&s^{(k)}(v)=\susp[k]v,\quad s^{(k)}(\susp[k-1]v)=s^{(k)}(\susp[k]v)=0,\quad
\diffdisk(v)=dv, \\
&\diffdisk(\susp[k-1]v)=\diffsphere(\susp[k-1]v),\
\mbox{and}\quad\diffdisk(\susp[k]v)=\susp[k-1]v+(-1)^ks^{(k)} dv.
\end{align*}
Then it is easy to see that
$\diffdisk\circ\diffdisk=0$ and hence
$(\wedge V\otimes \wedge\susp[k-1]V \otimes\wedge\susp[k]V, \diffdisk)$ is a dga.
Note that the tensor product
$(\wedge V, d) \otimes_{\wedge V\otimes\wedge\susp[k-1]V}(\wedge V\otimes\wedge\susp[k-1]V\otimes\wedge\susp[k]V,\diffdisk)$
is canonically isomorphic to $(\wedge V\otimes\wedge\susp[k]V,\diffsphere[k])$,
where $(\wedge V, d)$ is a $(\wedge V\otimes\wedge\susp[k-1]V, \diffsphere)$-module by the dga homomorphism
$\phi\colon (\wedge V\otimes\wedge\susp[k-1]V, \diffsphere) \rightarrow (\wedge V, d)$
defined by $\phi(v)=v$ and $\phi(\susp[k-1]v)=0$.
It is clear that,
if $V^{\leq k-1}=0$,
the dga $(\wedge V\otimes\wedge\susp[k-1]V,\diffsphere)$ is a Sullivan algebra
and, if $V^{\leq k}=0$,
the dga $(\wedge V\otimes \wedge\susp[k-1]V \otimes\wedge\susp[k]V, \diffdisk)$
is a relative Sullivan algebra over $(\wedge V\otimes\wedge\susp[k-1]V,\diffsphere)$.
\newcommand{\tilde{\varepsilon}}{\tilde{\varepsilon}}
Define a dga homomorphism
\begin{equation*}
\tilde{\varepsilon}\colon
(\wedge V\otimes \wedge\susp[k-1]V \otimes\wedge\susp[k]V, \diffdisk)
\rightarrow (\wedge V, d)
\end{equation*}
by $\tilde{\varepsilon}(v)=v$ and $\tilde{\varepsilon}(\susp[k-1]v)=\tilde{\varepsilon}(\susp[k]v)=0$.
Then the linear part
\begin{equation*}
Q(\tilde{\varepsilon})\colon
(V \oplus\susp[k-1]V \oplus\susp[k]V, \diffdisk_0)
\rightarrow (V, d_0)
\end{equation*}
is a quasi-isomorphism,
and hence $\tilde{\varepsilon}$ is a quasi-isomorphism \cite[Proposition 14.13]{felix-halperin-thomas01}.
Using these algebras,
we have the following proposition.
\begin{proposition}
\label{proposition:modelOfInclConst}
Let $k\geq 2$ be an integer,
$M$ a $(k-1)$-connected (and 1-connected) Gorenstein space of finite type,
and $(\wedge V, d)$ its Sullivan model such that $V^{\leq k-1}=0$ and $V$ is of finite type.
Then the dga homomorphism
$\phi\colon (\wedge V\otimes\wedge\susp[k-1]V, \diffsphere) \rightarrow (\wedge V, d)$
is a Sullivan representative of the map
$\mathrm{incl}const\colon M\rightarrow\spheresp[k-1]{M}$,
i.e., there is a homotopy commutative diagram
\begin{equation*}
\xymatrix{
(\wedge V\otimes\wedge\susp[k-1]V, \diffsphere) \ar[r]^-\phi \ar[d]^\simeq & (\wedge V, d) \ar[d]^\simeq\\
\cochain{\spheresp[k-1]{M}} \ar[r]^-{\mathrm{incl}const^*} & \cochain{M}
}
\end{equation*}
such that the vertical arrows are quasi-isomorphisms.
\end{proposition}
\begin{proof}
We prove the proposition by induction on $k\geq 2$.
The case $k=2$ is well-known.
See \cite[Section 15 (c) Example 1]{felix-halperin-thomas01} or \cite[Appendix A]{wakatsuki16} for details.
Assume that the proposition holds for some $k$.
Consider the commutative diagram
\begin{equation*}
\begin{tikzcd}[row sep=2.5em]
M \arrow[rr,"="] \arrow[dr,swap,"\mathrm{incl}const"] \arrow[dd,swap,"="] &&
M \arrow[dd,swap,"=" near start] \arrow[dr,""] \\
& \spheresp[k]{M} \arrow[rr,crossing over,"" near start] &&
M^{D^k} \arrow[dd,"\mathrm{res}"] \\
M \arrow[rr,"=" near end] \arrow[dr,swap,"="] && M \arrow[dr,swap,"\mathrm{incl}const"] \\
& M \arrow[rr,"\mathrm{incl}const"] \arrow[uu,<-,crossing over,"\mathrm{ev}" near end]&& \spheresp[k-1]{M},
\end{tikzcd}
\end{equation*}
where the front and back squares are pullback diagrams.
Since any pullback diagram of a fibration is modeled by a tensor product of Sullivan algebras \cite[Section 15 (c)]{felix-halperin-thomas01},
this proves the proposition.
\end{proof}
\begin{proof}
[Proof of \cref{corollary:extSphereSpace}]
In the case $k=1$, apply \cref{theorem:extAlgebraic} to the product map
$(\wedge V, d)^{\otimes 2}\rightarrow (\wedge V, d)$.
(Note that this case is a result of F\'elix and Thomas \cite{felix-thomas09}.)
In the case $k\geq 2$, using \cref{proposition:modelOfInclConst},
apply \cref{theorem:extAlgebraic} to the map $\phi$.
\end{proof}
\section{Computation of examples}
\label{section:computeExample}
\newcommand{S^{2n+1}}{S^{2n+1}}
\makeatletter
\newcommand{\@tensorpower}[1]{\ifx#1\relax\else^{\otimes #1}\fi}
\newcommand{\@modelcommand}[3]{
\ifx#2\relax
{#1}\@tensorpower{#3}
\else
#1(#2)\@tensorpower{#3}
\fi
}
\newcommand{{\mathcal M}_\mathrm{P}}{{\mathcal M}_\mathrm{P}}
\newcommand{{\mathcal M}_\mathrm{L}}{{\mathcal M}_\mathrm{L}}
\newcommand{\mpath}[1][\relax]{\@modelcommand{{\mathcal M}_\mathrm{P}}{\relax}{#1}}
\newcommand{\mpathv}[2][\relax]{\@modelcommand{{\mathcal M}_\mathrm{P}}{\wedge #2}{#1}}
\newcommand{\mloop}[1][\relax]{\@modelcommand{{\mathcal M}_\mathrm{L}}{\relax}{#1}}
\newcommand{\mloopv}[2][\relax]{\@modelcommand{{\mathcal M}_\mathrm{L}}{\wedge #2}{#1}}
\newcommand{\mdisk}[1]{\@modelcommand{{\mathcal M}_{D^{#1}}}{\relax}{\relax}}
\newcommand{\msphere}[1]{\@modelcommand{{\mathcal M}_{S^{#1}}}{\relax}{\relax}}
\makeatother
In this section,
we will compute the brane product and coproduct for some examples,
which proves \cref{theorem:braneOperationsOfSphere,theorem:coprodTrivForPure}.
In \cite{naito13}, the duals of the loop product and coproduct are described in terms of Sullivan models using the torsion functor description of \cite{kuribayashi-menichi-naito}.
By a similar method,
we can describe the brane product and coproduct as follows.
Let $M$ be a $k$-connected $\mathbb K$-Gorenstein space of finite type
and $(\wedge V, d)$ its Sullivan model such that $V^{\leq k}=0$ and $V$ is of finite type.
Denote $(\wedge V\otimes\wedge\susp[k]{V}, \diffsphere[k])$ by $\msphere{k}$
and $(\wedge V\otimes\wedge\susp[k-1]{V}\otimes\wedge\susp[k]{V}, \diffdisk[k])$ by $\mdisk{k}$
(see \cref{section:constructModel} for the definitions).
Define a relative Sullivan algebra $\mpath=(\wedge V\tpow2\otimes\wedge \susp{V}, d)$
over $(\wedge V, d)\tpow2$ by the formula
\begin{equation*}
d(\susp v)=1\otimes v - v\otimes 1 - \sum_{i=1}^\infty\frac{(sd)^i}{i!}(v\otimes 1)
\end{equation*}
inductively
(see \cite[Section 15 (c)]{felix-halperin-thomas01} or \cite[Appendix A]{wakatsuki16} for details\todo{wakatsuki16?,inductively?}).
Note that $\cohom{\msphere{k}}\cong\cohom{\spheresp[k]{M}}$ and
$\cohom{\mdisk{k}}\cong\cohom{M^{D^k}}\cong\cohom{M}$.
Then the dual of the brane product on $\cohom{\spheresp[k]{M}}$ is induced by the composition
\begin{equation*}
\begin{array}{l}
\msphere{k}
\xrightarrow{\cong} \wedge V \otimes_{\msphere{k-1}} \mdisk{k}
\xleftarrow[\simeq]{\tilde{\varepsilon}\otimes {\rm id}} \mdisk{k} \otimes_{\msphere{k-1}} \mdisk{k}
\xrightarrow{(\phi\otimes{\rm id})\otimes_\phi(\phi\otimes{\rm id})} \msphere{k} \otimes_{\wedge V} \msphere{k}\\
\xrightarrow{\cong} \wedge V \otimes_{\wedge V\tpow2}\msphere{k}\tpow2
\xleftarrow[\simeq]{\bar{\varepsilon}\otimes{\rm id}} \mpath\otimes_{\wedge V\tpow2}\msphere{k}\tpow2
\xrightarrow{\shriek\delta\otimes{\rm id}} \wedge V\tpow2\otimes_{\wedge V\tpow2}\msphere{k}\tpow2
\xrightarrow{\cong} \msphere{k}\tpow2,
\end{array}
\end{equation*}
where $\shriek\delta$ is a representative of $\shriek\Delta$.
See \cref{section:constructModel} for the definitions of the other maps.
Assume that $\pi_*(M)\otimes\mathbb K$ is of finite dimension.
Then the dual of the brane coproduct is induced by the composition
\begin{equation*}
\begin{array}{l}
\msphere{k}\tpow2
\xrightarrow{\cong} \wedge V\tpow2 \otimes_{\msphere{k-1}\tpow2} \mdisk{k}\tpow2
\xrightarrow{\mu\otimes_{\mu'}\eta} \wedge V \otimes_{\msphere{k-1}} (\mdisk{k}\otimes_{\msphere{k-1}}\mdisk{k})\\
\xleftarrow[\simeq]{\tilde{\varepsilon}\otimes{\rm id}} \mdisk{k}\otimes_{\msphere{k-1}}(\mdisk{k}\otimes_{\msphere{k-1}}\mdisk{k})
\xrightarrow{\shriek\gamma\otimes{\rm id}} \msphere{k-1}\otimes_{\msphere{k-1}}(\mdisk{k}\otimes_{\msphere{k-1}}\mdisk{k})\\
\xrightarrow{\cong} \mdisk{k}\otimes_{\msphere{k-1}}\mdisk{k}
\xrightarrow[\simeq]{\tilde{\varepsilon}\otimes{\rm id}} \wedge V\otimes_{\msphere{k-1}}\mdisk{k}
\xrightarrow{\cong} \msphere{k},
\end{array}
\end{equation*}
where $\shriek\gamma$ is a representative of $\shriek\mathrm{incl}const$,
the maps $\mu$ and $\mu'$ are the product maps,
and $\eta$ is the quotient map.
As a preparation for the computation,
recall the definition of a pure Sullivan algebra.
\begin{definition}
[c.f. {\cite[Section 32]{felix-halperin-thomas01}}]
\label{definition:pureSullivanAlgebra}
A Sullivan algebra $(\wedge V, d)$ with $\dim V < \infty$ is called {\it pure}
if $d(V^{\rm even})=0$ and $d(V^{\rm odd}) \subset \wedge V^{\rm even}$.
\end{definition}
For a pure Sullivan algebra,
we have an explicit construction of the shriek map $\shriek\delta$ and $\shriek\gamma$.
For $\shriek\delta$, see \cite{naito13}\todo{correct?}.
For $\shriek\gamma$, we have the following proposition.
\begin{proposition}
\label{proposition:shriekInclconstForPure}
Let $(\wedge V, d)$ be a pure minimal Sullivan algebra.
Take bases $V^{\mathrm{even}}=\mathbb K\{x_1,\dots, x_p\}$ and $V^{\mathrm{odd}}=\mathbb K\{y_1,\dots, y_q\}$.
Define a $(\wedge V\otimes\wedge \susp{V}, d)$-linear map
\begin{equation*}
\shriek\gamma\colon
(\wedge V\otimes\wedge\susp{V}\otimes\wedge\susp[2]V, d)
\rightarrow(\wedge V\otimes\wedge\susp{V}, d)
\end{equation*}
by $\shriek\gamma(\susp[2]y_1\cdots\susp[2]y_q)=\susp x_1\cdots\susp x_p$
and $\shriek\gamma(\susp[2]y_{j_1}\cdots\susp[2]y_{j_l})=0$ for $l<q$.
Then $\shriek\gamma$ defines a non-trivial element in
$\mathrm{Ext}_{\wedge V\otimes\wedge\susp V}
(\wedge V,\wedge V\otimes\wedge\susp V)$.
\end{proposition}
\begin{proof}
By a straightforward calculation, $\shriek\gamma$ is a cocycle in
$\mathrm{Hom}_{\wedge V\otimes\wedge \susp{V}}
(\wedge V\otimes\wedge\susp{V}\otimes\wedge\susp[2]V,\wedge V\otimes\wedge\susp{V})$.
In order to prove the non-triviality,
we define an ideal
$I=(x_1,\dots, x_p,y_1,\dots, y_q,\susp y_1,\dots,\susp y_q)\subset\wedge V\otimes\wedge\susp V$.
By the purity and minimality, we have $d(I)\subset I$.
Using this ideal, we have the evaluation map of the form
\begin{align*}
&\mathrm{Ext}_{\wedge V\otimes\wedge\susp V}(\wedge V,\wedge V\otimes\wedge\susp V)
\otimes \mathrm{Tor}_{\wedge V\otimes\wedge\susp V}(\wedge V, \wedge V\otimes\wedge \susp V/I) \\
&\xrightarrow{\mathrm{ev}} \mathrm{Tor}_{\wedge V\otimes\wedge\susp V}(\wedge V\otimes\wedge\susp V, \wedge V\otimes\wedge \susp V/I)
\xrightarrow{\cong} \wedge\susp V^{\mathrm{even}}.
\end{align*}
By this map,
the element $[\shriek\gamma]\otimes[\susp[2]y_1\cdots\susp[2]y_q\otimes 1]$ is mapped to
the element $\susp x_1\cdots \susp x_p$, which is obviously non-trivial.
Hence $[\shriek\gamma]$ is also non-trivial.
\end{proof}
Now, we give proofs of \cref{theorem:braneOperationsOfSphere,theorem:coprodTrivForPure}.
\begin{proof}
[Proof of \cref{theorem:braneOperationsOfSphere}]
Using the above descriptions,
we compute the brane product and coproduct for $M=S^{2n+1}$ and $k=2$.
In this case,
we can take $(\wedge V, d) = (\wedge x, 0)$ with $\deg{x}=2n+1$,
and have $\msphere{1}=(\wedge(x,\susp{x}), 0)$
and $\mdisk{2}=(\wedge(x,\susp{x},\susp[2]x),d)$ where $dx=d\susp{x}=0$ and $d\susp[2]x=\susp{x}$.
The computation is straightforward except for the shriek maps $\shriek\delta$ and $\shriek\gamma$.
The map $\shriek\delta$ is the linear map $\mpath\rightarrow(\wedge x, 0)\tpow2$ over $(\wedge x, 0)\tpow2$
determined by $\shriek\delta(1)=1\otimes x - x\otimes 1$ and $\shriek\delta((\susp{x})^l)=0$ for $l\geq 1$.
By \cref{proposition:shriekInclconstForPure}, the map $\shriek\gamma$ is the linear map $\mdisk{k}\rightarrow\msphere{k-1}$ over $\msphere{k-1}$
determined by $\shriek\gamma(\susp[2]x)=1$ and $\shriek\gamma(1)=0$.
Then the dual of the brane product $\mu^\vee$ is a linear map
\begin{equation*}
\mu^\vee\colon \wedge(x,\susp[2]x)\rightarrow\wedge(x,\susp[2]x)\otimes\wedge(x,\susp[2]x)
\end{equation*}
of degree $(1-2n)$
over $\wedge(x)\otimes\wedge(x)$,
which is characterized by
\begin{equation*}
\mu^\vee(1) = 1\otimes x - x\otimes 1,\
\mu^\vee(\susp[2]x) = (1\otimes x - x\otimes 1)(\susp[2]x\otimes 1 + 1\otimes\susp[2]x).
\end{equation*}
Similarly, the dual of the brane coproduct $\delta^\vee$ is a linear map
\begin{equation*}
\delta^\vee\colon \wedge(x,\susp[2]x)\otimes\wedge(x,\susp[2]x)\rightarrow\wedge(x,\susp[2]x)
\end{equation*}
of degree $(1-2n)$
over $\wedge(x)\otimes\wedge(x)$,
which is characterized by
\begin{equation*}
\delta^\vee(1) = 0,\
\delta^\vee(\susp[2]x\otimes 1) = -1,\
\delta^\vee(1\otimes\susp[2]x) = 1,\
\delta^\vee(\susp[2]x\otimes\susp[2]x) = -\susp[2]x.
\end{equation*}
Dualizing these results,
we get the brane product and coproduct on the homology,
which proves \cref{theorem:braneOperationsOfSphere}.
\end{proof}
\begin{proof}
[Proof of \cref{theorem:coprodTrivForPure}]
By \cref{proposition:shriekInclconstForPure},
we have that $\operatorname{Im}(\shriek\gamma\otimes{\rm id})$ is contained in the ideal $(\susp x_1,\dots, \susp x_p)$,
which is mapped to zero by the map $\tilde\varepsilon\otimes{\rm id}$.
\end{proof}
\section{Proof of the associativity, the commutativity, and the Frobenius compatibility}
\newcommand{\tau_{\mathrm{\times}}}{\tau_{\mathrm{\times}}}
\newcommand{\tau_{\mathrm{\#}}}{\tau_{\mathrm{\#}}}
\label{section:proofOfAssocAndFrob}
In this section,
we give a precise statement and the proof of \cref{theorem:associativeFrobenius}.
First, we give a precise statement of \cref{theorem:associativeFrobenius}.
For simplicity, we omit the statement for $(S,i,j)$-brane product and coproduct,
which is almost the same as that for $(S,T)$-brane product and coproduct.
Let $M$ be a $k$-connected $\mathbb K$-Gorenstein space of finite type with $\dim\pi_*(M)\otimes\mathbb K < \infty$.
Denote $m=\dim M$.
Then the precise statement of \cref{item:assocProd} is
that the diagrams
\begin{equation}
\label{equation:assocProdDiagram}
\xymatrix{
\cohom{M^{S\#T\#U}} \ar[r]^-{\mu^\vee_{S\#T,U}} \ar[d]^{\mu^\vee_{S,T\#U}} &
\cohom{M^{S\#T}\times M^U} \ar[d]^{\mu^\vee_{S,T\amalg U}} \\
\cohom{M^S\times M^{T\#U}} \ar[r]^-{\mu^\vee_{S\amalg T,U}} & \cohom{M^S\times M^T\times M^U}
}
\end{equation}
and
\begin{equation}
\label{equation:commProdDiagram}
\xymatrix{
\cohom{M^{T\#S}} \ar[r]^-{\mu^\vee_{T,S}} \ar[d]^{\tau_{\mathrm{\#}}^*} & \cohom{M^T\times M^S} \ar[d]^{\tau_{\mathrm{\times}}^*}\\
\cohom{M^{S\#T}} \ar[r]^-{\mu^\vee_{S,T}} & \cohom{M^S\times M^T}
}
\end{equation}
commute by the sign $(-1)^m$.\todo{commutes by the sign?}
Here, $\tau_{\mathrm{\times}}$ and $\tau_{\mathrm{\#}}$ are defined as the transposition of $S$ and $T$.
Note that the associativity of the product holds even if the assumption $\dim\pi_*(M)\otimes\mathbb K < \infty$ is dropped.
Denote $\bar{m} = \dim\Omega^{k-1} M$.
Then \cref{item:assocCop} states that the diagrams
\begin{equation}
\label{equation:assocCopDiagram}
\xymatrix{
\cohom{M^S\times M^T\times M^U} \ar[r]^-{\delta^\vee_{S\amalg T, U}} \ar[d]^{\delta^\vee_{S,T\amalg U}} &
\cohom{M^S\times M^{T\#U}} \ar[d]^{\delta^\vee_{S,T\#U}} \\
\cohom{M^{S\#T}\times M^U} \ar[r]^-{\delta^\vee_{S\#T,U}} & \cohom{M^{S\#T\#U}}
}
\end{equation}
and
\begin{equation}
\label{equation:commCopDiagram}
\xymatrix{
\cohom{M^T\times M^S} \ar[r]^-{\delta^\vee_{T,S}} \ar[d]^{\tau_{\mathrm{\times}}^*} & \cohom{M^{T\#S}} \ar[d]^{\tau_{\mathrm{\#}}^*}\\
\cohom{M^S\times M^T} \ar[r]^-{\delta^\vee_{S,T}} & \cohom{M^{S\#T}}
}
\end{equation}
commute by the sign $(-1)^{\bar{m}}$.
Similarly, \cref{item:Frob} states that the diagram
\begin{equation}
\label{equation:FrobDiagram}
\xymatrix{
\cohom{M^S\times M^{T\#U}} \ar[r]^-{\delta^\vee_{S,T\#U}} \ar[d]^{\mu^\vee_{S\#T,U}} &
\cohom{M^{S\#T\#U}} \ar[d]^{\mu^\vee_{S\amalg T,U}} \\
\cohom{M^S\times M^T\times M^U} \ar[r]^-{\delta^\vee_{S,T\amalg U}} & \cohom{M^{S\#T}\times M^U}
}
\end{equation}
commutes by the sign $(-1)^{m\bar{m}}$.
\todo{the other diagram?}
\newcommand{\lift}[2]{#1_{#2}}
Before proving \cref{theorem:associativeFrobenius},
we give a notation $\lift{g}{\alpha}$ for a shriek map.
\begin{definition}
Consider a pullback diagram
\begin{equation*}
\xymatrix{
X \ar[r]^g \ar[d]^p & Y \ar[d]^q \\
A \ar[r]^f & B
}
\end{equation*}
of spaces, where $q$ is a fibration.
Let $\alpha$ be an element of $\mathrm{Ext}^m_{\cochain{B}}(\cochain{A}, \cochain{B})$.
Assume that the Eilenberg-Moore map
\begin{equation*}
\operatorname{EM}\colon \mathrm{Tor}^*_{\cochain{B}}(\cochain{A}, \cochain{Y})\xrightarrow{\cong}\cohom{X}
\end{equation*}
is an isomorphism (e.g., $B$ is 1-connected and the cohomology of the fiber is of finite type).
Then we define $\lift{g}{\alpha}$ to be the composition
\begin{equation*}
\lift{g}{\alpha}\colon \cohom{X}
\xleftarrow{\cong} \mathrm{Tor}^*_{\cochain{B}}(\cochain{A}, \cochain{Y})
\xrightarrow{\mathrm{Tor}(\alpha, {\rm id})} \mathrm{Tor}^{*+m}_{\cochain{B}}(\cochain{B}, \cochain{Y})
\xrightarrow{\cong} \cohom[*+m]{Y}.
\end{equation*}
\end{definition}
Using this notation,
we can write
the shriek map $\shriek\mathrm{incl}$ as $\lift{\mathrm{incl}}{\shriek\Delta}$
for the diagram \cref{equation:STProdDiagram},
and the shriek map $\shriek\mathrm{comp}$ as $\lift{\mathrm{comp}}{\shriek\mathrm{incl}const}$
for the diagram \cref{equation:STCopDiagram}.
Now we have the following two propositions
as a preparation of the proof of \cref{theorem:associativeFrobenius}.
\begin{proposition}
\label{proposition:naturalityOfShriek}
Consider a diagram
\begin{equation*}
\begin{tikzcd}[row sep=2.5em]
X \arrow[rr,"g"] \arrow[dr,swap,"\varphi"] \arrow[dd,swap,""] &&
Y \arrow[dd,swap,"q" near start] \arrow[dr,"\psi"] \\
& X' \arrow[rr,crossing over,"g'" near start] &&
Y' \arrow[dd,"q'"] \\
A \arrow[rr,"" near end] \arrow[dr,swap,"a"] && B \arrow[dr,swap,"b"] \\
& A' \arrow[rr,""] \arrow[uu,<-,crossing over,"" near end]&& B',
\end{tikzcd}
\end{equation*}
where $q$ and $q'$ are fibrations
and the front and back squares are pullback diagrams.
Let $\alpha \in \mathrm{Ext}^m_{\cochain{B}}(\cochain{A}, \cochain{B})$
and $\alpha' \in \mathrm{Ext}^m_{\cochain{B'}}(\cochain{A'}, \cochain{B'})$.
Assume that the elements $\alpha$ and $\alpha'$
are mapped to the same element in $\mathrm{Ext}^m_{\cochain{B'}}(\cochain{A'}, \cochain{B})$
by the morphisms induced by $a$ and $b$,
and that the Eilenberg-Moore maps of two pullback diagrams are isomorphisms.
Then the following diagram commutes.
\begin{equation*}
\xymatrix{
\cohom{X'} \ar[r]^-{\lift{g'}{\alpha'}} \ar[d]^{\varphi^*} & \cohom[*+m]{Y'} \ar[d]^{\psi^*}\\
\cohom{X} \ar[r]^{\lift{g}{\alpha}} & \cohom[*+m]{Y}
}
\end{equation*}
\end{proposition}
\begin{proposition}
\label{proposition:functorialityOfShriek}
Consider a diagram
\begin{equation*}
\xymatrix{
X \ar[r]^{\tilde{f}} \ar[d]^p & Y \ar[r]^{\tilde{g}} \ar[d]^q & Z \ar[d]^r \\
A \ar[r]^f & B \ar[r]^g & C,
}
\end{equation*}
where the two squares are pullback diagrams.
Let $\alpha$ be an element of $\mathrm{Ext}^m_{\cochain{B}}(\cochain{A}, \cochain{B})$
and $\beta$ an element of $\mathrm{Ext}^n_{\cochain{C}}(\cochain{B}, \cochain{C})$.
Assume that the Eilenberg-Moore maps are isomorphisms for two pullback diagrams.
Then we have
\begin{equation*}
\lift{(\tilde{g}\circ\tilde{f})}{\beta\circ (g_*\alpha)}
= \lift{\tilde{g}}{\beta} \circ\lift{\tilde{f}}{\alpha},
\end{equation*}
where
$g_*\colon \mathrm{Ext}^m_{\cochain{B}}(\cochain{A}, \cochain{B}) \rightarrow \mathrm{Ext}^m_{\cochain{C}}(\cochain{A}, \cochain{B})$
is the morphism induced by the map $g\colon B\rightarrow C$.
\end{proposition}
These propositions can be proved by straightforward arguments.
\begin{proof}
[Proof of \cref{theorem:associativeFrobenius}]
First, we give a proof for \cref{item:Frob}.
Note that the associativity in \cref{item:assocProd} and \cref{item:assocCop} can be proved similarly.
Consider the following diagram.
\begin{equation*}
\xymatrix{
\cohom{M^S\times M^{T\#U}} \ar[r]^-{\mathrm{incl}^*} \ar[d]^-{\mathrm{comp}^*} &
\cohom{M^S\times_MM^{T\#U}} \ar[r]^-{\lift\mathrm{comp}{\shriek\mathrm{incl}const}} \ar[d]^-{\mathrm{comp}^*} &
\cohom{M^{S\#T\#U}} \ar[d]^-{\mathrm{comp}^*} \\
\cohom{M^S\times M^T\times_MM^U} \ar[r]^-{\mathrm{incl}^*} \ar[d]^-{\lift\mathrm{incl}{\shriek\Delta}} &
\cohom{M^S\times_MM^T\times_MM^U} \ar[r]^-{\lift\mathrm{comp}{\shriek{(\mathrm{incl}const\times{\rm id})}}} \ar[d]^-{\lift\mathrm{incl}{\shriek{({\rm id}\times\Delta)}}} &
\cohom{M^{S\#T}\times_MM^U} \ar[d]^-{\lift\mathrm{incl}{\shriek{({\rm id}\times\Delta)}}} \\
\cohom{M^S\times M^T\times M^U} \ar[r]^-{\mathrm{incl}^*} &
\cohom{M^S\times_MM^T\times M^U} \ar[r]^-{\lift\mathrm{comp}{\shriek{(\mathrm{incl}const\times{\rm id})}}} &
\cohom{M^{S\#T}\times M^U}
}
\end{equation*}
Note that the boundary of the whole square is the same as the diagram \cref{equation:FrobDiagram}.
The upper left square is commutative by the functoriality of the cohomology and
so are the upper right and lower left squares by \cref{proposition:naturalityOfShriek}.
Next, we consider the lower right square.
Applying \cref{proposition:functorialityOfShriek} to the diagram
\begin{equation*}
\xymatrix{
M^S\times_MM^T\times_MM^U \ar[r]^-{\mathrm{comp}} \ar[d] & M^{S\#T}\times_MM^U \ar[r]^-{\mathrm{incl}} \ar[d] & M^{S\#T}\times M^U \ar[d]\\
M\times M \ar[r]^-{\mathrm{incl}const\times{\rm id}} & \spheresp[k-1]{M}\times M \ar[r]^-{{\rm id}\times\Delta} & \spheresp[k-1]{M}\times M^2,
}
\end{equation*}
we have
\begin{equation*}
\lift\mathrm{incl}{\shriek{({\rm id}\times\Delta)}}
\circ \lift\mathrm{comp}{\shriek{(\mathrm{incl}const\times{\rm id})}}
= \lift{(\mathrm{incl}\circ\mathrm{comp})}
{\shriek{({\rm id}\times\Delta)} \circ (({\rm id}\times\Delta)_*\shriek{(\mathrm{incl}const\times{\rm id})})}.
\end{equation*}
Using appropriate semifree resolutions,
we have a representation
\begin{align*}
\shriek{({\rm id}\times\Delta)} \circ (({\rm id}\times\Delta)_*\shriek{(\mathrm{incl}const\times{\rm id})})
& = [{\rm id}\otimes\shriek\delta] \circ [\shriek\gamma\otimes{\rm id}] \\
& = [(-1)^{m\bar{m}}\shriek\gamma\otimes\shriek\delta]
\end{align*}
as a chain map.\todo{write more detailed proof}
Here,
$[\shriek\delta]=\shriek\Delta\in\mathrm{Ext}_{\cochain{M^2}}^m(\cochain{M}, \cochain{M^2})$
and
$[\shriek\gamma]=\shriek\mathrm{incl}const\in\mathrm{Ext}^{\bar{m}}_{\cochain{\spheresp[k-1]{M}}}(\cochain{M}, \cochain{\spheresp[k-1]{M}})$
are representatives as cochains.
Similarly, we compute the other composition to be
\begin{equation*}
\lift\mathrm{comp}{\shriek{(\mathrm{incl}const\times{\rm id})}}
\circ \lift\mathrm{incl}{\shriek{({\rm id}\times\Delta)}}
= \lift{(\mathrm{comp}\circ\mathrm{incl})}
{\shriek{(\mathrm{incl}const\times{\rm id})} \circ ((\mathrm{incl}const\times{\rm id})_*\shriek{({\rm id}\times\Delta)})}
\end{equation*}
with
\begin{equation*}
\shriek{(\mathrm{incl}const\times{\rm id})} \circ ((\mathrm{incl}const\times{\rm id})_*\shriek{({\rm id}\times\Delta)})
= [\shriek\gamma\otimes\shriek\delta].
\end{equation*}
This proves the commutativity by the sign $(-1)^{m\bar{m}}$ of the lower right square.
Next, we prove the commutativity of the coproduct in \cref{item:assocCop}.
This follows from the commutativity of the diagram
\begin{equation}
\label{equation:proofOfCommutativityDiagram}
\xymatrix{
\cohom{M^T\times M^S} \ar[r]^{\mathrm{incl}^*} \ar[d]^{\tau_{\mathrm{\times}}^*}
& \cohom{M^T\times_MM^S} \ar[r]^-{\shriek{\mathrm{comp}}} \ar[d]^{\tau_{\mathrm{\times}}^*}
& \cohom{M^{T\#S}} \ar[d]^{\tau_{\mathrm{\#}}^*}\\
\cohom{M^S\times M^T} \ar[r]^{\mathrm{incl}^*}
& \cohom{M^S\times_MM^T} \ar[r]^-{\shriek{\mathrm{comp}}}
& \cohom{M^{S\#T}}.
}
\end{equation}
The commutativity of the left square is obvious.
If one can apply \cref{proposition:naturalityOfShriek} to the diagram
\cref{equation:cubeDiagramForCommutativity},
we obtain the commutativity of the right square of \cref{equation:proofOfCommutativityDiagram}.
\begin{equation}
\label{equation:cubeDiagramForCommutativity}
\begin{tikzcd}[row sep=2.5em]
M^S\times_MM^T \arrow[rr,"\mathrm{comp}"] \arrow[dr,swap,"\tau_{\mathrm{\times}}"] \arrow[dd,swap,""] &&
M^{S\#T} \arrow[dd,swap,"\mathrm{res}" near start] \arrow[dr,"\tau_{\mathrm{\#}}"] \\
& M^T\times_MM^S \arrow[rr,crossing over,"\mathrm{comp}" near start] &&
M^{T\#S} \arrow[dd,"\mathrm{res}"] \\
M \arrow[rr,"\mathrm{incl}const" near end] \arrow[dr,swap,"{\rm id}"] && \spheresp[k-1]{M} \arrow[dr,swap,"\tau"] \\
& M \arrow[rr,"\mathrm{incl}const"] \arrow[uu,<-,crossing over,"" near end]&& \spheresp[k-1]{M}
\end{tikzcd}
\end{equation}
In order to apply \cref{proposition:naturalityOfShriek},
it suffices to prove the equation
\begin{equation}
\label{equation:commutativityOfInclconstShriek}
\mathrm{Ext}_{\tau^*}({\rm id},\tau^*)(\shriek{\mathrm{incl}const})=(-1)^{\bar{m}}\shriek{\mathrm{incl}const}
\end{equation}
in $\mathrm{Ext}_{\cochain{\spheresp[k-1]{M}}}(\cochain{M},\cochain{\spheresp[k-1]{M}})$.
Since $\mathrm{Ext}^{\bar{m}}_{\cochain{\spheresp[k-1]{M}}}(\cochain{M}, \cochain{\spheresp[k-1]{M}}) \cong \mathbb K$ and
$\mathrm{Ext}_{\tau^*}({\rm id},\tau^*) \circ \mathrm{Ext}_{\tau^*}({\rm id},\tau^*) = {\rm id}$,
we have \cref{equation:commutativityOfInclconstShriek} up to sign.
In \cref{section:proofOfExtAlgebraic},
we will determine the sign to be $(-1)^{\bar{m}}$.
Similarly, in order to prove the commutativity of the product in \cref{item:assocProd},
we need to prove the equation
\begin{equation}
\label{equation:commutativityOfDeltaShriek}
\mathrm{Ext}_{\tau^*}({\rm id},\tau^*)(\shriek{\Delta})=(-1)^m\shriek{\Delta}
\end{equation}
in $\mathrm{Ext}_{\cochain{M^2}}(\cochain{M},\cochain{M^2})$.
As above, we have \cref{equation:commutativityOfDeltaShriek} up to sign.
The sign is determined to be $(-1)^m$ in \cref{section:determineSign}.
The same proofs can be applied for $(S,i,j)$-brane product and coproduct.
\end{proof}
\section{Proof of \cref{equation:commutativityOfDeltaShriek}}
\label{section:determineSign}
\newcommand{f}{f}
In this section, we will prove \cref{equation:commutativityOfDeltaShriek}, determining the sign.
Here, we need the explicit description of $\shriek{\Delta}$ in \cite{wakatsuki16}.
Let $M$ be a $1$-connected space with $\dim\pi_*(M)\otimes\mathbb K < \infty$.
By \cite[Theorem 1.6]{wakatsuki16}, we have a Sullivan model $(\wedge V, d)$ of $M$ which is semi-pure,
i.e., $d(I_V)\subset I_V$, where $I_V$ is the ideal generated by $V^{\mathrm{even}}$.
Let $\varepsilon\colon (\wedge V, d)\rightarrow \mathbb K$ be the augmentation map
and $\mathrm{pr}\colon (\wedge V, d)\rightarrow(\wedge V/I_V, d)$ the quotient map.
Take bases $V^{\mathrm{even}}=\mathbb K\{x_1,\dots, x_p\}$ and $V^{\mathrm{odd}}=\mathbb K\{y_1,\dots, y_q\}$.
Recall the relative Sullivan algebra $\mpath=(\wedge V\tpow2\otimes\wedge \susp{V}, d)$
over $(\wedge V, d)\tpow2$ from \cref{section:computeExample}.
Note that the relative Sullivan algebra $(\wedge V\tpow2\otimes\wedge \susp{V}, d)$ is a relative Sullivan model of
the multiplication map $(\wedge V, d)^{\otimes 2}\rightarrow (\wedge V, d)$.
Hence, using this as a semifree resolution, we have
$\mathrm{Ext}_{\wedge V^\otimes 2}(\wedge V, \wedge V^{\otimes 2})
= \cohom{\mathrm{Hom}_{\wedge V^\otimes 2}(\wedge V^{\otimes 2}\otimes\wedge\susp{V}, \wedge V^{\otimes 2})}$.
By \cite[Corollary 5.5]{wakatsuki16}, we have a cocycle
$f \in \mathrm{Hom}_{\wedge V^\otimes 2}(\wedge V^{\otimes 2}\otimes\wedge\susp{V}, \wedge V^{\otimes 2})$
satisfying $f(\susp{x_1}\cdots\susp{x_p}) = \prod_{j=1}^{j=q}(1\otimes y_j - y_j\otimes 1) + u$
for some $u \in (y_1\otimes y_1, \ldots, y_q\otimes y_q)$.
Consider the evaluation map
\begin{align*}
\mathrm{ev}\colon \mathrm{Ext}_{\wedge V\tpow{2}}(\wedge V, \wedge V\tpow{2})
\otimes \mathrm{Tor}_{\wedge V\tpow{2}}(\wedge V, \wedge V / I_V)
&\rightarrow \mathrm{Tor}_{\wedge V\tpow{2}}(\wedge V\tpow{2}, \wedge V / I_V)\\
&\xrightarrow{\cong} \cohom{\wedge V / I_V},
\end{align*}
where
$(\wedge V, d)\tpow{2}$, $(\wedge V, d)$, and $(\wedge V / I_V, d)$
are $(\wedge V, d)\tpow{2}$-modules via
${\rm id}$, $\varepsilon\cdot{\rm id}$, and $\mathrm{pr}\circ(\varepsilon\cdot{\rm id})$, respectively.
Here, we use $(\wedge V\tpow2\otimes\wedge\susp{V},d)$ as a semifree resolution of $(\wedge V, d)$.
Then, we have
\begin{equation*}
\mathrm{ev}([f]\otimes[\susp{x_1}\cdots\susp{x_p}]) = [y_1\cdots y_q] \neq 0,
\end{equation*}
and hence $[f]\neq 0$ in $\mathrm{Ext}_{\wedge V\tpow{2}}(\wedge V, \wedge V\tpow{2})$.
\newcommand{t}{t}
\newcommand{\tilde{\transposeSullivan}}{\tilde{t}}
Thus, it is enough to calculate $\mathrm{Ext}_{t}({\rm id},t)([f])$
to determine the sign in \cref{equation:commutativityOfDeltaShriek},
where $t\colon (\wedge V, d)\tpow2 \rightarrow (\wedge V, d)$
is the dga homomorphism defined by
$t(v\otimes1)=1\otimes v$ and $t(1\otimes v)=v\otimes 1$.
\begin{proof}
[Proof of \cref{equation:commutativityOfDeltaShriek}]
By definition, $\mathrm{Ext}_{t}({\rm id},t)$ is induced by the map
\begin{equation*}
\mathrm{Hom}_t(\tilde{\transposeSullivan}, t)\colon
\mathrm{Hom}_{\wedge V\tpow2}(\wedge V\tpow2\otimes\wedge\susp{V}, \wedge V\tpow2)
\rightarrow \mathrm{Hom}_{\wedge V\tpow2}(\wedge V\tpow2\otimes\wedge\susp{V}, \wedge V\tpow2),
\end{equation*}
where $\tilde{\transposeSullivan}$ is the dga automorphism defined by
$\tilde{\transposeSullivan}|_{\wedge V\tpow2}=t$ and
$\tilde{\transposeSullivan}(\susp{v}) = -\susp{v}$.
Since $\tilde{\transposeSullivan}(\susp{x_1}\cdots\susp{x_p})=(-1)^p\susp{x_1}\cdots\susp{x_p}$
and $t(\prod_{j=1}^{j=q}(1\otimes y_j - y_j\otimes 1))=(-1)^q\prod_{j=1}^{j=q}(1\otimes y_j - y_j\otimes 1)$,
we have
\begin{equation*}
\mathrm{ev}([\mathrm{Hom}_t(\tilde{\transposeSullivan}, t)(f)]
\otimes[\susp{x_1}\cdots\susp{x_p}])
= \mathrm{ev}([t\circ f\circ\tilde{\transposeSullivan}]
\otimes[\susp{x_1}\cdots\susp{x_p}])
= (-1)^{p+q}[y_1\cdots y_q].
\end{equation*}
Since the parity of $p+q$ is the same as that of the dimension of $(\wedge V, d)$ as a Gorenstein algebra,
the sign in \cref{equation:commutativityOfDeltaShriek} is proved to be $(-1)^{m}$.
\end{proof}
\section{Proof of \cref{equation:commutativityOfInclconstShriek}}
\label{section:proofOfExtAlgebraic}
\newcommand{\mathrm{res}ol}{\eta}
In this section, we give the proof of \cref{equation:commutativityOfInclconstShriek},
using the spectral sequence constructed in the proof of \cref{theorem:extAlgebraic}.
Although the key idea of the proof of \cref{theorem:extAlgebraic} is
the same as \cref{theorem:ExtDiagonal} due to F\'elix and Thomas,
we give the proof here for the convenience of the reader.
\todo{??}
\begin{proof}
[Proof of \cref{theorem:extAlgebraic}]
Take a $(A\otimes B, d)$-semifree resolution $\mathrm{res}ol\colon(P,d)\xrightarrow{\simeq}(A,d)$.
Define $(C,d)=(\mathrm{Hom}_{A\otimes B}(P,A\otimes B),d)$.
Then $\mathrm{Ext}_{A\otimes B}(A,A\otimes B) = \cohom{C,d}$.
We fix a non-negative integer $N$, and define a complex
$(C_N,d) = (\mathrm{Hom}_{A\otimes B}(P,(A/A^{>N})\otimes B),d)$.
We will compute the cohomology of $(C_N,d)$.
Define a filtration $\{F^pC_N\}_{p\geq 0}$ on $(C_N,d)$ by
$F^pC_N = \mathrm{Hom}_{A\otimes B}(P,(A/A^{>N})^{\geq p}\otimes B)$.
Then we obtain a spectral sequence $\{E^{p,q}_r\}_{r\geq 0}$
converging to $\cohom{C_N, d}$.
\begin{claim}
\label{lemma:E2termOfSS}
\begin{equation*}
E^{p,q}_2 =
\begin{cases}
\cohom[p]{A/A^{>N}} & \mbox{(if $q=m$)}\\
0 & \mbox{(if $q\neq m$)}
\end{cases}
\end{equation*}
\end{claim}
\begin{proof}
[Proof of \cref{lemma:E2termOfSS}]
We may assume $p\leq N$.
Then we have an isomorphism of complexes
\begin{equation*}
(A^{\geq p}/A^{\geq p+1},0)\otimes (\mathrm{Hom}_B(B\otimes_{A\otimes B}P,B),d)
\xrightarrow{\cong} (E^p_0, d_0),
\end{equation*}
hence
\begin{equation*}
(A^{\geq p}/A^{\geq p+1})\otimes \cohom{\mathrm{Hom}_B(B\otimes_{A\otimes B}P,B),d}
\xrightarrow{\cong} E^p_1.
\end{equation*}
Define
\begin{equation*}
\bar{\mathrm{res}ol}\colon (B,\bar{d})\otimes_{A\otimes B}(P,d)
\xrightarrow{1\otimes\mathrm{res}ol}(B,\bar{d})\otimes_{A\otimes B}(A,d)
\cong \mathbb K.
\end{equation*}
Note that the last isomorphism follows from the assumption \cref{item:assumpResId}.
Then, since $\mathrm{res}ol$ is a quasi-isomorphism, so is $\bar{\mathrm{res}ol}$.
Hence we have
\begin{equation*}
\cohom[q]{\mathrm{Hom}_B(B\otimes_{A\otimes B}P,B),d}
\cong \mathrm{Ext}^q_B(\mathbb K, B) \cong
\begin{cases}
\mathbb K & \mbox{(if $q = m$)}\\
0 & \mbox{(if $q\neq m$)}
\end{cases}
\end{equation*}
by the assumption \cref{item:assumpGorenstein}.
Hence we have
\begin{align*}
E^{p,q}_1 &\cong (A^{\geq p}/A^{\geq p+1}) \otimes \cohom[q]{\mathrm{Hom}_B(B\otimes_{A\otimes B}P, B),d}\\
&\cong A^p\otimes\mathrm{Ext}^q_B(\mathbb K, B).
\end{align*}
Moreover, using the assumption \cref{item:assump1conn} and the above isomorphisms,
we can compute the differential $d_1$ and have an isomorphism of complexes
\begin{equation}
\label{equation:computationOfE1}
(E^{*,q}_1,d_1) \cong (A^*,d) \otimes \mathrm{Ext}^q_B(\mathbb K, B).
\end{equation}
This proves \cref{lemma:E2termOfSS}.
\end{proof}
Now we return to the proof of \cref{theorem:extAlgebraic}.
We will recover $\cohom{C}$ from $\cohom{C_N}$ by taking a limit.
Since ${\varprojlim}^1_NC_N=0$, we have an exact sequence
\begin{equation*}
0\rightarrow {\varprojlim}^1_N\cohom{C_N}
\rightarrow \cohom{{\varprojlim}_NC_N}
\rightarrow {\varprojlim}_N\cohom{C_N}
\rightarrow 0.
\end{equation*}
By \cref{lemma:E2termOfSS},
the sequence $\{\cohom{C_N}\}_N$ satisfies the (degree-wise) Mittag-Leffler condition,
and hence ${\varprojlim}^1_N\cohom{C_N}=0$.
Thus, we have
\begin{equation*}
\cohom[l]{C}
\cong \cohom[l]{{\varprojlim}_NC_N}
\cong {\varprojlim}_N\cohom[l]{C_N}
\cong \cohom[l-m]{A}.
\end{equation*}
This proves \cref{theorem:extAlgebraic}.
\end{proof}
\newcommand{\tauSullivan}{t}
\newcommand{\tauRelative}{{\tilde{\tauSullivan}}}
\newcommand{\tauFiber}{{\bar{\tauSullivan}}}
\newcommand{\tauFiberResol}{{\hat{\tauSullivan}}}
Next, using the above spectral sequence,
we determine the sign in \cref{equation:commutativityOfInclconstShriek}.
\begin{proof}
[Proof of \cref{equation:commutativityOfInclconstShriek}]
If $k=1$, \cref{equation:commutativityOfInclconstShriek} is the same as \cref{equation:commutativityOfDeltaShriek},
which was proved in \cref{section:determineSign}.
Hence we assume $k\geq 2$.
As in \cref{section:proofOfAssocAndFrob},
let $M$ be a $k$-connected $\mathbb K$-Gorenstein space of finite type with $\dim\pi_*(M)\otimes\mathbb K < \infty$,
and $(\wedge V, d)$ its minimal Sullivan model.
Using the Sullivan models constructed in \cref{section:constructModel},
we have that the automorphism $\mathrm{Ext}_{\tau^*}({\rm id},\tau^*)$
on $\mathrm{Ext}_{\cochain{\spheresp[k-1]{M}}}(\cochain{M},\cochain{\spheresp[k-1]{M}})$
is induced by the automorphism $\mathrm{Hom}_\tauSullivan(\tauRelative, \tauSullivan)$
on
$\mathrm{Hom}_{\wedge V\otimes\wedge\susp[k-1]{V}}
(\wedge V\otimes\wedge\susp[k-1]{V}\otimes\wedge\susp[k]{V}, \wedge V\otimes\wedge\susp[k-1]{V})$,
where $\tauSullivan$ and $\tauRelative$ are the dga automorphisms
on $(\wedge V\otimes\wedge\susp[k-1]{V}, d)$
and $(\wedge V\otimes\wedge\susp[k-1]{V}\otimes\wedge\susp[k]{V}, d)$, respectively,
defined by
\begin{align*}
&\tauSullivan(v) = v,\ \tauSullivan(\susp[k-1]{v}) = -\susp[k-1]{v},\\
&\tauRelative(v) = v,\ \tauRelative(\susp[k-1]{v}) = -\susp[k-1]{v},\ \mathrm{and}\
\tauRelative(\susp[k]{v}) = -\susp[k]{v}.
\end{align*}
Now, consider the spectral sequence $\{E^{p,q}_r\}$ in the proof of \cref{theorem:extAlgebraic}
by taking $(A\otimes B, d) = (\wedge V \otimes \wedge \susp[k-1]{V}, d)$
and $(P,d) = (\wedge V\otimes\wedge\susp[k-1]{V}\otimes\wedge\susp[k]{V}, d)$.
Since $k\geq 2$,
$\mathrm{Hom}_\tauSullivan(\tauRelative, \tauSullivan)$ induces
automorphisms on the complexes $C_N$ and $F^pC_N$,
and hence on the spectral sequence $\{E^{p,q}_r\}$.
By the isomorphism \cref{equation:computationOfE1},
we have
\begin{equation*}
E^{p,q}_2\cong \cohom[p]{A}\otimes\mathrm{Ext}^q_{\wedge \susp[k-1]{V}}(\mathbb K, \wedge \susp[k-1]{V}),
\end{equation*}
and that the automorphism induced on $E_2$ is the same as ${\rm id}\otimes\mathrm{Ext}_\tauFiber({\rm id}, \tauFiber)$,
where $\tauFiber$ is defined by $\tauFiber(\susp[k-1]{v})=-\susp[k-1]{v}$ for $v\in V$.
Since the differential is zero on $\wedge \susp[k-1]{V}$,
we have an isomorphism
\begin{equation*}
\mathrm{Ext}^*_{\wedge \susp[k-1]{V}}(\mathbb K, \wedge \susp[k-1]{V})
\cong \bigotimes_i\mathrm{Ext}^*_{\wedge \susp[k-1]{v_i}}(\mathbb K, \wedge \susp[k-1]{v_i})
\end{equation*}
where $\{v_1,\ldots,v_l\}$ is a basis of $V$.
Using this isomorphism, we can identify
\begin{equation*}
\mathrm{Ext}_\tauFiber({\rm id}, \tauFiber) = \bigotimes_i\mathrm{Ext}_{\tauFiber_i}({\rm id}, \tauFiber_i),
\end{equation*}
where $\tauFiber_i$ is defined by $\tauFiber_i(\susp[k-1]{v_i})=-\susp[k-1]{v_i}$.
Since $(-1)^{\dim V}=(-1)^{\bar{m}}$,
it suffices to show $\mathrm{Ext}_{\tauFiber_i}({\rm id}, \tauFiber_i)=-1$.
Taking a resolution, we have
\begin{align*}
&\mathrm{Ext}^*_{\wedge \susp[k-1]{v_i}}(\mathbb K, \wedge \susp[k-1]{v_i})
= \cohom{\mathrm{Hom}_{\wedge \susp[k-1]{v_i}}
(\wedge \susp[k-1]{v_i}\otimes\wedge\susp[k]{v_i}, \wedge \susp[k-1]{v_i})}\\
&\mathrm{Ext}_{\tauFiber_i}({\rm id}, \tauFiber_i)=\cohom{\mathrm{Hom}_{\tauFiber_i}(\tauFiberResol_i, \tauFiber_i)},
\end{align*}
where the differential $d$ on $\wedge \susp[k-1]{v_i}\otimes\wedge\susp[k]{v_i}$
is defined by $d(\susp[k-1]{v_i})=0$ and $d(\susp[k]{v_i})=\susp[k-1]{v_i}$,
and the dga homomorphism $\tauFiberResol_i$ is defined by
$\tauFiberResol_i(\susp[k-1]{v_i})=-\susp[k-1]{v_i}$ and $\tauFiberResol_i(\susp[k]{v_i})=-\susp[k]{v_i}$.
\newcommand{\Extgen}{f}
Using this resolution, we have the generator $[\Extgen]$ of
$\cohom{\mathrm{Hom}_{\wedge \susp[k-1]{v_i}}(\wedge \susp[k-1]{v_i}\otimes\wedge\susp[k]{v_i}, \wedge \susp[k-1]{v_i})}\cong\mathbb K$
as follows:
\begin{itemize}
\item If $\deg{\susp[k-1]{v_i}}$ is odd,
define $\Extgen$ by $\Extgen(1)=\susp[k-1]{v_i}$ and $\Extgen((\susp[k]{v_i})^l)=0$ for $l\geq 1$.
\item If $\deg{\susp[k-1]{v_i}}$ is even,
define $\Extgen$ by $\Extgen(1)=0$ and $\Extgen(\susp[k]{v_i})=1$.
\end{itemize}
In both cases, we have
$\mathrm{Hom}_{\tauFiber_i}(\tauFiberResol_i, \tauFiber_i)(\Extgen)
=\tauFiber_i\circ\Extgen\circ\tauFiberResol_i
=-\Extgen$.
This proves $\mathrm{Ext}_{\tauFiber_i}({\rm id}, \tauFiber_i)=-1$
and completes the determination of the sign in \cref{equation:commutativityOfInclconstShriek}.
\end{proof}
\section*{Acknowledgment}
I would like to express my gratitude to Katsuhiko Kuribayashi and Takahito Naito for productive discussions and valuable suggestions.
Furthermore, I would like to thank my supervisor Nariya Kawazumi for the enormous support and comments.
This work was supported by JSPS KAKENHI Grant Number 16J06349 and the Program for Leading Graduate School, MEXT, Japan.
\end{document} |
\begin{document}
\title{Sharp energy self-determination of macroscopic quantum bodies in pure states, as a validation of the First Law of Thermodynamics}
\author{V\'{\i}ctor Romero-Roch\'in}
\affiliation{Instituto de F\'isica, Universidad Nacional Aut\'onoma de M\'exico \\
Apartado Postal 20-364, 01000 Cd. M\'exico, Mexico}
\date{\today}
\begin{abstract}
We argue that a very large class of quantum pure states of isolated macroscopic bodies have sharply peaked energy distributions, with their width relative to the average scaling between $\sim N^{-1}$ and $\sim N^{-1/2}$, with $N \gg 1$, the number of atoms conforming the body. Those states are dense superpositions of energy eigenstates within arbitrary finite or infinite energy intervals that decay sufficiently fast. The sharpness of the energy distribution implies that closed systems in those states are {\it microcanonical} in the sense that only energy eigenstates very near to the mean energy contribute to their thermodynamic evolution. Since thermodynamics accurately describes processes of macroscopic bodies and requires that closed systems have constant energy, our claim is that these pure states are typical of macroscopic systems.
The main assumption beneath the energy sharpness is that the isolated body can reach thermal equilibrium if left unaltered. We argue that such a self-sharpness of the energy in macroscopic bodies indicates that the First Law of Thermodynamics is statistical in character.\\
\noindent
{\bf Key words:} quantum mechanics of macroscopic systems; thermodynamics; pure quantum states.
\end{abstract}
\maketitle
\section{Introduction}
An isolated quantum macroscopic body, whose atoms or molecules interact via short range interatomic potentials, reaches thermal equilibrium if left unperturbed for a sufficiently long time. \cite{LL} We face this situation, for instance, {\it every single time} that the lid of a (very good) thermos bottle is closed or a gas of alkali atoms is confined by a magneto-optical trap in ultra-high vacuum. \cite{Cornell,Ketterle,Pethick,Seman} A very important observation is that this occurs every single time that the experiment is ``repeated'', independently of whether the initial state is the same or not and independently of whether the system reaches the same equilibrium state or not. As a matter of fact, it is actually very hard to conceive or prepare an everyday system, being a solid, fluid or superfluid, to remain in non-equilibrium states, as all tend to thermalize, either in contact with their environment or completely isolated. Our interest here is on isolated systems that do thermalize. In those cases, thermodynamics provides a very precise description of properties and transformations of macroscopic systems in and between states in thermal equilibrium, and gives us very general conditions and restrictions on the relaxation to equilibrium. In particular, the First Law, that establishes that the change of energy of a system during a process equals the change of energy of its surroundings in terms of work and heat, demands that the total energy of the system plus environment, a composite isolated system, remains constant throughout. If we appeal to classical mechanics we can invoke the conservation of energy of closed systems and establish the microcanonical ensemble as the representation of the equilibrium state, in which all points in phase space with energy equal to the initial and constant energy of the system are equally probable. 
\cite{LL} But, in real life, systems obey quantum mechanics and, in general and strictly speaking, energy is undetermined since quantum states are superpositions of energy eigenstates. To cope with this complication it is usually argued that the energy of a quantum closed system can be determined within a very small ``microcanonical'' shell $\delta E$ around a value $E$ and, then, once equilibrium is reached, the quantum microcanonical distribution is obeyed; see e.g. Refs. \cite{LL,Goldstein0,Reimann1,Goldstein3}. In particular, Ref. \cite{Goldstein3} provides a thorough discussion of the {\it individual} evolution of these microcanonical states towards equilibrium.\\
However, how can it be true, or how can we be sure, that every time that we prepare a closed system in an arbitrary state it is guaranteed that its energy is within a very small shell $\delta E$? In other words, how can typical quantum states of macroscopic bodies yield a very sharply peaked energy distribution? This is the question that we address here. By ``typical'' states we mean those that we prepare in our everyday life or in controlled experiments, as we take for granted that the laws of thermodynamics apply to them. By its enormous encompassing nature, the posed question cannot be answered either fully or rigorously. Here, we give very general requirements that pure states $|\psi\rangle$ of macroscopic bodies should obey to obtain such a ``typicality'' and, for contrast, we also give exceptions to this rule.\\
As we will discuss, the main requirement that pure states of macroscopic bodies should obey is that they are dense superpositions of energy eigenstates, either {\it bounded} in energy or with a fast decay for large energy. We will qualify what we mean by ``fast'' decay. This demand, in addition to the opposite fact that the energy density of states of macroscopic bodies that can relax to equilibrium grows extremely fast with energy, allow us to show that very sharply peaked energy distributions are obtained. That is, that the energy of the system is automatically determined within a very small interval $\Delta E$. Hence, the microcanonical shell $\delta E$ around the mean value $E$ is given by $\Delta E$ and does not need to be assumed a priori, as all eigenstates that effectively contribute to the thermodynamic properties have essentially the same constant energy $E$. On the one hand, this allows us to suggest that those states are typical of macroscopic bodies since they lead to the conditions required by the First Law of thermodynamics; and on the other, however, this also implies that the First Law is of statistical character, rather than being an exact or a rigorous one.\\
The result of this paper should also be relevant for the ongoing discussion on thermal equilibration of {\it isolated} macroscopic quantum systems. While this is an old and unabated question, see Refs. \cite{Boltzmann,vanHove,Redfield,Montroll,Zwanzig,Davis,Linblad,Legget,vKOpp,RR,Deutsch,Srednicki,Tasaki,Zurek} to mention a few, there has been a recent vigorous revival of this debate \cite{Goldstein3,Goldstein0,Reimann1,Rigol1,Linden,Deutsch2,Goldstein1,Reimann2,Deutsch3,Kim,Goldstein2,Gogolin,Kaufman,Neill,Eisert,Calabrese,Govinda}, further motivated by recent experimental developments \cite{Kaufman,Neill}
in which the control on preparation and measurement has yielded powerful tools to test fundamental aspects and assumptions regarding the foundations of statistical physics. In particular, the present study should contribute to the understanding and extensions of the validity of the Eigenstate Thermalization Hypothesis \cite{Deutsch,Srednicki,Rigol1,Reimann2,Kaufman}.\\
In Section II we first review the well-known fact that the density of states of systems that relax to equilibrium grows extremely fast with energy. \cite{LL} Then, in Section III, we give the general arguments and assumptions on the considered many-body pure states $|\psi\rangle$ that yield a sharply peaked energy distribution, and analyze some specific examples. In Section IV we discuss some exceptions to the rule. Finally, in Section V we briefly review the consistency between equilibrium states and arbitrary but typical pure states, and revisit the suggestion that the First Law of Thermodynamics is of statistical character.
\section{Density of states of thermalizing macroscopic bodies and the statement of sharp energy distributions}
Although we have insisted that we consider closed systems that reach equilibrium, as the example of the thermos bottle or the ultracold gases confined in optical or magnetic traps, this is not really a limitation since both thermodynamics and statistical physics assume that composite systems, of an ``open'' system plus environment are, in fact, closed or isolated. \cite{LL} Thus, barring very peculiar cases, such as plasmas or other carefully tailored systems, essentially all bodies that surround us do reach thermal equilibrium if left unaltered. For the sake of argument, consider a chemically pure system in thermal equilibrium. Then, a fundamental thermodynamic result is that any macroscopic {\it subsystem} with a number $N \gg 1$ of atoms or molecules, being part of the closed system in a thermal equilibrium state, will have a single valued, concave, entropy function $S(E,V,N) = N s(e,v)$, with $E$ and $V$ the mean energy and volume of the subsystem, and $s$, $e$ and $v$, their corresponding entropy, energy and volume per particle. By considering systems with positive temperatures only, $s$ is a monotonously increasing function of $e$. Now, as a consequence of the fundamental identification of the entropy in terms of the available number of states $\Delta \Gamma$ of the subsystem, with energy within a very small interval $\delta E$ around $E$, one finds that, quite accurately, \cite{LL}
\begin{equation}
\Delta \Gamma(E) \simeq e^{N s(e,v)/k_B} \>,\label{denstates}
\end{equation}
with $k_B$ Boltzmann constant. Therefore, for macroscopic subsystems $N \gg 1$, the number of states $\Delta \Gamma(E)$ is an extremely dense function of $E$. However, since the entropy is a concave, monotonously increasing function of the energy, \cite{LL,Callen,MM} we observe that $\Delta \Gamma(E)$ is not only very dense, but also, it is an extremely fast growing function of the energy $E$, for a fixed number of particles $N \gg 1$. A trivial example is the dilute ideal gas, yielding
\begin{equation}
\Delta \Gamma(E) \simeq C E^{\frac{3}{2}N} \>,\label{ideal}
\end{equation}
with $C$ independent of $E$. And a similar fast growth can be found for Fermi and Bose gases. \\
We now recall that the energy density of states $\omega(E)$ is formally given by
\begin{equation}
\omega(E) = \frac{d\Gamma(E)}{dE} \>,\label{omega}
\end{equation}
with $\Gamma(E)$ the number of energy states with energy less or equal than $E$. But because the energy spectrum is dense, in any very small energy interval $\delta E$, we can further approximate the density of states as
\begin{equation}
\frac{d\Gamma(E)}{dE} \approx \frac{\Delta \Gamma(E,V,N)}{\delta E} \>, \label{WE20}
\end{equation}
hence indicating that the density of states is also a dense, fast growing function of $E$. We point out now a very important observation: while this result is achieved via the assumptions of statistical physics, this is actually a property of the density of states of the {\it isolated} subsystem of $N$ atoms. That is, it is a property of the Hamiltonian $H$ of the isolated macroscopic subsystem, regardless of the actual state in which it finds itself. This, we will show, is essential for the determination of the energy distribution of macroscopic bodies that can achieve thermal equilibrium.\\
Our interest, certainly, is not in the equilibrium states of closed systems but, rather, on arbitrary but typical initial states $|\psi \rangle$ that relax to equilibrium. Hence, we can express such a state as a superposition of energy states of the macroscopic system whose Hamiltonian is $H$,
\begin{equation}
|\Psi \rangle = {\sum_{\{m\}}} a_{\{m\}} |{\{m\}}\rangle \>, \label{Psi}
\end{equation}
with the expansion coefficients,
\begin{equation}
a_{\{m\}} = \langle {\{m\}} | \Psi \rangle \>,
\end{equation}
and where $|{\{m\}}\rangle$ denote the (complete set of) energy eigenstates of the system, $H |{\{m\}}\rangle = E_{\{m\}} |{\{m\}}\rangle$. We recall here that $|a_{\{m\}}|^2$ is the probability to find the system in the energy eigenstate $|{\{m\}}\rangle$, given that the state of the system is $|\Psi\rangle$. A different question is to enquire about the probability of finding the system with a value of the energy $E$, say, between $E$ and $E + dE$. Hence, for this matter, let us
suppose for the moment that we want to know the statistical properties of an operator that involves the Hamiltonian of the system only, $f = f(H)$, when the system is in the state $|\Psi\rangle$ given by Eq. (\ref{Psi}). That is, we want to know the moments of $f$,
\begin{equation}
\langle f^n \rangle = \langle \Psi | f^n(H) | \Psi \rangle \>,
\end{equation}
with $n = 1, 2, 3, \dots$ . These are,
\begin{equation}
\langle f^n \rangle = \sum_{\{m\}} | a_{\{m\}}|^2 f^n(E_{\{m\}}) \>.
\end{equation}
Because the energy levels are dense for the macroscopic system under consideration, we can also write,
\begin{equation}
\langle f^n \rangle = \int \> f^n(E) \>{\cal W}(E) dE \>,
\end{equation}
with ${\cal W}(E) dE$ the probability of finding the system with energy between $E$ and $E + dE$. Thus, the statistical properties of $f(H)$ are essentially given by ${\cal W}(E)$.\\
The probability distribution ${\cal W}(E)$ depends on the values of the amplitudes $a_{\{m\}}$, as we amply discuss in the following section, which, in principle, are quite arbitrary, except that they obey
\begin{equation}
{\sum_{\{m\}}} |a_{\{m\}}|^2 = 1\> .\label{norma}
\end{equation}
Two very important quantities associated directly to the distribution ${\cal W}(E)$ are the average energy $\overline E$ and its width $\Delta E$, defined as,
\begin{eqnarray}
\overline{E} &=& \langle \Psi | H | \Psi \rangle \nonumber \\
& = & \int \> E \>{\cal W}(E) dE \>,
\label{Emean}
\end{eqnarray}
and
\begin{eqnarray}
\Delta E^2 &=& \langle \Psi | H^2 | \Psi \rangle - \langle \Psi | H | \Psi \rangle^2 \nonumber \\
& = & \int \> (E - \overline E)^2 \>{\cal W}(E) dE \>.
\label{DelE}
\end{eqnarray}
The purpose of this work is to show that there exists a wide class of states $|\psi\rangle$, or equivalently of sets of probability amplitudes $a_{\{m\}}$, such that the ratio of the width $\Delta E$ to the average $\overline E$ of the distribution ${\cal W}(E)$, scales as
\begin{equation}
\frac{\Delta E}{\overline E} \sim N^{-\kappa} \label{scales}
\end{equation}
with $1/2 \le \kappa \le 1$. If this is true, then we say that the distribution of energy is sharply peaked, since $N \gg 1$. However, since the distribution of energy does not evolve in time, we can claim that the energy remains ``constant'' within a very small interval of energy $\Delta E$, or, that only states whose energy $E$ is very close to $\overline E$ contribute to the determination of the thermodynamics of the system. In the following section we discuss and show the very general requirements that the
states $|\psi\rangle$ should obey to yield a sharp energy distribution.
\section{Conditions for typical pure states to yield sharply peaked energy distributions}
Considering the state $|\Psi \rangle$, given by Eq. (\ref{Psi}), as the initial state of the closed system, we now require certain plausible and reasonable properties of the expansion coefficients $a_{\{m\}}$. The first assumption is that the superposition of states, given by Eq.(\ref{Psi}), is ``dense'' within an interval $E_{min} \le E \le E_{max}$, where the bounds are completely arbitrary; we shall argue below on this interval. But the point is that if not {\it all} states within the interval $|E_{max} - E_{min}|$ are included, at least there are finite regions within such an interval that are densely populated, see Fig. 1 for a couple of examples. Although we do not rule out the possibility that the initial state is an energy eigenstate or a superposition of a few of them, we do not consider those cases as they are ``atypical'' in the following sense. On the one hand, due to the very dense density of states it is almost a practical impossibility to prepare a macroscopic body in a single eigenstate $|\{m\}\rangle$: Landau and Lifshitz \cite{LL} argue that it would take a time $\Delta t \sim e^N$, in any units, to prepare a system in such a state. That is, this time scale imposes the constraint that we cannot have an energy resolution below a certain ``practical'' limit $\delta E$.
On the other hand, if we could prepare a single isolated energy eigenstate every single time we repeat an experiment, we would obtain an infinitely sharp energy distribution, the Eigenstate Thermalization Hypothesis (ETH) would apply and the problem would be solved. But we insist, this is not typical of arbitrary preparations of initial states that we do know relax to equilibrium.\\
Second, perhaps the apparently most demanding but simplifying assumption, we require that, since the superposition is assumed dense, the coefficients $a_{\{m\}}$ can be considered to be a smooth function of its energy, that is,
\begin{equation}
a_{\{m\}} \approx a(E_{\{m\}}) \>. \label{aE}
\end{equation}
In agreement with the first assumption, this is equivalent to require that all states within any very small width $\delta E$ are essentially equally probable.
This is not unreasonable, for if a single or a few states within $\delta E$ were much more probable than the others, it would amount to accept the possibility that one can prepare single eigenstates.\\
With the two previous assumptions we can write the energy distribution as,
\begin{equation}
{\cal W}(E) \simeq | a(E)|^2 \frac{d\Gamma(E)}{dE} \>, \label{WE}
\end{equation}
with $d\Gamma/dE$ the energy density of states. But because of the result given by Eq. (\ref{WE20}) we can further approximate the density of states, yielding
\begin{equation}
{\cal W}(E) \approx | a(E)|^2 \frac{\Delta \Gamma(E,V,N)}{\delta E} \>, \label{WE2}
\end{equation}
with $\Delta \Gamma(E,V,N)$ given by Eq. (\ref{denstates}). As already mentioned, the growth of $\Delta \Gamma(E,V,N)$ as a function of increasing $E$ is guaranteed by the fact that the entropy function $S/N = s(e,v)$
is a concave, monotonic increasing function of $E$, \cite{LL,Callen,MM} at least for systems that can have positive temperatures only; we do not consider the possibility of negative temperatures here. \cite{Ramsey,Bloch,VRR2,Frenkel} \\
\begin{figure}
\caption{(Color online) Sketches of expansion coefficients $|a(E)|^2$ (red line, not at scale) and energy distribution ${\cal W}(E)$ (see text).}
\label{Figs}
\end{figure}
We can now examine the assumption concerning the statement that the interval $(E_{min},E_{max})$ is {\it bounded} or decreasing very fast as $E \to E_{max} \to \infty$. First, let us analyze the ``strict'' case where $|a(E)|^2 = 0$ for $E$ outside the given energy interval, with $E_{max} < \infty$, as exemplified in Fig. 1; that is, when the energy interval is truly bounded. We discuss further below that this assumption can be relaxed. Its purpose is to argue that such a condition immediately implies that ${\cal W}(E)$ is sharply peaked at an average value $\overline E$, which is smaller but very near $E_{max}$. This can be seen from the fact that, see Eq. (\ref{WE2}), ${\cal W}(E)$ is the product of the density of states that grows very fast without bound and the coefficients that are smooth and bounded, $|a(E)|^2 < 1$, in the energy interval. As a result, the product $|a(E)|^2 \Delta \Gamma(E)$ will necessarily accumulate at the highest possible value of $|a(E)|^2$, yielding a peaked function, as illustrated in the right panels in Fig. 1. As a matter of fact, the value of
$E_{min}$ appears to be irrelevant; further, one could even have separated finite regions where $|a(E)|^2 \ne 0$ and the distribution ${\cal W}(E)$ would still be peaked near $E_{max}$. This seemingly simple result indicates that, as long as the probability of occurrence of the energy states is dense and bounded, the energy distribution will be peaked near its highest energy value. In the remaining of this section we justify more explicitly this result, using reasonable assumptions regarding the function $|a(E)|^2$.\\
First, we can estimate how far $\overline E$ is from $E_{max}$ and how sharp is the decay, if strictly $|a(E)|^2 = 0$ for $E \ge E_{max}$. To this end, using Eq. (\ref{denstates}), we write the energy distribution, Eq. (\ref{WE2}), as
\begin{equation}
{\cal W}(E) \approx \frac{1}{\delta E} \exp \left[ \ln |a(E)|^2 + N s(e,v)/k_B\right] \>. \label{WEe}
\end{equation}
Since the exponential function is a monotonic function, the maximum of ${\cal W}(E)$ also occurs at the maximum of its argument. Call $\overline E$ the value of the energy at the maximum. This maximum is determined by the condition that the first derivative of the exponent in Eq. (\ref{WEe}) vanishes, giving the following condition,
\begin{equation}
\frac{1}{k_B}\left(\frac{\partial s}{\partial e}\right)_{\overline E} + \frac{1}{|a(\overline E)|^2}\left.\frac{d |a(E)|^2}{dE}\right|_{\overline E} = 0 \>.\label{barE}
\end{equation}
The first term is positive and the second one negative. The first one equals the inverse of the temperature function of the system at the value $\overline e = \overline E/N$
\begin{equation}
\frac{1}{k_B}\left(\frac{\partial s}{\partial e}\right)_{\overline E} = \frac{1}{k_B T(\overline e,v)} \label{temp}
\end{equation}
and, although initially this is not the temperature of the system, it will become so when the system reaches thermal equilibrium.
The second term in Eq. (\ref{barE}) is negative because of the assumption on the shape of $|a(E)|^2$. Let us assume first a simple but very general model of how $|a(E)|^2$ vanishes as $E \to E_{max}^-$. In this case, $|a(E)|^2$ vanishes algebraically when approaching $E_{max}$,
\begin{equation}
|a(E)|^2 \sim K \left\{
\begin{array}{ccc}
(E_{max} - E_0)^\alpha - (E - E_0)^\alpha &{\rm if} & E \le E_{max} \\
0 &{\rm if} & E \ge E_{max}
\end{array}\right. \label{modela}
\end{equation}
where $\alpha > 0$, $E_0$ is an energy smaller than $E_{max}$, and $K$ is a constant of proportionality. Let us now argue about the properties of common energies of macroscopic systems. To begin with, these energies can be considered to scale with $N$ in the sense that $E/N$ is an energy of the order of $k_B T_{eff}$, with $T_{eff}$ an ``effective'' temperature that may range from fractions of Kelvin, $10^{-7}$ K, at the ground state of a weakly interacting Bose gas \cite{Bose}, to, say, $10^{10}$ K, at the core of a supernova \cite{supernova}. This is approximately a range from $10^{-30}$ to $10^{-13}$ Joules. We call ``macroscopic'' energies to values of $E/N$ in such an interval, and we expect all energies involved, $E_{max}$, $E_0$ and $\overline E$ to be within it. This allows us to say that those energies scale with $N$, namely, that they are extensive in that sense. \\
Now, because $\overline E$ is very near $E_{max}$, we can write $\overline E = E_{max} - \epsilon$ in Eq. (\ref{barE}), using Eq. (\ref{modela}); since the first term in Eq. (\ref{barE}) is slowly varying in energy, it can be evaluated at $\overline E \approx E_{max}$, while the second one can be expressed in terms of $\epsilon$. The result is that
\begin{equation}
\epsilon \approx \frac{k_B}{\left(\frac{\partial s}{\partial e}\right)_{E_{max}}}
\end{equation}
namely, $\epsilon \approx k_B T(e_{max},v)$ with $e_{max} = E_{max}/N$. In the light of the discussion of the previous paragraph, $\epsilon$ is an intensive quantity, being of order ${\cal O}(1)$ with respect to $E_{max} \sim {\cal O}(N)$; a very small shift. Now, we can estimate the width of the distribution ${\cal W}(E)$. For this, we consider the second order term in the energy expansion of the argument of the exponential in Eq. (\ref{WEe}). Using the assumed form of the coefficients, Eq. (\ref{modela}), this yields,
\begin{eqnarray}
&\frac{1}{2}\left(\frac{1}{k_B N}\left(\frac{\partial^2 s}{\partial e^2}\right)_{\overline E} - \frac{1}{|a(\overline E)|^4}\left(\frac{d |a(E)|^2}{dE}\right)_{\overline E}^2 + \frac{1}{|a(\overline E)|^2}\left.\frac{d^2 |a(E)|^2}{dE^2}\right|_{\overline E}\right) (E - \overline E)^2 \approx& \nonumber\\
& \frac{1}{2}\left(\frac{1}{k_B N}\left(\frac{\partial^2 s}{\partial e^2}\right)_{\overline E} - \frac{1}{\epsilon^2} - \frac{\alpha-1}{\epsilon(E_{max}-E_0)}\right) (E - \overline E)^2 \approx & \nonumber \\
& -\frac{1}{2\epsilon^2} (E - \overline E)^2 .
\end{eqnarray}
The last approximation follows from the fact that the first and third terms in the second line scale as $1/N$, and the second one as ${\cal O}(1)$. Therefore, the energy distribution approximates as,
\begin{equation}
{\cal W}(E) \approx |a(\overline E)|^2\frac{e^{Ns(\overline e,v)/k_B}}{\delta E} \exp \left[- \frac{(E - \overline E)^2}{2\epsilon^2}\right] \label{Wgauss}
\end{equation}
with $\overline E \approx E_{max} - \epsilon$ and $\Delta E = \epsilon \approx k_B T(\overline e,v)$. The above distribution is an extremely sharp gaussian function, since $\overline E \sim {\cal O}(N)$ and $\Delta E = \epsilon \sim {\cal O}(1)$; namely, $\Delta E/\overline E \sim N^{-1}$. It is much narrower than the usual thermodynamic gaussians, whose widths scale as $\sim N^{1/2}$. \\
While the previous argument was done for $|a(E)|^2$ approaching zero as $E \to E_{max}^-$ algebraically, the same can be shown to obtain if it does so exponentially, namely,
\begin{equation}
|a(E)|^2 \sim K \left\{
\begin{array}{ccc}
e^{-\left((E-E_0)/E_1\right)^\gamma} - e^{-\left((E_{max}-E_0)/E_1\right)^\gamma} &{\rm if} & E \le E_{max} \\
0 &{\rm if} & E \ge E_{max}
\end{array}\right. , \label{modela2}
\end{equation}
with $\gamma > 0$, $K$, $E_0$ and $E_1$ constants, but with both $E_0 \sim {\cal O}(N)$ and $E_1 \sim {\cal O}(N)$, such that the interval $|E_{max} - E_{min}|$ is macroscopic. The ensuing distribution ${\cal W}(E)$ again behaves as given by Eq. (\ref{Wgauss}).
There are, however, other possible behaviors of the coefficients $|a(E)|^2$ that we now address.\\
In the above paragraphs we discussed cases in which $|a(E)|^2 = 0$ for $E \ge E_{max}$. However, this requirement can be relaxed and demand now that $|a(E)|^2 \to 0$ exponentially, as $E \to \infty$. That is, let us assume that for large $E$, $|a(E)|^2$ behaves as,
\begin{equation}
|a(E)|^2 \sim \exp[-(E/\Delta)^\kappa] , \>\>\>{\rm for}\>\>\> E \to \infty \label{expo}
\end{equation}
with $\Delta$ a scale of energy whose value we discuss below. If $\kappa \ge 1$, because $Ns(e,v)$ is concave, there exists always a maximum in ${\cal W}(E)$, as can be seen by writing the energy distribution for large $E$, see Eq. (\ref{WEe}),
\begin{equation}
{\cal W}(E) \approx \frac{1}{\delta E} \exp \left[ -(E/\Delta)^\kappa + N s(e,v)/k_B\right] \>\>\>{\rm for}\>\>\> E \to \infty. \label{WE3}
\end{equation}
For $0 < \kappa < 1$ the maximum may not exist. Let us discuss first $\kappa \ge 1$. The maximum value $\overline E$ is found from Eq. (\ref{WEe}),
\begin{equation}
\frac{1}{k_B}\left(\frac{\partial s}{\partial e}\right)_{\overline E} \approx \kappa \frac{\overline E^{\kappa -1}}{\Delta^\kappa} ,
\label{barE2}
\end{equation}
while the width of the distribution $\Delta E$ may be obtained from the second order expansion term, already identifying the width,
\begin{eqnarray}
-\frac{1}{2\Delta E^2}(E -\overline E)^2 & = & \frac{1}{2} \left[ \frac{1}{N k_B} \left(\frac{\partial^2 s}{\partial e^2} \right)_{\overline E} - \kappa(\kappa -1) \frac{\overline E^{\kappa -2}}{\Delta^\kappa} \right] (E -\overline E)^2 \nonumber \\
&=& \frac{1}{2} \left[ \frac{1}{N k_B} \left(\frac{\partial^2 s}{\partial e^2}\right)_{\overline E} - (\kappa -1) \frac{1}{\overline E k_B}\left(\frac{\partial s}{\partial e}\right)_{\overline E} \right] (E -\overline E)^2 \label{DelE2} .
\end{eqnarray}
First, both terms within the square brackets are negative, the first one because the entropy function is concave and the second because of the assumption $\kappa \ge 1$. Now comes an interesting point. If we stick to the requirement that $\overline E \sim {\cal O}(N)$, such that the entropy $s = s(e,v)$ is intensive, then both terms in the square brackets are of the same order, yielding a width $\Delta E \sim {\cal O}(N^{1/2})$. This case, however, demands a further requirement on $\Delta$ as seen from Eq. (\ref{barE2}); that is, it should be true that $\overline E^{\kappa -1}/\Delta^\kappa \sim {\cal O}(1)$ for the derivative of $s$ with respect to $e$ to be intensive, yielding $\Delta \sim {\cal O}(N^{(\kappa -1)/\kappa})$. Thus, as long as $\kappa \ge 1$ and we demand that the function $s(e,v)$ and $e$ are always intensive, then the exponentially decaying function $a(E)$, as given by Eq. (\ref{expo}), also gives rise to a sharply peaked energy distribution with width $\Delta E \sim {\cal O}(N^{1/2})$. For $\kappa = 1$ the reader can see that this is the usual textbook argument to show that the canonical equilibrium distribution yields sharply peaked energy distributions, if $\Delta$ is the temperature of the system in equilibrium. \cite{LL}\\
However, what if $\Delta$ in Eq. (\ref{expo}) does not scale in the way described in the previous paragraph? For instance $\Delta$ could be $\sim {\cal O}(1)$ or $\sim {\cal O}(N)$. Although we cannot strictly make general statements for an arbitrary system, we can check different cases with the dilute ideal gas, given in Eq. (\ref{ideal}), as we can calculate explicitly the expressions in Eqs. (\ref{barE2}) and (\ref{DelE2}). We find that $\overline E$ does not scale with $N$, in general; for instance, if $\Delta \sim {\cal O}(1)$, $\overline E \sim {\cal O}(N^{1/\kappa})$, while if $\Delta \sim {\cal O}(N)$, $\overline E \sim {\cal O}(N^{1+ 1/\kappa})$. In the same fashion, $\Delta E$ scales differently for each case. While these kinds of scaling of $\overline E$ with $N$ seem to be unusual, we expect nevertheless that $\overline E$ should be within realistic bounds as described above. The remarkable result is that, for all types of dependence, the ratio $\Delta E/\overline E \sim {\cal O}(N^{-1/2})$ always. That is, the distribution ${\cal W}(E)$ is always sharply peaked, with a relative width $\sim N^{-1/2}$. \\
\section{Exceptions to the rule}
Clearly, the arguments given above fail if the product of $|a(E)|$ times $\Delta \Gamma(E)$ has a long tail as $E\to \infty$, as this would yield an arbitrarily large width $\Delta E$. But this is only possible if $|a(E)|$ decays much more slowly than the ensured growth of $\Delta \Gamma(E)$. Such a behavior can also be illustrated with the exponential form of the coefficients given by Eq. (\ref{expo}), in the case where $0< \kappa < 1$. In this situation there is no guarantee that ${\cal W}(E)$ has a maximum, nor that it is normalized, because the first term in the exponent in Eq. (\ref{WE3}) is no longer concave. However, we can tailor the value of $\kappa$ such that, still, the maximum exists, but we can also tune it such that the width $\Delta E$ is as {\it large} as we desire, see the second line in Eq. (\ref{DelE2}): note that if we choose $\kappa < 1$ appropriately, the Gaussian-like exponent in ${\cal W}(E)$ can still be negative, yet we could make the factor multiplying $(E-\overline E)^2$ arbitrarily small in absolute value. A similar reasoning can be used if we choose that the coefficients $|a(E)|^2$ vanish as $E \to \infty$ algebraically, say $|a(E)|^2 \sim E^{-\eta}$. Choosing $\eta$ appropriately, we can make the tail of ${\cal W}(E)$ decay as slowly as we desire, but still normalizable, and obtain a very large width $\Delta E$. These two cases cause the general statement of this article to fail, yet, we claim, doing this requires a very detailed tailoring of the initial state $|\Psi \rangle$. That is, the coefficients must be strictly different from zero as $E \to \infty$ and must almost exactly cancel the enormous growth in energy of the density of states, in order to render a slow decay of the energy distribution. This appears just as complicated, perhaps, as trying to obtain a superposition of very few states. 
In any case, it would certainly be very interesting to be able to prepare in real life states of macroscopic systems with such long energy probability tails, for after measurement, one would obtain a very different energy each time the system were prepared in the {\it same} state.\\
To summarize our claim, we can state that a very sharp energy distribution is obtained for initial pure states $|\Psi\rangle$, whose energy coefficients $|a(E)|^2$ are bounded from above by an energy value $E_{max}$ or decay to zero as $E \to \infty$ sufficiently fast. An important point is that states whose energy is well below $\overline E$ are quite irrelevant. This has an interesting consequence when the superposition of states is dense but in ``lumps'', such as that shown in the lower panel of Fig. 1. In such a situation the interference between states of different lumps would not affect the thermodynamics of the system, because its energy would effectively remain in those states near the maximum value of the energy $E_{max}$ all the time.\\
\section{Final Comments}
The claim of this paper rests on assuming that the systems under consideration can reach thermal equilibrium. While we have no intention of indicating how this does occur, we still have a few questions that should be addressed regarding the consistency or compatibility of an equilibrium state with the fact that the system is always in a pure state. As already cited in the introductory paragraphs, there are recent excellent discussions, see Ref. \cite{Goldstein3} and references therein, on how thermal equilibrium is achieved in quantum macroscopic bodies that are in pure states within a microcanonical shell. The issue we address in this paragraph regards the equilibrium state itself. As discussed in the Landau and Lifshitz monograph and in the recent papers, equilibrium is reached when distributions of extensive quantities of {\it macroscopic subsystems} of the whole isolated body are sharply peaked; in this case, it is true that $\Delta A_s/\overline A_s \sim {\cal O}(N_s^{-1/2})$, with $A_s$ an extensive quantity of the $s$-th subsystem and $N_s \gg 1$ its number of particles. This condition is achieved as a consequence of the macroscopic subsystems becoming statistically independent. Concomitantly, one can assert that states of any subsystem, with the same number of particles, volume and, especially, same energy, are equally probable. This in turn allows us to establish that the equilibrium state of the subsystems can be accurately described by a density matrix $\rho_s$, such that the entropy of the $s$-th subsystem, $S_s$, can be obtained from the expression, \cite{LL}
\begin{equation}
S_s = - k_B {\rm Tr_s}\> \rho_s \ln \rho_s ,\label{entropy}
\end{equation}
where the trace is taken over states of the system $s$ alone. We can now proceed ``backwards'' and conclude that, regarding the whole isolated system, its state of equilibrium can be described by the microcanonical density matrix that asserts that all states within a very narrow energy band are equally probable, and that states outside of it have probability zero. \\
The above well-known assertions regarding equilibrium states may seem contradictory with the fact that we are assuming that the system is always in a pure state $|\Psi\rangle$, with a quite arbitrary but typical superposition of energy eigenstates. First, since the system is isolated, its time evolution is unitary under its Hamiltonian $H$
\begin{equation}
|\Psi(t) \rangle = e^{-iHt/\hbar} |\Psi\rangle \>,\label{psit}
\end{equation}
and, therefore, the energy distribution ${\cal W}(E)$ remains stationary for all times. Second, as essentially only energies corresponding to energy states within $\Delta E$ around $\overline E$ are probable, we can then assert that, in the evolution of $|\Psi(t)\rangle$, the energy of the system remains constant within such an interval. And without compromising an explanation of how it occurs, the system reaches equilibrium. But the point is that we can attach to such an equilibrium state the actual microcanonical ensemble, with $\sim e^N$ states in $\Delta E$ around $\overline E$, since all have the ``same'' energy and are ``equally'' probable. It is therefore clear that only when the system has reached equilibrium can we use the microcanonical density matrix as a {\it representation} of the equilibrium state, regardless of whether it is in a pure state, known or unknown. As a clear consequence, all initial ``typical'' states $|\Psi\rangle$ with approximately the same mean energy $\overline E$, once in equilibrium, can be described by the same microcanonical density matrix. After all, this density matrix permits calculation of thermodynamic properties only, including their correlations, which usually are properties of few bodies; of course, those properties can also be calculated using the state $|\Psi(t)\rangle$, if known. In this way, using the microcanonical density matrix in Eq. (\ref{entropy}), leads to the entropy of the equilibrium state as being ($k_B$ times the logarithm of) the number of energy states within $\Delta E$ around $\overline E$, that is, to the expected value $S = k_B \ln \Delta \Gamma(\overline E,V,N)$. It is certainly erroneous to substitute the state $|\Psi(t)\rangle$ into the formula given in Eq. (\ref{entropy}) since it would yield the absurd result that the entropy is zero for an isolated system in thermal equilibrium.\\
To conclude, we mention once again that the present result indicates that the First Law of Thermodynamics may be of statistical character, just as we are used to the fact that the Second one indeed is. Usually, in order to enunciate the First Law one appeals to the conservation of energy of isolated classical systems. Then, one argues that the change in energy of a system must equal the corresponding change in energy of its surroundings in terms of heat and work. However, real systems obey quantum mechanics and their energy cannot be considered to be a constant, unless they are in an energy eigenstate. But as this cannot be ensured every single time we prepare a macroscopic system, the message of this paper is that
macroscopic bodies are naturally prepared in states whose energy distributions are sharply peaked. In turn, this entitles us to affirm that the energy of the system {\it effectively} remains constant, and the First Law can then be established. The striking conclusion is that such a law has a statistical validity, neither a rigorous nor an exact one. As a matter of fact, since classical mechanics emerges from quantum mechanics for macroscopic systems, one could also claim that the conservation of energy of classical systems is a consequence of the energy sharpness of the corresponding quantum bodies. This opens the possibility of preparing isolated macroscopic systems in atypical initial states, always in the same one, that would yield very distinct values of its energy after measurement, in ``violation'' of the First Law of Thermodynamics. \\
{\bf Acknowledgement.} Acknowledgement is given to grant PAPIIT-IN108620 (UNAM).
\end{document}
\begin{document}
\title{Fault-tolerant conversion between adjacent Reed-Muller\\ quantum codes based on gauge fixing}
\author{Dong-Xiao Quan$^{1,2}$ }
\email[]{dxquan@xidian.edu.cn}
\author{Li-Li Zhu$^{1}$ }
\author{Chang-Xing Pei$^{1}$ }
\author{Barry C.\ Sanders$^{2,3,4,5}$ }
\affiliation{$^1$State Key Laboratory of Integrated Services Networks, Xidian University, Xi'an, Shaanxi 710071, P.\ R.\ China}
\affiliation{$^2$Institute for Quantum Science and Technology, University of Calgary, Alberta T2N 1N4, Canada}
\affiliation{$^3$Program in Quantum Information Science, Canadian Institute for Advanced Research, Toronto, Ontario M5G 1Z8, Canada}
\affiliation{$^4$Hefei National Laboratory for Physical Sciences at Microscale, University of Science and Technology of China, Hefei, Anhui 230026, P.\ R.\ China}
\affiliation{$^5$Shanghai Branch, CAS Center for Excellence and Synergetic Innovation Center in Quantum Information and Quantum Physics,
University of Science and Technology of China, Shanghai 201315, P.\ R.\ China}
\date{\today}
\begin{abstract}
We design forward and backward fault-tolerant conversion circuits, which convert between the Steane code and the 15-qubit Reed-Muller quantum code so as to provide a universal transversal gate set.
In our method,
only 7 out of the total of 14 code stabilizers need to be measured,
and we further enhance the circuit by simplifying some stabilizers;
thus, we need only to
measure eight weight-4
stabilizers for one round of forward conversion and seven weight-4 stabilizers for one round of backward conversion.
For conversion,
we treat random single-qubit errors and their influence on syndromes of gauge operators,
and our novel single-step process enables
more efficient fault-tolerant conversion between these two codes.
We make our method quite general by showing how to convert between any two adjacent Reed-Muller quantum codes~$\overline{\textsf{RM}}(1,m)$ and~$\overline{\textsf{RM}}\left(1,m+1\right)$,
for which we need only measure stabilizers whose number scales linearly with~$m$
rather than exponentially with~$m$ obtained in previous work.
We provide the explicit
mathematical expression for the necessary stabilizers and the
concomitant resources required.
\end{abstract}
\keywords{Quantum Error Correction, Reed-Muller Quantum Code, Code Conversion, Gauge Fixing}
\maketitle
\section{Introduction}
\label{intro}
Quantum information technology, with applications such as secure quantum communication
or universal quantum computing,
is extremely powerful but challenging due to the fragility of quantum information in the presence of noise, loss and decoherence.
Fault-tolerant quantum error correction~\cite{Sho95,Got97,Sho96} ameliorates this problem of fragility by encoding plain-text quantum information into cipher text, processing this encoded information while also measuring error syndromes and correcting, and finally turning back to plain text.
For fault-tolerant error correction,
transversal gates are especially valuable as qubits in each code block act bitwise between corresponding qubits in each code block,
thereby naturally preventing error propagation~\cite{ZCC11}.
Unfortunately, no code can
simply enable a universal set of transversal gates~\cite{ZCC11,EK09,CCC+08} without invoking a technique involving ancillary qubits; thus, complicated strategies are employed to produce universal transversal gate sets.
One strategy to circumvent the non-universal gate set problem
is to employ magic-state distillation~\cite{Bravyi2005,Reichardt2005,Meier2012,Bravyi2012,Jones2013,Fowler2013}.
Although magic-state distillation has the advantage of a higher fault-tolerant threshold,
the overhead in preparation and distillation is a major bottleneck for scalable quantum computing~\cite{Jochym-OConnor2014,SR-2014,Ducloscianci2015,Beverland2016}.
Alternative techniques can provide a universal fault-tolerant gate set.
The Steane code~\cite{Steane1996} provides transversal Clifford gates but not the transversal
$\text{T}:=\operatorname{diag}(1,\exp\{\text{i}\pi/4\})$
gate whereas the 15-qubit Reed-Muller quantum code (RMQC)~\cite{Steane1996b} is transversal for
\textsc{T}, controlled-NOT (\textsc{CNOT}), controlled-S (\textsc{CS}), controlled-Z (\textsc{CZ}), and
controlled-controlled-\textsc{Z} (\textsc{CCZ}) gates,
but not for the Hadamard (\textsc{H}) gate.
One approach to achieving a universal set of gates
employs just one code such as the Steane code or the 15-qubit RMQC
and does not invoke code conversion.
For example, for the Steane code, the fault-tolerant \textsc{T} gate can be realized with the help of an ancillary logical qubit, Pauli Z-measurement and transversal $S:=\operatorname{diag}(1,\text{i})$ and \textsc{X} operations, which we call the standard method to construct the \textsc{T} gate for the Steane code~\cite{Nielsen2000}.
Alternatively,
for the 15-qubit RMQC,
which does not include the transversal \textsc{H} gate in the universal gate set,
a Hadamard gadget can be introduced:
this Hadamard gadget exploits the transversality of the CS gate,
which yields a \textsc{CZ} gate if applied twice,
and, combined with an ancillary logical qubit and a Pauli X-measurement and post-selection,
yields a Hadamard gate~\cite{Bremner2011}. A universal gate set for 15-qubit RMQC can also be achieved by
using transversal operations and the gauge-fixing method to realize the fault-tolerant \textsc{H} gates~\cite{Paetznick2013,Bombin2013,Kubica2015}.
Another technique is code concatenation,
for which the qubits that make up the code are subsequently encoded into a second code so combining the 7-qubit Steane code and 15-qubit RMQC requires 105 qubits~\cite{Jochym-OConnor2014,Jochym-OConnor2016}.
An alternative method is by code conversion~\cite{Anderson2014,Hwang2015,Bravyi2015,Hill2013,Choi2015,Bombin2016}, which converts between two codes to provide a universal transversal gate set. Direct conversion can be realized via Clifford operations, which is general for converting between different kinds of codes but requires 13 single-qubit and 74 two-qubit gates. Furthermore, this direct conversion requires error correction at every step to ensure fault tolerance~\cite{Hwang2015}. Another direct conversion method is by gauge fixing as both codes correspond to the same subsystem code with different gauge qubits~\cite{Anderson2014,Bravyi2015}, which is more efficient compared to the method of Clifford operations with respect to the number of qubits and gates required. This method is also studied from the view of colour codes~\cite{Bravyi2015} and furthermore generalized to the dimensional jump between two-dimensional and three-dimensional colour codes or even higher dimensions~\cite{Bombin2016}. This code conversion can also be realized by code teleportation~\cite{Choi2015,Choi2013}.
As both the Steane code and the RMQC are of the same code distance $3$, they can only correct one error.
For fixed distance,
fewer qubits are better as fewer errors can happen
so smaller codes are easier to implement and need fewer resources to perform syndrome measurements.
Thus, the Steane code is more efficient than the 15-qubit RMQC.
Detailed analysis shows that the overhead of using only the Steane code to realize
a fault-tolerant \textsc{T} gate is lower compared with direct conversion~\cite{Anderson2014}
and with the teleportation scheme~\cite{Choi2015}, since implementation of the \textsc{T} gate is feasible in the Steane code with ancilla,
and the necessary stabilizer measurements in the direct conversion method require many gates~\cite{Choi2015,Choi2013}.
In this paper we find that some stabilizers are not necessary for fault-tolerant conversion
between adjacent RMQCs based on gauge fixing, especially for higher $m$,
so that we can significantly decrease resource requirement.
Here we significantly
improve the fault-tolerant code-conversion method of Anderson, Duclos-Cianci and Poulin (ADP14)~\cite{Anderson2014} by
exponentially
reducing the number of stabilizer measurements, which thus reduces the resource requirement. Our circuits and method have the following advantages. First we split some of the stabilizers into two gauge operators, which reduces the resource requirement for the measurements of the syndromes. Second in the single-step process we consider random single-qubit errors and their influence on the syndromes of the gauge operators, using fixed syndromes to choose the operations so that we can make fault-tolerant conversion. Third we generalize the method to any adjacent RMQCs for which we need only measure $2m+1$ stabilizers of the RMQC code.
In contrast ADP14's approach discusses measuring all $2^{m+1}-2$ stabilizers
and requiring $2^{m+1}-m-2$ stabilizers.
Our approach significantly reduces this stabilizer overhead.
The rest of this paper is organized as follows.
In \S\ref{sec:basicknowledge}, we first give a brief review of the order-one RMQC and then give the stabilizer groups for the 7-qubit Steane code, the 15-qubit RMQC
and
extended Reed-Muller quantum code (ERMQC) and, in \S\ref{sec:ADP14} we describe the ADP14 method. In \S\ref{sec:convertSteaneRMQC} we describe forward conversion from the 7-qubit Steane code to the 15-qubit RMQC in detail, and we deal with backward conversion in \S\ref{sec:convertRMQCSteane}.
In \S\ref{sec:convertmtomplus1}, we explain how to convert between adjacent order-one RMQCs and, in \S\ref{sec:simulationandcost} we elaborate on our simulation process and cost analysis. We conclude in \S\ref{sec:conclusion}.
\section{Basic Knowledge}
\label{sec:basicknowledge}
In this section, we give the basic concept of quantum error correction (QEC) and the stabilizer groups for the Steane code, the 15-qubit RMQC and ERMQC.
From the relations for the stabilizer groups, we design the conversion scheme in following sections.
QEC~\cite{Sho95,Got97,Sho96} is an effective way to protect the information from the influence of noise. The quantum block notation $[[n,k,d]]$ refers to encoding~$k$ logical qubits into~$n$ physical qubits with code distance~$d$ so that $t<d/2$ random errors can be detected and corrected by syndrome measurements and error correction processes.
The order-one RMQC~\cite{Steane1996b}
is written as $\overline{\textsf{RM}}(1,m)=[[M-1,1,3]]$, which encodes 1 logical qubit into $M-1$ qubits with distance 3 for $M:=2^m$. Adjacent codes are~$\overline{\textsf{RM}}(1,m)$ and~$\overline{\textsf{RM}}\left(1,m+1\right)$. The Steane code is written as $\overline{\textsf{RM}}(1,3)$ and the 15-qubit RMQC is written as $\overline{\textsf{RM}}(1,4)$; in the framework of RMQC, they are adjacent RMQCs.
RMQCs are derived from the recursively defined classical order-one Reed-Muller code~\cite{FN1977}. By deleting the first row and column of the generator matrix for the classical Reed-Muller code, we obtain $\overline{\textsf{RM}}(1,3)$'s generator matrix
\begin{align}
\label{eq:GSR}
\bar{G}(1,3)
:=\begin{bmatrix}
\bar{G}(1,3)_1\\
\bar{G}(1,3)_2\\
\bar{G}(1,3)_3\\
\end{bmatrix}
=\begin{bmatrix}
1&0&1&0&1&0&1\\
0&1&1&0&0&1&1\\
0&0&0&1&1&1&1\\
\end{bmatrix},
\end{align}
which can be used recursively to obtain
the generator matrix for $\overline{\textsf{RM}}\left(1,m+1\right)$
given by
\begin{align}
\label{eq:Gm1}
\bar{G}\left(1,m+1\right)=\begin{bmatrix}
\bar{G}(1,m)& 0 &\bar{G}(1,m)\\
\bm{\bar{0}}_{M-1}& 1 & \bm{\bar{1}}_{M-1}\\
\end{bmatrix},\
\bar{G}(1,m)
\!=\!\begin{bmatrix}
\bar{G}(1,m)_1\\
\vdots\\
\bar{G}(1,m)_m\\
\end{bmatrix},
\end{align}
where
\begin{equation}
\bm{\bar{b}}_{M}
:=b^{\otimes{M}},\;
b\in\{0,1\}.
\end{equation}
As all RMQCs are Calderbank-Shor-Steane codes~\cite{Steane1996a,Calderbank1996},
we can subdivide their stabilizers into two parts
corresponding to phase errors ($X$ stabilizers) and flip errors ($Z$ stabilizers).
The $X$ parts are obtained from the generator matrix of the code, and the $Z$ parts are obtained from the generator matrix of the dual code, which is also the parity-check matrix of the code.
As $\overline{\textsf{RM}}(1,3)$ is self dual~\cite{FN1977}, the stabilizers of the Steane code can be written as
\begin{equation}
\bm{S}(1,3)=\left\langle \bar{G}(1,3)^X, \bar{G}(1,3)^Z\right\rangle,
\end{equation}
with
\begin{align}
\label{eq:G13}
\bar{G}(1,3)_1^{B\in \{X,Z\}}
=&B_1B_3B_5B_7,
\nonumber\\
\bar{G}(1,3)_2^{B\in \{X,Z\}}
=&B_2B_3B_6B_7,
\nonumber\\
\bar{G}(1,3)_3^{B\in \{X,Z\}}
=&B_4B_5B_6B_7,
\end{align}
where~$G^{B\in\{X,Z\}}$ is a matrix obtained from $G$ by substituting 1 by~$B$
and substituting 0 by~$I$.
As the Steane code is self dual, it is transversal for logical
\begin{equation}
\label{eq:barHn}
\bar{H}:=H^{\otimes{n}},
\end{equation}
where $n$ is the number of qubits for the code~\cite{Bravyi2015,Betsumiya2012}.
When $m\geq3$, $\overline{\textsf{RM}}(1,m)$ is contained in its dual~\cite{FN1977}. For the case $m=4$, we define a matrix
\begin{equation}
\label{eq:H14matrix}
\tilde{H}(1,4):= \begin{bmatrix}
\tilde{H}(1,4)_1\\
\tilde{H}(1,4)_2\\
\tilde{H}(1,4)_3\\
\tilde{H}(1,4)_4\\
\tilde{H}(1,4)_5\\
\tilde{H}(1,4)_6\\
\end{bmatrix}
=\begin{bmatrix}
1&0&1&0&0&0&0&0&1&0&1&0&0&0&0\\
0&1&1&0&0&0&0&0&0&1&1&0&0&0&0\\
0&0&1&0&0&0&1&0&0&0&1&0&0&0&1\\
\multicolumn{7}{c}{\bar{G}(1,3)}& \multicolumn{8}{c}{\bm{\bar{0}}_{8}} \\
\end{bmatrix},
\end{equation}
and we have
\begin{align}
\label{eq:H14}
\tilde{H}(1,4)_1^{B\in \{X,Z\}}
=&B_1B_3B_9B_{11},\nonumber\\
\tilde{H}(1,4)_2^{B\in \{X,Z\}}
=&B_2B_3B_{10}B_{11},\nonumber\\
\tilde{H}(1,4)_3^{B\in \{X,Z\}}
=&B_3B_7B_{11}B_{15},
\end{align}
with~$\tilde{H}$ and~$\tilde{H}^{B\in \{X,Z\}}$ related in the same way as~$G$ and~$G^{B\in \{X,Z\}}$ earlier.
Obviously
\begin{equation}
\tilde{H}(1,4)\bar{G}(1,4)^T=0
\end{equation}
and the parity-check matrix for $\overline{\textsf{RM}}(1,4)$
is the vertical concatenation of the two matrices
$\bar{G}(1,4)$ and~$\tilde{H}(1,4)$.
Thus, the stabilizers of the 15-qubit RMQC are
\begin{equation}
\label{eq:SRM}
\bm{S}(1,4)=\left\langle \bar{G}(1,4)^X, \bar{G}(1,4)^Z, \tilde{H}(1,4)^Z\right\rangle
\end{equation}
with
\begin{align}
\label{eq:G14}
\bar{G}(1,4)_1^{B\in \{X,Z\}}
=&B_1B_3B_5B_7B_9B_{11}B_{13}B_{15},
\nonumber\\
\bar{G}(1,4)_2^{B\in \{X,Z\}}
=&B_2B_3B_6B_7B_{10}B_{11}B_{14}B_{15},
\nonumber\\
\bar{G}(1,4)_3^{B\in \{X,Z\}}
=&B_4B_5B_6B_7B_{12}B_{13}B_{14}B_{15},
\nonumber\\
\bar{G}(1,4)_4^{B\in \{X,Z\}}
=&B_8B_9B_{10}B_{11}B_{12}B_{13}B_{14}B_{15}.
\end{align}
As the syndromes of $\bar{G}(1,4)^Z$ can unambiguously discriminate all single-qubit~$X$ errors,
the syndromes of $\tilde{H}(1,4)^Z$ are often omitted. The code is triply-even as its weight is 8 so it is transversal for logical $\overline{T}:=T^{\otimes{n}}$~\cite{Bravyi2015,Betsumiya2012}.
From Eq.~(\ref{eq:Gm1}) we can see the 15-qubit RMQC comprises two blocks of Steane code for the first and last 7-qubit blocks
plus another interconnecting qubit labeled~8.
This interconnecting qubit entangles with the last 7-qubit block of Steane code.
Thus, we can prepare an 8-qubit quantum state $\ket{\phi}=1/\sqrt{2}\left(\ket{0}\ket{\bar{0}}_{3}+\ket{1}\ket{\bar{1}}_{3}\right)$ in the last 8 qubits~\cite{Anderson2014},
where $\ket{\bar{b}}_{m},\;b\in\{0,1\}$,
is the logical bit~$b$ encoded into the~$\overline{\textsf{RM}}(1,m)$ code.
Together with the Steane code $\ket{\psi}=\alpha\ket{\bar{0}}_{3}+\beta\ket{\bar{1}}_{3}$,
we construct a 15-qubit ERMQC
\begin{equation}
\label{eq:noerrorcase}
\ket{\phi}_{3\backsim4}
=1/\sqrt{2}\left(\alpha\ket{\bar{0}}_{3}+\beta\ket{\bar{1}}_{3}\right)\left(\ket{0}\ket{\bar{0}}_{3}+\ket{1}\ket{\bar{1}}_{3}\right).
\end{equation}
We refer to Eq.~(\ref{eq:noerrorcase})
as the no-error case for preparation of the 15-qubit ERMQC,
and this ideal input preparation~(\ref{eq:noerrorcase})
is justified in the realistic case by introducing
fault-tolerant preparation using gadgets~\cite{Aliferis2006}.
The stabilizer group for this code can be expressed as
\begin{equation}
\label{eq:SEXRM}
\bm{S}_{3\backsim4}
=\left\langle\left(\bar{G}(1,3)\otimes\bm{\bar{0}}_{8}\right)^{X},\bar{G}(1,4)^X, \left(\bar{G}(1,3)\otimes\bm{\bar{0}}_{8}\right)^{Z}, \bar{G}(1,4)^Z\right\rangle.
\end{equation}
It is also transversal for logical~$\bar{H}$ as it is self dual. There is no information in the last 8 qubits so it has the same logical operations as the Steane code.
We define a subsystem code with the stabilizers of
\begin{align}
\bm{S}^\text{sub}_{3\backsim4}
=\left\langle\bar{G}(1,4)^X,\left(\bar{G}(1,3)\otimes\bm{\bar{0}}_{8}\right)^{Z},\bar{G}(1,4)^Z\right\rangle.
\end{align}
Pauli operators that commute with both the stabilizers and logical operators generate the gauge group~\cite{Bravyi2015} so the gauge group of this subsystem code is
\begin{align}
\bm{G}^\text{sub}_{3\backsim4}=\left\langle \bar{G}(1,4)^X,
\left(\bar{G}(1,3)\otimes\bm{\bar{0}}_{8}\right)^{X}, \bar{G}(1,4)^Z, \tilde{H}(1,4)^Z\right\rangle.
\end{align}
Both the 15-qubit RMQC and the ERMQC belong to this subsystem code with different gauge operators so they can convert to each other by gauge fixing.
\section{ADP14}
\label{sec:ADP14}
Based on the theory of subsystem codes,
ADP14 shows how to convert from $\overline{\textsf{RM}}(1,m)$ to $\overline{\textsf{RM}}(1,{m+1})$.
They first fault-tolerantly prepare the ERMQC, fault-tolerantly measure the $2^{m+1}-2$ stabilizer generators of $\overline{\textsf{RM}}(1,{m+1})$, error-correct given the first $2^{m+1}-m-2$ syndrome bits, and restore the last~$m$ syndrome bits using their associated pure errors.
To convert from $\overline{\textsf{RM}}(1,m+1)$ to $\overline{\textsf{RM}}(1,m)$, they simply fault-tolerantly measure the $2^{m+1}-2$ stabilizer generators of ERMQC, use the first $2^{m+1}-m-2$ syndrome bits to diagnose errors, and restore the last~$m$ syndrome bits using the associated pure errors~\cite{Anderson2014}.
Here we improve the fault-tolerant code conversion method of ADP14 by exponentially reducing the number of stabilizer measurements from $2^{m+1}-2$ to $2m+1$,
which thereby significantly reduces the resource requirement.
Furthermore we use the relations between these stabilizers to simplify the stabilizer measurements.
For code conversion,
we consider single-qubit errors and their influence on the gauge operators so as to achieve fault-tolerant conversion. In the following we first use conversion between the Steane code and the 15-qubit RMQC as an example to describe conversion,
simplification of stabilizer measurements, the single-step error-correction and code-conversion process, and then generalize this conversion to any adjacent RMQCs.
\section{Conversion from the Steane code to the 15-qubit RMQC}
\label{sec:convertSteaneRMQC}
In this section we deal with forward conversion,
which converts from the Steane code to the 15-qubit RMQC.
From Eqs.~(\ref{eq:SRM}) and~(\ref{eq:SEXRM}),
we see that the ERMQC satisfies all the other stabilizers of the 15-qubit RMQC except for $\tilde{H}(1,4)_i^Z,i=1,2,3$.
Our procedure entails measuring
the stabilizers of $\tilde{H}(1,4)_i^Z$, $i=1,2,3$, given in Eq.~(\ref{eq:H14}).
The $i^\text{th}$ $Z$ syndrome is
\begin{equation}
\label{eq:SiMHi}
S_i=\mathbb{M}\left(\tilde{H}(1,4)_i^Z\right)\in\{0,1\}, ~~i=1,2,3,
\end{equation}
for~$\mathbb{M}(\tilde{H})$
denoting the syndrome measurement result of stabilizer $\tilde{H}$.
Operations are then performed on the collapsed state according to syndromes
as shown in Table~\ref{table:syndrome} in order to get the 15-qubit RMQC.
\begin{table}
\caption{Relation between stabilizer measurement results and corresponding operations for forward conversion in the no-error case~(\ref{eq:noerrorcase})
with $S_i=\mathbb{M}\left(\tilde{H}(1,4)_i^Z\right)$.}
\label{table:syndrome}
\begin{tabular}{C{1.2cm}C{1.2cm}C{1.2cm}C{3.5cm}C{1.2cm}C{1.2cm}C{1.2cm}C{3.5cm}}
\hline\noalign{
}
$S_1$ & $S_2$ & $S_3$ & operation & $S_1$ & $S_2$ & $S_3$ & operation\\
\noalign{
}\hline\noalign{
}
0 & 0 & 0 & I & 1 & 0 & 0 & $X_{10}X_{11}X_{14}X_{15}$\\
0 & 0 & 1 & $X_{12}X_{13}X_{14}X_{15}$ & 1 & 0 & 1 & $X_{10}X_{11}X_{12}X_{13}$\\
0 & 1 & 0 & $X_{9}X_{11}X_{13}X_{15}$ & 1 & 1 & 0 & $X_{9}X_{10}X_{13}X_{14}$\\
0 & 1 & 1 & $X_{9}X_{11}X_{12}X_{14}$ & 1 & 1 & 1 & $X_{9}X_{10}X_{12}X_{15}$\\
\hline\noalign{
}
\end{tabular}
\end{table}
The operations shown in Table~\ref{table:syndrome} are chosen as follows.
For example, if we obtain $S_1=S_2=0$ and $S_3=1$,
then we need to use~$X$-type operations, which commute with all $Z$-type stabilizers except $\tilde{H}(1,4)_3^Z$ so we can choose $X_{12}X_{13}X_{14}X_{15}$ as the operations. If we obtain $S_1=S_2=1$ and $S_3=0$, then we can choose the operations as $X_{9}X_{10}X_{13}X_{14}$, which anti-commute with $\tilde{H}(1,4)_1^Z$ and $\tilde{H}(1,4)_2^Z$ but commute with all the others.
We can divide our analysis into two cases:
the no-error case, which assumes ideal state preparation~(\ref{eq:noerrorcase}),
and the case of a single--qubit error occurring before the code conversion.
In the case of a single-qubit error,
this error can influence the syndromes of Table~\ref{table:syndrome} and accordingly result in wrong operations
so we need to distinguish single-qubit errors and fix the syndromes before the corrections in Table~\ref{table:syndrome}.
The syndromes of $S(1,4)$ are
\begin{equation}
\label{eq:S14iB}
S(1,4)_i^{B\in\{X,Z\}}
=\mathbb{M}\left(\bar{G}(1,4)_i^{B\in\{X,Z\}}\right), ~~i=1,2,3,4,
\end{equation}
and the single-qubit errors are shown in Table~\ref{table:errorsyndrome}.
\begin{table}
\caption{Relation between stabilizer measurement results and single-qubit errors.
The syndromes indicate the bit error when $B=Z$, and indicate the phase error when $B=X$.}
\label{table:errorsyndrome}
\begin{tabular}{C{1.5cm}C{1.5cm}C{1.5cm}C{1.5cm}C{1.5cm}C{1.5cm}C{1.5cm}C{1.5cm}C{1.5cm}C{1.5cm}}
\hline\noalign{
}
$S(1,4)_1^B$ & $S(1,4)_2^B$&$S(1,4)_3^B$ & $S(1,4)_4^B$ & error & $S(1,4)_1^B$ & $S(1,4)_2^B$ & $S(1,4)_3^B$ & $S(1,4)_4^B$ & error\\
\noalign{
}\hline\noalign{
}
0 & 0 & 0 & 0 & NO & 0 & 0 & 0 & 1 & 8\\
0 & 0 & 1 & 0 & 4 & 0 & 0 & 1 & 1 & 12\\
0 & 1 & 0 & 0 & 2 & 0 & 1 & 0 & 1 & 10\\
0 & 1 & 1 & 0 & 6 &0 & 1 & 1 & 1 & 14\\
1 & 0 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 9\\
1 & 0 & 1 & 0 & 5 & 1 & 0 & 1 & 1 & 13\\
1 & 1 & 0 & 0 & 3 & 1 & 1 & 0 & 1 & 11\\
1 & 1 & 1 & 0 & 7 &1 & 1 & 1 & 1 & 15\\
\noalign{
}\hline
\end{tabular}
\end{table}
By the~$X$-type syndromes of Table~\ref{table:errorsyndrome}, we can identify the single-qubit~$Z$ error represented as $Z_i$.
The error $Z_i$ has no influence on the syndromes of Table~\ref{table:syndrome} so we need only add the $Z_i$ operation in the correction process.
Similarly, by the~$Z$-type syndromes of Table~\ref{table:errorsyndrome}, we can identify the single-qubit~$X$ error written as $X_j$.
However, this~$X$ error could also result in wrong syndromes of Table~\ref{table:syndrome}
so we need to consider the influence of this error when choosing the operations.
Let us consider the point in Fig.~\ref{fig:forwardconversion} subsequent to the $S_3$ measurement
at which point we have measured all three syndromes $S_1$, $S_2$ and $S_3$.
Then we can simplify the measurement according to
\begin{align}
\label{eq:S141ZM}
S\left(1,4\right)_1^Z
=&\mathbb{M}\left(\bar{G}(1,4)_1^{Z}\right)
\nonumber\\
=&\mathbb{M}\left(Z_1Z_3Z_5Z_7Z_9Z_{11}Z_{13}Z_{15}\right)
\nonumber\\
=&\mathbb{M}\left(Z_3Z_7Z_{11}Z_{15}Z_1Z_5Z_9Z_{13}\right)
\nonumber\\
=&\underbrace{\mathbb{M}\left(Z_3Z_7Z_{11}Z_{15}\right)}_{S_3}
\oplus\underbrace{\mathbb{M}\left(Z_1Z_5Z_9Z_{13}\right)}_{S_4}
\nonumber\\
=&S_3\oplus S_4,
\end{align}
where we have used commutativity of~$Z$ operators in the second step
and, for the third step, we have used the syndrome definition~(\ref{eq:SiMHi})
and the expression for~$\tilde{H}(1,4)_i^Z$ given in Eq.~(\ref{eq:H14}).
Generalizing Eq.~(\ref{eq:S141ZM}),
we obtain the simplifying assignments
\begin{align}
\mathbb{M}\left(\bar{G}(1,4)_1^{Z}\right)\to&S_4=\mathbb{M}\left(Z_1Z_5Z_{9}Z_{13}\right),
\nonumber\\
\mathbb{M}\left(\bar{G}(1,4)_2^{Z}\right)\to&S_5=\mathbb{M}\left(Z_2Z_6Z_{10}Z_{14}\right),
\nonumber\\
\mathbb{M}\left(\bar{G}(1,4)_3^{Z}\right)\to&S_6=\mathbb{M}\left(Z_4Z_5Z_{12}Z_{13}\right).
\end{align}
Unfortunately
\begin{equation}
\mathbb{M}\left(\bar{G}(1,4)_4^{Z}\right)=\mathbb{M}\left(Z_8Z_{9}Z_{10}Z_{11}Z_{12}Z_{13}Z_{14}Z_{15}\right)
\end{equation}
cannot be simplified in this way.
However, it can be divided into two weight-4 stabilizers
\begin{align}
S_7=&\mathbb{M}\left(Z_8Z_9Z_{10}Z_{11}\right),
\nonumber\\
S_8=&\mathbb{M}\left(Z_{12}Z_{13}Z_{14}Z_{15}\right).
\end{align}
This division is experimentally realistic, since both of these weight-4 stabilizers commute with all the stabilizers.
The total set of~$Z$-type syndromes is $\{S_1,S_2,\dots,S_8\}$.
Using the technique shown in Eq.~(\ref{eq:S141ZM}),
we obtain three additional relations
\begin{equation}
\label{eq:S1423ZM}
S(1,4)_2^Z=S_3\oplus S_5,\;
S(1,4)_3^Z=S_3\oplus S_5\oplus S_2\oplus S_6,\;
S(1,4)_4^Z=S_7\oplus S_8
\end{equation}
and Table~\ref{table:errorsyndrome} to deduce the single-qubit~$X$ error $X_j$.
Then we modify the first three syndromes~$S_{1,2,3}$ using the relation
\begin{equation}
\label{eq:syndromecorrection}
Z_j\in \tilde{H}(1,4)_n^Z\implies S_n=S_n\oplus1
(n=1,2,3).
\end{equation}
Now we use the modified $S_{1,2,3}$ and Table~\ref{table:syndrome} to deduce the operations written as~$X_\text{sub}$. Thus, the total operations we need to perform for the correction process are
$X_\text{sub}X_jZ_i$; by these operations we obtain the 15-qubit RMQC.
\begin{figure}
\caption{
Conversion circuit from Steane code to 15-qubit RMQC.
Quantum information is first encoded into the Steane code
$\ket{\psi}
\label{fig:forwardconversion}
\end{figure}
The forward conversion circuit is shown in Fig.~\ref{fig:forwardconversion}, where we first prepare the entangled state of a single qubit with the Steane-code qubits; these ancillary qubits together with the initial Steane code form the ERMQC.
Then we measure four weight-8 X type stabilizers and eight weight-4 Z type stabilizers.
Preparation and syndrome measurements shown in Fig.~\ref{fig:forwardconversion} are not fault-tolerant as the single error may spread into multiple errors. The fault-tolerant preparation can be realized by using gadgets~\cite{Aliferis2006}.
In order to ensure fault-tolerant measurement,
we use the Shor state~\cite{Weinstein2015},
encoded Bell pairs~\cite{Knill2005}, encoded blocks~\cite{Steane1997,Weinstein2016}, or flag qubits~\cite{Chao2017}, which reduce the requisite resources significantly, to perform the syndrome measurement; we then repeat the syndrome measurement three times and take a majority vote to ensure fault-tolerant measurement.
Recall that we separately consider the no-error case~(\ref{eq:noerrorcase})
and the case of a single-qubit error.
Even in the no-error case,
subsequent to the $S_1$, $S_2$ and $S_3$ syndrome measurements,
the ERMQC collapses to the 15-qubit RMQC with probability~$1/8$.
Thus, we need to apply the fixing operation shown in Table~\ref{table:syndrome} with probability~$7/8$.
This fixing operation is shown as the correct box in Fig.~\ref{fig:forwardconversion}.
After this correction we need another round of syndrome measurements;
this round needs~$8$ ancillary qubits to measure the stabilizers of~$\bar{G}(1,4)^X$ and~$\bar{G}(1,4)^Z$
in order to judge whether an error happened during the correction process.
If an error happened in the correction phase,
then we need to repeat the correction and syndrome measurement one more time.
As we assume the error probability is sufficiently small that we can ignore more than one error occurring during the entire process,
we are sure that all eight syndromes are zero in this final step, which we verify by this final syndrome measurement.
If we only want to fault-tolerantly convert from the Steane code to the 15-qubit RMQC, as the code distance is 3, one error is allowed in the final code block.
Single Z errors do not have any influence on the gauge operators,
summarized in Table~\ref{table:syndrome},
that we have used to do conversion, so these single Z errors do not propagate to multiple errors. Thus, we do not need to measure the four X type syndromes which are used to diagnose the Z errors, and the ancillary qubits 9--12 in Fig.~\ref{fig:forwardconversion} can be omitted; in this case we need only measure eight weight-4 Z type stabilizers. After the correction operation, there may be one single-qubit error in the code, but fault-tolerant conversion is fulfilled.
\section{Conversion from the 15-qubit RMQC to the Steane code}
\label{sec:convertRMQCSteane}
In this section we deal with the backward conversion, which first converts from the 15-qubit RMQC to the ERMQC,
then drops the last 8 qubits to get the Steane code.
As in \S\ref{sec:convertSteaneRMQC}, we first deal with the ideal case of no error in the 15-qubit RMQC.
From Eqs.~(\ref{eq:SRM}) and~(\ref{eq:SEXRM}),
we see that the 15-qubit RMQC satisfies all the other stabilizers of the ERMQC except for $\bar{G}(1,3)_i^X$, $i=1,2,3$.
Thus, we measure these three stabilizers $\bar{G}(1,3)_i^X$, $i=1,2,3$, given in Eq.~(\ref{eq:G13}), and write
the $i^\text{th}$ $X$ syndrome as
\begin{equation}
\label{eq:SiMGi}
S'_i=\mathbb{M}\left(\bar{G}(1,3)_i^X\right)\in\{0,1\},~~i=1,2,3.
\end{equation}
Following the syndrome measurements,
we perform the fixing operations on the collapsed state according to Table~\ref{table:backwardsyndrome}.
In Table~\ref{table:backwardsyndrome} the operations are chosen similarly to those in \S\ref{sec:convertSteaneRMQC}.
For example, if we obtain $S'_1=S'_2=0$ and $S'_3=1$,
then we can choose $Z_{3}Z_{7}Z_{11}Z_{15}$ as the fixing operations, which anti-commute with $\bar{G}(1,3)_3^X$ but commute with all the other stabilizers of $\bm{S}_{3\backsim4}$.
\begin{table}
\caption{Relation between stabilizer measurement results and corresponding operations for backward conversion in the no-error case.}
\label{table:backwardsyndrome}
\begin{tabular}{C{1.2cm}C{1.2cm}C{1.2cm}C{3.5cm}C{1.2cm}C{1.2cm}C{1.2cm}C{4cm}}
\hline\noalign{
}
$S'_1$ & $S'_2$ & $S'_3$ & operation & $S'_1$ & $S'_2$ & $S'_3$ & operation\\
\noalign{
}\hline\noalign{
}
0 & 0 & 0 & I & 1 & 0 & 0 & $Z_{2}Z_{3}Z_{10}Z_{11}$\\
0 & 0 & 1 & $Z_{3}Z_{7}Z_{11}Z_{15}$ & 1 & 0 & 1 & $Z_{2}Z_{7}Z_{10}Z_{15}$\\
0 & 1 & 0 & $Z_{1}Z_{3}Z_{9}Z_{11}$ & 1 & 1 & 0 & $Z_{1}Z_{2}Z_{9}Z_{10}$\\
0 & 1 & 1 & $Z_{1}Z_{7}Z_{9}Z_{15}$ & 1 & 1 & 1 & $Z_{1}Z_{2}Z_{3}Z_{7}Z_{9}Z_{10}Z_{11}Z_{15}$\\
\noalign{
}\hline
\end{tabular}
\end{table}
We now consider random single-qubit errors and their influence on the syndromes of Table~\ref{table:backwardsyndrome}
using an approach similar to that used in \S\ref{sec:convertSteaneRMQC}.
First we measure the~$Z$-type syndromes of Table~\ref{table:errorsyndrome} to deduce the single-qubit~$X$ error on qubit~$i$,
written as $X_i$.
This single-qubit error has no influence on the syndromes of Table~\ref{table:backwardsyndrome}
so we need only add the $X_i$ operation in the correction process.
For the~$X$-type syndromes,
as we already have the syndromes $S'_1$, $S'_2$ and $S'_3$,
similarly to \S\ref{sec:convertSteaneRMQC}, by generalizing Eq.~(\ref{eq:S141ZM}),
we obtain the simplifying assignments
\begin{align}
\mathbb{M}\left(\bar{G}(1,4)_1^{X}\right)\to&S'_4=\mathbb{M}\left(X_9X_{11}X_{13}X_{15}\right),
\nonumber\\
\mathbb{M}\left(\bar{G}(1,4)_2^{X}\right)\to&S'_5=\mathbb{M}\left(X_{10}X_{11}X_{14}X_{15}\right),
\nonumber\\
\mathbb{M}\left(\bar{G}(1,4)_3^{X}\right)\to&S'_6=\mathbb{M}\left(X_{12}X_{13}X_{14}X_{15}\right),
\nonumber\\
\mathbb{M}\left(\bar{G}(1,4)_4^{X}\right)\to&S'_7=\mathbb{M}\left(X_{8}X_{9}X_{10}X_{11}\right).
\end{align}
The relations between these~$X$-type syndromes and $S(1,4)_{1,2,3,4}^{X}$
are
\begin{align}
\label{eq:backwardsynrelation}
&S(1,4)_1^X=S'_1\oplus S'_4,~~~ S(1,4)_2^X=S'_2\oplus S'_5,\notag\\
&S(1,4)_3^X=S'_3\oplus S'_6,~~~ S(1,4)_4^X=S'_6\oplus S'_7.
\end{align}
We use the syndrome set $\{S'_1,S'_2,\dots,S'_7\}$
and the relations in Eq.~(\ref{eq:backwardsynrelation}) as well as Table~\ref{table:errorsyndrome} to deduce the single-qubit~$Z$ error denoted $Z_j$.
Next, we modify the first three syndromes according to
\begin{equation}
\label{eq:backwardsyndromecorrection}
X_j\in \bar{G}(1,3)_n^X\implies S'_n=S'_n\oplus1
(n=1,2,3).
\end{equation}
Now we use the fixed syndromes $S'_{1,2,3}$ and Table~\ref{table:backwardsyndrome} to obtain the operations, written as $Z_\text{sub}$. The total operation we need to perform in the correction process is $Z_\text{sub}Z_jX_i$; by these operations we obtain the 15-qubit ERMQC. Then we can discard the last 8 qubits to get the Steane code.
The backward conversion circuit is shown in Fig.~\ref{fig:backwardconversion},
which shows that we need to measure four weight-8 Z type stabilizers and seven weight-4 X type stabilizers.
To guarantee fault tolerance,
the method used in \S\ref{sec:convertSteaneRMQC} to make fault-tolerant measurements should also be applied here.
The probability of obtaining the correct Steane code without
applying the fixing operation is also~$1/8$ for the no-error case as is the case for forward conversion.
Thus, with probability $1/8$, the procedure terminates here with no error.
Otherwise,
in order to obtain the correct Steane code without error,
then,
after the correction process,
we undertake another round of syndrome measurements;
this additional round needs six ancillary qubits to measure the stabilizers of~$\bar{G}(1,3)^X$ and~$\bar{G}(1,3)^Z$ to determine whether an error happened during the correction process.
If an error is detected,
we repeat the correction and syndrome measurement one more time.
Then, because we allow only one error, the procedure is complete.
As in \S\ref{sec:convertSteaneRMQC}, if we only want to fulfill fault-tolerant conversion, the ancillary qubits 8--11 in Fig.~\ref{fig:backwardconversion}, which are used to diagnose the single X errors,
can be omitted.
Thus we need only measure seven weight-4 X type stabilizers.
\begin{figure}
\caption{
Conversion circuit from 15-qubit RMQC to Steane code.
The ancillary qubits 1--11 are used to measure the syndromes
$S'_1=\mathbb{M}
\label{fig:backwardconversion}
\end{figure}
\section{Conversion between adjacent RMQCs}
\label{sec:convertmtomplus1}
As the recursive relation~(\ref{eq:Gm1})
exists for all adjacent RMQCs,
denoted~$\overline{\textsf{RM}}(1,m)$ and~$\overline{\textsf{RM}}\left(1,m+1\right)$,
our method is suitable for all conversions between these adjacent codes.
Now we explain how to convert between adjacent RMQCs and the resources needed.
Beginning with the case $m=3$ and Eq.~(\ref{eq:Gm1}),
we obtain the generator matrix~$\bar{G}(1,m)$ and~$\bar{G}\left(1,m+1\right)$ for $\overline{\textsf{RM}}(1,m)$ and $\overline{\textsf{RM}}\left(1,m+1\right)$.
Accordingly we can define a matrix
\begin{equation}
\tilde{H}\left(1,m+1\right)
:= \begin{bmatrix}
\multicolumn{2}{c}{\tilde{H}\left(1,m+1\right)_{1}}\\
\multicolumn{2}{c}{\vdots}\\
\multicolumn{2}{c}{\tilde{H}\left(1,m+1\right)_{m}}\\
\bar{G}(1,m)~~&\bm{\bar{0}}_{2^m}\\
\tilde{H}(1,m)~~&\bm{\bar{0}}_{2^m}\\
\bm{\bar{0}}_{2^m}~~&\tilde{H}(1,m)\\
\end{bmatrix},
\end{equation}
for $m\geq4$,
where
\begin{equation}
\tilde{H}\left(1,m+1\right)_{1}
=\bm{1}\left(1,3\right),
\tilde{H}\left(1,m+1\right)_{2}
=\bm{1}\left(2,3\right)
\end{equation}
and
\begin{equation}
\tilde{H}\left(1,m+1\right)_{n}
=\bm{1}\left(3,3+2^{n-1}\right),
\;3\leq n\leq m,
\end{equation}
and $\bm{1}\left(x,y\right)$ means a vector is~$1$ at the positions of $x,y, x+2^m, y+2^m$, and~$0$ for the other elements.
We can verify
\begin{equation}
\tilde{H}\left(1,m+1\right)\bar{G}\left(1,m+1\right)^T=0
\end{equation}
and the parity-check matrix for~$\overline{\textsf{RM}}(1,m+1)$,
namely,
$\bar{H}\left(1,m+1\right)$,
is the vertical concatenation of
$\bar{G}\left(1,m+1\right)$ and~$\tilde{H}\left(1,m+1\right)$.
Thus, the stabilizer group for $\overline{\textsf{RM}}\left(1,m+1\right)$ can be written as
\begin{equation}
\label{eq:SRM+1}
\bm{S}\left(1,m+1\right)
=\left\langle \bar{G}\left(1,m+1\right)^X, \bar{G}\left(1,m+1\right)^Z, \tilde{H}\left(1,m+1\right)^Z\right\rangle.
\end{equation}
The ERMQC for conversion from~$m$ to $m+1$ can be written as
\begin{equation}
\ket{\phi}_{m\backsim m+1}
=1/\sqrt{2}\left(\alpha\ket{\bar{0}}_{m}+\beta\ket{\bar{1}}_{m}\right)
\left(\ket{0}\ket{\bar{0}}_{m}+\ket{1}\ket{\bar{1}}_{m}\right).
\end{equation}
The stabilizer group for this code can be written as
\begin{align}
\label{eq:SEXRM+1}
\bm{S}_{m\backsim m+1}=\Big\langle &\left(\bar{G}(1,m)\otimes\bm{\bar{0}}_{2^m}\right)^X,\bar{G}\left(1,m+1\right)^X, \left(\bar{G}(1,m)\otimes\bm{\bar{0}}_{2^m}\right)^Z,\notag \\
&\bar{G}\left(1,m+1\right)^Z,\left(\tilde{H}(1,m)\otimes\bm{\bar{0}}_{2^m}\right)^Z,\left(\bm{\bar{0}}_{2^m}\otimes \tilde{H}(1,m)\right)^Z
\Big\rangle.
\end{align}
Both the RMQC and ERMQC correspond to a subsystem code with stabilizer group of
\begin{align}
\bm{S}^\text{sub}_{m\backsim m+1}=\Big\langle &\bar{G}\left(1,m+1\right)^X,\bar{G}\left(1,m+1\right)^Z,\left(\bar{G}(1,m)\otimes\bm{\bar{0}}_{2^m}\right)^Z,\notag \\
&\left(\tilde{H}(1,m)\otimes\bm{\bar{0}}_{2^m}\right)^Z,\left(\bm{\bar{0}}_{2^m}\otimes \tilde{H}(1,m)\right)^Z\Big\rangle
\end{align}
with different gauge operators so they can convert to each other by gauge fixing.
For forward conversion,
which converts from $\overline{\textsf{RM}}(1,m)$ to $\overline{\textsf{RM}}\left(1,m+1\right)$, we compare Eq.~(\ref{eq:SEXRM+1}) with Eq.~(\ref{eq:SRM+1})
to discover that the first~$m$ stabilizers of $\tilde{H}\left(1,m+1\right)^Z$ may not be satisfied.
Thus, in order to convert forward,
we need only measure these~$m$ stabilizers and choose the corresponding operations to correct the collapsed state to $\overline{\textsf{RM}}\left(1,m+1\right)$. These $X$-type operations can be chosen similarly to those in \S\ref{sec:convertSteaneRMQC}: they commute with all the other stabilizers except for the stabilizers which were measured to be~$1$.
For backward conversion,
which converts from $\overline{\textsf{RM}}\left(1,m+1\right)$ to $\overline{\textsf{RM}}(1,m)$,
we discover that the stabilizers
$\left(\bar{G}(1,m)\otimes\bm{\bar{0}}_{2^m}\right)^X$
of ERMQC may not be satisfied by comparing Eq.~(\ref{eq:SRM+1}) with Eq.~(\ref{eq:SEXRM+1}).
Thus, we need~$m$
ancillary qubits to measure these stabilizers and do corresponding operations to get the ERMQC. Finally we drop the last $2^m$ qubits to obtain $\overline{\textsf{RM}}(1,m)$.
Considering random single-qubit errors, for both directions of conversion we also need to measure the stabilizers of $\bar{G}\left(1,m+1\right)^X$ and $\bar{G}\left(1,m+1\right)^Z$ to distinguish the single-qubit errors, and to fix the first~$m$ syndromes using the relations in Eqs.~(\ref{eq:syndromecorrection}) and~(\ref{eq:backwardsyndromecorrection}) with $n=1,2,\ldots,m$ respectively.
Then, by using the fixed syndromes,
we choose the correct fixing operations and perform error correction and fixing operations in one single step.
Following the above description,
we need to separately measure $3m+2$ stabilizers for both forward and backward conversion.
For forward conversion we need to measure separately~$m+1$ weight-$2^{m}$ X, Z type stabilizers and~$m$ weight-4 Z type stabilizers, and,
for backward conversion,
we need to measure separately $m+1$ weight-$2^{m}$ X, Z type stabilizers and~$m$ weight-$2^{m-1}$ X type stabilizers. Whereas,
in Ref.~\cite{Anderson2014},
the authors measure all $2^{m+1}-2$ stabilizers and refer to $2^{m+1}-m-2$ syndromes to perform error correction. For the case $m=3$, the number of useful syndromes is the same, but for larger~$m$, our method reduces the number exponentially with~$m$.
Using the simplified method shown in Eq.~(\ref{eq:S141ZM}), the resources can be further reduced.
For forward conversion we need to measure $m+1$ weight-$2^{m}$ X type stabilizers, $m$ weight-4 Z type stabilizers, $m$ weight-$(2^{m}-4)$ Z type stabilizers plus one weight-$2^{m}$ Z type stabilizer; for the backward conversion we need to measure $m+1$ weight-$2^{m}$ Z type stabilizers and $2m+1$ weight-$2^{m-1}$ X type stabilizers. For the case $m=3$, if we divide the only weight-$2^{m}$ Z type stabilizer into two weight-4 stabilizers, the number of syndromes is 12 and the total weights of the syndromes are 64 and 60 respectively for forward and backward conversion, the same as shown in Figs.~1 and~2. If we only want to fulfill fault-tolerant conversion, the $m+1$ X type stabilizer measurements for the forward conversion and the $m+1$ Z type stabilizer measurements for the backward conversion can be omitted, and the number of stabilizers needed to be measured is reduced to $2m+1$.
\section{Simulation and cost analysis}
\label{sec:simulationandcost}
We simulate both directions of conversion using MATLAB$^{\tiny{\textregistered}}$ for the case of $m=3$ and $m=4$, where we introduce random single-qubit errors. The $X$-type stabilizer measurements are realized by first making a transversal~$\bar{H}$,
defined in Eq.~(\ref{eq:barHn}),
to the code, which converts an~$X$ measurement to a~$Z$ measurement and vice versa. Thus, we can measure the corresponding $Z$-type stabilizers in the dual code.
Then we use the transversal~$\bar{H}$ to convert back to the original code space.
The simulation process is depicted in Fig.~\ref{fig:simulationprocess}.
The computer programme is used to verify that,
for any single-qubit errors in the input,
our single-step error-correction and code-conversion process is performing correctly.
The single-step process includes first combining the syndromes according to Eqs.~(\ref{eq:S141ZM}), (\ref{eq:S1423ZM}) and~(\ref{eq:backwardsynrelation}).
Then,
based on these syndromes,
we diagnose the single-qubit errors and use relations~(\ref{eq:syndromecorrection}) and~(\ref{eq:backwardsyndromecorrection}) to fix the first~$m$ syndromes and choose the corresponding operations. Then we implement the operations and compare the actual output with the ideal output to check whether the circuit and the single-step process is correct.
The result shows that,
if we use all $3m+2$ syndromes,
we obtain the correct output.
If we only use $2m+1$ syndromes,
we obtain the output with at most only one error.
This error does not propagate during conversion as it is fault-tolerant.
\begin{figure}
\caption{
The simulation process to verify our single-step error-correction and code-conversion process.
}
\label{fig:simulationprocess}
\end{figure}
In order to see the huge decrease in requisite resources,
we compare the average resource requirement for three methods to realize the fault-tolerant logical $\bar{T}$ gate for the Steane code. These three methods are
the standard method~\cite{Nielsen2000,Fowler2011,Bremner2011}, the ADP14 method~\cite{Anderson2014} and our simplified method.
We assess using the average cost employed by Choi~\cite{Choi2015} which includes our previous counting of \textsc{CNOT} gates but also incorporates all gates, measurements and error rate.
Specifically, we apply Choi's average-cost assessment~\cite{Choi2015} to the standard method. The average cost for ADP14 to realize $\bar{T}$ is shown in Table~\ref{table:costADP}, the average cost for our method to realize $\bar{T}$ is shown in Table~\ref{table:costourmethod}.
In our calculation, we treat all physical gates and single-qubit measurements as having fixed unit cost.
We treat the error rates of the qubit, gate and measurement as all being equal to $\epsilon\in[10^{-6},10^{-3}]$~\cite{Steane2003}.
The calculation results for these three methods are shown in Table~\ref{table:averagecost}.
\begin{table}
\caption{ Cost(ADP14), where Cost(entangleS) can be found in Table~26 in~\cite{Choi2015} plus the cost for $\ket0_{Steane}$, AvgCost($S_{i,k}$) can be found in Table~12 in~\cite{Choi2015}.}
\label{table:costADP}
\begin{tabular}{C{3.5cm}C{7cm}C{5.5cm}}
\hline\noalign{
}
Cost or probability & Value or reference & Explanation \\
\noalign{
}\hline\noalign{
}
Cost(ancillary) & Cost(entangleS) & cost for the input\\
Cost($QEC_{RM}$) & $8\times \operatorname{AvgCost}(S_{i,8}$)+$6\times \operatorname{AvgCost}(S_{i,4}$) & 14 stabilizer measurements\\
Cost(fix operation) & $0.875\times 4\times \operatorname{Cost}(X)$ & fix operation, Table~\ref{table:syndrome}\\
Cost($\bar{T}$) & $15\times \operatorname{Cost}(T)$ & Transversal T on RM code \\
Cost($QEC_{RM}$) & $8\times \operatorname{AvgCost}(S_{i,8}$)+$ 6\times \operatorname{AvgCost}(S_{i,4}$) & 14 stabilizer measurements\\
Cost(fix operation) & $0.75\times 4\times \operatorname{Cost}(Z)+0.125\times8\times \operatorname{Cost}(Z)$ & fix operation, Table~\ref{table:backwardsyndrome}\\
Average cost & Sum of all costs & \\
\hline\noalign{
}
\end{tabular}
\end{table}
\begin{table}
\caption{ Cost(our method), where Cost(entangleS) can be found in Table~26 in~\cite{Choi2015} plus the cost for $\ket0_{Steane}$, AvgCost($S_{i,k}$) can be found in Table~12 in~\cite{Choi2015}. }
\label{table:costourmethod}
\begin{tabular}{C{3.5cm}C{7cm}C{5.5cm}}
\hline\noalign{
}
Cost or probability & Value or reference & Explanation \\
\noalign{
}\hline\noalign{
}
Cost(ancillary) & Cost(entangleS) & cost for the input\\
Cost($QEC_{RM}$) & $8\times \operatorname{AvgCost}(S_{i,4}$) & 8 stabilizer measurements\\
Cost(fix operation) & $0.875\times4\times \operatorname{Cost}(X)$ & fix operation, Table~\ref{table:syndrome}\\
Cost($\bar{T}$) & $15\times \operatorname{Cost}(T)$ & Transversal T on RM code \\
Cost($QEC_{RM}$) & $ 7\times \operatorname{AvgCost}(S_{i,4}$) & 7 stabilizer measurements\\
Cost(fix operation) & $ 0.75\times 4\times \operatorname{Cost}(Z)+0.125\times8\times \operatorname{Cost}(Z)$ & fix operation, Table~\ref{table:backwardsyndrome}\\
Average cost & Sum of all costs & \\
\hline\noalign{
}
\end{tabular}
\end{table}
\begin{table}
\caption{ Average cost of fault-tolerant logical $\bar{T}$ for the standard method, ADP14 method and our method}
\label{table:averagecost}
\begin{tabular}{C{3cm}C{5cm}C{3cm}C{3cm}}
\hline\noalign{
}
Error rate ($\epsilon$) & standard method & ADP14 & our method\\
\noalign{
}\hline\noalign{
}
0.000001 & 932&4009 & 1882 \\
0.00001 & 932 &4010& 1883 \\
0.0001 & 937 &4020& 1887\\
0.001 & 981 &4127 & 1928\\
\hline\noalign{
}
\end{tabular}
\end{table}
From these results,
we see that our method reduces the resource overhead significantly compared with ADP14.
The main reason for this cost reduction is that we only measure eight weight-4 syndromes for the forward conversion and seven weight-4 syndromes for the backward conversion,
whereas ADP14 requires measurement of all eight weight-8 syndromes and six weight-4 syndromes. For larger $m$, the advantage of our method is evident as the syndromes are exponentially reduced from $2^{m+1}-2$ to $2m+1$.
Compared with the standard method for fault-tolerantly effecting a \emph{single} $\bar{T}$ gate,
our method needs approximately twice as many resources,
as we first need to convert to the 15-qubit RMQC and then convert back to the Steane code after performing the transversal $\bar{T}$ gate.
This double overhead is not a problem, though.
We do not expect to convert back and forth between codes for each execution of $\bar{T}$.
Once in the 15-qubit RMQC,
we can execute many gates as long as the sequence is not interrupted by the $\bar{H}$ gate,
which is not transversal in that code.
Quantum-code conversion is valuable precisely for this reason:
order the operations in the circuit so as many gates as possible can be performed transversally in one code before converting to the other code for a sequence of gates that can be performed transversally in that code. Moreover, the conversion between the two codes is also extremely important because we could convert the 15-qubit RMQC to the smaller Steane code so as to operate most of the gates more efficiently.
\section{Conclusions}
\label{sec:conclusion}
We use the special case of converting from the Steane code to the 15-qubit RMQC as an example to explain the simplified code-conversion method,
in which the novel single-step process can diagnose the single qubit errors and also consider their influence on the gauge operators so as to fulfill
fault-tolerant conversion. The required resource is further decreased by the relations between gauge operators and stabilizers.
We extend this special case
to the general case of adjacent RMQCs, where the required number of stabilizer measurements is decreased to $2m+1$ from $2^{m+1}-2$.
In particular we provide explicit mathematical expression for all stabilizers that need to be measured for conversion,
and we discuss all requisite ancillary qubits and stabilizers weights.
Conversion between $\overline{\textsf{RM}}(1,3)$ and $\overline{\textsf{RM}}(1,4)$
enables a universal transversal set of gates.
Furthermore, we can also convert between any adjacent order-one RMQCs, and hence between non-adjacent RMQCs; this enables all gates of the type
$\sqrt[2^{m-4}]{T}$
to be transversal for $m\geq4$~\cite{ZCC11}.
\end{document} |
\begin{document}
\title{Single-shot energetic-based estimator for entanglement in a half-parity measurement setup}
\author{Cyril Elouard}
\affiliation{Department of Physics and Astronomy, University of Rochester, Rochester, NY 14627, USA}
\affiliation{CNRS and Universit\'e Grenoble Alpes, Institut N\'eel, F-38042 Grenoble, France}
\author{Alexia Auff\`eves}
\affiliation{CNRS and Universit\'e Grenoble Alpes, Institut N\'eel, F-38042 Grenoble, France}
\author{G\'eraldine Haack}
\email{geraldine.haack@unige.ch}
\affiliation{Universit\'e de Gen\`eve, Department of Applied Physics, Chemin de Pinchat 22, CH-1211 Gen\`eve 4, Switzerland}
\date{\today}
\begin{abstract}
Producing and certifying entanglement between distant qubits is a highly desirable skill for quantum information technologies. Here we propose a new strategy to monitor and characterize entanglement genesis in a half-parity measurement setup, that relies on the continuous readout of an energetic observable which is the half-parity observable itself. Based on a quantum-trajectory approach, we theoretically analyze the statistics of energetic fluctuations for a pair of continuously monitored qubits. We quantitatively relate these energetic fluctuations to the rate of entanglement produced between the qubits, and build an energetic-based estimator to assess the presence of entanglement in the circuit. Remarkably, this estimator is valid at the single-trajectory level and proves to be robust against finite detection efficiency. Our work paves the road towards a fundamental understanding of the stochastic energetic processes associated with entanglement genesis, and opens new perspectives for witnessing quantum correlations thanks to quantum thermodynamic quantities.
\end{abstract}
\maketitle
\section{Introduction}
\label{Sec:intro}
Entanglement is a cornerstone of quantum information technologies: Entangled pairs of qubits have for long been a resource for secure quantum key distribution \cite{Ekert91}, quantum teleportation \cite{Bennett93}, quantum repeaters for long distance quantum communication \cite{Briegel98}, while large scale entanglement can be exploited through cluster states to perform measurement-based quantum computing \cite{Raussendorf01}.
From a practical point of view, entangled pairs of qubits can be produced, e.g. by using quantum gates based on non-linear interactions \cite{Nielsen}, or performing measurement-based protocols of the parity observable. The latter is known to induce entanglement between two qubits initially in a separable state and has been extensively investigated in the last decade within various physical systems. Proposals have been made considering superconducting qubits jointly measured by a cavity mode \cite{Lalumiere10, Tornberg10, Chantasri16, Royer18} and semiconductor quantum dots jointly measured by a quantum point contact \cite{Trauzettel06, Williams08} or by an electronic Mach-Zehnder interferometer in quantum transport experiments \cite{Haack10, Meyer14}. All these works have derived the specific conditions under which these setups can be operated as parity meters. Those conditions include having a fine tuning of the coupling parameters between the qubits and the detector such that only the parity degree of freedom of the qubits is measured. In addition, having each of the two qubits initially in a maximal coherent superposition state is required to generate maximally entangled states. In particular, Ref. \cite{Williams08} investigated the stochastic generation of entanglement from a weak continuous measurement of the parity operator, putting forward the measurement-induced entanglement genesis. Since then, measurement-induced entanglement has eventually been implemented within circuit QED experiments \cite{Riste13, Roch14, Chantasri16}. Interestingly, these platforms also provide the technological know-how to access the quantum trajectories of individual quantum systems both subject to local measurements \cite{Weber14, Ficheux17, Naghiloo18} or to joint-measurements \cite{Roch14, Chantasri16}. Hence, Refs. 
\cite{Roch14, Chantasri16} not only implemented a parity-measurement based protocol onto two qubits, but could also access the stochastic trajectories followed by the joint state of the qubits along the entanglement generation process.
Recently, it was shown that the measurements allowing to reconstruct the pure state trajectories of the monitored systems are associated with energetic fluctuations of genuinely quantum origin called quantum heat. These energetic fluctuations can be turned into work in various protocols \cite{Elouard17b, Elouard18, Buffoni18, Ding18} and have been related to entropy production of quantum origin \cite{Elouard17, Manzano15} and provide new merit criteria to assess the performances of a feedback loop \cite{Alonso16, Naghiloo18}. So far, a thermodynamic analysis of entanglement genesis based on stochastic trajectories has remained elusive.
In this work, we theoretically analyze the statistics of energetic fluctuations for a pair of continuously monitored half spins subject to a half-parity measurement. This is achieved within the framework of stochastic quantum thermodynamics presented in \cite{Elouard17,Elouardchapter} and it allows us to derive and highlight for the first time energetic signatures associated with measurement-induced entanglement genesis. We quantitatively relate these energetic quantum fluctuations to the rate of entanglement produced between the qubits. We then exploit our results to propose a new practical application of these energetic fluctuations by building an energetic-based estimator to attest the presence of entanglement in the circuit. Remarkably, this latter quantity holds at the single trajectory level and does not rely on the measurement record itself.
The paper is organized as follows. In Sec.~\ref{sec2}, we present our system and recall the basics of a half-parity measurement protocol.
In Sec.~\ref{sec3}, we relate parity measurement and energy measurement, and we introduce the quantum energetic fluctuations associated with the quantum heat. We define and compute the stochastic energetic quantities involved in the continuous measurement case. In Sec.~\ref{sec4}, we relate the fluctuations of quantum heat to the rate of entanglement induced between the qubits. This one is investigated through the time-derivative of the concurrence, a monotone measure for two-qubit entanglement. We derive upper and lower bounds for the rate of entanglement genesis, which we exploit in Sec.~\ref{sec5} to build an estimator assessing the presence of entanglement. The latter does not depend explicitly on the measurement record, it is solely based on energetic quantities and is valid at the single-trajectory level. We also investigate its robustness in presence of finite detection efficiency.
\begin{figure}
\caption{Model and motivation. a) Weak continuous half-parity measurement. Two qubits are weakly coupled to a meter able to measure continuously the joint observable $\hat{\Phi}$.}
\label{fig:setup}
\end{figure}
\section{Model}
\label{sec2}
\textit{System--} The system is made of two two-level systems (qubits) with identical energy splitting $\epsilon$ described by their Hamiltonian $\hat H_{{\cal S}}$
\bb
\label{eq:Hqb}
\hat H_{{\cal S}} =\epsilon \left( \hat \sigma_z^{(1)}\otimes \mathds{1} + \mathds{1} \otimes \hat \sigma_z^{(2)}\right) \,,
\ee
where $\sigma_z^{(i)}$ denotes the z-Pauli matrix for qubit $i$. We assume the two qubits to be initially in a separable state, and more specifically in a maximal superposition state:
\bb
\label{eq:psi_0}
\ket{\psi(0)} = \frac{1}{2} \left(\ket{\!\uparrow} + \ket{\!\downarrow} \right) \otimes \left(\ket{\!\uparrow} + \ket{\!\downarrow} \right)\,.
\ee
This choice is motivated by previous works showing that this state belongs to the set of optimal states that lead to maximally entangled final states (i.e. Bell states) when the qubits are subject to a (half-) parity measurement \cite{Haack10}. Hence, this choice of initial state will allow us to investigate the energetic signatures associated to the generation of entanglement in optimal conditions.
In addition, the qubits are subject to a weak continuous measurement of the joint observable $\hat{\Phi}$, which implements a half-parity measurement, see Fig.~\ref{fig:setup}. Within the two-qubit computational basis $\{ {\ket{\!\!\uparrow \uparrow}}, {\ket{\!\!\uparrow \downarrow}}, {\ket{\!\!\downarrow \uparrow}}, {\ket{\!\!\downarrow \downarrow}} \}$, the collective operator $\hat{\Phi} = \sum_{i=1,2} \sigma_z^{(i)}$ is defined by ${\ket{\!\!\uparrow \uparrow}}{\bra{\uparrow \uparrow\!\!}}- {\ket{\!\!\downarrow \downarrow}}{\bra{\downarrow \downarrow\!\!}}$ and has three eigenvalues $\pm 1$ and $0$ with eigenstates ${\ket{\!\!\uparrow \uparrow}}$, ${\ket{\!\!\downarrow \downarrow}}$ and $({\ket{\!\!\uparrow \downarrow}} \pm {\ket{\!\!\downarrow \uparrow}})/\sqrt{2}$ respectively. The eigenvalue 0 is degenerate, implying that this outcome does not allow one to distinguish between the two odd states $\{ {\ket{\!\!\uparrow \downarrow}}, {\ket{\!\!\downarrow \uparrow}}\}$. Consequently, when outcome $0$ is obtained from the measurement, the two qubits are driven into a coherent superposition of those states, which leads to entanglement. On the contrary, the measurement of the eigenvalues $\pm 1$ allows one to distinguish the two even states, leaving the qubits in a product state. Hence, the half-parity measurement presents the specificity of producing in a probabilistic way entangled and product states. It is clear from the expression of $\hat{\Phi}$ that its measurement does not put the qubits into an entangled state (spanned by the odd states) if these ones are not in a coherent superposition initially. Although the forms of $\hat{\Phi}$ and of the parity operator $\hat{P} = \sigma_z^{(1)} \otimes \sigma_z^{(2)}$ seem to be specific, these joint operators are the only ones up to local unitary operations that can generate correlations between two qubits.
This explains the high interest in the past years for this parity measurement protocol within solid-state setups, also known within the quantum information community as a Bell measurement. This one was for instance proposed and realized for quantum teleportation \cite{Bennett93, Riebe04}. \\%Remarkably, the half-parity observable also serves as energy filter within our model: $\hat{\Phi} {\partial}ropto \hat{H}_S$, both being related by the energy gap of the qubits $\epsilon$ (see Eq. \eqref{eq:Hqb}). Hence, the half-parity measurement, in addition to generating entanglement, also provides a direct access to the energetics of the qubits along the quantum trajectories generated by the weak measurement of the corresponding $\hat{\Phi}$. This allows us to distinguish quantum trajectories associated with entanglement genesis or not using post-selection, and to compare their energetic properties.\\
\textit{Quantum trajectories--} Assuming a weak continuous measurement of $\hat{\Phi}$, each realization of the measurement procedure is associated with a quantum stochastic trajectory followed by the qubits and labelled $\gamma$ in the rest of the manuscript \cite{Wiseman96, Jacobs06}. We assume the initial state of the qubits is a known pure state, see Eq.\eqref{eq:psi_0}, such that the trajectory $\gamma$ is made of a sequence of pure states $\{\ket{\psi_\gamma(t)}\}$. To each trajectory $\gamma$ corresponds a stochastic measurement record $I_\gamma(t)$:
\bb
\label{eq:meas_I}
I_\gamma(t) = \moy{\hat\Phi(t)}_\gamma + \frac{dW_\gamma(t)}{2\sqrt{\Gamma}dt}\,.
\ee
Here, $ \moy{\hat\Phi(t)}_\gamma= \bra{\psi_\gamma(t)}\hat\Phi\ket{\psi_\gamma(t)}$ is the expectation value of the half-parity operator w.r.t. the state $\ket{\psi_\gamma(t)}$, and $\Gamma$ corresponds to the detector measurement rate, i.e. the rate at which one is able to distinguish the measurement outcomes from the detector's shot noise \cite{Pilgram02, Korotkov99, Haack10, Meyer14}. The infinitesimal Wiener increment $dW_\gamma(t)$ is a stochastic variable characterized by a zero average and variance $dt$, i.e. $\langle\!\langle dW_\gamma \rangle\!\rangle_t =0$ and $\langle\!\langle dW_\gamma^2 \rangle\!\rangle_t= dt$, where $\langle\!\langle \cdot \rangle\!\rangle_t$ denotes the average over all realizations of the measurement during the time interval $[0, t]$. Note that throughout this work, we use Ito's convention for stochastic differential calculus \cite{Gardiner85, Gillespie96}. This infinitesimal Wiener increment encodes Gaussian fluctuations of the measurement record $I_\gamma(t)$ around its expectation value $\moy{\hat\Phi(t)}_\gamma$ and therefore captures the detector's shot noise in the weak coupling limit \cite{Korotkov99, Jacobs06}.
Based upon the knowledge of $I_\gamma$, the conditional dynamics of the two qubits subject to the weak measurement of the half-parity observable $\hat{\Phi}$ is captured by the stochastic Schr\"odinger equation
\bb
\label{SSE}
d\ket{\psi_\gamma(t)} &=& \left[-i \hat H_{{\cal S}} dt- \frac{\Gamma}{2} dt (\hat{\Phi} - \moy{\hat\Phi(t)}_\gamma)^2\right. \nonumber\\
&& \left. \;+ \sqrt{\Gamma} \, dW_\gamma(t)(\hat \Phi - \moy{\hat\Phi(t)}_\gamma)\right] \ket{\psi_\gamma(t)} .
\ee
\begin{figure}
\caption{Post-selected average concurrence $\mathcal{C}_\gamma(t)$.}
\label{fig:av_thermo}
\end{figure}
Using $\hat H_{{\cal S}} = \epsilon \hat\Phi$, see Eq. \eqref{eq:Hqb}, one can solve analytically Eq.\eqref{SSE} at any time $t$ as a function of the stochastic measurement record:
\bb
\label{eq:state}
\ket{\psi(J_\gamma,t)} = \frac{1}{N_\gamma(t)}\left(e^{(-i\epsilon + 2\Gamma J_\gamma \, )\hat \Phi t - \Gamma \hat\Phi^2 t} \right)\ket{\psi(0)}\,,
\ee
with $N_\gamma(t)$ the time-dependent normalization factor $N_\gamma(t)= \sqrt{(1+ e^{-2 \Gamma t} \cosh(4 \Gamma J_\gamma t ))/2}$. Following Ref. \cite{Jacobs06}, this solution is expressed in terms of the measurement outcome $J_\gamma = 1/t \, \int_0^t I_\gamma(\tau) \, d\tau $:
\bb\label{Jgamma}
J_\gamma(t) = \frac{1}{t} \int_0^t\moy{\hat\Phi(\tau)}_\gamma \, d\tau\, + \frac{W_\gamma(t)}{2\sqrt{\Gamma} t}\,,
\ee
with $W_\gamma(t) = \int_0^t dW_\gamma(t')$ a Gaussian random variable with mean zero and variance $t$. The time integral corresponds to a finite resolution time, typically set by experimental constraints. The probability distribution of $J_\gamma(t)$ is a sum of three Gaussian functions of variance $1/4\Gamma t$, each peaked around one of the eigenvalues $\{0, \pm 1\}$ of the measured observable $\hat \Phi$:
\bb
\label{eq:proba_dis}
P(J_\gamma,t) = \frac{1}{3} \sum_{j=-1}^1 \sqrt{\frac{2 \Gamma t}{\pi}} e^{- (J_\gamma-j)^2/ (2 \tilde{\sigma}_0^2)}\,,
\ee
with the variance $\tilde{\sigma}_0 \equiv \sqrt{\Gamma t}$ setting the measurement strength. In the long time limit $t\gg (4\Gamma)^{-1}$, $J_\gamma(t)$ only takes one of three possible outcomes which are the eigenvalues of the half-parity measurement operator: $J_0 \equiv 0$, $J_{\pm1}\equiv \pm 1$. At those long times, the measurement becomes projective. In the following of the work, we choose to label the different trajectories with their long-time value of $J_\gamma$; this defines three subsets of trajectories, leading the qubits either in an entangled state ($J_\gamma = 0$) or in a product state ($J_\gamma = \pm1$).\\
\textit{Measure of entanglement--} We quantify the presence of entanglement at any time $t$ using the concurrence, a monotone measure for two-qubit entangled states \cite{Wootters98}. The qubits being in a pure state at each instant of time along their trajectory $\gamma$, we can make use of a simpler definition $\mathcal{C}_\gamma(t) = \text{max}\left\{ 0, 2\vert ad - bc \vert \right\}$ where $a,b,c,d$ are the amplitudes of $\ket{\psi_\gamma(t)}$ with respect to the computational states ${\ket{\!\!\uparrow \uparrow}}, {\ket{\!\!\uparrow \downarrow}},{\ket{\!\!\downarrow \uparrow}}, {\ket{\!\!\downarrow \downarrow}}$ respectively. Inserting Eq.\eqref{eq:state}, we get a stochastic concurrence which depends on the measurement outcome $J_\gamma$:
\bb
\label{eq:C}
\mathcal{C}_\gamma(t) &=& \frac{1-e^{- 2 \Gamma t}}{1+ e^{-2 \Gamma t} \cosh(4 \Gamma J_\gamma t)}.
\ee
Figure~\ref{fig:av_thermo}\,a) shows the time evolution of the average post-selected concurrence $\mathcal{C}_\gamma$ according to the three subsets of trajectories, labeled by the final outcomes $J_\gamma \to \{J_0, J_{\pm1}\}$. The average is made over all realizations that belong to a given subset. At long times, $t > 6/\Gamma$, the concurrence can be directly obtained by replacing in Eq.\eqref{eq:C} $J_\gamma$ by the $\hat{\Phi}$-eigenvalues $0, \pm 1$ as the measurement becomes projective. Under optimal conditions as considered here (QND-measurement, no additional dephasing processes, ideal detection scheme), the concurrence reaches the value 1 when qubits are driven into the odd subspace, meaning that the qubits end up in a maximally entangled state. Non-ideal conditions encountered in experiments lead to a lower maximal value of concurrence, and eventually to a long-time state that is separable if dephasing processes are too important. This was for instance the case in Ref. \cite{Roch14}, and can be accounted for theoretically.\\
In this work, the goal is to establish fundamental links between energetic signatures and the generation of entanglement. This is why we focus below on the realization of weak continuous half-parity measurement under optimal conditions. Non-ideal conditions could also be accounted for, but would prevent us to draw clear conclusions on the origin of these energetic signatures.
\section{Half-parity measurement seen as an energy measurement (filter)}
\label{sec3}
It is remarkable that the half-parity observable also serves as energy filter within our model: $\hat{\Phi} \propto \hat{H}_S$, both being related by the energy gap of the qubits $\epsilon$ (see Eq. \eqref{eq:Hqb}). Hence, the half-parity measurement provides a direct access to the energetics of the qubits along the quantum trajectories generated by the weak measurement of $\hat{\Phi}$. The internal energy $U_\gamma(t)$ of the two qubits along a given trajectory $\gamma$ is given by:
\bb
\label{eq:U_def}
U_\gamma(t) = \bra{\psi_\gamma(t)} \hat H_{{\cal S}} \ket{\psi_\gamma(t)} = \epsilon \bra{\psi_\gamma(t)} \hat{\Phi} \ket{\psi_\gamma(t)}\,.
\ee
Considering the initial state $\ket{\psi(0)}$ (see Eq.\eqref{eq:psi_0}), the initial internal energy $U(0)$ is zero and constitutes the energy reference. In the absence of driving, $\hat H_{{\cal S}}$ is time-independent. Hence no work is performed onto the qubits. In addition, there is no thermal reservoir involved in the problem so that a change in the internal energy of the qubits can only arise from the measurement process itself. This form of energy exchange has no classical counter-part \cite{Brandner15,Alonso16, Elouard17, Abdelkhalek16, PRL17, Yi17} and will be, in the line of \cite{Elouard17}, referred to as quantum heat and denoted $Q$ in this article. \\
\textit{Long times: projective energetic measurement--} At long times, $t > (6\Gamma)^{-1}$, the change of internal energy (associated here to a net quantum heat $Q$) takes three different values depending on the measurement outcomes:
\bb
\Delta U \equiv Q = \left\{\begin{array}{c}
0 \quad \text{for} \quad J_\gamma = 0 \\
\epsilon \quad \text{for} \quad J_\gamma = 1 \\
-\epsilon \quad \text{for} \quad J_\gamma = -1.
\end{array} \right.
\ee
When averaged over all trajectories, $\left\langle\!\left\langle \Delta U \right\rangle\!\right\rangle_\gamma = 0$, which equals the reference internal energy at initial time $U(0) = 0$. This equality follows from $[\hat{H}_S, \hat{\Phi}] = 0$ that characterizes a QND-measurement. Indeed, as both observables commute, there must be no change of internal energy on average and this must hold at all times. At long times, the weak continuous measurement of $\hat{\Phi}$ is equivalent to a projective \textit{energy} measurement, and this explains the long-time values of $\Delta U$ for the different subsets of trajectories, equal to the energy of the odd states $\{ \ket{{\uparrow \downarrow}}, \ket{{\downarrow \uparrow}}\}$ and even states $\ket{{\uparrow \uparrow}}$ and $\ket{{\downarrow \downarrow}}$ respectively.\\
\textit{Intermediate times--} At arbitrary time $t$, the quantum heat exchange depends on the exact outcome $J_\gamma(t)$:
\bb
\label{eq:Q}
Q_{\gamma}(t) &=& U_\gamma(t) - U(0) \nonumber \\
&=& \epsilon \, \frac{ e^{- 2 \Gamma t} \sinh(4\Gamma t J_\gamma)}{1+ e^{-2 \Gamma t} \cosh(4\Gamma t J_\gamma)}\,.
\ee
This quantum heat exchange $Q_{\gamma}(t)$, after post-selection, is plotted in Fig.~\ref{fig:av_thermo}~b) as a function of time. As for the concurrence, an average is made within each subset of trajectories defined by the long-time limit value of the measurement record $J_\gamma = 0, \pm 1$. Because the half-parity measurement amounts to an energetic measurement, an access to the internal energy is sufficient to determine whether the qubits are entangled or not. A record equal to 0 implies that the qubits ended up in the odd subspace, in a coherent superposition of the odd states $(\ket{{\uparrow \downarrow}} + \ket{{\downarrow \uparrow}})/\sqrt{2}$.
This first part demonstrates that, on top of being a way to generate entangled state in a heralded way (the outcome is enough to know whether the final state is entangled) as demonstrated in previous works, the half-parity measurement also gives access to the joint internal energy of the qubits. Let us note that the rate at which the quantum heat exchange converges to its final value corresponds to the measurement-induced dephasing rates derived in previous works \cite{Haack10, Meyer14, Riste13,Roch14}.\\
\textit{Stochastic fluctuations at arbitrary times--}
{To derive the fluctuations of the quantum heat,} let us first introduce the increment $\delta Q_\gamma(t)$ that corresponds to the stochastic infinitesimal variation of the internal energy $U_\gamma(t)$ between times $t$ and $t+dt$ along a trajectory $\gamma$. It is related to the total quantum heat exchange $Q_\gamma(t)$ up to time $t$ via:
\bb
\label{eq:Q_dQ}
Q_\gamma(t) = \int_0^t \delta Q_\gamma(t')\,,
\ee
and is defined as
\bb
\delta Q_\gamma(t) &\equiv& d\left(\langle \psi_\gamma(t) \vert \hat H_{{\cal S}}\vert \psi_\gamma(t) \rangle \right) \\
&=& \Big( d\langle \psi_\gamma(t) \vert \Big) \hat H_{{\cal S}}\vert \psi_\gamma(t) \rangle + \langle \psi_\gamma(t) \vert \hat H_{{\cal S}} \Big( d\vert \psi_\gamma(t) \rangle \Big) \nonumber \\
&& + \Big( d\langle \psi_\gamma(t) \vert \Big) \hat H_{{\cal S}} \Big( d\vert \psi_\gamma(t) \rangle \Big) \label{eq:dQ}
\ee
Inserting Eqs.~\eqref{eq:Hqb} and \eqref{eq:state} into Eq.~\eqref{eq:dQ} and expanding the last term up to the first order in $dt$, we obtain
\bb
\label{eq:dQ1}
\delta Q_\gamma(t) &=& 2 \epsilon \sqrt{\Gamma} dW_\gamma(t) \left( 4 P_{\uparrow \uparrow} P_{\downarrow \downarrow} + P_o P_e \right) \nonumber \\
&\equiv& \delta Q^{(e)}_\gamma(t) + \delta Q^{(eo)}_\gamma(t)\,,
\ee
with
\bb
\delta Q^{(e)}_\gamma(t) &=& 8 \epsilon \sqrt{\Gamma} dW_\gamma(t) P_{\uparrow \uparrow} P_{\downarrow \downarrow} \label{eq:dQ2} \\
\delta Q^{(eo)}_\gamma(t) &=& 2 \epsilon \sqrt{\Gamma} dW_\gamma(t) P_e P_o \label{eq:dQ3}\,.
\ee
Here, we have introduced the populations $P_{ij}$ for the 4 two-qubit states defined as $P_{ij} \equiv P_{ij,\gamma}(t) = \vert \langle ij \vert \psi_\gamma(t)\rangle\vert^2 \quad i,j= \uparrow, \downarrow$
and we denote $P_e = P_{\uparrow \uparrow} + P_{\downarrow \downarrow}$ and $P_o = P_{\uparrow \downarrow} + P_{\downarrow \uparrow} $ the populations within the even and odd parity subspaces respectively. From those definitions, the products $P_{\uparrow \uparrow}P_{\downarrow \downarrow} $ and $P_e P_o$ correspond respectively to the squared coherences (off-diagonal elements in the two-qubit density matrix) within the even subspace and between the even and odd subspaces. Let us recall that a (half-) parity measurement generates entanglement by distinguishing the even states from the odd ones. It is therefore the loss of coherence between the two parity subspaces that brings the two qubits in a coherent superposition of odd states when outcome $J_\gamma = 0$ is obtained. Hence, we claim that the product $P_o P_e$ (equivalently $\delta Q^{(eo)}_\gamma(t)$) in Eq.\eqref{eq:dQ3} reflects entanglement genesis, and so does the total heat increment $\delta Q_\gamma(t)$. We demonstrate this claim in the following sections.
\section{Energetic bounds for the entanglement genesis rate}
\label{sec4}
To validate our claim, we define the standard deviation of the quantum heat increment between times $t$ and $t+ dt$ as
\bb
\sigma_\gamma(t) &=& \sqrt{\langle\!\langle \delta Q_\gamma^2(t) \rangle\!\rangle_{dt}} \nonumber\\
&=& 2 \epsilon \sqrt{\Gamma dt}\, \frac{e^{-4 \Gamma t}+e^{-2 \Gamma t} \cosh( 4\Gamma t J_\gamma)}{\left( 1+ e^{-2 \Gamma t} \cosh(4\Gamma t J_\gamma) \right)^2} \,,\label{eq:s_gamma}
\ee
where we made use of $\langle\!\langle dW(t)^2 \rangle\!\rangle_{dt} = dt$ and $\langle\!\langle dW(t) \rangle\!\rangle_{dt} = 0$.
For simplicity, we will work in the following with dimensionless quantities:
\bb
\label{eq:tilde}
\tilde{\sigma}_\gamma(t) = \frac{\sigma_\gamma(t)}{ 2 \epsilon \sqrt{\Gamma dt}} \quad \text{and} \quad \tilde{Q}_\gamma(t) &=& \frac{Q_\gamma(t)}{\epsilon}\,.
\ee
$\sqrt{\Gamma dt}$ refers to the variance of the Gaussian distribution of the measurement record $I_\gamma(t)$ during the discrete time interval $dt$, see the variance $\tilde{\sigma}_0$ defined in Eq. \eqref{eq:proba_dis}. Similarly to Eq. \eqref{eq:dQ1}, we define two contributions to the standard deviation of the stochastic quantum heat increment:
\bb
\tilde{\sigma}_\gamma(t) = \tilde{\sigma}_\gamma^{(eo)}(t)+ \tilde{\sigma}_\gamma^{(e)}(t)
\ee
\begin{figure}
\caption{Infinitesimal quantum heat fluctuations $\tilde{\sigma}_\gamma^{(eo)}(t)$.}
\label{f:fig3}
\end{figure}
Figure \ref{f:fig3} illustrates our claim. The contour plot corresponding to constant values of the concurrence is superimposed onto the density plot of $\tilde{\sigma}^{(eo)}(t)$. It highlights that an increase in the concurrence is associated with non-zero fluctuations of $\delta Q^{(eo)}_\gamma(t)$. In contrast, the concurrence plateaus correspond to the areas where $\tilde{\sigma}^{(eo)}$ vanishes. This observation is supported by a strict equality between the concurrence variation (the concurrence derivative) and $\tilde{\sigma}_\gamma^{(eo)}$ for the trajectories leading the qubits into an entangled state $J_\gamma = 0$ (see App. \ref{appen:1} for detailed derivation):
\bb
\label{eq:ll}
\left.\frac{{d\mathcal{C}}_\gamma}{dt} \right\vert_{J_\gamma = 0} = 4 \Gamma \left.\tilde{\sigma}_\gamma^{(eo)} \right\vert_{J_\gamma = 0}\,.
\ee
Because it constitutes at the same time the source of entanglement and a direct access to the energy and its fluctuations, it follows that the energy fluctuations must be related to the creation of entanglement. However, the practical interest of Eq. \eqref{eq:ll} is limited as it would be impossible in a heat-sensitive measurement to distinguish the two contributions $\delta Q^{(eo)}_\gamma$ and $\delta Q^{(e)}_\gamma$.\\% and taking {$J_\gamma = 0$ has a true physical meaning} when the measurement becomes projective {at long times}. \\
Nevertheless, at intermediate times, we demonstrate that the concurrence derivative, that characterizes the rate at which entanglement is induced by the measurement, can be upper and lower bounded by energetic quantities solely. Starting from
\bb\label{eq:dCdt}
\frac{d \mathcal{C}_\gamma(t)}{dt} &=& 2 \Gamma \Bigg[ \frac{e^{-2 \Gamma t} \left( 1+ \cosh(4 \Gamma t J_\gamma)\right)}{(1+e^{-2 \Gamma t} \cosh(4 \Gamma t J_\gamma))^2} \nonumber \\
&& - \frac{2\mathcal{C}_\gamma (t) Q_\gamma(t)}{\epsilon} \left(\moy{\hat\Phi(t)}_\gamma + \frac{1}{2\sqrt{\Gamma}}\frac{d W_\gamma(t)}{dt} \right) \Bigg]\,,\nonumber\\
\ee
we then perform an ensemble average over all trajectories (or equivalently over the measurement record $I_\gamma(t)$) during the time interval $[t, t+dt]$, keeping the past records $\{I_\gamma(t')\}_{t'<t}$ fixed. This interval $[t, t+ dt]$ is the one over which we investigate the fluctuations of the quantum heat increment $\delta Q_\gamma$, and this ensemble average $\langle \!\langle \cdot \rangle \! \rangle_{dt}$ corresponds to the one used in the definitions of the quantum heat increment fluctuations, see Eq.\eqref{eq:s_gamma}. We then have $\langle \!\langle dW_\gamma(t)/dt\rangle \! \rangle_{dt} = 0$ in Eq.\eqref{eq:dCdt} and
\bb
\label{eq:dC_eo}
\left\langle \!\!\!\left\langle \frac{d \mathcal{C}_\gamma(t)}{dt} \right\rangle \!\!\! \right\rangle_{dt} &=& 2 \Gamma \frac{e^{-2 \Gamma t} + e^{- 2 \Gamma t} \cosh(4 \Gamma t J_\gamma)}{(1+e^{-2 \Gamma t} \cosh(4 \Gamma t J_\gamma))^2} \nonumber \\
&&- 4 \Gamma \, \frac{Q_\gamma}{\epsilon} \, \mathcal{C}_\gamma \moy{\hat\Phi(t)}_\gamma .
\ee
The first term on the r.h.s. can be upper and lower bounded with $\tilde{\sigma}_\gamma$ (see App. \ref{appen:2}):
\bb
&&\left\langle \!\!\!\left\langle \frac{d \mathcal{C}_\gamma(t)}{dt} \right\rangle \!\!\! \right\rangle_{dt} \geq 2 \Gamma \left[ \tilde{\sigma}_\gamma(t) - 2 \tilde{Q}^2_\gamma(t) \mathcal{C}_\gamma(t) \right] \label{eq:ineq1}\\
&&\left\langle \!\!\!\left\langle \frac{d \mathcal{C}_\gamma(t)}{dt} \right\rangle \!\!\! \right\rangle_{dt} \leq 2 \Gamma \left[2 \tilde{\sigma}_\gamma(t) - 2 \tilde{Q}^2_\gamma(t) \mathcal{C}_\gamma(t)\right] \label{eq:ineq2}
\ee
\begin{figure*}
\caption{Success rate of the energetic-based estimator $\mathcal{E}_{ss}$.}
\label{fig:esti}
\end{figure*}
Inequalities \eqref{eq:ineq1} and \eqref{eq:ineq2} constitute one of the main analytical results of this work. The entanglement rate is exclusively upper and lower bounded by energetic quantities, the quantum heat and quantum heat fluctuations defined in Eq.\eqref{eq:tilde}. All quantities are defined over a (small) finite time interval $dt$. Remarkably, we can further exploit Ineq. \eqref{eq:ineq1} towards the derivation of an energetic-based estimator to assess the presence of entanglement at intermediate times. Of interest towards
single shot entanglement detection, this will be done at the level of a unique quantum trajectory.
\section{Single-shot energetic-based estimator for entanglement}
\label{sec5}
Formally, a witness for entanglement is an observable that takes a negative value when averaged with respect to a state that is entangled. If the witness takes a positive value, no conclusions can be drawn, the state can either be entangled or separable \cite{Plenio07}. In the past years, few witnesses based on temperature have been proposed, as a first attempt to exploit energetic quantities to certify the presence of entanglement \cite{Anders06, Anders08}. Following the spirit of assessing the presence of entanglement with some independent quantities, we introduce in this section a \emph{trajectory-based witness}, i.e. a quantity which takes negative value in presence of entanglement, just as usual entanglement witnesses, but which takes as an input a single quantum trajectory, or equivalently a weak measurement record, instead of a quantum state. As a first step to build this witness, we use the property that a given quantum trajectory $\gamma$ will drive the qubits onto an entangled state if,
\bb
\left\langle \!\!\!\left\langle \frac{d \mathcal{C}_\gamma(t')}{dt} \right\rangle \!\!\! \right\rangle_{dt} \geq 0 \,,\quad\forall t' \in [0,t].\label{condition}
\ee
This condition is motivated by the idea that accumulating positive time-derivatives would lead to a positive concurrence at final time $t$. While one cannot certify the presence of entanglement along individual trajectory $\gamma$ from Eq.\eqref{condition}, the integration over time of $\frac{d \mathcal{C}_\gamma(t)}{dt}$ used to compute the concurrence at time $t$ is expected to play the role of the ensemble average $\left\langle \!\!\!\left\langle \cdot \right\rangle \!\!\! \right\rangle_{dt}$ for $\Gamma dt \ll 1$ and provided $t \gtrsim \Gamma^{-1}$. Therefore, condition \eqref{condition} implies an entangled state at time $t$ for a high fraction of the considered trajectories.
Making use of inequality \eqref{eq:ineq1}, this condition translates into:
\bb
\label{eq:lower_bound}
&&\tilde{\sigma}_\gamma(t) - 2 \tilde{Q}_\gamma^2(t) \mathcal{C}_\gamma(t) \geq 0\,,
\ee
We can now use that the concurrence $\mathcal{C}_\gamma$ takes values within $[0,1]$ to prove an inequality assessing the presence of entanglement, that only depends on energetic quantities, $\tilde{\sigma}_\gamma(t) - 2 \tilde{Q}_\gamma^2(t) \geq 0\,.$
We can now directly use this condition to introduce our trajectory-based entanglement witness $\mathcal{W}_\gamma$, defined as:
\bb
\label{eq:wit}
\mathcal{W}_\gamma = \frac{1}{\Delta t}\int_{t_i}^{t_i+\Delta t} \left[2 \tilde{Q}_\gamma^2(t) - \tilde{\sigma}_\gamma(t) \right]dt \,.
\ee
The time average over $\Delta t$ is meant to take into account a finite acquisition time during the experiment.
When {$\mathcal{W}_\gamma$} is negative, one can expect with high probability that the trajectory $\gamma$ leads to qubits in an entangled state. When it is positive, one cannot draw a definite conclusion. However, and as stated before, this quantity implies an ensemble-average $\langle\!\langle \cdot \rangle\!\rangle_{dt}$ over all trajectories occurring during the finite time interval $dt$, {which is not yet optimal for experimental purposes}. We {therefore} define an estimator $\mathcal{E}_{ss}$ (with the label $ss$ referring to single-shot), where the ensemble average in the definition of $\sigma_\gamma(t)$ Eq.\eqref{eq:s_gamma} is replaced by a time average over an interval $\tau$. This procedure is similar to a coarse-graining of the fluctuations along a single trajectory. The witness \eqref{eq:wit} then transforms into an estimator, valid at the level of single trajectory:
\bb
\label{eq:E_ss}
\mathcal{E}_{ss} \!\!= \!\! \frac{1}{\Delta t}\int_{t_i}^{t_i+\Delta t} \left[2 \tilde{Q}_\gamma(t)^2 -\sqrt{\frac{1}{\tau}\int_{t}^{t+\tau} \delta\tilde{Q}_\gamma^2(t')\, dt' } \right]\!dt\,. \nonumber \\
\ee
The performance of $\mathcal{E}_{ss}$ as an estimator to attest entanglement between the qubits is analyzed through two figures of merit, its success rate (ratio of detected vs. total number of trajectories leading to entangled states) and its error rate (whenever $\mathcal{E}_{ss}$ takes a negative value whereas the trajectory does not lead to entangled qubits). The error rate takes the maximal value of $0.2\%$ for the coarse-graining times $\tau = 0.1$ and $\tau = 0.4$ and $1.2\%$ for $\tau= \Delta t$. Indeed, as the coarse-graining time increases, the time average leads to trusty measurements, as expected from the analytical bound derived with the ensemble-averaged. The finite error rate forbids us to claim an energetic witness, but its small value demonstrates the usefulness of our energetic-based estimator. As shown in Fig.\ref{fig:esti}, the success rate of $\mathcal{E}_{ss}$ does not strongly depend on the overall integration window $\Delta t$, but rather on the initial time $t_i$ from which the averages are performed. At large $t_i$, the success rate reaches 1 as expected from the convergence of each trajectory towards one of the eigenstate of the measurement operator $\hat{\Phi}$. However, the success rate exceeds 0.5 for $t_i$ as small as $3/\Gamma$ for $\tau = \Delta t = 0.3/\Gamma$, with a corresponding error rate of $0.4\%$, being promising for future experiments towards single-shot energetic-based estimators to certify the presence of entanglement.
Finally, as a first step towards realistic implementation, we have also investigated the robustness of our estimator against finite detection efficiency. We have numerically simulated trajectories when the half-parity detection channel has an efficiency $\eta<1$, which leads to mixed state trajectories $\rho_\gamma(t)$ \cite{Jacobs06}, see App.~\ref{appen:3}. As a consequence, the analytic justification for the estimator is not valid anymore as the formula for the concurrence has to be modified for mixed states~\cite{Wootters98} and there is no analytic expression for the state conditioned to a given readout $J_\gamma$. However, a numerical treatment is possible, and it is straightforward to extend the definition \eqref{eq:E_ss} to mixed state trajectories, using the average heat increment along mixed state trajectory $\gamma$ defined as $\delta Q_\gamma(t) = \text{Tr}\{d\rho_\gamma(t)\hat H_{\cal S}\}$. The latter is related to the populations of the states $\ket{{\uparrow \uparrow}}$ and $\ket{{\downarrow \downarrow}}$ and of the even and odd subspaces in the same way as in Eq.\eqref{eq:dQ1}, with an additional overall factor $\sqrt{\eta}$. The generation of entanglement happens to be quite robust to finite detection efficiency, as witnessed by the experimental implementations \cite{Riste13,Roch14}. The success rate, computed from a sample of $1000$ numerically generated trajectories, is plotted for finite efficiency in Fig.~\ref{fig:esti}d)-f) for different values of $\eta$, showing that our method is robust against finite detection efficiency. High success rates (up to $80\%$ for $\Delta t = 10/\Gamma$) are predicted even for $\eta = 0.5$ which is the order of magnitude of the experimental conditions. The error rate remains smaller or of the order of $1\%$.
\section{Discussion}
In this work, we investigate the energy fluctuations associated with entanglement genesis during the paradigmatic half-parity measurement procedure. Not only does this measurement generate both product states and entangled states, but it also constitutes an energy observable. Based on a quantum-trajectory approach, and making use of the framework provided by stochastic thermodynamics, we demonstrate that the generation of entanglement is closely related to the presence of quantum heat fluctuations induced by the stochasticity of the weak continuous measurement. We derive analytical upper and lower bounds for the entanglement genesis rate. We then exploit the lower bound to derive an estimator that is solely based on energetic observables and their fluctuations. We show that this energetic-based estimator is indeed able to attest the presence of entanglement at a finite time for a given quantum trajectory, with a tunable probability that depends on the total time-averaged window $\Delta t$. Remarkably, this single-shot estimator is valid at the level of a single trajectory and so does not require any ensemble average. Moreover, the energetic-based estimator we proposed is robust to finite detection efficiency, reaching a success rate close to 1 for suitable integration times as discussed earlier.
These results do not aim at evaluating the energetic cost of creating quantum correlations, see for instance Refs.\cite{Huber15,Bruschi15, Friis16}, but rather at defining an energetic signature associated to their generation. Whereas the quantum heat is zero on average, the quantum fluctuations $\delta Q$ during a finite time interval $dt$ contain relevant information to attest the presence of entanglement.
While exact only in the context of the half-parity measurement process, which also plays the role of an energetic filter, our work establishes theoretical tools for the thermodynamic analysis of the generation of quantum correlations and opens the way to developing witnesses based exclusively on the measurements of energetic quantities for quantum information purposes. This theoretical research is additionally motivated by recent experimental achievements in the emergent field of quantum caloritronics, aiming at controlling and measuring energetic observables like the heat current in various quantum circuits \cite{Giazotto12, Jezouin13, Gasparinetti15, Iftikhar16, Fornieri17, Banerjee17,Zanten16}. \\
\textbf{Acknowledgements}
G.H. gratefully acknowledges discussions with J. Anders, A. Jordan and I. Siddiqi. C.E. acknowledges the US Department of Energy grant No. DE-SC0017890. This research was supported in part by the National Science Foundation under Grant No. NSF PHY-1748958. G.H. thanks the Swiss National Science Foundation for support through the MHV grant 164466 and starting grant PRIMA PR00P2$\_$179748. This research was supported in part by the National Science Foundation under Grant No. NSF PHY17-48958.
\appendix
\section{Long-time limit formula between the concurrence derivative and infinitesimal heat fluctuations}
\label{appen:1}
The fluctuations of the heat increment associated with the loss of coherences between the two parity subspaces are defined in a similar way as $\sigma_\gamma$:
\bb
\sigma_\gamma^{(eo)}(t) &=& \sqrt{\langle\!\langle \delta Q_\gamma^{(eo)2} (t) \rangle\!\rangle_{dt} }\,. \\
&=& 2 \epsilon \sqrt{\Gamma dt} \frac{e^{-2 \Gamma t} \cosh{4 \Gamma t J_\gamma}}{\left(1+ e^{-2 \Gamma t} \cosh{4 \Gamma t J_\gamma}\right)^2}\,. \label{eq:sigma_eo}
\ee
We can compare them to the derivative of the concurrence averaged over realizations occurring during the time interval $dt$:
\bb
\label{eq:dC_eo1}
\left\langle\!\!\!\left\langle \frac{d \mathcal{C}_\gamma}{dt} \right\rangle\!\!\!\right\rangle_{dt} &=& 2 \Gamma \frac{e^{-2 \Gamma t} + e^{- 2 \Gamma t} \cosh(4 \Gamma t J_\gamma)}{(1+e^{-2 \Gamma t} \cosh(4 \Gamma t J_\gamma))^2} \nonumber \\
&&- 4 \Gamma \, \frac{Q_\gamma}{\epsilon} \, \mathcal{C}_\gamma \moy{\hat\Phi(t)}_\gamma .
\ee
When $t \gg 1/\Gamma$, the probability distribution of the measurement outcome $J_\gamma$ is narrowly peaked around the three values corresponding to the eigenvalues of the half-parity measurement operator $\hat{\Phi}$ \cite{Jacobs06}, i.e. $\pm1,0$. The average value of $\hat{\Phi}$ tends also to one of the three eigenvalues. Hence, for trajectories corresponding to qubits in a maximally entangled state at long times, $J_\gamma = \moy{\hat\Phi(t)}_\gamma = 0$ and the heat flow $Q_\gamma = 0$. Consequently, Eqs. \eqref{eq:sigma_eo} and \eqref{eq:dC_eo1} simplify to
\bb
\left.{\sigma}_\gamma^{(eo)}\right\vert_{J_\gamma = 0} &=& 2 \epsilon \sqrt{\Gamma dt } \frac{e^{-2 \Gamma t}}{(1+ e^{-2 \Gamma t})^2} \\
\left.\left\langle\!\!\!\left\langle \frac{d \mathcal{C}_\gamma}{dt}\right\rangle\!\!\!\right\rangle_{dt}\right\vert_{J_\gamma = 0} &=& 4 \Gamma \frac{e^{-2 \Gamma t}}{(1+ e^{-2 \Gamma t})^2} \,.
\ee
When time exceeds the measurement time, the following equality holds:
\bb
\left.\left\langle\!\!\!\left\langle \frac{d \mathcal{C}_\gamma}{dt} \right\rangle\!\!\!\right\rangle_{dt}\right\vert_{J_\gamma = 0} = \frac{2}{\epsilon} \sqrt{\frac{\Gamma}{dt}} \left.{\sigma}_\gamma^{(eo)} \right\vert_{J_\gamma = 0}\,.
\ee
Although only valid and meaningful at times longer than the measurement time, this relation exemplifies the underlying fundamental role of infinitesimal heat fluctuations for the generation of entanglement. As stated in the main text, this relation is only exact in the context of the half-parity measurement considered in this work and cannot be exploited experimentally. Indeed, one could not distinguish in an experiment infinitesimal heat fluctuations originating in the loss of phase coherence between the two parity subspaces ($\sigma_\gamma^{(eo)}$) from total infinitesimal heat fluctuations ($\sigma_\gamma$).
\section{Energetic bounds for the entanglement genesis rate}
\label{appen:2}
The general expression of the concurrence derivative reads
\bb
\frac{d \mathcal{C}_\gamma}{dt} &=& 2 \Gamma \frac{e^{-2 \Gamma t} + e^{- 2 \Gamma t} \cosh(4 \Gamma t J_\gamma)}{(1+e^{-2 \Gamma t} \cosh(4 \Gamma t J_\gamma))^2} \nonumber \\
&&- 4 \Gamma \frac{e^{-2 \Gamma t} \sinh(4 \Gamma t J_\gamma) (1- e^{-2 \Gamma t}) d(J_\gamma t)/dt}{(1+e^{-2 \Gamma t} \cosh(4 \Gamma t J_\gamma))^2} \nonumber \\
&&
\ee
Using Eqs. \eqref{Jgamma},\eqref{eq:C} and \eqref{eq:Q}, it can be rewritten as
\bb
\frac{d \mathcal{C}_\gamma}{dt} &=& 2 \Gamma \frac{e^{-2 \Gamma t} + e^{- 2 \Gamma t} \cosh(4 \Gamma t J_\gamma)}{(1+e^{-2 \Gamma t} \cosh(4 \Gamma t J_\gamma))^2}\nonumber \\
&& - 4 \Gamma \, \frac{Q_\gamma}{\epsilon} \, \mathcal{C}_\gamma \left(\moy{\hat\Phi(t)}_\gamma + \dfrac{dW_\gamma(t)}{dt}\right).
\ee
To enable the comparison with $\tilde{\sigma}_\gamma$, we perform the ensemble average $\langle\!\langle \cdot \rangle\!\rangle_{dt}$ and obtain:
\bb
\left\langle\!\!\!\left\langle \frac{d \mathcal{C}_\gamma}{dt} \right\rangle\!\!\!\right\rangle_{dt} &=& 2 \Gamma \frac{e^{-2 \Gamma t} + e^{- 2 \Gamma t} \cosh(4 \Gamma t J_\gamma)}{(1+e^{-2 \Gamma t} \cosh(4 \Gamma t J_\gamma))^2} \nonumber \\
&&- 4 \Gamma \, \frac{Q_\gamma}{\epsilon} \, \mathcal{C}_\gamma \moy{\hat\Phi(t)}_\gamma\,,
\ee
which corresponds to Eq.\eqref{eq:dC_eo} in the main text.
Using the inequalities
\bb
&&e^{-2 \Gamma t} + e^{- 2 \Gamma t} \cosh(4 \Gamma t J_\gamma) \leq 2 e^{-2 \Gamma t} \cosh(4 \Gamma t J_\gamma) \,, \nonumber \\
&&
\ee
and
\bb
&&e^{-2 \Gamma t} + e^{- 2 \Gamma t} \cosh(4 \Gamma t J_\gamma) \geq e^{-4 \Gamma t} + e^{- 2 \Gamma t} \cosh(4 \Gamma t J_\gamma) \,, \nonumber \\
&&
\ee
we can now compare the r.h.s. of these two inequalities with the standard deviation of the total heat fluctuations $\tilde{\sigma}_\gamma(t)$:
\bb
\tilde{\sigma}_\gamma(t) = \frac{e^{-4 \Gamma t} + e^{- 2 \Gamma t} \cosh(4 \Gamma t J_\gamma) }{(1+e^{-2 \Gamma t} \cosh(4 \Gamma t J_\gamma))^2}\,.
\ee
These bounds directly lead to Eqs. \eqref{eq:ineq1} and \eqref{eq:ineq2} in the main text. \\
\section{Finite detection efficiency}
\label{appen:3}
When the efficiency of the detector takes a finite value $\eta$, the state of the two qubits along a given trajectory $\gamma$ is a mixed state $\rho_\gamma(t)$ which obeys \cite{Jacobs06}:
\bb
&&d\rho_\gamma(t) = -idt[\hat H_{\cal S},\rho_\gamma(t)]\nonumber\\
&& + \frac{\Gamma dt}{2}\left(\hat\Phi\rho_\gamma(t)\hat\Phi - \tfrac{1}{2}\{\hat\Phi^2,\rho_\gamma(t)\}\right)\nonumber\\
&& + \sqrt{\eta \Gamma}dW_\gamma(t)\left(\{\hat\Phi,\rho_\gamma(t)\} - 2\moy{\hat\Phi(t)}\rho_\gamma(t)\right),
\ee
where $\{A,B\} = AB+BA$. The measurement record $I_\gamma(t)$ is now linked to the Wiener increment $dW_\gamma(t)$ via:
\bb
I_\gamma(t) = \moy{\hat\Phi(t)}_\gamma + \frac{dW_\gamma(t)}{2\sqrt{\eta\Gamma}dt}.
\ee
\end{document} |
\begin{document}
\title{A Primal-Dual Approximation Algorithm for Min-Sum Single-Machine
Scheduling Problems\thanks{
A preliminary version of this article appeared in the Proceedings of APPROX-RANDOM 2011.
Research supported partially by NSF grants CCF-0832782, CCF-1017688, CCF-1526067, and CCF-
1522054; NSERC grant PGS-358528; FONDECYT grant No. 11140579, and Nucleo Milenio Informaci\'on y Coordinaci\'on en Redes ICM/FIC RC130003.}}
\author{Maurice Cheung\thanks{School of Operations Research \& Information
Engineering Cornell University, Ithaca NY 14853, USA.}
\and Juli\'{a}n Mestre\thanks{School of Information Technologies, The
University of Sydney, NSW, Australia.}
\and David B. Shmoys\footnotemark[2]
\and Jos\'{e} Verschae\thanks{Facultad de Matem\'aticas \& Escuela de Ingenier\'ia, Pontificia Universidad Cat\'olica de Chile, Santiago, Chile.}
}
\date{}
\maketitle
\begin{abstract}
We consider the following single-machine scheduling problem, which is often
denoted $1||\sum f_{j}$: we are given $n$ jobs to be scheduled on a single
machine, where each job $j$ has an integral processing time $p_j$, and there
is a nondecreasing, nonnegative cost function $f_j(C_{j})$ that specifies the
cost of finishing $j$ at time $C_{j}$; the objective is to minimize
$\sum_{j=1}^n f_j(C_j)$. Bansal \& Pruhs recently gave the first constant
approximation algorithm with a performance guarantee of 16. We improve on this result by giving a primal-dual pseudo-polynomial-time algorithm based on the recently introduced knapsack-cover inequalities. The algorithm finds a
schedule of cost at most four times the constructed dual solution. Although we show that this bound is tight for our algorithm, we leave open the question of whether the integrality gap of the LP is less than 4. Finally, we show how the technique can be adapted to yield, for any $\epsilon >0$, a $(4+\epsilon )$-approximation algorithm for this problem.
\end{abstract}
\section{Introduction}
We consider the following general scheduling problem: we are
given a set $\mathcal{J}$ of $n$ jobs to schedule on a single machine,
where each job $j\in \mathcal{J}$ has a positive integral processing time $p_j$, and there is
a nonnegative integer-valued cost function $f_j(C_{j})$ that specifies the cost of finishing $j$ at time $C_{j}$.
The only restriction on the cost function $f_j(C_{j})$ is that it is a nondecreasing function of $C_{j}$; the objective is to minimize $\sum_{j\in\mathcal{J}} f_j(C_j)$. This problem is denoted as $1||\sum f_{j}$ in the
notation of scheduling problems formulated by Graham, Lawler, Lenstra, \& Rinnooy Kan \cite{GrahamLLR79}.
In a recent paper, Bansal \& Pruhs \cite{BansalP10} gave the first constant approximation algorithm for this problem;
more precisely, they presented a 16-approximation algorithm, that is, a polynomial-time algorithm
guaranteed to be within a factor of 16 of the optimum. We improve on this result: we give a primal-dual pseudo-polynomial-time algorithm that finds a solution directly to the scheduling problem of cost at most four times the optimal cost,
and then show how this can be extended to yield, for any $\epsilon >0$, a
$(4+\epsilon )$-approximation algorithm for this problem. This problem is strongly $NP$-hard, simply by
considering the case of the weighted total tardiness, where $f_{j} (C_{j})= w_{j} \max\{0,C_{j}-d_{j}\}$
and $d_{j}$ is a specified due date of job $j$, $j\in\mathcal{J}$. However, no hardness results are known other than
this, and so it is still conceivable that there exists a polynomial approximation scheme for
this problem (though by the classic result of Garey \& Johnson \cite{GareyJ79a}, no fully polynomial approximation
scheme exists unless P=NP). No polynomial approximation scheme is known even for the special case of weighted total tardiness.
\paragraph{Our Techniques}
Our results are based on the linear programming relaxation of a time-indexed integer programming formulation
in which the 0-1 decision variables $x_{jt}$
indicate whether a given job $j\in\mathcal{J}$, completes at time $t\in \mathcal{T} = \{1,\ldots,T\}$, where $T=\sum_{j\in\mathcal{J}} p_{j}$;
note that since the cost functions are nondecreasing with time, we can assume, without loss of generality, that the machine is active only throughout the interval $[0,T]$, without any idle periods.
With these time-indexed variables, it is trivial to ensure that each job is
scheduled; the only difficulty is to ensure that the machine is not required to process more than one job at a time.
To do this, we observe that, for each time $t\in\mathcal{T}$, the jobs completing at time $t$ or later have total processing time at least $T-t+1$ (by the assumption that the processing times $p_j$ are positive integers); for conciseness,
we denote this demand $D(t) =T-t+1$.
This gives the following integer program:
\begin{align}
\text{minimize}\ \ &\sum_{j\in \mathcal{J}} \sum_{t\in \mathcal{T}} f_j(t)x_{jt} \tag{IP}\label{IP}\\
\text{subject to}\ \ & \sum_{j\in \mathcal{J}} \sum_{s\in \mathcal{T}: s \ge t} p_jx_{js} \ge D(t), & \text{for each}\ t \in \mathcal{T}; \label{eq:demand}\\
& \sum_{t\in \mathcal{T}} x_{jt} = 1, & \text{for each}\ j\in \mathcal{J}; \label{eq:assign}\\
& x_{jt} \in \{0,1\}, & \text{for each}\ j\in \mathcal{J},\ t\in \mathcal{T}. \notag
\end{align}
We first argue that this is a valid formulation of the problem. Clearly, each feasible schedule corresponds to a feasible
solution to (IP) of equal objective function value. Conversely,
consider any feasible solution, and for each job $j\in \mathcal{J}$, assign it the due date $d_{j} =t$ corresponding to $x_{jt}=1$. If we schedule the jobs in Earliest Due Date (EDD) order,
then we claim that each job $j\in \mathcal{J}$, completes by its due date $d_{j}$. If we consider the constraint
(\ref{eq:demand}) in (IP)
corresponding to $t=d_{j}+1$, then since each job is assigned once, we know that
$\sum_{j\in \mathcal{J}} \sum_{t=1}^{d_{j}} p_{j} x_{jt} \leq d_{j};$
in words, the jobs with due date at most $d_{j}$ have total processing time at most $d_{j}$.
Since each job completes by its due date, and the cost functions $f_{j}(\cdot)$ are nondecreasing, we
have a schedule of cost no more than that of the original feasible solution to (IP).
The formulation (IP) has an unbounded integrality gap:
the ratio of the optimal value of (IP) to the optimal value of its linear programming relaxation can be
arbitrarily large. We strengthen this formulation by introducing a class of
valid inequalities called {\it knapsack-cover inequalities}. To understand the starting point for our work,
consider the special case of this scheduling problem in which all $n$ jobs have a common due date $D$,
and for each job $j\in \mathcal{J}$, the cost function is 0 if the job completes by time $D$, and is $w_{j}$, otherwise.
In this case, we select a set of jobs of total size at most $D$, so as to minimize the total weight of the complementary
set (of late jobs). This is equivalent to the minimum-cost (covering) knapsack problem, in which we wish to select a
subset of items of total size at least a given threshold, of minimum total cost. Carr, Fleischer, Leung, and Phillips
\cite{CarrFLP00} introduced knapsack-cover inequalities for this problem (as a variant of flow-cover inequalities introduced by Padberg, Van Roy, and Wolsey \cite{PadbergVW85}) and gave an LP-rounding 2-approximation algorithm based on this formulation.
Additionally, they showed that the LP relaxation with knapsack-cover inequalities has an integrality gap of at least $2-\frac{2}{n}$.
The idea behind the knapsack-cover inequalities is quite simple. Fix a subset of jobs $A \subseteq \mathcal{J}$
that contribute towards satisfying the demand $D(t)$ for time $t$ or later;
then there is a {\it residual demand} from the remaining jobs of $D(t,A):= \max \{D(t) - \sum_{j \in A} p_j, 0\}$.
Thus, each job $j\in \mathcal{J}$ can make an effective contribution to this residual demand of
$p_j(t,A):= \min \{p_j, D(t,A) \}$; that is, given the inclusion of the set $A$, the effective contribution of job $j$ towards
satisfying the residual demand can be at most the residual demand itself. Thus, we have the constraint:
$$\sum_{j \notin A} \sum_{s = t}^T p_j(t,A)x_{js} \ge D(t,A) \mbox{ for
each }t \in \mathcal{T}, \mbox{ and each }A\subseteq \mathcal{J}.$$
The dual LP is quite natural: there are dual variables $y(t,A)$, and a constraint that
indicates, for each job $j$ and each time $s\in \mathcal{T}$, that $f_{j} (s)$ is at least a weighted sum of
$y(t,A)$ values, and the objective is to maximize $\sum_{t,A} D(t,A)y(t,A)$.
Our primal-dual algorithm has two phases: a growing phase and a pruning phase. Throughout the algorithm, we maintain a set of jobs $A_{t}$ for each time $t \in \mathcal{T}$. In each iteration of the growing phase, we choose one dual variable to increase, corresponding to the demand $D(t,A_{t})$ that is largest, and increase that dual variable as much as possible. This causes a dual constraint corresponding to some job $j$ to become tight for some time $t'$, and so we set $x_{jt'} =1$ and add $j$ to each set $A_{s}$ with $s \leq t'$. Note that this may result in jobs being assigned to complete at multiple times $t$; then in the pruning phase we do a ``reverse delete'' that both ensures that each job is uniquely assigned, and also that the solution is minimal, in the sense that each job passes the test that if it were deleted, then some demand constraint (\ref{eq:demand}) in (IP) would be violated. This will be crucial to show that the algorithm is a 4-approximation algorithm. Furthermore, we show that our analysis is tight by giving an instance for which the algorithm constructs primal and dual solutions whose objective values differ by a factor 4. It will be straightforward to show that the algorithm runs in time polynomial in $n$ and $T$, which is a pseudo-polynomial bound.
To convert this algorithm into a polynomial-time algorithm, we adopt an interval-indexed formulation, where we bound the change of cost of any job to be within a factor of $(1+\epsilon)$ within any interval. This is sufficient to ensure a (weakly) polynomial number of intervals, while degrading the performance guarantee by a factor of $(1+\epsilon)$, and this yields the desired result.
It is well known that primal-dual algorithms have an equivalent local-ratio counterpart~\cite{Bar-YehudaR05}. For completeness, we also give the local-ratio version of our algorithm and its analysis. One advantage of the local ratio approach is that it naturally suggests a simple generalization of the algorithm to the case where jobs have release dates yielding a $4\kappa$-approximation algorithm, where $\kappa$ is the number of distinct release dates.
\paragraph{Previous Results}
The scheduling problem $1||\sum f_j$ is closely related to the \emph{unsplittable flow problem} (UFP) on a path. An instance of this problem consists of a path $P$, a demand $d_e$ for each edge $e$, and a set of tasks. Each task $j$ is determined by a cost $c_j$, a subpath $P_j$ of $P$, and a covering capacity $p_j$. The objective is to find a subset $T$ of the tasks that has minimum cost and covers the demand of each edge $e$, i.e., $\sum_{j\in T: e\in P_j} p_j \ge d_e$. The relation of this problem to $1||\sum f_j$ is twofold. On the one hand UFP on a path can be seen as a special case of $1||\sum f_{j}$~\cite{BansalV2013}. On the other hand, Bansal \& Pruhs~\cite{BansalP10} show that any instance of $1||\sum f_{j}$ can be reduced to an instance of UFP on a path while increasing the optimal cost by a factor of 4. Bar-Noy et al.\@~\cite{Bar-NoyBFNS01} study UFP on a path and give a 4-approximation algorithm based on a local ratio technique. In turn, this yields a 16-approximation with the techniques of Bansal \& Pruhs~\cite{BansalP10}. Very recently, and subsequent to the dissemination of earlier versions of our work, H\"ohn et al.\@~\cite{Hohn14} further exploited this connection. They give a quasi-PTAS for UFP on a path, which they use to construct a quasipolynomial $(e+\epsilon)$-approximation for $1||\sum f_{j}$ by extending the ideas of Bansal \& Pruhs~\cite{BansalP10}.
The local ratio algorithm by Bar-Noy et al.\@~\cite{Bar-NoyBFNS01}, when interpreted as a primal-dual algorithm~\cite{Bar-YehudaR05}, uses an LP relaxation that includes knapsack-cover inequalities. Thus, the $4$-approximation algorithm of this paper can be considered a generalization of the algorithm by Bar-Noy et al.\@~\cite{Bar-NoyBFNS01}. The primal-dual technique was independently considered by Carnes and Shmoys~\cite{CarnesS07} for the minimum knapsack-cover problem. Knapsack-cover inequalities have subsequently been used to derive approximation algorithms in a variety of other settings, including the work of Bansal \& Pruhs \cite{BansalP10}
for $1|ptmn, r_{j}|\sum f_{j}$, Bansal, Buchbinder, \&
Naor~\cite{BansalBN07,BansalBN08},
Gupta, Krishnaswamy, Kumar, \& Segev~\cite{GuptaKKS09},
Bansal, Gupta, \& Krishnaswamy~\cite{BansalGK10}, and Pritchard~\cite{Pritchard09}.
An interesting special case of $1||\sum f_j$ considers objective functions of the form $f_j = w_j f$ for some given non-decreasing function $f$ and job-dependent weights $w_j>0$. It can be easily shown that this problem is equivalent to minimize $\sum w_j C_j$ on a machine that changes its speed over time. For this setting, Epstein et\,al.~\cite{EpsteinLMMMSS12} derive a $4$-approximation algorithm that yields a sequence independent of the speed of the machine (or independent of $f$, respectively). This bound is best possible for an unknown speed function. If randomization is allowed they improve the algorithm to an $e$-approximation. Moreover, Megow and Verschae~\cite{Megow13} give a PTAS for the full information setting, which is best possible since even this special case is strongly NP-hard~\cite{Hohn12}.
A natural extension of $1||\sum f_j$ considers scheduling on a varying speed machine to minimize $\sum f_j(C_j)$, yielding a seemingly more general problem. However, this problem can be modeled~\cite{Hohn12,Megow13,EpsteinLMMMSS12} as an instance of $1||\sum f_j$ by considering cost functions $\tilde{f}_j = f_j \circ g$ for a well chosen function $g$ that depends on the speed function of the machine.
\paragraph{Organization of the paper} Section~\ref{sec:pseudopoly} contains our main results, including the pseudopolynomial $4$-approx\-imation algorithm and the proof that its analysis is tight. Section~\ref{sec:poly} shows the techniques to turn this algorithm to a polynomial $(4+\epsilon)$-approximation. The local ratio interpretation is given in Section~\ref{sec:local-ratio}, and the case with release dates is analyzed in Section~\ref{sec:releasedates}.
\section{ A pseudo-polynomial algorithm for $1||\sum f_j$ }
\label{sec:pseudopoly}
We give a primal-dual algorithm that runs in pseudo-polynomial time that has a performance guarantee of 4. The algorithm is based on the following LP relaxation:
\begin{align}
\text{min}\ \ &\sum_{j \in \mathcal{J}} \sum_{t \in \mathcal{T}} f_j(t)x_{jt} \tag{P}\label{P}\\
\text{s.t.}\ \ & \sum_{j \notin A} \sum_{s \in \mathcal{T} : s \geq t} p_j(t,A)x_{js} \ge D(t,A), & &\text{for each}\ t \in \mathcal{T},\ A\subseteq \mathcal{J};\label{eq:kc}\\
& x_{jt} \ge 0, & & \text{for each}\ j\in \mathcal{J},\ t\in \mathcal{T}. \notag
\end{align}
Notice that the assignment constraints (\ref{eq:assign}) are not included in (P). In fact, the following lemma
shows that they are redundant, given the knapsack-cover inequalities. This leaves a much more tractable
formulation on which to base the design of our primal-dual algorithm.
\begin{lemma} Let $x$ be a feasible solution to the linear programming relaxation (P).
Then there is a feasible solution $\bar x$ of no greater cost that also satisfies the assignment constraints
(\ref{eq:assign}).
\label{lem:no-assign}
\end{lemma}
\begin{proof}
First, by considering the constraint (\ref{eq:kc}) with the set $A= \mathcal{J}-\{k\}$ and $t=1$, it is easy to show that for any feasible solution $x$ of (P), we must have
$\sum_{s \in \mathcal{T}} x_{ks} \geq 1$ for each job $k$.
We next show that each job is assigned at most once. We may assume without loss of generality
that $x$ is a feasible solution for (P) in which $\sum_{j \in \mathcal{J}} \sum_{s \in \mathcal{T}} x_{js}$ is minimum.
Suppose, for a contradiction, that $\sum_{s\in \mathcal{T}} x_{js} > 1$ for some job $j$, and let $t$ be the
largest time index where the partial sum $\sum_{s \in \mathcal{T}: s \geq t} x_{js} \ge 1$.
Consider the truncated solution $\bar x$ where
\[\bar x_{ks} = \left\{
\begin{array}{ll}
0, & \mbox{if } k =j \mbox{ and } s < t \\
1- \sum_{s=t+1}^{T} x_{js}, & \mbox{if } k=j \mbox{ and } s=t \\
x_{ks}, & \mbox{ otherwise}
\end{array}
\right. \]
Let us check that the modified solution $\bar x$ is feasible for (P). Fix $s\in \mathcal{T}$ and $A\subseteq \mathcal{J}$. If $s>t$ or $A \ni j$, then clearly $\bar x$ satisfies the corresponding inequality \eqref{eq:kc} for $s,A$ since $x$ does. Consider $s\le t$ and $A\not \ni j$, so that $\sum_{r \in \mathcal{T} : r \geq s} \bar{x}_{j,r}=1$ and $p_k(s,A)=p_k(s,A\setminus\{j\})$ for each $k\in \mathcal{J}$. Then,
\begin{align*}
\sum_{k \notin A} \sum_{r \in \mathcal{T} : r \geq s} p_k(s,A)\bar{x}_{kr} & = p_j(s,A\setminus\{j\})\hspace{-.3cm}\sum_{r \in \mathcal{T} : r \geq s}\bar{x}_{jr}+ \sum_{k \notin A\setminus\{j\}} \sum_{r \in \mathcal{T} : r \geq s} p_k(s,A\setminus\{j\})\bar{x}_{kr} \\
& \ge p_j(s,A\setminus\{j\})+ D(s,A\setminus\{j\}) \ge D(s,A),
\end{align*}
where the first inequality follows since $x$ is feasible for (P). Thus $\bar{x}$ satisfies \eqref{eq:kc}. This gives the desired contradiction because $\sum_{j \in \mathcal{J}} \sum_{s \in \mathcal{T}} \bar x_{js} < \sum_{j \in \mathcal{J}} \sum_{s\in \mathcal{T}} x_{js}$. Finally, since $\bar x \le x$ component-wise and the objective $f_j(t)$ is nonnegative, it follows that $\bar x$ is a solution of no greater cost than $x$.
\end{proof}
Taking the dual of (P) gives:
\begin{align}
\text{max}\ & \sum_{t \in \mathcal{T}} \sum_{A\subseteq \mathcal{J}} D(t,A)y(t,A) \tag{D} \label{D} \\
\text{s.t.}\ & \sum_{t \in \mathcal{T} : t \leq s} \sum_{A: j \notin A} p_j(t,A)y(t,A) \le f_j(s); & &
\text{for each}\ j \in \mathcal{J},\ s \in \mathcal{T}; \label{eq:dc}\\
&y(t,A) \ge 0 & & \text{for each}\ t \in \mathcal{T},\ A\subseteq \mathcal{J}. \notag
\end{align}
\noindent
We now give the primal-dual algorithm for the scheduling problem $1||\sum f_j$.
The algorithm consists of two phases: a growing phase and a pruning phase.
The growing phase constructs a feasible solution $x$ to (P) over a series of iterations. For each $t \in \mathcal{T}$, we let $A_t$ denote the set of jobs that are set to finish at time $t$ or later by the algorithm, and thus contribute towards satisfying the demand $D(t)$. In each iteration, we set a variable $x_{jt}$ to 1 and add $j$ to $A_s$ for all $s \le t$. We continue until all demands $D(t)$ are satisfied. Specifically, in the $k^{th}$ iteration, the algorithm selects
$t^k:= {\text{argmax}}_t D(t,A_t)$, which is the time index that has the largest residual demand with respect to the current partial solution. If there are ties, we choose the \emph{largest} such time index to be $t^k$ (this is not essential to the correctness of the algorithm -- only for consistency and efficiency).
If $D(t^k, A_{t^k})=0 $, then we must have
$\sum_{j \in A_t} p_j \ge D(t)$ for each $t \in \mathcal{T}$; all demands have been satisfied and the growing phase terminates. Otherwise, we increase the dual variable $y(t^k,A_{t^k})$ until some dual constraint \eqref{eq:dc} with right-hand side
$f_j(t)$ becomes tight. We set $x_{jt}=1$ and add $j$ to $A_{s}$ for \emph{all} $s \le t$ (if $j$ is not yet in $A_s$). If multiple constraints become tight at the same time, we pick the one with the \emph{largest} time index (and if there are still ties, just pick one of these jobs arbitrarily). However, at the end of the growing phase, we might have jobs with multiple variables set to 1, thus we proceed to the pruning phase.
The pruning phase is a ``reverse delete'' procedure that checks each variable $x_{jt}$ that is set to 1, in decreasing
order of the iteration $k$ in which that variable was set in the growing phase. We attempt to set $x_{jt}$ back to 0 and
correspondingly delete jobs from $A_t$, provided this does not violate the feasibility of the solution. Specifically, for each variable $x_{jt}=1$, if $j$ is also in $A_{t+1}$ then we set $x_{jt} =0$. It is safe to do so, since in this case, there must exist $t' >t$ where $x_{jt'} =1$, and as we argued in Lemma \ref{lem:no-assign}, it is redundant to have $x_{jt}$ also set to 1. Otherwise, if $j \notin A_{t+1}$, we check if $\sum_{j' \in A_{s} \setminus \{j\}} p_{j'} \ge D(s)$ for each time index $s$ where $j$ has been added to $A_s$ in the same iteration of the growing phase. In other words, we check the inequality for each $s\in\{s_0,\ldots,t\}$, where $s_0<t$ is the largest time index with $x_{js_0}=1$ (and $s_0=0$ if there is no such value). If all the inequalities are fulfilled, then $j$ is not needed to satisfy the demand at time $s$. Hence, we remove $j$ from all such $A_s$ and set $x_{jt} =0$. We will show that at the end of the pruning phase, each job $j$ has exactly one $x_{jt}$ set to 1. Hence, we set this time $t$ as the \emph{due date} of job $j$.
Finally, the algorithm outputs a schedule by sequencing the jobs in Earliest Due Date (EDD) order. We give pseudo-code for this in Algorithm~1.
\begin{figure}\label{st:dualIncrease}
\end{figure}
\subsection{Analysis}
Throughout the algorithm's execution, we maintain both a solution $x$ along with the sets $A_{t}$, for each $t \in \mathcal{T}$. An
easy inductive argument shows that the following invariant is maintained.
\begin{lemma}
\label{lem:unique_duedate}
Throughout the algorithm, $j \in A_s$ if and only if there exists $t \ge s$ such that $x_{jt}=1$.
\end{lemma}
\begin{proof}
This lemma is proved by considering each step of the algorithm. Clearly, it
is true initially.
In the growing phase of the algorithm, we add $j$ to $A_s$ if and only if we
have set some $x_{jt}$ with $t \ge s$ to 1 in the same iteration; hence the
result holds through the end of the growing phase. Moreover, there is the
following monotonicity property: Since $j$ is added to $A_s$ for all $s\le
t$ when $x_{jt}$ is set to 1, if there is another $x_{jt'}$ set to 1 in a
later iteration $k$, we must have $t' \ge t$. Otherwise, if $t^k\le t'< t$, when increasing $y(t^k,A_{t^k})$ in Step~\ref{st:dualIncrease} job $j$ would belong to $A_t\subseteq A_{t^k}$ and the dual constraint could never become tight. Hence, in the pruning phase, we
consider the variables $x_{jt}$ for a particular job $j$ in decreasing order
of $t$.
Next we show that the result holds throughout the pruning phase. One
direction is easy, since as long as there is some $t\ge s$ with $x_{jt}$
equals 1, $j$ would remain in $A_s$. Next, we prove the converse by using
backward induction on $s$; we show that if for all $t \ge s$, $x_{jt}=0$,
then $j \notin A_s$. Since the result holds at the end of the growing phase,
we only have to argue about the changes made in the pruning phase. For the
base case, if $x_{jT}$ is set to 0 during the pruning phase, by construction
of the algorithm, we also remove $j$ from $A_T$; hence the result holds. Now
for the inductive case. In a particular iteration of the pruning phase,
suppose $x_{jt'}$ is the only variable corresponding to job $j$ with time
index $t'$ at least $s$ that is set to 1, but it is now being changed to 0.
We need to show $j$ is removed from $A_s$. First notice by the monotonicity
property above, $j$ must be added to $A_s$ in the same iteration as when
$x_{jt'}$ is set to 1 in the growing phase. By the assumption that $x_{jt'}$
is the only variable with time index as least $s$ that is set to 1 at this
point, $j \notin A_{t'+1}$ by induction hypothesis. Hence we are in the
\emph{else-if} case in the pruning phase of the algorithm. But by
construction of the algorithm, we remove $j$ from all $A_t$ for all $t \le
t'$ that are added in the same iteration of the growing phase, which include
$s$. Hence the inductive case holds, and the result follows.
\end{proof}
Note that this lemma also implies that the sets $A_t$ are nested; i.e., for any two time indices $s < t$, it follows that
$A_s \supseteq A_t$.
Using the above lemma, we will show that the algorithm produces a feasible solution to (P) and (D).
\begin{lemma}
\label{lem:feasible}
The algorithm produces a feasible solution $x$ to (P) that is integral and
satisfies the assignment constraints (\ref{eq:assign}), as well as a feasible
solution $y$ to (D).
\end{lemma}
\begin{proof}
First note that, by construction, the solution $x$ is integral. The algorithm
starts with the all-zero solution to both (P) and (D), which is feasible for
(D) but infeasible for (P). Showing that dual feasibility is maintained
throughout the algorithm is straightforward. Next we show that at
termination, the algorithm obtains a feasible solution for (P).
At the end of the growing phase, all residual demands $D(t,A_t)$ are zero,
and hence, $\sum_{j \in A_t} p_j \ge D(t)$ for each $t \in \mathcal{T}$. By
construction of the pruning phase, the same still holds when the algorithm
terminates.
Next, we argue that for each job $j$ there is exactly one $t$ with $x_{jt}
=1$ when the algorithm terminates. Notice that $D(1)$ (the demand at time 1)
is $T$, which is also the sum of the processing times of all jobs; hence $A_1$
must include every job to satisfy $D(1)$. By Lemma~\ref{lem:unique_duedate}, this implies that each job
has at least some time $t$ for which $x_{jt} =1$ when the growing phase
terminates. On the other hand, from the pruning step (in particular, the
first \emph{if} statement in the pseudocode), each job $j$ has $x_{jt}$ set
to 1 for at most one time $t$. However, since no job can be deleted from
$A_1$, by Lemma~\ref{lem:unique_duedate}, we see that, for each job $j$, there is still at least one
$x_{jt}$ set to 1 at the end of the pruning phase. Combining the two, we see
that each job $j$ has one value $t$ for which $x_{jt}=1$.
By invoking Lemma~\ref{lem:unique_duedate} for the final solution $x$, we have that $\sum_{s = t}^T
\sum_{j \in \mathcal{J}} p_jx_{js} \ge D(t)$. Furthermore, $x$ also satisfies the
constraint $\sum_{t \in \mathcal{T}} x_{jt} = 1$, as argued above. Hence, $x$ is
feasible for (IP), which implies the feasibility for (P).
\end{proof}
Since all cost functions $f_j$ are nondecreasing, it is easy to show that
given a feasible integral solution $x$ to (P) that satisfies the assignment
constraints (\ref{eq:assign}), the following schedule costs no more than the
objective value for $x$: set the due date $d_j =t$ for job $j$, where $t$ is
the unique time such that $x_{jt} =1$, and sequence in EDD order.
\begin{lemma}
\label{eq:edd}
Given a feasible integral solution to (P) that satisfies the assignment
constraint (\ref{eq:assign}), the EDD schedule is a feasible schedule with
cost no more than the value of the given primal solution.
\end{lemma}
\begin{proof}
Since each job $j \in \mathcal{J}$ has exactly one $x_{jt}$ set to 1, it follows
that $\sum_{j \in \mathcal{J}} \sum_{s \in \mathcal{T}} p_j x_{js} = T$. Now, taking $A =
\emptyset $ from constraints \eqref{eq:kc}, we have that $ \sum_{j\in \mathcal{J}}
\sum_{s \in \mathcal{T}: s \geq t} p_j x_{js} \ge D(t) = T - t+1$. Hence, $ \sum_{j\in \mathcal{J}}
\sum_{s \in \mathcal{T} : s \leq t-1} p_j x_{js} \le t-1$.
This ensures that the sum of processing time assigned to finish before time $t$ is
no greater than the machine's capacity for job processing up to this time
(which is $t-1$). Hence, we obtain a feasible schedule by the EDD rule
applied to the instance in which, for each job $j \in \mathcal{J}$, we set its due
date $d_j =t$, where $t$ is the unique time such that $x_{jt} =1$. As a
corollary, this also shows $x_{jt}=0$ for $t < p_j$. Finally, this
schedule costs no more than the optimal value of (P), since each job
$j\in \mathcal{J}$ finishes by $d_j$, and each function $f_j(t)$ is
nondecreasing in $t$.
\end{proof}
Next we analyze the cost of the schedule returned by the algorithm. Given the
above lemma, it suffices to show that the cost of the primal solution is no
more than four times the cost of the dual solution; the weak duality theorem of
linear programming then implies that our algorithm has a performance guarantee
of 4.
We first introduce some notation used in the analysis. Given the final solution $\bar{x}$ returned by the algorithm,
define $\bar{J}_t := \{j: \bar{x}_{jt} =1 \}$, and $\bar{A}_t := \{j: \exists \bar{x}_{jt'} =1, t' \ge t \}$.
In other words, $\bar{A}_t$ is the set of jobs that contribute towards satisfying the demand at time $t$ in the final solution;
hence, we say that $j$ \emph{covers} $t$ if $j \in \bar{A}_t$. Let $x^k$ be the partial solution of (P) at the beginning of
the $k^{th}$ iteration of the growing phase. We define $J_t^k$ and $A_t^k$ analogously with respect to $x^k$.
Next we prove the key lemma in our analysis.
\begin{lemma}
\label{lem:bound}
For every $(t,A)$ such that $y(t,A) >0$ we have
\[\sum_{s \in \mathcal{T} : s \geq t} \sum_{j \in \bar{J}_s \setminus A} p_j(s,A) < 4 D(t,A).\]
\end{lemma}
\begin{proof}
Recall that the algorithm tries to increase only one dual variable in each iteration
of the growing phase. Suppose that $y(t,A)$ is the variable chosen in
iteration $k$, i.e., $t = t^k$. Then the lemma would follow from
\begin{equation}
\label{eq:at-most-4}
\sum_{j\in \bar{A}_{t^k} \setminus A^k_{t^k}} p_j(t^k,A^k_{t^k}) \leq 4\cdot D(t^k,A^k_{t^k}) \text{\quad for all $k$}.
\end{equation}
Let us fix an iteration $k$. We can interpret the set on the left-hand side as the jobs that cover the
demand of $t^k$ that are added to the solution after the start of iteration
$k$ and that survive the pruning phase. For each such job $j$, let us define
$\tau_j$ to be largest time such that
\begin{equation*}
p\,(\bar{A}_{\tau_j} \setminus (A^k_{\tau_j} \cup \{j\})) < D(\tau_j, A^k_{\tau_j}).
\end{equation*}
Let us first argue that this quantity is well defined. Let $d_j$ be the unique
time step for which $\bar{x}_{j,d_j}=1$, which, by
Lemma~\ref{lem:unique_duedate}, is guaranteed to exist. Also, let $r$ be the
largest time such that $x^k_{j,r}=1$, which must satisfy $r < t^k$ (we define $r=0$ if $x^k_{j,t}=0$ for all $t$). We claim that
$\tau_j > r$.
Consider the iteration of the pruning phase where the algorithm tried
(unsuccessfully) to set $x_{j,d_j}$ to $0$ and let $\hat{x}$ be the primal
solution that the algorithm held at that moment; also, let $\hat{A}$ be defined
for $\hat{x}$ in the same way $\bar{A}$ is defined for $\bar{x}$. The algorithm
did not prune $x_{j, d_j}$ because there was a time $s > r$ such that
$p(\hat{A}_s \setminus \{j\}) < D(s)$. Notice that $\bar{A}_s \subseteq
\hat{A}_s$ because the pruning phase can only remove elements from $A_s$, and
$A^k_s \subseteq \hat{A}_s$ because $x_{j, d_j}$ was set in iteration $k$ or
later of the growing phase. Hence,
$$p(\bar{A}_s \setminus (A^k_s \cup \{j\}))
\leq p(\hat{A}_s \setminus \{j\}) - p(A^k_s) < D(s) - p(A^k_s) \leq D(s,
A^k_s),$$
which implies that $\tau_j \geq s$, which in turn is strictly larger than
$r$ as claimed. Therefore, $\tau_j$ is well defined.
Based on this definition we partition the set $\bar{A}_{t^k}\setminus A^k_{t^k}$ in
two subsets,
\begin{align*}
H &:= \{j\in \bar{A}_{t^k}\setminus A^k_{t^k} : \tau_j\ge t^k \} \text{ and }\\
L &:= \{ j\in \bar{A}_{t^k}\setminus A^k_{t^k} : \tau_j < t^k \}.
\end{align*}
For each of these, we define
\begin{align*}
h &:= \mathrm{argmin} \{\tau_j: j\in H\} \text{ and }\\
\ell &:= \mathrm{argmax}\{\tau_j: j\in L\}.
\end{align*}
We will bound separately the contribution of $H \setminus \{h\}$ and $L
\setminus \{\ell\}$ to the left-hand side of \eqref{eq:at-most-4}. For $j \in \{ h, \ell\}$, we will use the trivial bound
\begin{equation}
\label{eq:trivial}
p_j(t^k, A^k_{t^k}) \leq D(t^k, A^k_{t^k}).
\end{equation}
We start by bounding the contribution of $H\setminus\{h\}$. Notice that for
every job $j \in H$ we must have $\tau_j \leq d_j$; otherwise, the solution
$\bar{x}$ would not be feasible, which contradicts Lemma~\ref{lem:feasible}.
For all $j \in H$ we have that $j \in \bar{A}_{\tau_h}$ since $\tau_h \leq
\tau_j \leq d_j$; also $j \notin A^k_{\tau_h}$ since $j \notin A^k_{t^k}$ and
$A^k_{t^k} \supseteq A^k_{\tau_h}$ because $\tau_h \geq t^k$. It follows that $H \subseteq \bar{A}_{\tau_h} \setminus A^k_{\tau_h}$. Therefore,
\begin{equation}
\label{eq:H-h}
\hspace{-1ex}\sum_{j \in H \setminus \{h\}} \hspace{-2ex} p_j(t^k, A^k_{t^k}) \leq p(H \setminus \{h\}) \leq p\,(\bar{A}_{\tau_h} \setminus (A^k_{\tau_h} \cup \{h\})) < D(\tau_h, A^k_{\tau_h}) \leq D(t^k, A^k_{t^k}),
\end{equation}
where the first inequality follows from $p_j(t,A) \leq p_j$, the second
inequality from the fact that $H \subseteq \bar{A}_{\tau_h} \setminus
A^k_{\tau_h}$, the third inequality from the definition of $\tau_h$, and the
fourth because $t^k$ is chosen in each iteration of the growing phase to
maximize $D(t^k, A^k_{t^k})$.
Now we bound the contribution of $L \setminus \{\ell\}$. Suppose that at the
beginning of iteration $k$ we had $x_{j,r} = 1$ for some $r < t^k$ and $j \in
\bar{A}_{t^k} \setminus A^k_{t^k}$. When we argued above that $\tau_j$ was well
defined we showed in fact that $r < \tau_j$. For all $j \in L$ then we have
that $j \notin A^k_{\tau_\ell}$ since $\tau_j \leq \tau_\ell$; also $j
\in \bar{A}_{\tau_\ell}$ since $j \in \bar{A}_{t^k}$ and $\bar{A}_{t^k}
\subseteq \bar{A}_{\tau_\ell}$ because $\tau_\ell \leq t^k$. It
follows that $L \subseteq \bar{A}_{\tau_\ell} \setminus A^k_{\tau_\ell}$. Therefore,
\begin{equation}
\label{eq:L-ell}
\sum_{j \in L \setminus \{\ell\}} p_j(t^k, A^k_{t^k}) \leq p(L\setminus \{\ell\}) \leq p\,(\bar{A}_{\tau_\ell} \setminus (A^k_{\tau_\ell} \cup \{\ell\})) < D(\tau_\ell, A^k_{\tau_\ell}) \leq D(t^k, A^k_{t^k}),
\end{equation}
where the first inequality follows from $p_j(t,A) \leq p_j$, the second
inequality from the fact that $L \subseteq \bar{A}_{\tau_\ell} \setminus
A^k_{\tau_\ell}$, the third inequality from the definition of $\tau_\ell$, and
the fourth because $t^k$ is chosen in each iteration of the growing phase to
maximize $D(t^k, A^k_{t^k})$.
It is now easy to see that~\eqref{eq:at-most-4} follows from \eqref{eq:trivial}, \eqref{eq:H-h}, and \eqref{eq:L-ell}:
\begin{equation*}
\sum_{j \in \bar{A}_{t^k} \setminus A^k_{t^k}} \hspace{-2ex} p_j(t^k, A^k_{t^k}) \leq p (L\setminus \{\ell\}) + p_\ell(t^k, A^k_{t^k}) + p(H \setminus \{h\}) + p_h(t^k, A^k_{t^k})\leq 4 \cdot D(t^k, A^k_{t^k}).
\end{equation*}
\end{proof}
Now we can show our main theorem.
\begin{theorem} The primal-dual algorithm produces a schedule for $1||\sum f_{j}$ with cost at most four times the optimum.\end{theorem}
\begin{proof}
It suffices to show that the cost of the primal solution after the pruning phase is no more than four times the dual objective value. The cost of our solution is denoted by $\sum_{t \in \mathcal{T}} \sum_{j \in \bar{J}_t} f_j(t)$. We have that
\begin{eqnarray*}
\sum_{t \in \mathcal{T}} \sum_{j \in \bar{J}_t} f_{j}(t) &=& \sum_{t \in \mathcal{T}} \sum_{j \in \bar{J}_t}\sum_{s \in \mathcal{T} : s \leq t } \sum_{A:j \notin A} p_j(s,A)y(s,A) \\
&=& \sum_{s \in \mathcal{T}} \sum_{A\subseteq \mathcal{J}} y(s,A) \left(\sum_{t \in \mathcal{T} : t \geq s} \sum_{j \in \bar{J}_t \setminus A} p_j(s,A)\right)
\end{eqnarray*}
The first line is true because we set $x_{jt} = 1 $ only if the dual
constraint is tight, and the second line is obtained by interchanging the order of
summations. Now, from Lemma \ref{lem:bound}
we know that $\sum_{t \in \mathcal{T} : t \geq s} \sum_{j \in \bar{J}_t \setminus A} p_j(s,A) < 4 D(s,A)$. Hence it follows that
\begin{eqnarray*}
\sum_{s \in \mathcal{T}} \sum_{A\subseteq \mathcal{J}}y(s,A) \left(\sum_{t \in \mathcal{T} : t \geq s} \sum_{j \in \bar{J}_t \setminus A} p_j(s,A)\right) & < &
\sum_{s \in \mathcal{T}} \sum_{A\subseteq \mathcal{J}} 4 D(s,A) y(s,A),
\end{eqnarray*}
where the right-hand side is four times the dual objective. The result now
follows, since the dual objective is a lower bound of the cost of the optimal
schedule.
\end{proof}
\subsection{Tight example}
\label{sec:tight-example}
In this section we show that the previous analysis is tight.
\begin{lemma}
\label{lem:gap-example}
For any $\varepsilon>0$ there exists an instance where Algorithm~1 constructs a pair of primal-dual solutions with a gap of $4-\varepsilon$.
\end{lemma}
\begin{proof}
Consider an instance with 4 jobs. Let $p\geq 4$ be an integer. For $j \in \{1,2,3,4\}$, we
define the processing times as $p_j=p$ and the
cost functions as
\begin{align*}
f_1(t) = f_2(t) & =
\begin{cases}
0 \qquad\qquad & \text{if } 1\le t \le p-1,\\
p \qquad\qquad & \text{if } p\le t \le 3p-1,\\
\infty \,\quad & \text{otherwise}, \text{ and}
\end{cases}
\ \end{align*}
\begin{align*}
f_3(t) = f_4(t) & =
\begin{cases}
0 \qquad\qquad & \text{if } 1\le t \le 3p-2,\\
p & \text{otherwise}.
\end{cases}
\end{align*}
\begin{figure}
\caption{Trace of the algorithm on the instance in the proof of Lemma~\ref{lem:gap-example}.}
\label{table:trace}
\end{figure}
Table~\ref{table:trace} shows a trace of the algorithm for the instance. Notice that the only non-zero dual variable the algorithm sets is
$y(3p-1,\emptyset)=1$. Thus the dual value achieved is
$y(3p-1,\emptyset)\,D(3p-1,\emptyset) = p+2$. It is easy to check that the
pruning phase keeps the largest due date for each job and has cost $4p$. In
fact, it is not possible to obtain a primal (integral) solution with cost
less than $4p$: We must pay $p$ for each of jobs $3$ and $4$ in order to cover
the demand at time $3p$, and we must pay $p$ for each of jobs $1$ and $2$ since
they cannot finish before time $p$. Therefore the pair of primal-dual
solutions have a gap of $4p/(p+2)$, which converges to $4$ as $p$ tends to
infinity.
\end{proof}
The attentive reader would complain that the cost functions used in the proof of
Lemma~\ref{lem:gap-example} are somewhat artificial. Indeed, jobs $1$ and $2$
cost 0 only in $[0,p-1]$ even though it is not possible to finish them before
$p$. This is, however, not an issue since given any instance $(f, p)$ of the
problem we can obtain a new instance $(f', p')$ where $f'_j(t) \geq f'_j(p'_j)$
for all $t$ where we observe essentially the same primal-dual gap in $(f,p)$
and $(f', p')$. The transformation is as follows: First, we create a dummy job
with processing time $T =
\sum_{j} p_j$ that costs 0 up to time $T$ and infinity after that. Second, for
each of the original jobs $j$, we keep their old processing times, $p'_j =
p_j$, but modify their cost function:
\begin{equation*}
f'_j(t) = \begin{cases}
\delta p_j & \text{if } t \leq T, \\
\delta p_j + f_j(t - T) & \text{if } T < t \leq 2T.
\end{cases}
\end{equation*}
In other words, to obtain $f'_j$ we shift $f_j$ by $T$ units of time to the
right and then add $\delta p_j$ everywhere, where $\delta$ is an arbitrarily
small value.
Consider the execution of the algorithm on the modified instance $(f',
p')$. In the first iteration, the algorithm sets $y(1, \emptyset)$ to 0 and
assigns the dummy job to time $T$. In the second iteration, the algorithm
chooses to increase the dual variable~$y(T+1, \emptyset)$. Imagine increasing
this variable in a continuous way and consider the moment when it reaches
$\delta$. At this instant, the slack of the dual constraints for times in
$[T+1, 2T]$ in the modified instance are identical to the slack for times in
$[1, T]$ at the beginning of the execution on the original instance $(f,p)$.
From this point in time onwards, the execution on the modified instance will
follow the execution on the original instance but shifted $T$ units of time to
the right. The modified instance gains only an extra $\delta T$ of dual value,
which can be made arbitrarily small, so we observe essentially the same
primal-dual gap on $(f', p')$ as we do on~$(f, p)$.
\section{A $(4+\epsilon)$-approximation algorithm}
\label{sec:poly}
We now give a polynomial-time $(4+ \epsilon)$-approximation algorithm for
$1||\sum f_j$. This is achieved by simplifying the input via rounding in a
fairly standard fashion, and then running the primal-dual algorithm on the LP
relaxation of the simplified input, which has only a polynomial number of
interval-indexed variables. A similar approach was employed in the work of
Bansal \& Pruhs \cite{BansalP10}.
Fix a constant $\epsilon >0$. We start by constructing $n$ partitions of the
time indices $\{1,\ldots,T\}$, one partition for each job, according to its
cost function. Focus on some job $j$. First, the set of time indices
$I^{0}_{j}=\{t:f_{j}(t)=0\}$ are those of {\it class} 0 and classes $k=1,2,\ldots$ are the set of indices $I^{k}_{j}=\{ t: (1+\epsilon)^{k-1} \leq f_{j}(t) < (1+\epsilon)^k \}$. (We can bound the number of classes for job $j$ by $2+
\log_{1+\epsilon} f_{j} (T)$.) Let $\ell^{k}_{j}$ denote the minimum element in
$I^{k}_{j}$ (if the set is non-empty), and let $ \widehat{\mathcal{T}}_j$ be the set of
all left endpoints $\ell^k_j$. Finally, let $\widehat{\mathcal{T}} = \cup_{j \in \mathcal{J}} \widehat{\mathcal{T}}_j \cup \{ 1 \}$. Index the
elements such that $\widehat{\mathcal{T}} := \{ t_1,..., t_{\tau} \}$ where $1 = t_1 < t_2
< ... < t_{\tau}$. We then compute a master partition of the time horizon $T$
into the intervals $\mathcal{I} = \{ [t_1,t_2-1], [t_2, t_3 -1],...,
[t_{\tau-1},t_{\tau}-1], [t_{\tau}, T] \} $. There are two key properties of
this partition: the cost of any job changes by at most a factor of $1+\epsilon$
as its completion time varies within an interval, and the number of intervals
is a polynomial in $n$, $\log P$ and $\log W$; here $P$ denotes the length of
the longest job and $W= \max_{j,t} (f_j(t) -f_j(t-1))$, the maximum increase in
cost function $f_j(t)$ in one time step over all jobs $j$ and times $t$.
\begin{lemma} The number of intervals in this partition satisfies
$|\widehat{\mathcal{T}}| = O(n\log{nPW})$.
\end{lemma}
\begin{proof}
It suffices to show that the number of left endpoints in each $\widehat{\mathcal{T}}_j$ is $O(\log{nPW})$.
Notice that $T \le nP$, thus the maximum cost of any job is bounded by $nPW$, which implies $|\widehat{\mathcal{T}}_j| = O(\log{nPW})$.
\end{proof}
Next we define a modified cost function $f'_j(t)$ for each time $t \in \widehat{\mathcal{T}}$; in essence, the modified cost
is an upper bound on the cost of job $j$ when completing in the interval for which $t$ is the left endpoint. More precisely,
for $t_i \in \widehat{\mathcal{T}}$, let $f'_j(t_i) := f_j(t_{i+1}-1)$. Notice that, by construction, we have that $ f_j(t) \le f'_j(t) \le (1 + \epsilon) f_j(t)$ for each $t \in \widehat{\mathcal{T}}$.
Consider the following integer programming formulation with
variables $x'_{jt}$ for each job $j$ and each time $t \in \widehat{\mathcal{T}}$;
we set the variable $x'_{jt_{i}}$ to 1 to indicate that job $j$ completes at the end of the interval $[t_i, t_{i+1}-1]$.
The demand $D(t)$ is defined the same way as before.
\begin{align}
\text{minimize}\ \ &\sum_{j \in \mathcal{J}} \sum_{t \in \widehat{\mathcal{T}}} f'_j(t)x'_{jt} \tag{$\text{IP}'$}\label{IP'} \\
\text{subject to}\ \ & \sum_{j \in \mathcal{J}} \sum_{s \in \widehat{\mathcal{T}}: s \ge t} p_jx'_{js} \ge D(t), & \text{for each}\ t \in \widehat{\mathcal{T}};\\
& \sum_{t \in \widehat{\mathcal{T}}} x'_{jt} = 1, & & \text{for each}\ j \in \mathcal{J}; \\
& x'_{jt} \in \{0,1\}, & & \text{for each}\ j \in \mathcal{J},\ t \in \widehat{\mathcal{T}}. \notag
\end{align}
The next two lemmas relate ($\text{IP}'$) to (IP).
\begin{lemma}
If there is a feasible solution $x$ to (IP) with objective value $v$, then there is a feasible solution $x'$ to ($\text{IP}'$)
with objective value at most $(1+ \epsilon)v$.
\end{lemma}
\begin{proof}
Suppose $x_{jt}=1$ where $t$ lies in the interval $[t_i, t_{i+1}-1]$ as defined by the time indices in $\mathcal{T}$, then we construct a solution to ($\text{IP}'$) by setting $x'_{jt_i} =1$. It is straightforward to check $x'$ is feasible for ($\text{IP}'$), and by construction $f'_j(t_i) = f_j(t_{i+1}-1) \le (1+ \epsilon)f_j(t)$.
\end{proof}
\begin{lemma}
For any feasible solution $x'$ to ($\text{IP}'$) there exists a feasible solution $x$ to (IP) with the same objective value.
\end{lemma}
\begin{proof}
Suppose $x'_{jt}=1$, where $t= t_i$; then we construct a solution to (IP) by setting $x_{j,t_{i+1}-1}=1$. Notice
that the time $t_{i+1}-1$ is the right endpoint to the interval $[t_i, t_{i+1}-1]$. By construction, $f_j(t_{i+1}-1) = f'_j(t_i)$; hence, the cost of solution $x$ and $x'$ coincide. To check its feasibility, it suffices to see that the constraint corresponding to $D(t_i)$ is satisfied. This uses the fact that within the interval
$[t_i, t_{i+1}-1]$, $D(t)$ is largest at $t_i$ and that the constraint corresponding to
$D(t)$ contains all variables $x_{js}$ with a time index $s$ such that $s \ge t$.
\end{proof}
Using the two lemmas above, we see that running the primal-dual algorithm using the LP relaxation of ($\text{IP}'$) strengthened by the knapsack-cover inequalities gives us a
$4\,(1+\epsilon)$-approximation algorithm for the scheduling problem $1||\sum f_j$. Hence we have the following result:
\begin{theorem} \label{thm:poly} For each $\epsilon > 0$,
there is a $(4+\epsilon)$-approximation algorithm for the scheduling problem $1||\sum f_j$.
\end{theorem}
\section{A local-ratio interpretation} \label{sec:local-ratio}
In this section we cast our primal-dual 4-approximation as a local-ratio algorithm.
We will work with due date assignment vectors $\vec{\sigma}=(\sigma_1, \ldots,
\sigma_n) \in (\mathcal{T}\cup\{0\})^n$, where $\sigma_j=t$ means that job $j$
has a due date of $t$. We will use the short-hand notation $(\vec{\sigma}_{-j}, s)$ to denote the assignment where $j$ is given a due date $s$ and all other jobs get their $\vec{\sigma}$ due date; that is,
\[ (\vec{\sigma}_{-j}, s) = (\sigma_1, \ldots, {\sigma}_{j-1}, s, {\sigma}_{j+1}, \ldots, {\sigma}_n). \]
We call an assignment $\vec{\sigma}$ \emph{feasible}, if there is a schedule of
the jobs that meets all due dates. We say that job $j\in \mathcal{J}$
\emph{covers} time $t$ if $\sigma_j \geq t$. The cost of $\vec{\sigma}$ under the cost function vector $\vec{g}=(g_1, \ldots, g_n)$ is defined as
$\vec{g}(\vec{\sigma}) = \sum_{j\in \mathcal{J}}g_j(\sigma_j)$. We denote by
$A_t^{\vec{\sigma}}=\set{j\in \mathcal{J}: \sigma_j\ge t}$, the set of jobs that cover $t$. We call
\[ D(t, \vec{\sigma}) = D(t, A^{\vec{\sigma}}_{t})= \max \set{ T - t + 1 - p(A_t^{\vec{\sigma}}), 0} \]
the \emph{residual demand} at time $t$ with respect to assignment $\vec{\sigma}$. And
\[ p_j(t, \vec{\sigma}) = p_j (t, A^{\vec{\sigma}}_t) = \min \set{p_j, D(t, \vec{\sigma})}\]
the \emph{truncated processing time} of $j$ with respect to $t$ and
$\vec{\sigma}$.
At a very high level, the algorithm, which we call {\sc local-ratio}, works as
follows: We start by assigning a due date of $0$ to all jobs; then we
iteratively increase the due dates until the assignment is feasible; finally,
we try to undo each increase in reverse order as long as it preserves
feasibility.
In the analysis, we will argue that the due date assignment that the algorithm
ultimately returns is feasible and that the cost of any schedule that meets these
due dates is a 4-approximation. Together with Lemma~\ref{eq:edd} this implies
the main result in this section.
\begin{theorem}
\label{thm:4-approx}
Algorithm {\sc local-ratio} is a pseudo-polynomial time {4-}ap\-prox\-imation algorithm for $1||\sum f_{j}$.
\end{theorem}
\begin{figure}\label{algo:lrcs}
\end{figure}
We now describe the algorithm in more detail. Then we prove that it is a
4-approximation. For reference, its pseudo-code is given in
Algorithm~\ref{algo:lrcs}.
\subsection{Formal description of the algorithm}
The algorithm is recursive. It takes as input an assignment vector
$\vec{\sigma}$ and a cost function vector $\vec{g}$, and returns a feasible
assignment $\vec{\rho}$. Initially, the algorithm is called on the trivial
assignment $(0, \ldots, 0)$ and the instance cost function vector $(f_1,
\ldots, f_n)$. As the algorithm progresses, both vectors are modified. We
assume, without loss of generality, that $f_j(0) = 0$ for all $j \in
\mathcal{J}$.
First, the algorithm checks if the input assignment $\vec{\sigma}$ is feasible.
If that is the case, it returns $\vec{\rho} = \vec{\sigma}$. Otherwise,
it decomposes the input vector function $\vec{g}$ into two cost function
vectors $\vec{\widetilde{g}}$ and $\vec{\widehat{g}}$ as follows
\[ \vec{g} = \vec{\widetilde{g}} + \alpha \cdot \vec{\widehat{g}}, \]
where $\alpha$ is the largest value such that $\vec{\widetilde{g}} \geq \vec{0}$
(where by $\vec{g} = \vec{\widetilde{g}} + \alpha \cdot \vec{\widehat{g}}$, we mean $g_j(t) = \widetilde{g}_j(t) + \alpha \cdot \widehat{g}_j(t)$ for all $t \in \mathcal{T}$ and $j \in \mathcal{J}$, and by $\vec{\widetilde{g}} \geq \vec{0}$, we mean $\widetilde{g}_j(t) \geq 0$ for all $j \in \mathcal{J}$, $t \in \mathcal{T}$), and $\vec{\widehat{g}}$ will be specified later.
It selects a job $j$ and a time $s$ such that $\widehat{g}_j(s) > 0$ and
$\widetilde{g}_j(s) = 0$, and builds a new assignment
$\vec{\widetilde{\sigma}}=(\vec{\sigma}_{-j}, s)$ thus increasing the due date
of $j$ to $s$ while keeping the remaining due dates fixed. It then makes a
recursive call \mbox{\sc local-ratio}$(\vec{\widetilde{g}},
\vec{\widetilde{\sigma}})$, which returns a feasible assignment
$\vec{\widetilde{\rho}}$. Finally, it tests the feasibility of reducing the
deadline of job $j$ in $\vec{\widetilde{\rho}}$ back to $\sigma_j$. If the
resulting assignment is still feasible, it returns that; otherwise, it returns
$\vec{\widetilde{\rho}}$.
The only part that remains to be specified is how to decompose the cost
function vector. Let $t^*$ be a time slot with maximum residual unsatisfied
demand with respect to $\vec{\sigma}$:
\[t^*\in
\mathrm{argmax}_{t\in\mathcal{T}} D(t, \vec{\sigma}).\]
The algorithm creates, for each job $i \in \mathcal{J}$, a model cost function
\[
\widehat{g}_i(t)=\begin{cases}
p_i(t^*,\vec{\sigma}) & \text{if } \sigma_i < t^* \leq t, \\
0 & \text{otherwise}. \\
\end{cases}
\]
and chooses $\alpha$ to be the largest value such that
\[
\widetilde{g}_i(t) = g_i(t) - \alpha \widehat{g}_i(t)\ge 0 \qquad \text{for all } i\in \mathcal{J} \text{ and } t \in \mathcal{T}.
\]
In the primal-dual interpretation of the algorithm, $\alpha$ is the value
assigned to the dual variable $y(t^*,A_{t^*}^{\vec{\sigma}})$.
Let $(j,s)$ be a job-time pair that prevented us from increasing $\alpha$ further. In other words, let $(j,s)$ be such that $\widetilde{g}_{j}(s) = 0$ and $\widehat{g}_j(s) > 0$. Intuitively, assigning a due date of $s$ to job $j$ is free in the residual cost function $\vec{g}$ and helps cover some of the residual demand at $t^*$. This is precisely what the algorithm does: The assignment used as input for the recursive call is $\vec{\widetilde{\sigma}} = (\vec{\sigma}_{-j}, s)$.
\subsection{Analysis}
For a given vector $\vec{g}$ of non-negative functions, $opt(\vec{g})$ denotes the cost of an optimal schedule with respect to these cost
functions. We say an assignment $\vec{\rho}$ is $\beta$-approximate with
respect to $\vec{g}$ if $\sum_{i \in \mathcal{J}} g_i(\rho_i) \leq \beta \cdot
opt(\vec{g})$.
The correctness of the algorithm rests on the following lemmas.
\begin{lemma}
\label{lem:lr-cs-prop}
Let $(\vec{\sigma^{(1)}}, \vec{g^{(1)}}), (\vec{\sigma^{(2)}}, \vec{g^{(2)}}), \ldots, (\vec{\sigma^{(k)}}, \vec{g^{(k)}})$ be the inputs to the successive recursive calls to {\sc local-ratio} and let $\vec{\rho^{(1)}}, \vec{\rho^{(2)}}, \ldots, \vec{\rho^{(k)}}$ be their corresponding outputs. The following properties hold:
\begin{enumerate}[(i)]
\item $\vec{\sigma^{(1)}} \leq \vec{\sigma^{(2)}} \leq \cdots \leq
\vec{\sigma^{(k)}}$,
\item $\vec{\rho^{(1)}} \leq \vec{\rho^{(2)}} \leq \cdots \leq \vec{\rho^{(k)}}$,
\item $\vec{\sigma^{(i)}} \leq \vec{\rho^{(i)}}$ for all $i =1, \ldots, k$,
\item $g^{(i)}_j(\sigma^{(i)}_j) = 0$ and $g^{(i)}_j$ is non-negative for all $i=1, \ldots, k$ and $j \in \mathcal{J}$.
\end{enumerate}
\end{lemma}
\begin{proof}
The first property follows from the fact that $\vec{\sigma^{(i+1)}}$ is
constructed by taking $\vec{\sigma^{(i)}}$ and increasing the due date of a
single job.
The second property follows from the fact that $\vec{\rho^{(i)}}$ is either
$\vec{\rho^{(i+1)}}$ or it is constructed by taking $\vec{\rho^{(i+1)}}$ and
decreasing the due date of a single job.
The third property follows by an inductive argument. The base case is the
base case of the recursion, where $\vec{\sigma^{(k)}} = \vec{\rho^{(k)}}$.
For the recursive case, we need to show that $\vec{\sigma^{(i)}} \leq
\vec{\rho^{(i)}}$, by recursive hypothesis we know that $\vec{\sigma^{(i+1)}}
\leq \vec{\rho^{(i+1)}}$ and by the first property $\vec{\sigma^{(i)}}\leq
\vec{\sigma^{(i+1)}}$. The algorithm either sets $\vec{\rho^{(i)}} = \vec{\rho^{(i+1)}}$, or $\vec{\rho^{(i)}}$ is constructed by taking
$\vec{\rho^{(i+1)}}$ and decreasing the due date of some job to its old
$\vec{\sigma^{(i)}}$ value. In both cases the property holds.
The fourth property also follows by induction. The base case is the first call
we make to {\sc local-ratio}, which is $\vec{\sigma^{(1)}} = (0,\ldots, 0)$ and
$\vec{g^{(1)}} = (f_1, \ldots, f_n)$, where it holds by our assumption that $f_j(0) = 0$ for all $j$. For the
inductive case, we note that $\vec{{g}^{(i+1)}}$ is constructed by taking
$\vec{{g}^{(i)}}$ and subtracting a scaled version of the model function
vector, so that $\vec{0} \leq \vec{g^{(i+1)}} \leq \vec{g^{(i)}}$, and
$\vec{\sigma^{(i+1)}}$ is constructed by taking $\vec{\sigma^{(i)}}$ and
increasing the due date of a single job $j^{(i)}$ such that $g^{(i+1)}_{j^{(i)}} ( \sigma^{(i+1)}_{j^{(i)}}) = 0$, which
ensures that the property holds.
\end{proof}
\begin{lemma}
\label{lem:local-argument}
Let {\sc local-ratio}$(\vec{\sigma}, \vec{g})$ be a recursive call returning $\vec{\rho}$. Then
\begin{equation}
\sum_{i \in \mathcal{J}\, :\, \sigma_i < t^* \leq \rho_i} p_i(t^*, \vec{\sigma})
\leq 4 \cdot D(t^*, \vec{\sigma}),
\end{equation}
where $t^*$ is the value used to decompose the input cost function vector $\vec{g}$.
\end{lemma}
\begin{proof} Our goal is to bound the $p_i(t^*, \vec{\sigma})$ value of jobs in
\[X = \sset{ i \in \mathcal{J}}{\sigma_i < t^* \leq \rho_i}.\]
Notice that the algorithm increases the due date of these jobs in this or a
later recursive call. Furthermore, and more important to us, the algorithm
decides not to undo the increase. For each $i \in X$, consider the call
{\sc local-ratio}$(\vec{\sigma'}, \vec{g'})$ when we first increased the due date of $i$
beyond $\sigma_i$. Let $\vec{{\rho}'}$ be the assignment returned by the
call. Notice that $\rho'_i > \sigma_i$ and that $(\vec{\rho'}\!_{-i},
\sigma_i)$ is not feasible---otherwise we would have undone the due date
increase. By Lemma~\ref{lem:lr-cs-prop}, we know that $\vec{\rho} \leq
\vec{\rho'}$, and so we can conclude that $(\vec{\rho}_{-i}, \sigma_i)$ is not
feasible either. Let $t_i$ be a time with positive
residual demand in this infeasible assignment:
\[ D(t_i, (\vec{{\rho}}_{-i}, \sigma_i)) > 0. \]
Note that $\sigma_i < t_i \leq \rho_i$, otherwise $\vec{\rho}$ would not be feasible,
contradicting Lemma~\ref{lem:lr-cs-prop}.
We partition $X$ into two subsets
\begin{equation*}
L = \set{ i\in X: t_i \le t^*} \text{ and } R = \set{ i\in X: t_i > t^* },
\end{equation*}
and we let $t_L = \max \sset{t_i}{i \in L}$ and $i_L$ be a job attaining this
value. Similarly, we let $t_R = \min \sset{t_i}{i \in R}$ and $i_R$ be a job attaining this
value.
We will bound the contribution of each of these sets separately. Our goal
will be to prove that
\begin{align}
\label{eq:Lmax}
\sum_{i \in L - i_L} p_i & \le D(t^*, \vec{\sigma}), \text{ and }\\
\label{eq:Rmax}
\sum_{i \in R - i_R} p_i & \le
D(t^*,\vec{\sigma}).
\end{align}
Let us argue \eqref{eq:Lmax} first. Since $D\left(t_L, (\vec{\rho}_{-i_L},
\sigma_{i_L})\right) > 0$, it follows that
\begin{align*}
\sum_{i \in \mathcal{J} - i_L : \rho_i \geq t_L} p_i & < T-t_L + 1 \\
\sum_{i \in \mathcal{J} : \sigma_i \geq t_L} p_i + \sum_{i \in \mathcal{J} - i_L : \rho_i \geq t_L > \sigma_i} p_i & < T- t_L +1 \\
\sum_{i \in \mathcal{J} - i_L : \rho_i \geq t_L > \sigma_i} p_i & < D(t_L, \vec{\sigma})
\end{align*}
Recall that $\sigma_i < t_i \leq \rho_i$ for all $i \in X$ and that $t_i
\leq t_L \leq t^*$ for all $i \in L$. It follows that the sum on the left-hand side of the last inequality contains all jobs in $L - i_L$. Finally, we note that
$D(t_L, \vec{\sigma} )
\leq D(t^*, \vec{\sigma})$ due to the way {\sc local-ratio} chooses~$t^*$, which gives us~\eqref{eq:Lmax}.
Now let us argue \eqref{eq:Rmax}. Since $D\left(t_R, (\vec{\rho}_{-i_R},
\sigma_{i_R})\right) > 0$, it follows that
\begin{align*}
\sum_{i \in \mathcal{J} - i_R : \rho_i \geq t_R} p_i & < T-t_R + 1 \\
\sum_{i \in \mathcal{J} : \sigma_i \geq t_R} p_i + \sum_{i \in \mathcal{J} - i_R : \rho_i \geq t_R > \sigma_i} p_i & < T- t_R +1 \\
\sum_{i \in \mathcal{J} - i_R : \rho_i \geq t_R > \sigma_i} p_i & < D(t_R, \vec{\sigma}).
\end{align*}
Recall that $\sigma_i < t^*$ for all $i \in X$ and that $t^* < t_R \leq t_i \leq \rho_i$ for all $i \in R$. It follows that the sum in the left-hand side of the last inequality
contains all jobs in $R- i_R$. Finally, we note that $D(t_R, \vec{\sigma} )
\leq D(t^*, \vec{\sigma})$ due to the way {\sc local-ratio} chooses $t^*$, which
gives us~\eqref{eq:Rmax}.
Finally, we note that $p_i(t^*, \vec{\sigma}) \leq D(t^*, \vec{\sigma})$ for all $i \in \mathcal{J}$. Therefore,
\begin{align*}
\sum_{i \in X} p_i(t^*, \vec{\sigma})
&\leq \sum_{i \in L-i_L} p_i + p_{i_L}(t^*, \vec{\sigma}) +
\sum_{i \in R-i_R} p_i + p_{i_R}(t^*, \vec{\sigma}) \\
& \leq 4 \cdot D(t^*, \vec{\sigma}),
\end{align*}
which finishes the proof.
\end{proof}
We are ready to prove the performance guarantee of the algorithm.
\begin{lemma}
\label{lem:4-correct}
Let {\sc local-ratio}$(\vec{\sigma}, \vec{g})$ be a recursive call and $\vec{\rho}$ be its output. Then $\vec{\rho}$ is a feasible 4-approximation w.r.t.\@ $\vec{g}$.
\end{lemma}
\begin{proof}
The proof is by induction. The base case corresponds to the base case of the recursion, where we get as
input a feasible assignment $\vec{\sigma}$, and so $\vec{\rho} = \vec{\sigma}$. From Lemma~\ref{lem:lr-cs-prop} we know that $g_i(\sigma_i) =
0$ for all $i \in \mathcal{J}$, and that the cost functions are non-negative.
Therefore, the cost of $\vec{\rho}$ is optimal since
\[ \sum_{i \in \mathcal{J}} g_i(\rho_i) = 0. \]
For the inductive case, the cost function vector $\vec{g}$ is decomposed into
$\vec{\widetilde{g}} + \alpha \cdot \vec{\widehat{g}}$. Let $(j,s)$ be the
pair used to define $\vec{\widetilde{\sigma}} = (\vec{\sigma}_{-j}, s)$. Let
$\vec{\widetilde{\rho}}$ be the assignment returned by the recursive call. By
inductive hypothesis, we know that $\vec{\widetilde{\rho}}$ is feasible and
4-approximate w.r.t.\@ $\vec{\widetilde{g}}$.
After the recursive call returns, we check the feasibility of
$(\vec{\widetilde{\rho}}_{-j}, \sigma_j)$. If the vector is feasible, we
return the modified assignment; otherwise, we
return~$\vec{\widetilde{\rho}}$. In either case $\vec{\rho}$ is feasible.
We claim that $\vec{\rho}$ is 4-approximate w.r.t.\@ $\vec{\widehat{g}}$. Indeed,
\[ \sum_{i \in \mathcal{J}} \widehat{g}_i(\rho_i) = \sum_{i \in \mathcal{J}: \sigma_i < t^*\leq \rho_i} p_i(t^*, \vec{\sigma}) \leq 4 \cdot D(t^*,
\vec{\sigma}) \leq 4 \cdot opt(\vec{\widehat{g}}),\] where the
first inequality follows from Lemma~\ref{lem:local-argument} and the last
inequality follows from the fact that the cost of any schedule under
$\vec{\widehat{g}}$ is given by the $p_i(t^*, \vec{\sigma})$ value of jobs $i
\in \mathcal{J}$ with $\sigma_i < t^* \leq \rho_i$, which must have a
combined processing time of at least $D(t^*, \vec{\sigma})$ on any feasible
schedule. Hence, $opt(\vec{\widehat{g}}) \geq D(t^*, \vec{\sigma})$.
We claim that $\vec{\rho}$ is 4-approximate w.r.t.\@ $\vec{\widetilde{g}}$. Recall that $\vec{\widetilde{\rho}}$ is
4-approximate w.r.t.\@ $\vec{\widetilde{g}}$; therefore, if~$\vec{\rho} =
\vec{\widetilde{\rho}}$ then $\vec{\rho}$ is 4-approximate w.r.t.\@ $\vec{\widetilde{g}}$. Otherwise, $\vec{\rho} = (\vec{\widetilde{\rho}}_{-j}, \sigma_j)$, in which case $\widetilde{g}_j(\rho_j) = 0$, so $\vec{\rho}$ is also 4-approximate w.r.t.\@ $\vec{\widetilde{g}}$.
At this point we can invoke the Local Ratio Theorem to get that
\begin{align*}
\sum_{j \in \mathcal{J}} g_j(\rho_j) &
= \sum_{j \in \mathcal{J}} \widetilde{g}_j(\rho_j) +
\sum_{j \in \mathcal{J}} \alpha \cdot \widehat{g}_j(\rho_j), \\
& \leq 4 \cdot opt(\vec{\widetilde{g}}) + 4 \alpha \cdot opt(\vec{\widehat{g}}), \\
& = 4 \cdot \big( opt(\vec{\widetilde{g}}) + opt(\alpha \cdot \vec{\widehat{g}}) \big), \\
& \leq 4 \cdot opt(\vec{g}),
\end{align*}
which finishes the proof of the lemma.
\end{proof}
Note that the number of recursive calls in Algorithm~\ref{algo:lrcs} is at most $|\mathcal{J}|\cdot|\mathcal{T}|$. Indeed, in each call the due date of some job is increased. Therefore we can only guarantee a pseudo-polynomial running time. However, the same ideas developed in Section~\ref{sec:poly} can be applied here to obtain a polynomial time algorithm at a loss of a $1+\epsilon$ factor in the approximation guarantee.
\section{Release dates}
\label{sec:releasedates}
This section discusses how to generalize the ideas from the previous section to instances with release dates. We assume that there are $\kappa$ different release dates, which we denote with the set $H$. Our main result is a pseudo-polynomial $4\kappa$-approximation algorithm. The generalization is surprisingly easy: We only need to redefine our residual demand function to take into account release dates.
For a given due date assignment vector $\vec{\sigma}$ and an interval $[r,t)$ we denote by
\[ D(r,t,\vec{\sigma}) = \max \set { r + p \left(\sset{j \in \mathcal{J}}{r \leq r_j \leq \sigma_j < t} \right)- t+ 1 ,0} \]
the \emph{residual demand} for $[r,t)$. Intuitively, this quantity is the amount of processing time of jobs released in $[r,t)$ that currently have a due date strictly less than $t$ that should be assigned a due date of $t$ or greater if we want feasibility.
The \emph{truncated processing time} of $j$ with respect to $r$, $t$, and $\vec{\sigma}$ is
\[ p_j(r,t, \vec{\sigma}) = \min \set{p_j, D(r,t, \vec{\sigma})}.
\]
The algorithm for multiple release dates is very similar to {\sc local-ratio}. The \emph{only} difference is in the way we decompose the input cost function vector $\vec{g}$. First, we find values $r^*$ and $t^*$ maximizing $D(r^*, t^*, \vec{\sigma})$. Second, we define the model
cost function for each job $i \in \mathcal{J}$ as follows
\[ \widehat{g}_i(t) =
\begin{cases}
p_i(r^*,t^*, \vec{\sigma}) & \text{if } r^* \leq r_i < t^* \text{ and } \sigma_i < t^* \leq t, \\
0 & \text{otherwise}. \\
\end{cases}
\]
\begin{myalgorithm}[12cm]{\sc local-ratio-release$(\vec{\sigma}, \vec{g})$ \label{algo:release}}
\IF {$\vec{\sigma}$ is feasible}
\STATE $\vec{\rho}$ = $\vec{\sigma}$
\ELSE
\STATE $(t^*, r^*) = \mathrm{argmax}_{(t, r) \in \mathcal{T} \times H} D(r, t,\vec{\sigma})$ \COMMENT{break ties arbitrarily}
\STATE For each $i \in \mathcal{J}$ let \(
\widehat{g}_i(t)=\begin{cases}
p_i(r^*,t^*, \vec{\sigma}) & \text{if } r^* \leq r_i < t^* \text{ and } \sigma_i < t^* \leq t, \\
0 & \text{otherwise}.
\end{cases}
\)
\STATE Set $\vec{\widetilde{g}} = \vec{g} - \alpha \cdot \vec{\widehat{g}}$ where $\alpha$ is the largest value such that $\vec{\widetilde{g}} \geq 0$
\STATE Let $j$ and $s$ be such that
\( \widetilde{g}_j(s) = 0 \text{ and } \widehat{g}_j(s) > 0 \)
\STATE $\vec{\widetilde{\sigma}} = (\vec{\sigma}_{-j}, s)$
\STATE $\vec{\widetilde{\rho}}$ = {\sc local-ratio-release}$(\vec{\widetilde{\sigma}}, \vec{\widetilde{g}})$
\IF {$(\vec{\widetilde{\rho}}_{-j}, \sigma_j)$ is feasible}
\STATE $\vec{\rho} = (\vec{\widetilde{\rho}}_{-j}, \sigma_j)$
\ELSE
\STATE $\vec{\rho} = \vec{\widetilde{\rho}}$
\ENDIF
\ENDIF
\RETURN $\vec{\rho}$
\end{myalgorithm}
The rest of the algorithm is exactly as before. We call the new algorithm {\sc
local-ratio-release}. Its pseudocode is given in Algorithm~\ref{algo:release}. The initial
call to the algorithm is done on the assignment vector $(r_1, r_2, \ldots,
r_n)$ and the function cost vector $(f_1, f_2, \ldots, f_n)$. Without loss of
generality, we assume $f_j(r_j) = 0$ for all $j \in \mathcal{J}$.
\begin{theorem}
\label{thm:ls-cs-rd}
There is a pseudo-polynomial time $4\kappa$-approximation for scheduling jobs
with release dates on a single machine with generalized cost
function.
\end{theorem}
The proof of this theorem rests on a series of Lemmas that mirror
Lemmas~\ref{lem:lr-cs-prop},~\ref{lem:local-argument},
and~\ref{lem:4-correct} from Section~\ref{sec:local-ratio}.
\begin{lemma}
\label{lem:folk-rd}
An assignment $\vec{\sigma}$ is feasible if and only if there is no residual demand at
any interval $[r,t)$; namely, $\vec{\sigma}$ is feasible if and only if $D(r, t,
\vec{\sigma})=0$ for all $r\in H$ and $r<t \in \mathcal{T}$. Furthermore,
scheduling the jobs according to early due date first yields a feasible
preemptive schedule.
\end{lemma}
\begin{proof}
We start by noting that one can use a simple exchange argument to show that
if there is some schedule that meets the due dates $\vec{\sigma}$, then the
earliest due date (EDD) schedule must be feasible.
First, we show that if there is a job $j$ in the EDD schedule that does not
meet its deadline, then there is an interval $[r,t)$ such that $D(r,t,
\vec{\sigma}) > 0$. Let $t = \sigma_j + 1$ and let $r < t$ be the latest release date such that the machine was idle at time $r-1$ just after EDD finished scheduling $j$. Let $X = \sset{i \in
\mathcal{J}}{r \leq r_i , \sigma_i < t}$. Clearly,
$r + p(X) \geq t$, otherwise $j$ would have met its due date. Therefore,
\begin{align*}
0 & < r + p(X) - t + 1\\
& = r + p \left(\sset{i \in \mathcal{J}}{r \leq r_i \leq \sigma_i < t} \right) - t + 1 \\
& \leq D(r, t, \vec{\sigma}).
\end{align*}
Second, we show that for any interval $[r,t)$ such that $D(r,t,
\vec{\sigma}) > 0$, there exists a job $j$ in the EDD schedule that does not
meet its deadline. Let $X = \sset{i \in
\mathcal{J}}{r \leq r_i, \sigma_i<t}$. Then,
\begin{equation*}
0 < D(r, t, \vec{\sigma}) = r + p(X) - t + 1 \quad \Longrightarrow \quad r + p(X) \geq t.
\end{equation*}
Let $j$ be the job in $X$ with the largest completion time in the EDD schedule. Notice that the completion time of $j$ is at least $r + p(X) \geq t$. On the other hand, its due date is $\sigma_j < t$. Therefore, the EDD schedule misses $j$'s due date.
\end{proof}
\begin{lemma}
\label{lem:local-ratio-release-prop}
Let $(\vec{\sigma^{(1)}}, \vec{g^{(1)}}), (\vec{\sigma^{(2)}}, \vec{g^{(2)}}), \ldots, (\vec{\sigma^{(k)}}, \vec{g^{(k)}})$ be the inputs to the successive recursive calls to {\sc local-ratio-release} and let $\vec{\rho^{(1)}}, \vec{\rho^{(2)}}, \ldots, \vec{\rho^{(k)}}$ be their corresponding outputs. The following properties hold:
\begin{enumerate}[(i)]
\item $\vec{\sigma^{(1)}} \leq \vec{\sigma^{(2)}} \leq \cdots \leq
\vec{\sigma^{(k)}}$,
\item $\vec{\rho^{(1)}} \leq \vec{\rho^{(2)}} \leq \cdots \leq \vec{\rho^{(k)}}$,
\item $\vec{\sigma^{(i)}} \leq \vec{\rho^{(i)}}$ for all $i =1, \ldots, k$,
\item $g^{(i)}_j(\sigma^{(i)}_j) = 0$ and $g^{(i)}_j$ is non-negative for all $i=1, \ldots, k$ and $j \in \mathcal{J}$.
\end{enumerate}
\end{lemma}
\begin{proof}
The proof of Properties (i)-(iii) is exactly the same as that of Lemma~\ref{lem:lr-cs-prop}.
The fourth property follows by induction. The base case is the first call
we make to {\sc local-ratio-release}, which is $\vec{\sigma^{(1)}} = (r_1,\ldots, r_n)$ and
$\vec{g^{(1)}} = (f_1, \ldots, f_n)$, where it holds by our assumption. For the
inductive case, we note that $\vec{{g}^{(i+1)}}$ is constructed by taking
$\vec{{g}^{(i)}}$ and subtracting a scaled version of the model function
vector, so that $\vec{0} \leq \vec{g^{(i+1)}} \leq \vec{g^{(i)}}$, and
$\vec{\sigma^{(i+1)}}$ is constructed by taking $\vec{\sigma^{(i)}}$ and
increasing the due date of a single job $j^{(i)}$. The way this is done
guarantees that $g^{(i+1)}_{j^{(i)}} ( \sigma^{(i+1)}_{j^{(i)}}) = 0$, which
ensures that the property holds.
\end{proof}
\begin{lemma}
\label{lem:local-argument-rd}
Let {\sc local-ratio-release}$(\vec{\sigma}, \vec{g})$ be a recursive call returning $\vec{\rho}$. Then
\[
\sum_{i \in \mathcal{J} \atop r^* \leq r_i \leq \sigma_i < t^* \leq \rho_i} p_i(r^*, t^*, \vec{\sigma})
\leq 4 \kappa \cdot D(r^*, t^*, \vec{\sigma}),
\]
where $(r^*, t^*)$ are the values used to decompose the input cost function
vector $\vec{g}$.
\end{lemma}
\begin{proof} Our goal is to bound the $p_i(r^*,t^*, \vec{\sigma})$ value of jobs
\[X = \sset{ i \in \mathcal{J}}{r^* \leq r_i \leq \sigma_i < t^* \leq \rho_i}.\]
Notice that the algorithm increases the due date of these jobs in this or a
later recursive call. Furthermore, and more important to us, the algorithm
decides not to undo the increase.
For each $i \in X$, consider the call {\sc local-ratio-release}$(\vec{\sigma'},
\vec{g'})$ when we first increased the due date of $i$ beyond $\sigma_i$. Let
$\vec{{\rho}'}$ be the assignment returned by the call. Notice that $\rho'_i >
\sigma_i$ and that $(\vec{\rho'}\!_{-i}, \sigma_i)$ is not
feasible---otherwise we would have undone the due date increase. By
Lemma~\ref{lem:local-ratio-release-prop}, we know that $\vec{\rho} \leq
\vec{\rho'}$, so we conclude that $(\vec{\rho}_{-i}, \sigma_i)$ is not
feasible either. We define $r(i) \leq r_i$ and $\sigma_i < t(i) \leq \rho_i$
such that the interval $[r(i), t(i))$ has a positive residual demand in this
infeasible assignment:
\[ D(r(i), t(i), (\vec{\rho}_{-i}, \sigma_i)) > 0. \]
Note that such an interval must exist, otherwise $\vec{\rho}$ would not be
feasible.
We partition $X$ into $2 \kappa$ subsets. For each release date $r \in H$ we define
\begin{equation*}
L(r) = \set{ i\in X: t(i) \le t^*, r(i) = r} \text{ and } R(r) = \set{ i\in X: t(i) > t^*, r(i) = r }.
\end{equation*}
Let $t_L^r = \max \sset{t(i)}{i \in L(r)}$ and
$i_L^r$ be a job attaining this value. Similarly, consider $t_R^r = \min \sset{t(i)}{i \in R(r)}$
and $i_R^r$ be a job attaining this value.
We will bound the contribution of each of these sets separately. Our goal
will be to prove that for each release date $r$ we have
\begin{align}
\label{eq:Lmax-rd}
\sum_{i \in L(r) - i_L^r} p_i & \le D(r^*, t^*, \vec{\sigma}), \text{ and }\\
\label{eq:Rmax-rd}
\sum_{i \in R(r) - i_R^r} p_i & \le
D(r^*, t^*,\vec{\sigma}).
\end{align}
Let us argue \eqref{eq:Lmax-rd} first. Assume $L(r) \neq \emptyset$, so $t_L^r$
is well defined; otherwise, the claim is trivial. Since $D\left(r, t_L^r,
(\vec{\rho}_{-i_L^r}, \sigma_{i_L^r})\right) > 0$, it follows that
\begin{align*}
\sum_{i \in \mathcal{J} - i_L^r \atop r \leq r_i < t_L^r \leq \rho_i } p_i & < r + \sum_{i \in \mathcal{J} \atop r \leq r_i < t_L^r } p_i - t_L^r + 1 \\
\sum_{i \in \mathcal{J} \atop r \leq r_i < t_L^r \leq \sigma_i } p_i +\sum_{i \in \mathcal{J} - i_L^r \atop r \leq r_i \leq \sigma_i < t_L^r \leq \rho_i } p_i & < r + \sum_{i \in \mathcal{J} \atop r \leq r_i < t_L^r } p_i - t_L^r + 1 \\
\sum_{i \in \mathcal{J} - i_L^r \atop r \leq r_i \leq \sigma_i < t_L^r \leq \rho_i } p_i & < D(r,t_L^r, \vec{\sigma}).
\end{align*}
Recall that $\sigma_i < t(i)$ for all $i \in X$. Furthermore, $t(i) \leq
t_L^r$, and thus $\sigma_i < t_L^r$, for all $i \in L(r)$. Also, $t(i) \leq
\rho_i$ for all $i \in X$. Therefore, the sum on the left-hand side of the
last inequality contains all jobs in $L(r) - i_L^r$. Finally, we note that $D(r,t_L^r, \vec{\sigma} ) \leq D(r^*, t^*, \vec{\sigma})$ due to the way {\sc local-ratio-release} chooses $r^*$ and $t^*$, which
gives us~\eqref{eq:Lmax-rd}.
Let us argue \eqref{eq:Rmax-rd}. Assume $R(r) \neq \emptyset$, so $t_R^r$ is well defined; otherwise, the claim is trivial. Since $D\left(r,t_R^r, (\vec{\rho}_{-i_R^r},
\sigma_{i_R^r})\right) > 0$, it follows that
\begin{align*}
\sum_{i \in \mathcal{J} - i_R^r \atop r \leq r_i < t_R^r \leq \rho_i } p_i & < r + \sum_{i \in \mathcal{J} \atop r \leq r_i < t_R^r } p_i - t_R^r + 1 \\
\sum_{i \in \mathcal{J} \atop r \leq r_i < t_R^r \leq \sigma_i } p_i +\sum_{i \in \mathcal{J} - i_R^r \atop r \leq r_i \leq \sigma_i < t_R^r \leq \rho_i } p_i & < r + \sum_{i \in \mathcal{J} \atop r \leq r_i < t_R^r } p_i - t_R^r + 1 \\
\sum_{i \in \mathcal{J} - i_R^r \atop r \leq r_i \leq \sigma_i < t_R^r \leq \rho_i } p_i & < D(r,t_R^r, \vec{\sigma})
\end{align*}
Recall that $t(i) \leq \rho_i$ for all $i \in X$. Furthermore, $t_R^r \leq
t(i)$, and thus $t_R^r \leq \rho_i$, for all $i \in R(r)$. Also, $t(i) > \sigma_i$ for
all $i \in X$. Therefore, the sum on the left-hand side of the last
inequality contains all jobs in $R(r)- i_R^r$. Finally, we note that $D(r,t_R^r,
\vec{\sigma} )
\leq D(r^*, t^*, \vec{\sigma})$ due to the way {\sc local-ratio-release} chooses $r^*$ and $t^*$, which
gives us~\eqref{eq:Rmax-rd}.
Finally, we note that $p_i(r^*, t^*, \vec{\sigma}) \leq D(r^*,t^*, \vec{\sigma})$ for all $i \in \mathcal{J}$. Therefore,
\begin{align*}
\sum_{i \in \mathcal{J}: \rho_i \geq t^*} p_i(r^*, t^*, \vec{\sigma})
&= \sum_{i \in X} p_i(r^*, t^*, \vec{\sigma}) \\
& = \sum_r \left ( \sum_{i \in L(r)} p_i(r^*, t^*, \vec{\sigma}) + \sum_{i \in R(r)} p_i(r^*, t^*, \vec{\sigma}) \right) \\
& \leq \sum_r \Big( 2 \cdot D(r^*, t^*, \vec{\sigma}) + 2 \cdot D(r^*, t^*, \vec{\sigma}) \Big) \\
& = 4 \kappa \cdot D(r^*, t^*, \vec{\sigma}).
\end{align*}
\end{proof}
\begin{lemma}
\label{lem:4-correct-rd}
Let {\sc local-ratio-release}$(\vec{\sigma}, \vec{g})$ be a recursive call and $\vec{\rho}$ be its output. Then $\vec{\rho}$ is a feasible $4\kappa$-approximation w.r.t.\@ $\vec{g}$.
\end{lemma}
\begin{proof}
The proof is by induction. The base case corresponds to the base case of the
recurrence where we get as input a feasible assignment $\vec{\sigma}$, and so
$\vec{\rho} = \vec{\sigma}$. From Lemma~\ref{lem:local-ratio-release-prop}, we know that
$g_i(\sigma_i) = 0$ for all $i \in \mathcal{J}$, and that the cost functions
are non-negative. Therefore, the cost of $\vec{\rho}$ is optimal since
\[ \sum_{i \in \mathcal{J}} g_i(\rho_i) = 0. \]
For the inductive case, the cost function vector $\vec{g}$ is decomposed into
$\vec{\widetilde{g}} + \alpha \cdot \vec{\widehat{g}}$. Let $(j,s)$ be the
pair used to define $\vec{\widetilde{\sigma}} = (\vec{\sigma}_{-j}, s)$. Let
$\vec{\widetilde{\rho}}$ be the assignment returned by the recursive call. By the
induction hypothesis, we know that $\vec{\widetilde{\rho}}$ is feasible and
$4\kappa$-approximate w.r.t.\@ $\vec{\widetilde{g}}$.
After the recursive call returns, we check the feasibility of
$(\vec{\widetilde{\rho}}_{-j}, \sigma_j)$. If the vector is feasible, then we
return the modified assignment; otherwise, we
return~$\vec{\widetilde{\rho}}$. In either case, $\vec{\rho}$ is feasible.
We claim that $\vec{\rho}$ is $4\kappa$-approximate w.r.t.\@ $\vec{\widehat{g}}$. Indeed,
\[ \sum_{i \in \mathcal{J}} \widehat{g}_i(\rho_i) = \sum_{i \in \mathcal{J} \atop r^*\leq r_i < t^* \leq \rho_i} p_i(r^*,t^*, \vec{\sigma}) \leq 4 \kappa \cdot D(r^*,t^*,
\vec{\sigma}) \leq 4 \kappa \cdot opt(\vec{\widehat{g}}),\] where the
first inequality follows from Lemma~\ref{lem:local-argument-rd} and the last
inequality follows from the fact that the cost of any schedule under
$\vec{\widehat{g}}$ is given by the $p_i(r^*,t^*, \vec{\sigma})$ value
of jobs $i \in \mathcal{J}$ with $r^*\leq r_i < t^*$ and $\sigma_i < t^*$ that cover $t^*$, which must have
a combined processing time of at least $D(r^*,t^*, \vec{\sigma})$.
Hence, $opt(\vec{\widehat{g}}) \geq D(r^*, t^*, \vec{\sigma})$.
We claim that $\vec{\rho}$ is $4\kappa$-approximate w.r.t.\@ $\vec{\widetilde{g}}$.
Recall that $\vec{\widetilde{\rho}}$ is $4\kappa$-approximate w.r.t.\@
$\vec{\widetilde{g}}$; therefore, if~$\vec{\rho} =
\vec{\widetilde{\rho}}$ then $\vec{\rho}$ is $4\kappa$-approximate w.r.t.\@ $\vec{\widetilde{g}}$. Otherwise, $\vec{\rho} = (\vec{\widetilde{\rho}}_{-j}, \sigma_j)$, in which case $\widetilde{g}_j(\rho_j) = 0$, so $\vec{\rho}$ is also $4\kappa$-approximate w.r.t.\@ $\vec{\widetilde{g}}$.
At this point we can invoke the Local Ratio Theorem to get that
\begin{align*}
\sum_{j \in \mathcal{J}} g_j(\rho_j) &
= \sum_{j \in \mathcal{J}} \widetilde{g}_j(\rho_j) +
\sum_{j \in \mathcal{J}} \alpha \cdot \widehat{g}_j(\rho_j), \\
& \leq 4 \kappa \cdot opt(\vec{\widetilde{g}}) + 4 \kappa \cdot \alpha \cdot opt(\vec{\widehat{g}}), \\
& = 4 \kappa \cdot \big( opt(\vec{\widetilde{g}}) + opt(\alpha \cdot \vec{\widehat{g}}) \big), \\
& \leq 4 \kappa \cdot opt(\vec{g}),
\end{align*}
which completes the proof of the lemma.
\end{proof}
Finally, we note that invoking Lemma~\ref{lem:4-correct-rd} on $\vec{\sigma} = (r_1, \ldots, r_n)$ and $\vec{g} = (f_1, \ldots, f_n)$ gives us Theorem~\ref{thm:ls-cs-rd}.
\section{Conclusions and Open Problems}
In this article we have proposed a primal-dual $4$-approximation algorithm for $1||\sum f_j$ based on an LP strengthened with knapsack-cover inequalities. Since the original appearance of this result in a preliminary paper~\cite{Cheung11}, an algorithm with an improved approximation ratio of $e+\epsilon$ was given~\cite{Hohn14}, although its running time is only quasi-polynomial. It is natural to ask whether an improved, polynomial-time algorithm is possible. A positive result would be interesting even in the special case of UFP on a path. Similarly, the exact integrality gap of the LP is known to be only in the interval $[2,4]$, even for UFP on a path. The example in Section \ref{sec:pseudopoly}, which shows that the analysis of our algorithm is tight, suggests that the reason we cannot obtain a performance guarantee better than 4 stems from the primal-dual technique, rather than from the integrality gap of the LP, and hence another LP-based technique might yield a better guarantee. Other natural open questions include finding a constant-factor approximation algorithm in the presence of release dates, or ruling out the existence of a PTAS.
\end{document} |
\begin{document}
\title{Indistinguishable Encoding for Bidirectional Quantum Key Distribution: Theory to Experiment}
\author{J. S. Shaari}
\affiliation{Department of Physics, International Islamic University Malaysia (IIUM),
Jalan Sultan Ahmad Shah, Bandar Indera Mahkota, 25200 Kuantan, Pahang, Malaysia}
\affiliation{Institute of Mathematical Research (INSPEM), University Putra Malaysia, 43400 UPM Serdang, Selangor, Malaysia.}
\author{Suryadi}
\affiliation{Department of Physics, International Islamic University Malaysia (IIUM),
Jalan Sultan Ahmad Shah, Bandar Indera Mahkota, 25200 Kuantan, Pahang, Malaysia}
\date{\today}
\begin{abstract}
We present for the first time, a bidirectional Quantum Key Distribution protocol with minimal encoding operations derived from the use of two `nonorthogonal' unitary transformations selected from two mutually unbiased unitary bases; which are indistinguishable in principle for a single use. Along with its decoding procedure, it is a stark contrast to its `orthogonal encoding' predecessors. Defining a more relevant notion of security threshold for such protocols, the current protocol outperforms its predecessor in terms of security as the maximal amount of information an eavesdropper can glean is essentially limited by the indistinguishability of the transformations. We further propose adaptations for a practical scenario and report on a proof of concept experimental scheme based on polarised photons from an attenuated pulsed laser for qubits, demonstrating the feasibility of such a protocol.
\end{abstract}
\pacs{03.67.Dd}
\keywords{Quantum cryptography}
\maketitle
\section{Introduction}
Quantum cryptography, or more specifically quantum key distribution (QKD) provides for a solution to the courier problem of distributing secret keys between two parties to be utilised for a one-time pad cryptographic protocol. Arguably a first rather direct practical application of quantum physics, with realisations mainly in terms of optical based implementations, QKD's security is guaranteed by physical laws and saw its debut in the famous BB84 \cite{BB84} protocol, where one party, commonly referred to as Alice would send a photon prepared in one of two bases (mutually unbiased) over a quantum channel to another, Bob, for his measurements to determine the state sent. This is a straightforward scenario of `prepare and measure'; i.e. Alice prepares a quantum state while Bob measures. An adversary, Eve, would not be able to determine the states sent without inducing any errors. In general, channels are noisy and detection alone in any QKD protocol would prove to be impractical. More importantly, an estimation of the amount of information Eve may have gleaned can be inferred from the error between Alice and Bob and thus, below a prescribed threshold, a secret key can nevertheless be distilled by the legitimate parties. This is done by first correcting any errors between them using error corrections (EC) codes and Eve's knowledge of the key can be reduced to arbitrarily low levels using privacy amplification (PA) procedures (we refer to \cite{Gisin, HKL} for excellent reviews on quantum cryptography).
While variants of the first QKD protocol have seen much development, a departure from a prepare and measure scenario was imagined in a QKD protocol making bidirectional use of the quantum channel between Alice and Bob, sometimes referred to as two-way QKD (we shall use the terms bidirectional and two-way interchangeably). It was first reported in 2003 in \cite{Bostrom} and later saw its evolution into various forms, improving on security and some on practicality \cite{Qin,FG,LM05}. The essential feature of the protocol is the encoding of information by one party, Alice, by executing unitary transformations on qubits received from (and prepared by) another party, Bob who would later measure it in the same basis he prepared it in. Information bits for secret key generation are derived from the different transformations, in clear contrast to BB84 like schemes where information for the same purpose is simply the states themselves. Hence, while the latter requires an eavesdropper to estimate the state of a traveling qubit between the legitimate parties for successful eavesdropping, two-way protocols challenge eavesdroppers to estimate the evolution of an unknown state as it travels to and fro between Alice and Bob. Practical implementations realised include those reported in \cite{Cere} with entangled photons, \cite{Khir1,Khir2} with weak (attenuated) pulsed laser as photon sources and even using telecommunication wavelengths in \cite{Rupesh} to cite a few.
However, in all these, the unitary transformations had mostly been limited to those that may be described by the Pauli matrices, $X,Y,Z$ plus the identity operator $\mathbb{I}$ (in most cases, it would be the $iY$ and the identity $\mathbb{I}$) and the security of the protocols mainly lies in the inability to determine conclusively the traveling qubits' states randomly prepared by Bob in one of two mutually unbiased bases (MUB). Bob himself obviously can distinguish between the transformations;
in principle, these unitary transformations can be distinguished perfectly even for a single use \cite{dariano}. Ref.\cite{bisio} referred to these transformations as a set of \textit{orthogonal unitaries}. While this on its own does not hinder an eavesdropper (Eve) from ascertaining without ambiguity the transformations executed by Alice, it does result in the former introducing errors should measurements be made instead on the received states. To this effect the legitimate parties would randomly intersperse their encoding/ decoding runs with prepare and measure runs where Bob's prepared states are, with a certain probability, measured by Alice, akin to a BB84 scenario; this is referred to as the \textit{control mode} (CM). The encoding/ decoding runs are denoted as the \textit{encoding mode} (EM).
While being operational and secure, these protocols in some sense betray the essence of utilising physical laws directly affecting Eve's ability to eavesdrop on the encodings. As prepare and measure schemes deliver by capitalising on imperfect state estimations, these two-way schemes unfortunately do not, in an analogous way, prescribe imperfect estimation of unitary transformations as part of their working engine. The idea of Alice actually using transformations which are in principle indistinguishable for a single use for encoding purpose was noted in \cite{dar2} where the unitaries would be selected randomly from two mutually unbiased bases of orthogonal qubit unitaries (a term used in \cite{bisio}). A study, more focused towards QKD for such two-way protocols using qubits selected from 2 MUBs encoded with such indistinguishable `nonorthogonal unitaries' was reported in \cite{jss}. The term nonorthogonal unitaries can be traced to Ref. \cite{non} and the relevant definition was later given in \cite{jss}. A proper formalization for the structure of such unitaries was studied as mutually unbiased unitary bases (MUUB) in \cite{jssarxiv}. In short, two orthogonal bases, $\mathcal{B}_0$ and $\mathcal{B}_1$, for some $n$-dimensional subspace of $2\times 2$ matrices are defined as sets of MUUB when
\begin{eqnarray}
\left|\text{Tr}(B_0^{i\dagger}B_1^j)\right |^2=C~~,~~\forall B_0^i\in\mathcal{B}_0, B_1^j\in\mathcal{B}_1,
\end{eqnarray}
for $i,j=1,\ldots, n$ and some constant $C\neq 0$; $C$ equals $1$ and $2$ for $n=4$ and $n=2$ respectively \cite{jssarxiv}.
In this work, we describe and analyse a bidirectional QKD protocol which uses a minimal number of indistinguishable unitaries for encoding, where each encoding is selected from two different MUUBs. Given the use of only 2 unitary operators, differently from \cite{jss} (which used 4) as well as other two-way QKDs, the very decoding procedure by Bob would be radically different from previously reported two-way protocols. Beginning with an ideal protocol, we brief on its merits in a depolarising channel and provide a security analysis which demonstrates its clear advantage over its predecessor, the protocol of \cite{LM05}. We further report on an experimental proof of concept for the protocol revealing its feasibility.
\section{Bidirectional QKD with Two Mutually Unbiased Unitaries}
\noindent
The protocol is based on the same bidirectional use of the quantum channel where Bob sends to Alice a qubit prepared in a basis of his choice. Alice would then encode using one of two unitary transformations before submitting to Bob for his measurements. We consider the case where Alice uses unitaries described as rotations around the $y$ axis of the Poincare sphere given by
\begin{eqnarray}
R_y(\zeta)=\cos{\left(\dfrac{\zeta}{2}\right)}\mathbb{I}-i\sin{\left(\dfrac{\zeta}{2}\right)}Y.
\end{eqnarray}
We choose only two angles for $\zeta$ in this work, namely, $\zeta=0$ (corresponding to a passive operation) and $\zeta=-\pi/2$ which corresponds to flipping states between the mutually unbiased orthonormal $X$ and $Z$ bases. The transformations are in fact elements taken from either of two sets of MUUB \cite{jss}, $\{I,Y\}$ and $\{R_y(\pm\pi/2)\}$ with respect to one another,
\begin{eqnarray}
\left|\text{Tr}(IR_y(\pm\pi/2))\right |^2=\left|\text{Tr}(Y^\dagger R_y(\pm\pi/2))\right |^2=2.
\end{eqnarray}
The indistinguishability of these two transformations can be seen from the indistinguishability of an input state for the transformation, from its output. With an arbitrary state $|\psi\rangle=\cos{(\theta/2)}|0\rangle+\exp{(i\phi)}\sin{(\theta/2)}|1\rangle$, we can quickly observe that the overlap
\begin{eqnarray}
|\langle \psi|I R_y(-\pi/2)|\psi\rangle|^2=\dfrac{1}{2}+\dfrac{\sin^2{(\theta)}\sin^2{(\phi)}}{2}
\end{eqnarray}
has a minimum value of $1/2$. This minimum value is in fact the square of the inner product for two states coming from two MUB.
Any state lying on the equator of the Poincare sphere would hence provide for minimal overlap. Thus we let Bob prepare a state randomly selected from the basis defined by $\{|0^q\rangle=\cos{(\theta/2)}|0\rangle+\sin{(\theta/2)}|1\rangle,~|1^q\rangle=\sin{(\theta/2)}|0\rangle-\cos{(\theta/2)}|1\rangle\}$ to be submitted to Alice for her encoding (transformation). The value for the angle $\theta$ is also a random choice by Bob.
Once Alice has executed her transformation, the resulting state would be forwarded to Bob for which he shall commit to a measurement in either the same basis he prepared or one rotated by $\pi/2$. Writing Alice's transformation as $U_A$ and Bob's prepared and resulting measured state as $|\psi_f\rangle$ and $|\psi_b\rangle$ respectively, Bob can only determine Alice's encoding conclusively if $\langle\psi_b |U_A|\psi_f\rangle=0.$\footnote{admittedly, this is inspired very much by the SARG protocol \cite{SARG}} As an example, if Bob prepares the computational state $|0\rangle$ and his measurement result is $|1\rangle$, then he knows for certain that Alice could not have used the $\mathbb{I}$ operation. Or, if a measurement had been made in the $X$ basis instead and yields $(|0\rangle+|1\rangle)/\sqrt{2}$, then Bob can infer that Alice had not used $R_y(-\pi/2)$. Bob shall then announce publicly all inconclusive results to be discarded. Assigning the logical value `0' to $\mathbb{I}$ and `1' to $R_y(-\pi/2)$, Alice and Bob can share a key only for $1/4$ of the total qubits used.
\subsection{Security Analysis}
Taking the conventional approach to security analysis of bidirectional QKDs, we consider Eve's strategy is to attack the qubits en route twice, once in the forward path (from Bob to Alice) and once in the backward path (from Alice back to Bob). In the individual paths, the density operator for Bob's qubit, $\rho_B$, on its own does not provide for any information; a qubit prepared as either of the orthogonal states in any basis in the forward path is completely mixed as is the case in the backward path after Alice's encoding.
We shall analyse the protocol based on the methods of \cite{hua,ivan}, where we shall consider each of Bob's traveling states to independently undergo identical interaction with Eve's ancilla prior to Alice's encoding. Then we allow Eve to have access to the entire state in the backward path (after encoding) to extract information and we set no constraint on how she may do this. This is ultimately a very pessimistic stand and provides for a collective attack scenario.
We do however, reasonably require Eve's strategy to simulate a depolarising channel like \cite{ivan}, i.e. Bob's qubit, irrespective of the basis chosen should experience the same amount of noise, essentially undergoing a symmetric attack \cite{Gisin}. Also, like \cite{ivan}, we shall begin with Bob's state in one basis only, and then show that the information Eve should gain for any of Bob's choice of basis is the same. We begin by writing the interaction between Eve's ancillae, $|\mathcal{E}\rangle$, and the travelling qubit (in the computational basis for simplicity) in the forward path as;
\begin{eqnarray}\label{nonortho}
U|b\rangle|\mathcal{E}\rangle=|b\rangle |\mathcal{E}_{bb}\rangle+|\bar{b}\rangle |\mathcal{E}_{b\bar{b}}\rangle
\end{eqnarray}
with $b\in\{0,1\}$ and $\bar{0}=1$ ($\bar{1}=0$). Unitarity of the interaction necessitates
\begin{eqnarray}
\langle \mathcal{E}_{b\bar{b}}|\mathcal{E}_{b\bar{b}}\rangle+\langle \mathcal{E}_{bb}|\mathcal{E}_{bb}\rangle=1,~\langle \mathcal{E}_{bb}|\mathcal{E}_{\bar{b} b}\rangle+\langle \mathcal{E}_{b\bar{b}}|\mathcal{E}_{\bar{b} \bar{b}}\rangle=0
\end{eqnarray}
and we let $\langle \mathcal{E}_{bb}|\mathcal{E}_{bb}\rangle=F$ and $\langle \mathcal{E}_{b\bar{b}}|\mathcal{E}_{b\bar{b}}\rangle=Q$ and $F+Q=1$. It is also worth noting that, with proper choices for phases, one can ensure all of Eve's scalar products are real \cite{Gisin}. Now, the value $Q$ is really the probability of Bob's state being measured as one orthogonal to the one he sent. Admittedly, in the current protocol, we have not defined the protocol to measure the qubit in the forward path, thus making $Q$ inaccessible; it can easily be determined if we include some form of CM similar to that of \cite{LM05}. We shall return to this point later.
The state of the system (Bob's qubit after Eve's attack in the forward path) subsequent to Alice's encoding can be written as
\begin{eqnarray}
\rho_{BE}=\dfrac{1}{2}\left[U\rho_BU^\dagger+R_y^E(U\rho_BU^\dagger)R_y^{E\dagger}\right]
\end{eqnarray}
where $\rho_B=\mathbb{I}/2\otimes|\mathcal{E}\rangle\langle \mathcal{E}|$ and $R_y^E=R_y(-\pi/2)\otimes \mathbb{I}_E$ with $\mathbb{I}_E$ being the identity on Eve's Hilbert space. Eve's access to the state on the backward path provides her with information of the key, $I_E$, which is given by $S(\rho_{BE})-1$ \cite{ivan}, where $S(\rho)$ is the von Neumann entropy given by $-\text{tr}\rho\log_2{\rho}$ for a state $\rho$; written in terms of its eigenvalues, $\lambda_i$, this becomes $S(\rho)=-\sum_i \lambda_i\log_2{\lambda_i}$.
In ascertaining the eigenvalues of $\rho_{BE}$, we adopt the method in \cite{ivan} by calculating the eigenvalues of its Gram matrix representation, $\textbf{G}^{BE}$ \cite{gram}.
The eigenvalues, for $\textbf{G}^{BE}$ (which are equal to those of $\rho_{BE}$ including its multiplicities, each being 2) are given by
\begin{eqnarray}
\lambda_\pm=\dfrac{1}{8}(2\pm \sqrt{2(F\cos{x}-Q\cos{y})^2+2}).
\end{eqnarray}
Eve's information gain, $I_E$ can then be written as
\begin{eqnarray}\label{IE}
I_{E}=S(\rho_{BE})-1=h\left (\dfrac{2-a\sqrt{2}}{4}\right)
\end{eqnarray}
with $a=\sqrt{(F\cos{x}-Q\cos{y})^2+1}$ and $h(x)=-x\log_2{x}-(1-x)\log_2{(1-x)}$ being the Shannon binary entropic function.
Now, we should stress the fact that Eve's information gain is actually the same for any state Bob could send (constrained to those on the equator of the Poincare sphere) and thus this analysis is valid in considering the protocol as described above where Bob can send any such states. We demonstrate this fact briefly in the section on Methods. Insisting on the same disturbance for any such state sent by Bob, the value for $Q=1-F$ would be given by \cite{Gisin} \footnote{this is relatively easy to derive for any state on the equator of the Poincare sphere and also given in the Methods section.}
and we can eventually write
\begin{eqnarray}
1-2Q(1+\cos{y})=F\cos{x}-Q\cos{y}.
\end{eqnarray}
Hence, $a=\sqrt{[1-2Q(1+\cos{y})]^2+1}$. We can immediately observe that Eve's best strategy to maximise her information would be to maximise $\cos{y}$; ensuring $Q$ be kept minimal. Thus for a fixed $Q$, let $\cos{y}=1$ (which then fixes $x$) and her information would be given by
\begin{eqnarray}
I_{E}=h\left [\dfrac{1}{2}\left(1-\dfrac{\sqrt{(1-4Q)^2+1}}{\sqrt{2}}\right)\right].
\end{eqnarray}
It is evident that Eve achieves maximum information, approximately $0.6$, when $Q=0.25$; this is equal to the von Neumann entropy for a mixture of two states derived from two MUBs.
\subsection{Security Thresholds}
Unlike its prepare and measure cousins, the secret key rate for two-way protocols would depend on two error parameters, namely the error in the forward path, $Q$, which informs the legitimate parties of Eve's gain, thus the rate for PA, and the error in the backward path, $Q_{AB}$, which tells of the cost in bits for error correction purposes. There is no reason \textit{a priori} to imagine that these two parameters are linked by some straightforward mathematical relationship (even if we assume both channels to be depolarising). Hence, the cases of `correlated channels' or `independent channels' as studied in \cite{norm} are really specific models which may (or may not) be true in the case of an actual implementation. A `security threshold' can only be determined after both $Q$ and $Q_{AB}$ are determined.
More importantly, the notion of security threshold, which is commonly understood as a point denoting the value for error in the channel such that beyond it no secure key can be extracted must give way to the idea of \textit{curves} in a plane defined by $Q$ and $Q_{AB}$ separating regions where key extraction is possible and otherwise. We define hence, a security threshold as the area for region of the said plane where secret key extraction is possible; i.e. where the secret key rate is greater than zero (we take the maximum values for $Q$ and $Q_{AB}$ for the total region as where Eve's information gain is maximum and Alice-Bob's mutual information is minimal respectively). Secret key rates can be written as $1-I_E-h(Q_{AB})$ \cite{devetak}.
In order to have an idea of the protocol's merit, we compare it to the earlier `orthogonal' protocol of \cite{LM05}, (in some literature referred to as LM05)\footnote{we consider this a fair comparison given the two have essentially identical topologies as well as the same number of states and transformations used}. We calculate and compare the secure key rates per raw key bit for varying values of both $Q$ and $Q_{AB}$. Within the depolarising channel framework, Eve's gain for LM05 is given as $h(1-2Q)$ \cite{hua,ivan}.
\\
\begin{figure*}
\caption{Comparison of secret key rate (per bit) as contour plots, denoting only the positive key rate regions and Eve's information gain (insets) for the current protocol (a) against LM05 (b). The insets show that the maximal amount of information Eve can gain (for maximal disturbance) in LM05 is $1$ (complete) while only about 0.6 for the current protocol.}
\label{test3}
\end{figure*}
The contour plot on the left, FIG. 1(a) represents the current protocol while FIG. 1(b) is for LM05. The insets show Eve's gain for each protocol respectively. The figures clearly demonstrate how utilising these nonorthogonal transformations suppresses Eve's information, quite drastically in fact, as we observe the region (defined by $Q$ and $Q_{AB}$) for extractable secure key is greatest for the current protocol. This is mainly due to the lesser gain by an eavesdropper for the current protocol (insets). A direct numerical integration (using mathematical software) gives the security thresholds for the current protocol and LM05
as $\approx 0.037$
and $ \approx 0.017$ respectively. It is perhaps instructive to note that had we compared absolute secure key rates, then a factor of $1/4$ would be multiplied to the key rate for the current protocol relative to that of LM05; this however does not change the security thresholds.
We now return to the issue of the inaccessibility of the value $Q$. As noted in \cite{bisio}, protocols like LM05 using orthogonal unitary transformation requires a CM. In principle, given the fact that Alice's encoding in the current protocol cannot be ascertained perfectly by Eve, even for a maximal attack ($Q=0.25$) the CM is, to a certain extent obsolete. A naive way of putting this would be to say that a key can still be distilled, without knowing $Q$ provided the error in Bob's (raw) key is less than a certain $Q_{AB}$ while assuming Eve has maximal information independent of errors in the raw key. We can simply calculate $Q_{AB}$ as follows: Alice and Bob can have a positive key rate provided $h(Q_{AB})< 1-\max{(I_E)}$, i.e. $Q_{AB}\approx 7\%$. Thus in some sense, having a semblance of CM for the current protocol would only provide for a better key rate as Eve's information gain can be ascertained properly.
However, as we shall see shortly, practical considerations may delegate the estimation of $Q$ to a more critical role, especially given possible physical realisations with the use of polarisation of photons as qubits and waveplates for transformations.
\section{A Practical Protocol}
Let us consider a practical implementation of the protocol using the polarisation degree of photons as qubits. Realistic implementations of unitary transformations process pulses of photons independently of the actual number of photons. This of course exposes the protocol to a Quantum Man in the Middle attack where Eve could hijack Bob's photon en route and estimate Alice's transformations perfectly using a bright pulse before encoding Bob's photon accordingly to be submitted to him. The solution to this problem is the use of CM. The CM itself should of course involve a finite number of bases, say $n$, used for preparations and measurements; otherwise Alice and Bob would have a probability approaching zero, $\lim_{n\rightarrow \infty} 1/n = 0$, of agreeing on a basis in CM.
We can thus imagine adding a step such that with probability $c$, Alice executes a CM where she would measure the incoming qubit in a basis selected from $n$ pre-agreed bases. Bob should then include these $n$ bases in his EM so that
with probability $c/n$, a CM is successful and Alice and Bob may estimate errors in the forward path.\footnote{originally, some two-way protocols include an error check in the backward path as well. However, given that the security analysis does not provide for a parameter checking on the backward path, we do not consider such a check here.} This immediately provides Alice and Bob with a means to access $Q$ and thus estimate Eve's information gain and the security analysis of the previous section holds.\footnote{we do not however consider imperfections to the extent of having multiphoton pulses nor do we consider losses in channels} For practical purposes, we set $n = 2$, corresponding to the conventional CM where the bases used are mutually unbiased. For simplicity, we further set Bob's number of basis in EM to be $2$ as well and correspond to the same bases for CM.
\section{Experimental Proof of Concept}
In the following we report on an experimental implementation of the practical protocol described above where Bob uses polarisation of photons derived from only 2 MUBs, namely the $X$ (diagonal) basis and the $Z$ (rectilinear) basis for his preparations and measurements. The setup is basically a proof of principle with modest apparatus utilising polarised photons from attenuated laser pulses as qubits and half wave plates for the encoding process. These should be rather conventional; for example, the former is quite standard in QKD experiments or the latter for orthogonal/ nonorthogonal unitary implementation in \cite{Cere,non}. While we do simulate the presence of Eve by introducing noise on the forward and backward paths (`artificial depolarisation' akin to that in \cite{Cere}), it is important to stress that this is not meant to be a full scale secure implementation. For example, rather than have Bob randomly select between bases for his qubits, we allow the protocol to be executed with Bob choosing one basis for a certain number of runs and another for the other runs; as is the case for Alice's encoding. We also do not execute classical aspects of a QKD protocol such as authenticating the users (Alice and Bob), error correction of Alice-Bob's strings and privacy amplification.
Figure 2 shows the schematic of the experimental setup which comprises three main parts; namely Bob's, Eve's and Alice's sites. Bob's site consists of a photon state preparation setup and Measurement Device (described in Methods) to analyse incoming photons in the backward path, whereas Alice's is composed of an Encoder (to be used in EM) and a Measurement Device (to be used in CM).
\begin{figure*}
\caption{Sketch of the experimental setup. P$_i$ are Polarising Beamsplitters, D$_i$ are detectors (avalanche photodiode modules) and H$_i$ are Half waveplates (H$_{i,j}$).}
\label{fig01}
\end{figure*}
At Bob's site, photon states were generated by a strongly attenuated laser and the polarisation of the photons can be set by using a zero-order half-wave plate. The polarised photon is then transmitted to Alice's site via free-space (forward-path).
Alice passively switches between CM and EM using a 50/50 beam splitter (BS). In CM, Alice would measure the incoming photon directly in the forward-path using a Measurement Device (described in the Methods section) with the waveplate (H$_8$). In the experiment, for the sake of simplicity, we always set Alice's measurement bases to be equal to Bob's preparation bases. Obviously a full fledged implementation would require Alice to measure in either the rectilinear or diagonal bases randomly where half would eventually be discarded.
In the EM mode, Alice would need to realise the $\mathbb{I}$ and $R_y(-\pi/2)$ operators. A pair of half wave plates (H$_{6,7}$) is used for the purpose before forwarding the qubit to a mirror, thus returning it to Bob via free space in the backward path.
The incoming polarisation encoded photons from Alice are finally analysed at Bob's site using a Measurement Device (refer to Methods) with the zero-order half waveplate (H$_9$) set in either the same bases the qubits were originally prepared or one rotated to the other basis.
The existence of the eavesdropper in the forward path and backward path is simulated by introducing noise in the communication system. This is done by virtue of `artificial depolarisation' channels in the forward as well as backward paths. It is important to note that we do not require the errors in the forward path to be equal to that in the backward nor do we require the errors in EM to be trivially related to that in CM; thus discarding the usual models of noise considered for two-way QKDs.
We use two independent personal computers equipped with PCIe field-programmable gate array card (FPGA; National Instruments PCIe-7853) to control and synchronise all active equipment in the experiment as well as for data acquisition purposes.
\subsection{Experimental Results}
Data were collected for each `noise' setting; thus for each setting, we would have a pair of (average) errors, i.e. one from CM and the other from EM. In the case for EM, choosing only data which in principle would provide Bob with conclusive inference of Alice's encodings, we consider which of the cases tally with the actual encoding used by Alice and which do not (errors). Averaging the results over all states (by Bob) and encoding (by Alice) used, we arrive at an averaged error rate for the EM.
Data providing error rates for CM is quite straightforward as we only compare the state sent by Bob to that measured in CM. As argued earlier, that one may not know \textit{a priori} the relationship between the errors in CM and EM (assuming there is one), we do not presume plotting the more conventional `information curves' for Eve's gain (which is a function of $Q$) and Alice-Bob's (which is a function of $Q_{AB}$). Instead, points are plotted based on these error rates as $Q_{AB}$ versus $Q$ as in FIG. 3. We include the contour lines of the previous FIG. 1 to exhibit which of the points fall below the security threshold and which beyond. The values accompanying the points are just the value for the corresponding (in theory) secret key rates calculated using the secret key rate formula.
\begin{figure}
\caption{The red points are derived from the experiment. The numbers accompanying each point represent ``secure key rate" calculated using the key rate formula for the current protocol. The contour curves are based on earlier calculations as in FIG.1. The red curve represents the boundary between the region for distillable key (below) and non-distillable key (above).}
\label{fig02}
\end{figure}
While the points that occupy the secure region in the figure (positive secret key rate) reflect a very small sample of points that can, in principle be
achieved experimentally, we believe that these results already point out to the feasible and practical picture of our protocol.
\section{Conclusions}
\noindent Bidirectional or two way QKD has certainly been a topic of interest for more than a decade now, ranging from entanglement based protocols, nonentangled versions and even continuous variables framework \cite{pirandola}. These protocols essentially have a common topology; where one party sends quantum states to another who would encode with a transformation before sending it back to the sender for his decoding measurement. Building on this, we demonstrate the simplest way forward for such protocols to actually embody the essence of creating an ambiguity for Eve to determine the encoding rather than rely strictly on the use of nonorthogonal states as information carriers to suppress her information gain. We thus proposed and analysed in this work, a novel bidirectional QKD protocol making use of only two nonorthogonal unitary transformations selected from two different MUUBs for encoding purposes along a rather different decoding procedure, akin to the SARG protocol \cite{SARG}. Theoretical analysis based on collective attacks, coupled with a more relevant definition for security threshold has provided with a promising picture for the protocol's security compared to its predecessor.
We have also executed an experimental setup for a proof of concept of the protocol using weak photon pulses traversing artificial depolarising channels between the legitimate parties with a pair of half wave plates for the encoding transformation. While we do not commit to actually distilling a secret key, we have demonstrated its feasibility given a very modest setup. A full scale protocol with actual secret key extraction is hence very possible given some addendum to the setup to include randomisation of Bob's choice for preparation and measurement, Alice's choice for encoding as well as a proper execution of error correction and privacy amplification protocols.
Given its promising security, we hope that the future would see more realistic issues be addressed; a quick example would be the issue of multiphoton pulses coupled with channel losses and how the current protocol would perform given such imperfections. On a more fundamental note, we believe, this work should engender further interest, especially regarding the role of indistinguishable unitary transformations and even MUUBs within the context of quantum cryptography and quantum information as a whole.
\section{Methods}
In the following, we provide some details on the method we used in the theoretical calculations, mainly ascertaining the eigenvalues for the state $\rho_{BE}$ as well as certain detailed aspects of the experimental setup.
\subsection{Theoretical calculation for Eve's information}
We show in this section how one calculates the eigenvalues for the density operator $\rho_{BE}$. This is done, following \cite{ivan} by calculating the eigenvalues of its Gram matrix representation. We start by noting the definition for the Gram matrix representation \cite{gram} for an ensemble of pure states $\{ |\phi_1\rangle,\ldots,|\phi_n\rangle;p_1,\ldots,p_n\}$, a state given as $\sum_{i}^{n}p_i|\phi_i\rangle\langle\phi_i|$ can be represented by a Gram matrix $\textbf{G}$ with the elements $G_{ij}=\sqrt{p_ip_j}\langle\phi_i|\phi_j\rangle$. Thus, to write out the Gram matrix $\textbf{G}^{BE}$, for $\rho_{BE}$, we first write out all the possible (pure) states for Bob and Eve (after Alice's encoding) as
\begin{eqnarray}
|\Psi_0\rangle=|0\rangle|E_{00}\rangle+|1\rangle|E_{01}\rangle,\\\nonumber
|\Psi_1\rangle=|0\rangle|E_{10}\rangle+|1\rangle|E_{11}\rangle,\\\nonumber
|\Psi_2\rangle=R_y^E|\Psi_0\rangle,~|\Psi_3\rangle=R_y^E|\Psi_1\rangle,
\end{eqnarray}
and the Gram matrix for the mixture $\sum_i|\Psi_i\rangle\langle\Psi_i|/4$ (each of the pure states $|\Psi_i\rangle$ being equiprobable) is given by
\[
\begin{bmatrix}
1 & 0 & 1/\sqrt{2} & \alpha/\sqrt{2} \\
0 & 1 & -\alpha/\sqrt{2} & 1/\sqrt{2} \\
1/\sqrt{2} & -\alpha/\sqrt{2} & 1 & 0\\
\alpha/\sqrt{2} & 1/\sqrt{2} & 0 & 1
\end{bmatrix}
\]
with $\alpha=(F\cos{x}-Q\cos{y})$. The eigenvalues can then be easily calculated to be
\begin{eqnarray}
\lambda_\pm=\dfrac{1}{8}(2\pm \sqrt{2(F\cos{x}-Q\cos{y})^2+2}).
\end{eqnarray}
and hence
\begin{eqnarray}
S(\rho_{BE})=-2(\lambda_{+}\log_2{\lambda_{+}}+\lambda_{-}\log_2{\lambda_{-}}).
\end{eqnarray}
The above calculation can be repeated with Bob using the bases defined by the following states
\begin{eqnarray}
|0^q\rangle&=&\cos{(\theta/2)}|0\rangle+\sin{(\theta/2)}|1\rangle,\\\nonumber
|1^q\rangle&=&\sin{(\theta/2)}|0\rangle-\cos{(\theta/2)}|1\rangle
\end{eqnarray}
and we can write out the states of Bob and Eve subsequent to Alice's encoding as
\begin{eqnarray}
|\Psi_0^q\rangle=|0^q\rangle|E_{00}^q\rangle+|1^q\rangle|E_{01}^q\rangle,\\\nonumber
|\Psi_1^q\rangle=|0^q\rangle|E_{10}^q\rangle+|1^q\rangle|E_{11}^q\rangle,\\\nonumber
|\Psi_2^q\rangle=R_y^E|\Psi_0^q\rangle,~|\Psi_3^q\rangle=R_y^E|\Psi_1^q\rangle
\end{eqnarray}
where Eve's ancillary states in the above equation are given by
\begin{eqnarray}
|E_{00}^q\rangle=\mathcal{C}_1|E_{00}\rangle+\mathcal{C}_2|E_{01}\rangle+\mathcal{C}_2|E_{10}\rangle+\mathcal{C}_3|E_{11}\rangle\\\nonumber
|E_{01}^q\rangle=\mathcal{C}_2|E_{00}\rangle-\mathcal{C}_1|E_{01}\rangle+\mathcal{C}_3|E_{10}\rangle-\mathcal{C}_2|E_{11}\rangle\\\nonumber
|E_{10}^q\rangle=\mathcal{C}_2|E_{00}\rangle+\mathcal{C}_3|E_{01}\rangle-\mathcal{C}_1|E_{10}\rangle-\mathcal{C}_2|E_{11}\rangle\\\nonumber
|E_{11}^q\rangle=\mathcal{C}_3|E_{00}\rangle-\mathcal{C}_2|E_{01}\rangle-\mathcal{C}_2|E_{10}\rangle+\mathcal{C}_1|E_{11}\rangle
\end{eqnarray}
with
$\mathcal{C}_1=\cos^2{(\theta/2)}$, $\mathcal{C}_2=\cos{(\theta/2)}\sin{(\theta/2)}$ and $\mathcal{C}_3=\sin^2{(\theta/2)}$.
This results in another Gram matrix with the same eigenvalues. Thus Eve's information remains the same irrespective of Bob's choice of states. Further to that, setting
$\langle \mathcal{E}_{00}| \mathcal{E}_{00}\rangle=\langle \mathcal{E}_{00}^q| \mathcal{E}_{00}^q\rangle=F$ gives
\begin{eqnarray}
F=\dfrac{(1+\cos{y})}{2+(\cos{y}-\cos{x})}.
\end{eqnarray}
This is just a rederivation of the same result as in \cite{Gisin}.
\subsection{Experiment}
\noindent\textbf{Photon State Preparation}
Polarised photon states were generated by a strongly attenuated pulsed laser diode (Coherent, OBIS 785 LX) using variable neutral density filters (NDF). The use of variable NDF is to allow for the preparation of photon states with a certain averaged number per pulse, $\mu$. A Glan-Thomson polariser (GT), with an extinction ratio of 100000:1, was inserted after the attenuator to ensure linearly polarised photon states. A zero-order half-wave plate (H$_1$) after GT prepares particular polarised states; the polarisation states {$|z+\rangle$, $|z-\rangle$}, and {$|x-\rangle$, $|x+\rangle$} were prepared by setting the half-wave plate angles to $\varphi =0$, $\varphi =\pi/4$, $\varphi =-\pi/8$, and $\varphi =\pi/8$ with respect to $z$-axis, respectively. The mean photon number $\mu$ was set to $\approx 0.15$ photons/pulse, measured just after the half-wave plate (H$_1$).
\\
\noindent\textbf{Measurement Device}
The measurement device is made up of a set of zero-order half-wave plates (H$_8$ for Alice and H$_9$ for Bob), a polarizing beam splitter (P$_1$ for Alice and P$_2$ for Bob) and two avalanche photodiode modules (APD) (D$_1$ and D$_2$ for Alice, and D$_3$ and D$_4$ for Bob) with a quantum efficiency of about 70\% at a wavelength of 785nm. Incoming photons are collected into two multimode fibers using objective lenses (Newport M-10X, focal length 16.5mm, NA=0.25) through a pair of interference filters centered at 785nm with a bandwidth of 10nm (used to reduce background light) to eventually be detected by the APDs.
\\
\noindent\textbf{Alice's Encoder}
Alice's encoding operation is realized by the use of a couple of half-wave plates (H$_{6,7}$, at angles $\varphi_6,\varphi_7$ with respect to the $z$ axis) which rotates any state on the equator of the Poincar\'e sphere by an angle $\varphi_6+\varphi_7$. The passive $\mathbb{I}$ operation was realized by setting the angles of the half-wave plates H$_{6,7}$ to be $\varphi_6=\varphi_7 = 0$ radian, while the flipping of polarization states between the mutually orthonormal $Z$ and $X$ bases, $R_y(-\pi/2)$, is realised by setting the angles of H$_{6,7}$ to be $\varphi_6=\pi/8$ radian and $\varphi_7 = \pi/4$ radian.
\\
\noindent\textbf{Artificial Depolarisation}
Artificial depolarisation is induced by simply inserting a pair of half-wave plates in the forward path (H$_{2,3}$) and another in the backward path (H$_{4,5}$). This has the effect (in each path) of rotating the state sent by Bob by a certain angle, thus resulting in a probability of an erroneous detection in the CM as well as EM. Varying levels of `depolarisation', or more precisely, erroneous detection in each path can be independently introduced by rotating the angles of the half-wave plates accordingly. It is worth noting that this artificial method, while aimed at creating a symmetric error for the two bases used by Bob in CM, would create an asymmetry in the errors caused for different encodings used by Alice. However, we shall only be interested in the averaged error values for this experiment.
\section{Author Contributions}
J. S. S. contributed to the theoretical part as well as the writing of the manuscript, while S. worked mainly on the experimental aspects as well as, to a lesser degree, the writing of the manuscript.
\end{document}
\begin{document}
\title{Green functions and smooth distances}
\author[Feneuil]{Joseph Feneuil}
\address{Joseph Feneuil. Mathematical Sciences Institute, Australian National University, Acton, ACT, Australia}
\email{joseph.feneuil@anu.edu.au}
\author[Li]{Linhan Li}
\address{Linhan Li. School of Mathematics, The University of Edinburgh, Edinburgh, UK }
\email{linhan.li@ed.ac.uk}
\author[Mayboroda]{Svitlana Mayboroda}
\address{Svitlana Mayboroda. School of Mathematics, University of Minnesota, Minneapolis, MN 55455, USA}
\email{svitlana@math.umn.edu}
\thanks{S. Mayboroda was partly supported by the NSF RAISE-TAQS grant DMS-1839077 and the Simons foundation grant 563916, SM. J. Feneuil was partially supported by the Simons foundation grant 601941, GD and the ERC grant ERC-2019-StG 853404 VAREG.}
\maketitle
\begin{abstract}
In the present paper, we show that for an optimal class of elliptic operators with non-smooth coefficients on a 1-sided Chord-Arc domain, the boundary of the domain is uniformly rectifiable if and only if the Green function $G$ behaves like a distance function to the boundary, in the sense that $\abs{\frac{\nabla G(X)}{G(X)}-\frac{\nabla D(X)}{D(X)}}^2D(X) dX$ is the density of a Carleson measure, where $D$ is a regularized distance adapted to the boundary of the domain. The main ingredient in our proof is a corona
decomposition that is compatible with Tolsa’s $\alpha$-number of uniformly rectifiable sets. We believe that the method can be applied to many other problems at the intersection of PDE and geometric measure theory, and in particular, we are able to derive a generalization of the classical F. and M. Riesz theorem to the same class of elliptic operators as above.
\end{abstract}
\noindent{\bf Key words:} Uniform rectifiability, Chord-Arc domains, elliptic operators with non-smooth coefficients, Green functions, regularized distance, Dahlberg-Kenig-Pipher condition.
\noindent
\tableofcontents
\section{Introduction}
\label{S1}
\subsection{Motivation and predecessors}
We consider elliptic operators $L$ on a domain $\Omega\subset\mathbb R^n$. In recent years a finale of an enormous body of work brought a characterization of uniform rectifiability in terms of absolute continuity of harmonic measure (see \cite{AHMMT}, a sample of earlier articles: \cite{DJ}, \cite{HM1}, \cite{HM2}, \cite{HMU}, \cite{AHM3TV}, \cite{Azzam}, see also the related article \cite{NTV} which proves the David-Semmes conjecture in codimension 1 and is a key step for the converse established in \cite{AHM3TV}). It also became clear that this characterization has its restrictions, for it fails in the domains with lower dimensional boundaries and it requires, in all directions, restrictions on the coefficients -- see a discussion in \cite{DM2}. In these contexts, the Green function emerged as a more suitable object to define the relevant PDE properties. Already the work in \cite{A} and \cite{DM2} suggested a possibility of a Green function characterization of regularity of sets. However, factually, \cite{DM2} provided more than satisfactory ``free boundary" results and only weak ``direct" results (no norm control). The papers \cite{DLM1}, and \cite{DLM2}, and \cite{DFMGinfty} aimed at the desired quantitative version of such ``direct" results but were restricted to either Lipschitz graphs or sets with lower dimensional boundaries.
The primary goal of the article is to show that if $L$ is reasonably well-behaved, and $\Omega$ provides some access to its boundary, then the boundary of $\Omega$ is reasonably regular (uniformly rectifiable) if and only if the Green function behaves like a distance to the boundary.
Let us discuss some predecessors of this work, including the aforementioned ones, in more details. In \cite{A} Theorem VI, it is shown that the affine deviation of the Green function for the Laplace operator is related to the linear deviation of the boundary of the domain.
In \cite{DM2}, G.~David and the third author of the paper show that for a class of elliptic operators, the Green function can be well approximated by distances to planes, or by a smooth distance to $\partial\Omega$, if and only if $\partial\Omega$ is uniformly rectifiable. The bounds on the Green function given in \cite{DM2} are weak, more precisely, they carry no norm control of the sets where the Green function is close to a distance. Later, stronger and quantitative estimates on the comparison of the Green function and some distance functions are obtained in \cite{DFMGinfty}, \cite{DLM1}, and \cite{DLM2}.
In \cite{DLM1}, a quantitative comparison between the Green function and the distance function to the boundary is given for an optimal class of elliptic operators on the upper half-space. Moreover, the proximity of the Green function and the distance function is shown to be precisely controlled by the oscillation of the coefficients of the operator. Next, \cite{DLM2} extends the result of \cite{DLM1} to $\mathbb R^n\setminus\mathbb R^d$ with $d$ strictly less than $n$. But the methods employed in \cite{DLM1} and \cite{DLM2} seem to the authors difficult to be adapted to domains whose boundaries are rougher than Lipschitz graphs.
In \cite{DFMGinfty}, a bound for the difference of the Green function and smooth distances is obtained for sets with uniformly rectifiable boundaries, but its proof, which might appear surprising, is radically dependent on the fact that the boundary is of codimension strictly larger than 1. Also, the class of operators considered in \cite{DFMGinfty} is not optimal. So the instant motivation for the present work is to obtain a strong estimate on the Green function for an optimal class of operators, similar to the one considered in \cite{DLM1}, in a ``classical" setting: on domains with uniformly rectifiable boundaries of codimension 1.
The method employed here is completely different from \cite{DFMGinfty} or \cite{DLM1}, and has the potential to be applicable to many other problems at the intersection of PDE and geometric measure theory.
We should also mention that in \cite{HMT} and \cite{DLM1}, some Carleson measure estimates on the {\it second derivatives} of the Green function have been obtained, and that in \cite{A}, the second derivative of the Green function for the Laplace operator is linked to the regularity (uniform rectifiability) of the boundary of the domain. However, the result of \cite{A} is only for the Laplace operator, the class of elliptic operators considered in \cite{HMT} is more general but still
not optimal, and the estimates obtained in \cite{DLM1} are restricted to Lipschitz graph domains.
We think that our estimates might shed some light on proving an estimate on second derivatives of the Green function for an optimal class of elliptic operators on chord-arc domains.
For the ``free boundary" direction, since the weak type property of the Green function considered in \cite{DM2} already implies uniform rectifiability, one expects the strong estimate on the Green function that we consider in this paper to automatically give uniform rectifiability. However, linking the two conditions directly seems to be more subtle than it might appear, and we actually need to obtain uniform rectifiability from scratch. We point out that our result also holds for bounded domains, thus dispensing with the unboundedness assumption on the domain in \cite{DM2}.
All in all, this paper is a culmination of all of the aforementioned efforts, featuring a true equivalence (characterization) of geometry through PDEs, and an optimal class of operators.
\subsection{Statements of the main results.}
We take a domain $\Omega \subset \mathbb R^n$ whose boundary $\partial \Omega$ is $(n-1)$-Ahlfors regular (AR for shortness), which means that there exists a measure $\sigma$ supported on $\partial \Omega$ such that
\begin{equation} \label{defADR}
C_\sigma^{-1}r^{n-1} \leq \sigma(B(x,r)) \leq C_\sigma r^{n-1} \qquad \text{ for } x\in \partial \Omega, \, r \in (0, \diam \Omega).
\end{equation}
The domain $\Omega$ can be bounded or unbounded. In the unbounded case, $\diam\Omega=\infty$.
In the rest of the paper, $\sigma$ will always be an Ahlfors regular measure on $\partial \Omega$. It is known that the Ahlfors regular measures are the ones that can be written as $d\sigma = w d\mathcal H^{n-1}|_{\partial \Omega}$, where $\mathcal H^{n-1}|_{\partial \Omega}$ is the $n-1$ dimensional Hausdorff measure on $\partial \Omega$, and $w$ is a weight in $L^\infty(\partial \Omega, \mathcal H^{n-1}|_{\partial \Omega})$ such that $C^{-1} \leq w \leq C$ for some constant $C>0$.
We shall impose more assumptions on our domain. For both the ``free boundary" and the ``direct" results, we will assume that $\Omega$ is a 1-sided Chord Arc Domain (see Definition \ref{defi:CAD}). For the ``direct" result, we will rely on the assumption that $\partial \Omega$ is uniformly rectifiable (see \cite{DS1,DS2} and Section \ref{SUR} below), and thus ultimately assuming that $\Omega$ is a (2-sided) Chord Arc Domain. The optimality of the assumptions on $\Omega$ is discussed in more details in the end of this subsection. Since the dimension $n-1$ plays an important role in our paper, and in order to lighten the notation, we shall write $d$ for $n-1$.
Without any more delay, let us introduce the regularized distance to a set $\partial \Omega$. The Euclidean distance to the boundary is denoted by
\begin{equation}
\delta(X):= \dist(X,\partial \Omega).
\end{equation}
For $\beta >0$, we define
\begin{equation} \label{IdefD}
D_\beta(X):= \left(\int_{\partial \Omega} |X-y|^{-d-\beta} d\sigma(y) \right)^{-1/\beta} \qquad \text{ for } X\in\Omega.
\end{equation}
The fact that the set $\partial \Omega$ is $d$-Ahlfors regular is enough to have that
\begin{equation} \label{equivD}
C^{-1} \delta(X) \leq D_\beta(X) \leq C\delta(X) \quad \text{ for } X\in\Omega,
\end{equation}
where the constant depends on $C_\sigma$, $\beta$, and $n$. The proof is easy and can be found after Lemma 5.1 in \cite{DFM3}.
The notion of Carleson measure will be central all over our paper. We say that a quantity $f$ defined on $\Omega$ satisfies the Carleson measure condition - or $f\in CM_\Omega(M)$ for short - if there exists $M$ such that for any $x\in \partial \Omega$ and $r<\diam(\Omega)$,
\begin{equation} \label{defCarleson}
\iint_{B(x,r) \cap \Omega} |f(X)|^2 \delta(X)^{-1} dX \leq M r^{n-1}.
\end{equation}
Our operators are in the form $L=-\mathop{\operatorname{div}} \mathcal A \nabla$ and defined on $\Omega$. We shall always assume that they are uniformly elliptic and bounded, that is, there exists $C_{\mathcal A}>1$ such that
\begin{equation} \label{defelliptic}
\mathcal A(X)\xi \cdot \xi \geq C_{\mathcal A}^{-1} |\xi|^2 \qquad \text{ for } X\in \Omega, \, \xi \in \mathbb R^n,
\end{equation}
and
\begin{equation} \label{defbounded}
|\mathcal A(X)\xi \cdot \zeta| \leq C_{\mathcal A} |\xi||\zeta| \qquad \text{ for } X\in \Omega, \, \xi,\zeta \in \mathbb R^n.
\end{equation}
A weak solution to $Lu=0$ in $E \subset \Omega$ lies in $W^{1,2}_{loc}(E)$ and is such that
\begin{equation} \label{defsol}
\int_\Omega \mathcal A\nabla u \cdot \nabla \varphi \, dX = 0 \qquad \text{ for } \varphi \in C^\infty_0(E).
\end{equation}
If $\Omega$ has sufficient access to the boundary (and $\partial \Omega$ is $(n-1)$-Ahlfors regular), then for any ball $B$ centered on $\partial \Omega$ and any function $u$ in $W^{1,2}(B\cap \Omega)$, we have a notion of trace for $u$ on $B\cap \partial \Omega$. It is well known that if $u \in W^{1,2}(B\cap \Omega)$ is such that $\Tr(u) = 0$ on $B\cap \partial \Omega$, and if $u$ is a weak solution to $Lu=0$ on $B\cap \Omega$ with $L$ satisfying \eqref{defelliptic} and \eqref{defbounded}, then $u$ is continuous in $B\cap \Omega$ and can be continuously extended by 0 on $B\cap \partial \Omega$.
In addition to \eqref{defelliptic} and \eqref{defbounded}, we assume that our operators satisfy a weaker variant of the Dahlberg-Kenig-Pipher condition. The Dahlberg-Kenig-Pipher (DKP) condition was introduced by Dahlberg and shown to be sufficient for the $L^p$ solvability of the Dirichlet problem for some $p>1$ by Kenig and Pipher (\cite{KePiDrift}). It was also shown to be essentially necessary in \cite{CFK,MM}. The DKP condition says that the coefficient matrix $\mathcal A$ satisfies
\begin{equation}\label{DKP}
\delta(\cdot)\sup_{B(\cdot,\,\delta(\cdot)/2)}\abs{\nabla\mathcal A}\in CM_\Omega(M) \qquad\text{for some }M<\infty.
\end{equation}
Our assumption, slightly weaker than the classical DKP, is as follows.
\begin{definition}[Weak DKP condition]
An elliptic operator $L=-\mathop{\operatorname{div}} \mathcal A \nabla$ is said to satisfy the weak DKP condition with constant $M$ on $\Omega$ if there exists a decomposition $\mathcal A = \mathcal B+ \mathcal C$ such that
\begin{equation} \label{Main1a}
|\delta\nabla \mathcal B| + |\mathcal C| \in CM_\Omega(M).
\end{equation}
\end{definition}
Obviously, this condition is weaker than \eqref{DKP}: it allows for small Carleson perturbations and carries no supremum over the Whitney cubes. Moreover, we show in Lemma \ref{LellipB=ellipA} that the weak DKP condition self improves.
We are now ready for the statement of our main result.
\begin{theorem} \label{Main1}
Let $\beta >0$, $\Omega \subset \mathbb R^n$ be a 1-sided Chord-Arc Domain, and $L=-\mathop{\operatorname{div}} \mathcal A \nabla$ be a uniformly elliptic operator -- i.e., that verifies \eqref{defelliptic} and \eqref{defbounded} -- which satisfies the weak DKP condition with constant $M$ on $\Omega$. We write $G^{X_0}$ for the Green function of $L$ with pole at $X_0$.
The following are equivalent:
\begin{enumerate}[(i)]
\item $\Omega$ is a Chord-Arc Domain,
\item $\partial \Omega$ is uniformly rectifiable,
\item there exists $C\in (0,\infty)$ such that for any ball $B$ centered on the boundary, and for any positive weak solution $u$ to $Lu=0$ in $2B \cap \Omega$ for which $\Tr u = 0$ on $2B \cap \partial \Omega$, we have
\begin{equation} \label{Main1b}
\iint_{\Omega \cap B} \left| \frac{\nabla u}{u} - \frac{\nabla D_\beta}{D_\beta} \right|^2 D_\beta \, dX \leq C \sigma(B),
\end{equation}
\item there exists $C\in (0,\infty)$ such that for any $X_0 \in \Omega$ and for any ball $B$ centered on the boundary satisfying $X_0 \notin 2B$, we have
\begin{equation} \label{Main2b}
\iint_{\Omega \cap B} \left| \frac{\nabla G^{X_0}}{G^{X_0}} - \frac{\nabla D_\beta}{D_\beta} \right|^2 D_\beta \, dX \leq C \sigma(B),
\end{equation}
\item there exist $X_0\in \Omega$ and $C\in (0,\infty)$ such that for any ball $B$ centered on the boundary that satisfies $X_0\notin 2B$, we have \eqref{Main2b}.
\end{enumerate}
Moreover, the constants $C$ in \eqref{Main1b}--\eqref{Main2b} can be chosen to depend only on $C_\mathcal A$, $M$, the CAD constants of $\Omega$, $\beta$, and $n$.
\end{theorem}
\begin{remark}
The bound \eqref{Main1b} is a local one, meaning for instance that the bound will hold with a constant $C$ independent of the $B$ and the solution $u$ as long as $\Omega$ is chord-arc locally in $2B$ (that is, we only need the existence of Harnack chains and of corkscrew points in $2B\cap\Omega$) and the uniformly elliptic operator $L$ satisfies the weak DKP condition in $2B$.
\end{remark}
The equivalence $(i) \Longleftrightarrow (ii)$ is already well known, see Theorem 1.2 in \cite{AHMNT}. Moreover, $(iii) \implies (iv) \implies (v)$ is immediate. So we need only to prove $(ii)\implies(iii)$ and $(v)\implies(i)$ in Theorem \ref{Main1}.
When the domain is unbounded, we can use the Green function with pole at infinity instead of the Green function.
The Green function with pole at infinity associated to $L$ is the unique (up to a multiplicative constant) positive weak solution to $Lu=0$ with zero trace. See for instance Lemma 6.5 in \cite{DEM} for the construction (\cite{DEM} treats a particular case but the same argument works as long as we have CFMS estimates, see Lemma \ref{LCFMS} below). So we have that:
\begin{corollary} \label{Main2}
Let $\beta$, $\Omega$ and $L$ be as in Theorem \ref{Main1}. If $\Omega$ is unbounded, the following are equivalent:
\begin{enumerate}[(a)]
\item $\Omega$ is a Chord-Arc Domain,
\item $\partial \Omega$ is uniformly rectifiable,
\item there exists $C\in (0,\infty)$ such that for any ball $B$ centered on the boundary, we have
\begin{equation} \label{Main2d}
\iint_{\Omega \cap B} \left| \frac{\nabla G^{\infty}}{G^{\infty}} - \frac{\nabla D_\beta}{D_\beta} \right|^2 D_\beta \, dX \leq C \sigma(B),
\end{equation}
\end{enumerate}
\end{corollary}
For our proof of the ``direct" result, we need the fact that, for the same operators, the $L$-elliptic measure is $A_\infty$-absolutely continuous with respect to $\sigma$.
\begin{theorem}\label{Main3}
Let $\Omega$ be a Chord-Arc Domain, and let $L=-\mathop{\operatorname{div}} \mathcal A \nabla$ be a uniformly elliptic operator -- i.e., that verifies \eqref{defelliptic} and \eqref{defbounded} -- which satisfies the weak DKP condition with constant $M$ on $\Omega$.
Then the $L$-elliptic measure $\omega_L\in A_\infty(\sigma)$, i.e. there exist $C,\theta >0$ such that given an arbitrary surface ball $\Delta=B\cap \partial\Omega$, with $B=B(x,r)$, $x\in \partial\Omega$, $0<r<\diam( \partial\Omega)$, and for every Borel set $F\subset\Delta$, we have that
\begin{equation}\label{defAinfty}
\frac{\sigma(F)}{\sigma(\Delta)} \leq C \left(\omega_L^{X_{\Delta}}(F)\right)^\theta,
\end{equation}
where $X_{\Delta}$ is a corkscrew point relative to $\Delta$ (see Definition \ref{def1.cork}).
The constants $C$ and $\theta$ - that are called the intrinsic constants in $\omega_L \in A_\infty(\sigma)$ - depend only on $C_\mathcal A$, $M$, the CAD constants of $\Omega$, and $n$.
\end{theorem}
The above is known for operators satisfying the DKP condition \eqref{DKP} on Chord-Arc domains. In fact, it is shown in \cite{KePiDrift} that $\omega_L\in A_\infty(\sigma)$ for DKP operators on Lipschitz domains. But since the DKP condition is preserved in subdomains, and the Chord-Arc domains are well approximated by Lipschitz subdomains (\cite{DJ}), the $A_\infty$ property can be passed from Lipschitz subdomains to Chord-Arc domains (see \cite{JK}, or \cite{HMMTZ}). Moreover, combined with the stability of the $A_\infty$ property under Carleson perturbation of elliptic operators proved in \cite{CHMT}, it is also known for the elliptic operators $L=-\mathop{\operatorname{div}} \mathcal A \nabla$ for which $\mathcal A = \mathcal B + \mathcal C$, where
\begin{equation}\label{DKPhw}
\sup_{B(\cdot,\,\delta(\cdot)/2)}\{\abs{\delta \nabla\mathcal B} + \abs{\mathcal C}\} \in CM_\Omega(M) \qquad\text{for some }M<\infty.
\end{equation}
However, to the best of our knowledge, the $A_\infty$-absolute continuity of the elliptic measure was not proved explicitly for elliptic operators satisfying the slightly weaker condition \eqref{Main1a}.
We obtain Theorem \ref{Main3} as a consequence of the following result - which is another contribution of the article - and Theorem 1.1 in \cite{CHMT}.
\begin{theorem}\label{Main4}
Let $\Omega$ be a domain in $\mathbb R^n$ with uniformly rectifiable (UR) boundary of dimension $n-1$. Let $L$ be a uniformly elliptic operator which satisfies the weak DKP condition with constant $M$ on $\Omega$. Suppose that $u$ is a bounded solution of $Lu=0$ in $\Omega$. Then for any ball $B$ centered on the boundary, we have
\begin{equation}\label{Main4a}
\iint_{\Omega\cap B}\abs{\nabla u(X)}^2\delta(X)dX\le C\norm{u}_{L^\infty(\Omega)}^2\sigma(B\cap\partial\Omega),
\end{equation}
where the constant $C$ depends only on $n$, $M$, and the UR constants of $\partial\Omega$.
\end{theorem}
Notice that in this theorem, we completely dispense with the Harnack chain and corkscrew conditions (see Definitions \ref{def1.cork} and \ref{def1.hc}) for the domain. Previously, an analogous result was obtained for bounded {\it harmonic functions} in \cite{HMMDuke} (see also \cite{GMT} for the converse) and for DKP operators in \cite[Theorem 7.5]{HMMtrans}. The result for elliptic operators which satisfy the weak DKP condition is again not explicitly written anywhere. However, slightly changing the proofs of a combination of papers would give the result. For instance, we can adapt Theorem 1.32 in \cite{DFM3} to the codimension 1 case to prove Theorem \ref{Main3} in the flat case, then extend it to Lipschitz graphs by using the change of variable in \cite{KePiDrift}, and finally prove Theorem \ref{Main3} for all complements of uniformly rectifiable domains by invoking Theorem 1.19 (iii) in \cite{HMMtrans}.
Here, we claim that we can directly demonstrate Theorem \ref{Main4} using a strategy similar to our proof of Theorem \ref{Main1}. In Section~\ref{SecPfofThm4}, we explain how to modify the proof of Theorem \ref{Main1} to obtain Theorem \ref{Main4}.
By \cite{CHMT} Theorem 1.1, assuming that $\Omega$ is 1-sided CAD, the estimate \eqref{Main4a} implies that $\omega_L\in A_\infty(\sigma)$, and therefore our Theorem \ref{Main3} follows from Theorem \ref{Main4}.
Note that the bound \eqref{defAinfty} is a characterization of $A_\infty$, see for instance Theorem 1.4.13 in \cite{KenigB}.
Let us discuss in more details our assumptions for Theorem \ref{Main1}. In order to get the bound \eqref{Main1b}, we strongly require that the boundary $\partial \Omega$ is uniformly rectifiable and that the operator $L$ satisfies the weak DKP condition. We even conjecture that those conditions are necessary, that is, if $\partial \Omega$ is not uniformly rectifiable, then the bound \eqref{Main1b} holds for none of the weak DKP operators.
The corkscrew condition and the Harnack chain condition (see Definitions \ref{def1.cork} and \ref{def1.hc}) are only needed for the proof of Lemma \ref{lemlogk} - where we used the comparison principle - and for the implication $(iii)\implies(i)$ in Theorem \ref{Main1}. However, since most of our intermediate results can be proved without those conditions and could be of interest in other situations where the Harnack chain is not assumed (like - for instance - to prove Theorem \ref{Main4}), {\bf we avoided to use the existence of Harnack chains and of corkscrew points in all the proofs and the intermediate results} except for Lemma \ref{lemlogk} and in Section \ref{Sconv}, even if it occasionally slightly complicated the arguments.
These observations naturally lead to the question about the optimality of our conditions on $\Omega$, and more precisely, whether we can obtain the estimate \eqref{Main1b} assuming only uniform rectifiablity. The answer is no, as we can construct a domain $\Omega$ which has uniformly rectifiable boundary but is only semi-uniform (see Definition \ref{defSUD}) where \eqref{Main1b} fails. More precisely, we prove in Section \ref{Scount} that:
\begin{proposition}
There exists a semi-uniform domain $\Omega$ and a positive harmonic function $G$ on $\Omega$ such that \eqref{Main1b} is false.
\end{proposition}
But of course, assuming $\Omega$ is a Chord-Arc Domain is not necessary for \eqref{Main1b} since \eqref{Main1b} obviously holds when $\Omega = \mathbb R^n \setminus \mathbb R^{n-1} = \mathbb R^n_+ \cup \mathbb R^n_-$, because we can apply Theorem \ref{Main1} to both $\Omega_+ = \mathbb R^n_+$ and $\Omega_- = \mathbb R^n_-$ and then sum.
\subsection{Main steps of the proof of $(ii) \implies (iii)$}\label{Sproof}
In this section, we present the outline of the proof of $(ii) \implies (iii)$ in Theorem \ref{Main1}. More exactly, this subsection aims to link the results of all other sections of the paper in order to provide the proof.
The approach developed in this article is new and it is interesting by itself, because it gives an alternative to proofs that use projection and extrapolation of measures. Aside from Theorems \ref{Main1} and \ref{Main4}, we claim that our approach can be used to obtain a third proof of the main result from \cite{FenUR} and \cite{DM1}, which establishes the $A_\infty$-absolute continuity of the harmonic measure when $\Omega$ is the complement of a uniformly rectifiable set of low dimension and $L$ is a properly chosen degenerate operator.
Let $\Omega$ and $L$ be as in the assumptions of Theorem \ref{Main1}, and let $\mathcal B$ and $\mathcal C$ denote the matrices in \eqref{Main1a}. Take $B := B(x_0,r)$ to be a ball centered at the boundary, that is $x_0 \in \partial \Omega$, and then a non-negative weak solution $u$ to $Lu=0$ in $2B \cap \Omega$ such that $\Tr(u) = 0$ on $B \cap \partial \Omega$.
\noindent {\bf Step 1: From balls to dyadic cubes.} We construct a dyadic collection $\mathbb D_{\partial \Omega}$ of pseudo-cubes in $\partial \Omega$ in the beginning of Section \ref{SUR}, and a collection of Whitney regions $W_\Omega(Q),W_\Omega^*(Q)$ associated to $Q\in \mathbb D_{\partial \Omega}$ in the beginning of Section \ref{SWhitney}. We claim that \eqref{Main1b} is implied by the following estimate on dyadic cubes:
\begin{equation} \label{Main1c}
I := \sum_{\begin{subarray}{c} Q \in \mathbb D_{\partial \Omega} \\ Q \subset Q_0 \end{subarray}} \iint_{W_\Omega(Q)} \left| \frac{\nabla u}{u} - \frac{\nabla D_\beta}{D_\beta} \right|^2 \delta \, dX \leq C \sigma(Q_0)
\end{equation}
for any cube $Q_0\in \mathbb D_{\partial \Omega}$ satisfying $Q_0 \subset \frac87B \cap \partial\Omega$ and $\ell(Q_0) \leq 2^{-8}r$. It follows from the definition of $W^*_{\Omega}(Q)$ \eqref{defWO*} that
\begin{equation} \label{Main1d}
W^*_{\Omega}(Q) \subset \frac74B \qquad \text{ for } Q \subset Q_0
\end{equation}
and $Q_0$ as above.
We take $\{Q_0^i\}\subset\mathbb D_{\partial\Omega}$ as the collection of dyadic cubes that intersects $B \cap \partial\Omega$ and such that $2^{-9}r < \ell(Q_0^i) \leq 2^{-8}r$. There is a uniformly bounded number of them, each of them satisfies $Q_0^i \subset \frac32B \cap \partial\Omega$ and $\ell(Q_0^i) \leq 2^{-8}r$ and altogether, they verify
\[B \cap \Omega \subset \{X\in B, \, \delta(X) > 2^{-9}r\} \bigcup \Bigl(\bigcup_{i} \bigcup_{Q\in \mathbb D_{\partial \Omega}(Q_0^i)} W_\Omega(Q) \Bigr).\]
The estimate \eqref{Main1b} follows by applying \eqref{Main1c} to each $Q_0^i$ - using \eqref{equivD} and \eqref{defADR} when needed - and \eqref{Main1e} below to $\{X\in B, \, \delta(X) > 2^{-9}r\}$.
\noindent {\bf Step 2: Bound on a Whitney region.} In this step, we establish that if $E\subset \frac74B$ is such that $\diam(E) \leq K\delta(E)$, then
\begin{equation} \label{Main1e}
J_E:= \iint_{E} \left| \frac{\nabla u}{u} - \frac{\nabla D_\beta}{D_\beta} \right|^2 \delta \, dX \leq C_K \delta(E)^{n-1}.
\end{equation}
We could use Lemma \ref{lemflat} to prove this, but it would be like using a road roller to crack a nutshell, because it is actually easy. We first separate
\[J_E \leq \iint_{E} \left| \frac{\nabla u}{u}\right|^2 \delta \, dX + \iint_{E} \left|\frac{\nabla D_\beta}{D_\beta} \right|^2 \delta \, dX := J^1_E + J^2_E.\]
We start with $J_E^2$. Observe that $|\nabla [D_\beta^{-\beta}]| \leq (d+\beta) D_{\beta+1}^{-\beta-1}$, so $|\frac{\nabla D_\beta}{D_\beta}| \lesssim \delta^{-1}$ by \eqref{equivD}. We deduce that $J_E^2 \lesssim |E| \delta(E)^{-1} \lesssim \delta(E)^{n-1}$ as desired.
As for $J_E^1$, we construct
\[E^*:= \{X\in \Omega, \dist(X,E) \leq \delta(E)/100\}\subset \frac{15}{8}B,\] and then the Harnack inequality (Lemma \ref{Harnack}) and the Caccioppoli inequality (Lemma \ref{Caccio}) yield that
\[J_E^1 \lesssim \delta(E) (\sup_{E^*} u)^{-2} \iint_{E} |\nabla u|^2 dX \lesssim \delta(E)^{-1} (\sup_{E^*} u)^{-2} \iint_{E^*} u^2 dX \leq \delta(E)^{-1}|E^*| \lesssim \delta(E)^{n-1}.\]
The bound \eqref{Main1e} follows.
\noindent {\bf Step 3: Corona decomposition.} Let $Q_0$ be as in Step 1. One can see that we cannot apply \eqref{Main1e} to each $E=W_\Omega(Q)$ and still hope to get the bound \eqref{Main1c} for $I$. We have to use \eqref{Main1e} with parsimony. We first use a corona decomposition of $\mathbb D_{\partial \Omega}(Q_0)$, and we let the stopping time region stop whenever $\alpha(Q)$ or the angle between the approximating planes are too big. We choose $0 < \varepsilon_1 \ll \varepsilon_0 \ll 1$ and Lemma \ref{Lcorona} provides a first partition of $\mathbb D_{\partial \Omega}$ into bad cubes $\mathcal B$ and good cubes $\mathcal G$ and then a partition of $\mathcal G$ into a collection of coherent regimes $\{\mathcal S\}_{\mathcal S \in \mathfrak S}$.
Let $\mathcal B(Q_0):= \mathcal B \cap \mathbb D_{\partial \Omega}(Q_0)$ and then $\mathfrak S(Q_0) = \{\mathcal S \cap \mathbb D_{\partial \Omega}(Q_0)\}$.
Observe that $\mathfrak S(Q_0)$ contains the collection of $\mathcal S \in \mathfrak S$ such that $Q(\mathcal S) \subset Q_0$ and maybe {\bf one} extra element, in the case where $Q_0 \notin \mathcal B \cup \bigcup_{\mathcal S \in \mathfrak S} Q(\mathcal S)$, which is the intersection with $\mathbb D_{\partial \Omega}(Q_0)$ of the coherent regime $\mathcal S \in \mathfrak S$ that contains $Q_0$.
In any case $\mathfrak S(Q_0)$ is a collection of (stopping time) coherent regimes. In addition, Lemma \ref{Lcorona} shows that $\mathfrak S(Q_0)$ and $\mathcal B(Q_0)$ verifies the Carleson packing condition
\begin{equation} \label{Main1f}
\sum_{Q\in \mathcal B(Q_0)} \sigma(Q) + \sum_{\mathcal S \in \mathfrak S(Q_0)} \sigma(Q(\mathcal S)) \leq C \sigma(Q_0).
\end{equation}
We use this corona decomposition to decompose the sum $I$ from \eqref{Main1c} as
\begin{multline} \label{Main1g}
I = \sum_{Q \in \mathcal B(Q_0)} \iint_{W_\Omega(Q)} \left| \frac{\nabla u}{u} - \frac{\nabla D_\beta}{D_\beta} \right|^2 \delta \, dX + \sum_{\mathcal S \in \mathfrak S(Q_0)} \iint_{W_\Omega(\mathcal S)} \left| \frac{\nabla u}{u} - \frac{\nabla D_\beta}{D_\beta} \right|^2 \delta \, dX \\ := I_1 + \sum_{\mathcal S \in \mathfrak S(Q_0)} I_{\mathcal S},
\end{multline}
where $W_\Omega(\mathcal S) = \bigcup_{Q\in \mathcal S} W_\Omega (Q)$. For each cube $Q\in \mathcal B(Q_0)$, the regions $W_\Omega(Q)$ are included in $\frac74B$ and verify $\diam(W_\Omega(Q)) \leq 8 \delta(W_\Omega(Q)) \leq 8\ell(Q)$, so we can use \eqref{Main1e} and we obtain
\begin{equation} \label{Main1h}
I_1 \lesssim \sum_{Q\in \mathcal B(Q_0)} \ell(Q)^{n-1} \lesssim \sigma(Q_0)
\end{equation}
by \eqref{defADR} and \eqref{Main1f}.
\noindent {\bf Step 4: How to turn the estimation of $I_{\mathcal S}$ into a problem on $\mathbb R^n_+$.} Now, we take $\mathcal S$ in $\mathfrak S(Q_0)$, which is nice because $\partial \Omega$ is well approximated by a small Lipschitz graph $\Gamma_{\mathcal S}$ around any dyadic cube of $\mathcal S$ (see Subsection \ref{SSLipschitz} for the construction of $\Gamma_{\mathcal S}$). For instance, fattened versions of our Whitney regions $W^*_\Omega(Q)$, $Q\in \mathcal S$, are Whitney regions in $\mathbb R^n \setminus \Gamma_{\mathcal S}$ (see Lemma \ref{LclaimPib}). More importantly, at any scale $Q\in \mathcal S$, the local Wasserstein distance between $\sigma$ and the Hausdorff measure of $\Gamma_{\mathcal S}$ is bounded by the local Wasserstein distance between $\sigma$ and the best approximating plane, which means that $\Gamma_{\mathcal S}$ approximates $\partial \Omega$ better (or at least not much worse) than the best plane around any $Q\in \mathcal S$. To our knowledge, it is the first time that such a property on $\Gamma_{\mathcal S}$ is established. It morally means that $D_\beta(X)$ will be well approximated by
\[D_{\beta,\mathcal S}(X):= \left( \int_{\Gamma_{\mathcal S}} |X-y|^{-d-\beta} d\mathcal H^{n-1}(y) \right)^{-\frac1\beta}\]
whenever $X\in W_\Omega(\mathcal S)$, and that the error can be bounded in terms of Tolsa's $\alpha$-numbers.
Nevertheless, what we truly want is the fact that $\partial \Omega$ is well approximated by a plane - instead of a Lipschitz graph - from the standpoint of any $X \in W_\Omega(\mathcal S)$, because in this case we can use Lemma \ref{lemflat}.
Yet, despite this slight disagreement, $\Gamma_{\mathcal S}$ is much better than a random uniformly rectifiable set, because $\Gamma_{\mathcal S}$ is the image of a plane $P$ by a bi-Lipschitz map.
So we construct a bi-Lipschitz map $\rho_{\mathcal S} : \, \mathbb R^n \to \mathbb R^n$ that of course maps a plane to $\Gamma_{\mathcal S}$, but which also provides an explicit map from any point $X$ in $W_\Omega(\mathcal S)$ to a plane $\Lambda(\rho_{\mathcal S}^{-1}(X))$ that well approximates $\Gamma_{\mathcal S}$ - hence $\partial \Omega$ - from the viewpoint of $X$. So morally, we constructed $\rho_{\mathcal S}$ such that we have a function
\[X \mapsto \dist(X,\Lambda(\rho^{-1}_{\mathcal S}(X)))\]
which, when $X\in W_\Omega(\mathcal S)$, is a good approximation of
\[D_{\beta,\mathcal S}(X):= \left( \int_{\Gamma_{\mathcal S}} |X-y|^{-d-\beta} d\mathcal H^{n-1}(y) \right)^{-\frac1\beta}\]
in terms of Tolsa's $\alpha$-numbers.
We combine the two approximations to prove (see Lemma \ref{LprDb}, which is a consequence of Corollary \ref{CestD} and our construction of $\rho_{\mathcal S}$) that
\[\iint_{W_\Omega(Q)} \left| \frac{\nabla D_\beta(X)}{D_\beta(X)} - \frac{N_{\rho^{-1}_{\mathcal S}(X)}(X)}{\dist(X,\Lambda(\rho^{-1}_{\mathcal S}(X)))} \right|^2\, \delta(X) \, dX \leq C |\alpha_{\sigma,\beta}(Q)|^2 \sigma(Q) \qquad \text{ for } Q\in \mathcal S,\]
where $Y \to N_{\rho_{\mathcal S}^{-1}(X)}(Y)$ is the gradient of the distance to $\Lambda(\rho_{\mathcal S}^{-1}(X))$.
And since the $\alpha_{\sigma,\beta}(Q)$ satisfy the Carleson packing condition, see Lemma \ref{LalphabetaCM}, we deduce that
\begin{equation} \label{Main1i}
I_{\mathcal S} \leq 2I'_{\mathcal S} + C \sum_{Q\in \mathcal S} |\alpha_{\sigma,\beta}(Q)|^2 \sigma(Q) \leq 2 I'_{\mathcal S} + C \sigma(Q(\mathcal S))
\end{equation}
where
\[ I'_{\mathcal S}:= \iint_{W_\Omega(\mathcal S)} \left| \frac{\nabla u}{u} - \frac{N_{\rho^{-1}_{\mathcal S}(X)}(X)}{\dist(X,\Lambda(\rho^{-1}_{\mathcal S}(X)))} \right|^2 \delta \, dX.\]
We are left with $I'_{\mathcal S}$. We make the change of variable $(p,t) = \rho^{-1}_{\mathcal S}(X)$ in the integral defining $I'_{\mathcal S}$ and we obtain that
\[\begin{split}
I'_{\mathcal S} & = \iint_{\rho_{\mathcal S}^{-1}(W_\Omega(\mathcal S))} \left| \frac{(\nabla u) \circ \rho_{\mathcal S} (p,t)}{u \circ \rho_{\mathcal S} (p,t) } - \frac{N_{p,t}(\rho_{\mathcal S}(p,t))}{\dist(\rho_{\mathcal S}(p,t),\Lambda(p,t))} \right|^2 \delta \circ \rho_{\mathcal S}(p,t) \det\br{\Jac(p,t)}\, dt\, dp \\
& \le 2 \iint_{\rho_{\mathcal S}^{-1}(W_\Omega(\mathcal S))} \left|\frac{\nabla \br{u \circ \rho_{\mathcal S} (p,t)}}{u \circ \rho_{\mathcal S} (p,t)} - \frac{\Jac(p,t) N_{p,t}(\rho_{\mathcal S}(p,t))}{\dist(\rho_{\mathcal S}(p,t),\Lambda(p,t))} \right|^2 \, |t| \, dt\, dp ,
\end{split}\]
where $\Jac(p,t)$ is the Jacobian matrix of $\rho_{\mathcal S}$, which is close to the identity by Lemma \ref{LestonJ}. We have also used that $\delta \circ \rho_{\mathcal S}(p,t) \approx t$ since $\delta(X) \approx \dist(X,\Gamma_{\mathcal S})$ on $W_\Omega(\mathcal S)$ and the bi-Lipschitz map $\rho_{\mathcal S}^{-1}$ preserves this equivalence.
Even if the term
\[\frac{\Jac(p,t) N_{p,t}(\rho_{\mathcal S}(p,t))}{\dist(\rho_{\mathcal S}(p,t),\Lambda(p,t))}\]
looks bad, all the quantities inside are constructed by hand, and of course, we made them so that they are close to the quotient $\frac{\nabla d_P}{d_P}$, where $d_P$ is the distance to a plane that depends only on $\mathcal S$. With our change of variable, we even made it so that $P = \mathbb R^{n-1} \times \{0\}$, that is $\frac{\nabla d_P}{d_P} = \frac{\nabla t}{t}$. Long story short, Lemma \ref{LprNpt} gives that
\begin{equation} \label{Main1j}
I'_{\mathcal S} \leq 4I''_{\mathcal S} + \sigma(Q(\mathcal S))
\end{equation}
where
\[ I''_{\mathcal S}:= \iint_{\rho^{-1}_{\mathcal S}(W_\Omega(\mathcal S))} \left| \frac{\nabla v}{v} - \frac{\nabla t}{t}\right|^2 |t| \, dt\, dp\,, \qquad v=u\circ\rho_{\mathcal S}.\]
\noindent {\bf Step 5: Conclusion on $I_{\mathcal S}$ using the flat case.}
It is easy to see from the definition that Chord-Arc Domains are preserved by a bi-Lipschitz change of variable, and the new CAD constants depend only on the old ones and the Lipschitz constants of the bi-Lipschitz map. Since the bi-Lipschitz constants of $\rho_{\mathcal S}$ are less than 2 (and so uniform in $\mathcal S$), we deduce that $\rho^{-1}_{\mathcal S}(\Omega)$ is a Chord-Arc Domain with CAD constants that depend only on the CAD constants of $\Omega$.
Then, in Section \ref{SWhitney}, we constructed a cut-off function $\Psi_{\mathcal S}$ adapted to $W_\Omega(\mathcal S)$. We have shown in Lemma \ref{LprWS} that $\Psi_{\mathcal S}$ is 1 on $W_\Omega(\mathcal S)$ and supported in $W^*_\Omega(\mathcal S)$, on which we still have $\delta(X) \approx \dist(X,\Gamma_{\mathcal S})$. In Lemma \ref{LprPsiS}, we proved that the support of $\nabla \Psi_{\mathcal S}$ is small, in the sense that ${\mathds 1}_{\supp \nabla \Psi_{\mathcal S}} \in CM_\Omega$. What is important is that those two properties are preserved by a bi-Lipschitz change of variable, and thus $\Psi_{\mathcal S} \circ \rho_{\mathcal S}$ is as in Definition \ref{defcutoffboth}.
We want the support of $\Psi_{\mathcal S} \circ \rho_{\mathcal S}$ to be included in a ball $B_{\mathcal S}$ such that $2B_{\mathcal S}$ is a subset of our initial ball $B$, and such that the radius of $B_{\mathcal S}$ is smaller than $C\ell(Q(\mathcal S))$. But those facts are an easy consequence of \eqref{Main1d} and the definition of $W^*_\Omega(\mathcal S)$ (and the fact that $\rho_{\mathcal S}$ is bi-Lipschitz with the Lipschitz constant close to 1).
We also want $u\circ \rho_{\mathcal S}$ to be a solution of $L_{\mathcal S}(u\circ \rho_{\mathcal S}) = 0$ for a weak-DKP operator $L_{\mathcal S}$. The operator $L_{\mathcal S}$ is not exactly weak-DKP everywhere in $\rho^{-1}_{\mathcal S}(\Omega)$, but it is the case on the support of $\Psi_{\mathcal S}$ (see Lemma \ref{LprAS}), which is one condition that we need for applying Lemma \ref{lemflat}.
To apply Lemma \ref{lemflat} - or more precisely for Lemma \ref{lemlogk}, where one term from the integration by parts argument is treated - we need to show that $\omega_{L^*} \in A_\infty(\sigma)$. This is a consequence of Theorem \ref{Main3}. Indeed, since the adjoint operator $L^*$ is also a weak DKP operator on $\Omega$, Theorem \ref{Main3} asserts that $\omega_{L^*}\in A_\infty(\sigma)$, where $\sigma$ is an Ahlfors regular measure on $\partial\Omega$. A direct computation shows that for any set $E\subset\partial\Omega$ and any $X_0\in\Omega$,
\[
\omega_{L^*_{\mathcal S}}^{\rho_{\mathcal S}^{-1}(X_0)}\br{\rho_{\mathcal S}^{-1}(E)}=\omega_{L^*}^{X_0}(E),
\]
and since the mapping $\rho_{\mathcal S}$ is bi-Lipschitz, $\omega_{L^*}\in A_\infty(\sigma)$ implies that the $L^*_{\mathcal S}$-elliptic measure $\omega_{L^*_{\mathcal S}}\in A_\infty(\widetilde\sigma)$, where $\widetilde\sigma$ is an Ahlfors regular measure on the boundary of $\rho_{\mathcal S}^{-1}(\Omega)$. Moreover, the intrinsic constants in $\omega_{L^*_{\mathcal S}}\in A_\infty(\widetilde\sigma)$ depend only on the intrinsic constants in $\omega_{L^*}\in A_\infty(\sigma)$ because the bi-Lipschitz constants of $\rho_{\mathcal S}$ are bounded uniformly in $\mathcal S$.
All those verifications ensured that we can apply Lemma \ref{lemflat}, which entails that
\begin{equation} \label{Main1k}
I''_{\mathcal S} \leq \iint_{\rho_{\mathcal S}^{-1}(W_\Omega(\mathcal S))} |t| \left| \frac{\nabla (u\circ\rho_{\mathcal S})}{u\circ\rho_{\mathcal S}} - \frac{\nabla t}{t}\right|^2 (\Psi_{\mathcal S} \circ \rho_{\mathcal S})^2 \, dt\, dp \lesssim \ell(Q(\mathcal S))^{n-1} \lesssim \sigma(Q(\mathcal S)).
\end{equation}
\noindent {\bf Step 6: Gathering of the estimates.}
We let the reader check that \eqref{Main1f}--\eqref{Main1k} imply \eqref{Main1c}, and enjoy the end of the sketch of the proof!
\subsection{Organisation of the paper}
In Section \ref{SMisc}, we present the exact statement on our assumptions on $\Omega$, and we give the elliptic theory that will be needed in Section \ref{Sflat}.
Sections \ref{SUR} to \ref{Sflat} prove the implication $(ii) \implies (iii)$ in Theorem \ref{Main1}.
Section \ref{SUR} introduces the reader to uniform rectifiability and presents the corona decomposition that will be needed.
The corona decomposition gives a collection of (stopping time) coherent regimes $\{\mathcal S\}_{\mathcal S \in \mathfrak S}$. From Subsection \ref{SSLipschitz} to Section \ref{Srho}, $\mathcal S \in \mathfrak S$ is fixed.
We construct in Subsection \ref{SSLipschitz} a set $\Gamma_{\mathcal S}$ which is the graph of a Lipschitz function with small Lipschitz constant.
Section \ref{SWhitney} associates a ``Whitney'' region $W_\Omega(\mathcal S)$ to the coherent regime $\mathcal S$ so that from the standpoint of each point of $W_\Omega(\mathcal S)$, $\Gamma_{\mathcal S}$ and $\partial \Omega$ are well approximated by the same planes.
In Section \ref{SDbeta}, we apply the results from Section \ref{SWhitney} to compare $D_\beta$ with the distance to a plane that approximates $\Gamma_{\mathcal S}$.
Section \ref{Srho} constructs a bi-Lipschitz change of variable $\rho_{\mathcal S}$ that flattens $\Gamma_{\mathcal S}$, and we use the results from Sections \ref{SWhitney} and \ref{SDbeta} in order to estimate the difference
\[\frac{\nabla[D_\beta \circ \rho_{\mathcal S}]}{D_\beta \circ \rho_{\mathcal S}} - \frac{\nabla t}{t}\]
in terms of Carleson measure. Sections \ref{SUR} to \ref{Srho} are our arguments for the geometric side of the problem; in particular, the solutions to $Lu=0$ are barely mentioned (just to explain the effect of $\rho_{\mathcal S}$ on $L$).
Section \ref{Sflat} can be read independently and will contain our argument for the PDE side of the problem. Morally speaking, it proves Theorem \ref{Main1} $(ii)\implies (iii)$ when $\Omega = \mathbb R^n_+$.
Section \ref{SecPfofThm4} presents a sketch of proof of Theorem \ref{Main4}. The strategy is similar to our proof of Theorem \ref{Main1}, and in particular, many of the constructions and notations from Section~\ref{SUR} to Section~\ref{Sflat} are adopted in Section~\ref{SecPfofThm4}. But since we do not need to deal with the regularized distance $D_\beta$, the proof is much shorter.
Section \ref{Sconv} tackles the converse implication, proving $(v) \implies (i)$ in Theorem \ref{Main1}. The proof adapts an argument of \cite{DM2}, which states that if $G$ is sufficiently close to $D_\beta$, then $\partial \Omega$ is uniformly rectifiable. As mentioned earlier, we unfortunately did not succeed in linking our strong estimate \eqref{Main2b} directly to the weak ones assumed in \cite{DM2}, which explains why we needed to rewrite the argument.
We finish with Section \ref{Scount}, where we construct a semi-uniform domain and a positive harmonic solution on it for which our estimate \eqref{Main1b} is false.
\section{Miscellaneous}
\label{SMisc}
\subsection{Self improvement of the Carleson condition on $\mathcal A$}
\begin{lemma} \label{LellipB=ellipA}
Let $\mathcal A$ be a uniformly elliptic matrix on a domain $\Omega$, i.e. a matrix function that satisfies \eqref{defelliptic} and \eqref{defbounded} with constant $C_\mathcal A$.
Assume that $\mathcal A$ can be decomposed as $\mathcal A = \mathcal B + \mathcal C$ where
\begin{equation} \label{elBelA1}
|\delta \nabla \mathcal B| + |\mathcal C| \in CM_{\Omega}(M).
\end{equation}
Then there exists another decomposition $\mathcal A = \widetilde{\mathcal B} + \widetilde{\mathcal C}$ such that
\begin{equation} \label{elBelA2}
|\delta \nabla \widetilde{\mathcal B}| + |\widetilde{\mathcal C}| \in CM_{\Omega}(CM)
\end{equation}
with a constant $C>0$ that depends only on $n$, and $\widetilde{\mathcal B}$ satisfies \eqref{defelliptic} and \eqref{defbounded} with the same constant $C_{\mathcal A}$ as $\mathcal A$. In addition,
\begin{equation}\label{eqwtB}
|\delta \nabla \widetilde{\mathcal B}| \leq CC_{\mathcal A}.
\end{equation}
\end{lemma}
\noindent {\em Proof: }
Let $\mathcal A = \mathcal B + \mathcal C$ as in the assumption of the lemma. Let $\theta \in C^\infty_0(\mathbb R^n)$ be a nonnegative function such that $\supp \theta \subset B(0,\frac1{10})$, $\iint_{\mathbb R^n} \theta(X) dX = 1$. Construct $\theta_X(Y) := \delta(X)^{-n} \theta\big(\frac{Y-X}{\delta(X)}\big)$ and then
\begin{equation} \label{defwtB}
\widetilde{\mathcal B}(X) := \iint_{\mathbb R^n} \mathcal A(Y) \, \theta_X(Y) \, dY \quad \text{ and } \quad \widetilde{\mathcal C} := \mathcal A - \widetilde{\mathcal B}.
\end{equation}
We see that $\widetilde{\mathcal B}$ is an average of $\mathcal A$, so $\widetilde{\mathcal B}$ verifies \eqref{defelliptic} and \eqref{defbounded} with the same constant as $\mathcal A$.
So it remains to prove \eqref{elBelA2} and \eqref{eqwtB}. Observe that
\begin{multline*}
\nabla_X \theta_X(Y) = - n \delta(X)^{-n-1} \nabla \delta(X) \theta\Big(\frac{Y-X}{\delta(X)}\Big) - \delta(X)^{-n-1} (\nabla \theta)\Big(\frac{Y-X}{\delta(X)}\Big) \\ - \delta(X)^{-n-2} \nabla \delta(X) (Y-X) \cdot (\nabla \theta)\Big(\frac{Y-X}{\delta(X)}\Big).
\end{multline*}
Let $\Theta(Z)$ denote $Z\theta(Z)$, then $\mathop{\operatorname{div}}\Theta(Z)=n\theta(Z)+Z\cdot\nabla\theta(Z)$. So
\[
\delta(X) \nabla_X \theta_X(Y) = - \delta(X)^{-n} (\nabla \theta)\Big(\frac{Y-X}{\delta(X)}\Big) - \delta(X)^{-n} \nabla \delta(X) (\mathop{\operatorname{div}}\Theta)\Big(\frac{Y-X}{\delta(X)}\Big).
\]
From here, one easily sees that $\abs{\delta(X) \nabla_X \theta_X(Y)}$ is bounded by $C\delta(X)^{-n}$ uniformly in $X$ and $Y$, and thus
\[|\delta(X)\nabla \widetilde{\mathcal B}(X)| \lesssim \Yint\longdash_{B(X,\delta(X)/2)} |\mathcal A(Y)| dY \leq C_{\mathcal A}\,,\]
which proves \eqref{eqwtB}. Set $\Theta_X(Y) = \delta(X)^{-n} \Theta\Big(\frac{Y-X}{\delta(X)}\Big) $. Then
\[
\delta(X) \nabla_X \theta_X(Y) = - \delta(X) \nabla_Y \theta_X(Y) - \delta(X) \nabla \delta(X)\mathop{\operatorname{div}}_Y\Theta_X(Y).
\]
As a consequence,
\[\begin{split}
\delta(X) \nabla \widetilde{\mathcal B}(X) & = \iint_{\mathbb R^n} (\mathcal B + \mathcal C)(Y) \, \delta(X) \nabla_X \theta_X(Y) \, dY \\
& = \delta(X) \iint_{\mathbb R^n} \nabla \mathcal B(Y) \, \theta_X(Y) \, dY + \delta(X) \nabla \delta(X) \iint_{\mathbb R^n} \nabla \mathcal B(Y) \cdot \Theta_X(Y) \, dY \\
& \qquad + \iint_{\mathbb R^n} \mathcal C(Y) \, [\delta(X) \nabla_X \theta_X(Y)] \, dY.
\end{split}\]
We deduce that
\[|\delta(X) \nabla \widetilde{\mathcal B}(X)| \lesssim \Yint\longdash_{B(X,\delta(X)/10)} (\delta |\nabla \mathcal B(Y)| + |\mathcal C(Y)|) \, dY,\]
and so the fact that $|\delta\nabla \mathcal B| + |\mathcal C| \in CM_\Omega(M)$ is transmitted to $\delta \nabla \widetilde{\mathcal B}$, i.e. $\delta \nabla \widetilde{\mathcal B} \in CM_\Omega(CM)$.
As for $\widetilde{\mathcal C}$, since $\iint \theta_X(Y) dY = 1$, we have
\begin{equation}\label{wtCsplit}
\begin{split}
|\widetilde{\mathcal C}(X)| & = \left|\iint_{\mathbb R^n} (\mathcal A(Y) - \mathcal A(X)) \theta_X(Y) \, dY \right| \\
& \leq \iint_{\mathbb R^n} (|\mathcal B(Y) - \mathcal B(X)| + |\mathcal C(Y)| + |\mathcal C(X)|) \theta_X(Y) \, dY \\
& \lesssim |\mathcal C(X)| + \Yint\longdash_{B(X,\delta(X)/10)} (|\mathcal B(Y) - \mathcal B(X)| + |\mathcal C(Y)|) \, dY.
\end{split}
\end{equation}
By Fubini's theorem, to show that $\abs{\widetilde{\mathcal C}}\in CM_{\Omega}(CM)$, it suffices to show that for any ball $B$ centered on the boundary,
\[
\iint_{B\cap\Omega}\Yint\longdash_{B(Z,\delta(Z)/4)}\abs{\widetilde{\mathcal C}(X)}^2dX\frac{dZ}{\delta(Z)}\le CM\sigma(B\cap\partial\Omega).
\]
From this one sees that the terms on the right-hand side of \eqref{wtCsplit} that involve $\mathcal C$ can be easily controlled using $\abs{\mathcal C}\in CM_\Omega(M)$. So by the Cauchy-Schwarz inequality, it suffices to control
\begin{equation}\label{BLHS}
\iint_{Z\in B\cap\Omega}\Yint\longdash_{X\in B(Z,\delta(Z)/4)}\Yint\longdash_{Y\in B(X,\delta(X)/10)}\abs{\mathcal B(Y)-\mathcal B(X)}^2dYdX\frac{dZ}{\delta(Z)}.
\end{equation}
Notice that for all $X\in B(Z,\delta(Z)/4)$, $B(X,\delta(X)/10)\subset B(Z,\delta(Z)/2)$, and thus
\[\Yint\longdash_{Y\in B(X,\delta(X)/10)}\abs{\mathcal B(Y)-\mathcal B(X)}^2dY\lesssim \Yint\longdash_{Y\in B(Z,\delta(Z)/2)}\abs{\mathcal B(Y)-\mathcal B(X)}^2dY.\]
Therefore,
\begin{multline*}
\eqref{BLHS}\lesssim \iint_{Z\in B\cap\Omega}\Yint\longdash_{X\in B(Z,\delta(Z)/4)}\Yint\longdash_{Y\in B(Z,\delta(Z)/2)}\abs{\mathcal B(Y)-\mathcal B(X)}^2dYdX\frac{dZ}{\delta(Z)}\\
\lesssim\iint_{Z\in B\cap\Omega}\Yint\longdash_{X\in B(Z,\delta(Z)/2)}\abs{\nabla\mathcal B(X)}^2dX\delta(Z)dZ\\
\lesssim\iint_{X\in 2B\cap\Omega}\abs{\nabla\mathcal B(X)}^2\delta(X)dX\le CM\sigma(B\cap\partial\Omega)
\end{multline*}
by the Poincar\'e inequality, Fubini's theorem, and $\abs{\delta\nabla\mathcal B}\in CM_\Omega(M)$.
So again, the Carleson bound on $|\delta\nabla \mathcal B| + |\mathcal C|$ is given to $\widetilde{\mathcal C}$ as well. The lemma follows.
$\square$
\subsection{Definition of Chord-Arc Domains.}
\label{SSCAD}
\begin{definition}[{\bf Corkscrew condition}, \cite{JK}]\label{def1.cork}
We say that a domain $\Omega\subset \mathbb R^n$
satisfies the {\it corkscrew condition} with constant $c\in(0,1)$ if
for every surface ball $\Delta:=\Delta(x,r),$ with $x\in \partial\Omega$ and
$0<r<\diam(\Omega)$, there is a ball
$B(X_\Delta,cr)\subset B(x,r)\cap\Omega$. The point $X_\Delta\in \Omega$ is called
a {\bf corkscrew point} relative to $\Delta$ (or, for $x$ at scale $r$).
\end{definition}
\begin{definition}[{\bf Harnack Chain condition}, \cite{JK}]\label{def1.hc}
We say that $\Omega$ satisfies the {\it Harnack Chain condition} with constants $M$, $C>1$ if for every $\rho >0,\, \Lambda\geq 1$, and every pair of points
$X,X' \in \Omega$ with $\delta(X),\,\delta(X') \geq\rho$ and $|X-X'|<\Lambda\,\rho$, there is a chain of
open balls
$B_1,\dots,B_N \subset \Omega$, $N\leq M(1+\log\Lambda)$,
with $X\in B_1,\, X'\in B_N,$ $B_k\cap B_{k+1}\neq \emptyset$
and $C^{-1}\diam (B_k) \leq \dist (B_k,\partial\Omega)\leq C\diam (B_k).$ The chain of balls is called
a {\it Harnack Chain}.
\end{definition}
\begin{definition}[\bf 1-sided NTA and NTA]\label{def1.1nta}
We say that a domain $\Omega$ is a {\it 1-sided NTA domain} with constants $c,C,M$ if it satisfies the corkscrew condition with constant $c$ and Harnack Chain condition with constant $M, C$.
Furthermore, we say that $\Omega$ is an {\it NTA domain} if it is a 1-sided NTA domain and if, in addition, $\Omega_{\rm ext}:= \mathbb R^n\setminus \overline{\Omega}$ also satisfies the corkscrew condition.
\end{definition}
\begin{definition}[\bf 1-sided CAD and CAD]\label{defi:CAD}
A \emph{1-sided chord-arc domain} (1-sided CAD) is a 1-sided NTA domain with AR boundary. The 1-sided NTA constants and the AR constant are called the 1-sided CAD constants.
A \emph{chord-arc domain} (CAD, or 2-sided CAD) is an NTA domain with AR boundary. The 1-sided NTA constants, the corkscrew constant for $\Omega_{\rm ext}$, and the AR constant are called the CAD constants.
\end{definition}
Uniform rectifiability (UR) is a quantitative version of rectifiability.
\begin{definition}[\bf UR]
We say that $E$ is uniformly rectifiable if $E$ has big pieces of Lipschitz images, that is, if $E$ is $(n-1)$-Ahlfors regular \eqref{defADR}, and there exist $\theta, M>0$ such that, for each $x\in E$ and $r>0$, there is a Lipschitz mapping $\rho$ from the ball $B(0,r) \subset \mathbb R^d$ into $\mathbb R^n$ such that $\rho$ has Lipschitz norm $\leq M$ and
\[\sigma(E \cap B(x,r) \cap \rho(B_{\mathbb R^d}(0,r))) \geq \theta r^d.\]
\end{definition}
However, we shall not use the above definition. What we do require is the characterization of UR by Tolsa's $\alpha$-numbers (\cite{Tolsa09}), as well as a modification of the corona decomposition of uniformly rectifiable sets constructed in \cite{DS1}. See Section~\ref{SUR} for details.
We shall also need the following result.
\begin{lemma}\label{lem.UReqv}
Suppose that $\Omega\subset\mathbb R^n$ is a 1-sided chord-arc domain. Then the following are equivalent:
\begin{enumerate}
\item $\partial\Omega$ is uniformly rectifiable.
\item $\Omega_{\rm ext}$ satisfies the corkscrew condition, and hence, $\Omega$ is a chord-arc domain.
\end{enumerate}
\end{lemma}
That (1) implies (2) was proved in \cite{AHMNT}. That (2) implies (1) can be proved via the $A_\infty$ of harmonic measure (see \cite{AHMNT} Theorem 1.2), or directly as in \cite{DJ}.
\subsection{Preliminary PDE estimates}
\begin{lemma}[The Caccioppoli inequality] \label{Caccio}
Let $L=-\mathop{\operatorname{div}} A\nabla$ be a uniformly elliptic operator and $u\in W^{1,2}(2B)$ be a solution of $Lu=0$ in $2B$, where $B$ is a ball with radius $r$. Then there exists $C$ depending only on $n$ and the ellipticity constant of $L$ such that
\[
\fint_{B}\abs{\nabla u(X)}^2dX\le \frac{C}{r^2}\fint_{2B}\abs{u(X)}^2dX.
\]
\end{lemma}
\begin{lemma}[The Harnack inequality] \label{Harnack}
Let $L$ be as in Lemma \ref{Caccio} and let $u$ be a nonnegative solution of $Lu=0$ in $2B\subset\Omega$. Then there exists a constant $C\ge 1$ depending only on $n$ and the ellipticity constant of $L$ such that
\[
\sup_B u \le C\inf_B u.
\]
\end{lemma}
Write $L^*$ for the transpose of $L$ defined by
$L^*=-\mathop{\operatorname{div}} A^\top\nabla$, where $A^\top$ denotes the transpose matrix of $A$. Associated with $L$ and $L^*$ one can respectively construct the elliptic measures $\{\omega_L^X\}_{X\in \Omega}$ and $\{\omega_{L^*}^X\}_{X\in\Omega}$, and the Green functions $G_L$ and $G_{L^*}$ on domains with Ahlfors regular boundaries (cf. \cite{KenigB}, \cite{HMT}).
\begin{lemma}[The Green function]\label{lemagreen}
Suppose that $\Omega\subset\mathbb R^{n}$ is an open set such that $\partial\Omega$ is Ahlfors regular. Given an elliptic operator $L$, there exists a unique Green function $G_L(X,Y): \Omega \times \Omega \setminus \diag(\Omega) \to \mathbb R$
with the following properties:
$G_L(\cdot,Y)\in W^{1,2}_{\rm loc}(\Omega\setminus \{Y\})\cap C(\overline{\Omega}\setminus\{Y\})$,
$G_L(\cdot,Y)\big|_{\partial\Omega}\equiv 0$ for any $Y\in\Omega$,
and $L G_L(\cdot,Y)=\delta_Y$ in the weak sense in $\Omega$, that is,
\begin{equation*}\label{Greendef}
\iint_\Omega A(X)\,\nabla_X G_{L}(X,Y) \cdot\nabla\varphi(X)\, dX=\varphi(Y), \qquad\text{for any }\varphi \in C_c^\infty(\Omega).
\end{equation*}
In particular, $G_L(\cdot,Y)$ is a weak solution to $L G_L(\cdot,Y)=0$ in $\Omega\setminus\{Y\}$. Moreover,
\begin{equation}\label{eq2.green}
G_L(X,Y) \leq C\,|X-Y|^{2-n}\quad\text{for }X,Y\in\Omega\,,
\end{equation}
\begin{equation*}\label{eq2.green2}
c_\theta\,|X-Y|^{2-n}\leq G_L(X,Y)\,,\quad \text{if } \,\,\,|X-Y|\leq \theta\, \dist(X,\partial\Omega)\,, \,\, \theta \in (0,1)\,,
\end{equation*}
\begin{equation*}
\label{eq2.green3}
G_L(X,Y)\geq 0\,,\quad G_L(X,Y)=G_{L^*}(Y,X), \qquad \text{for all } X,Y\in\Omega\,,\,
X\neq Y.
\end{equation*}
\end{lemma}
The following lemma will be referred to as the CFMS estimates (cf. \cite{CFMS}, \cite{KenigB} for NTA domains, and \cite{HMT2} or \cite{DFMprelim2} for 1-sided CAD).
\begin{lemma}[The CFMS estimates] \label{LCFMS}
Let $\Omega$ be a 1-sided CAD domain. Let $L$ be an elliptic operator satisfying \eqref{defelliptic} and \eqref{defbounded}. There exists $C$ depending only on $n$, $C_\mathcal A$, and the $1$-sided CAD constants, such that for any $B:=B(x,r)$, with $x\in \partial\Omega$, $0<r<\diam(\partial\Omega)$ and $\Delta:=\Delta(x,r)$, we have the following properties.
\begin{enumerate}
\item The elliptic measure is non-degenerate, that is
\[C^{-1} \leq \omega_L^{X_\Delta}(\Delta) \leq C.\]
\item For
$X\in \Omega\setminus 2\,B$ we have
\begin{equation}\label{eq.CFMS}
\frac1C\omega_L^X(\Delta)
\leq
r^{n-1}G_L(X,X_\Delta) \leq C \omega_L^X(\Delta).
\end{equation}
\item If $0\leq u, v\in W^{1,2}_{\rm loc}(4\,B\cap\Omega)\cap C(\overline{4\,B\cap\Omega})$ are two nontrivial weak solutions of $Lu=Lv=0$ in $4\,B\cap\Omega$ such that $u=v=0$ in $4\,\Delta$, then
$$
C^{-1}\frac{u(X_\Delta)}{v(X_\Delta)}\le \frac{u(X)}{v(X)}\le C\frac{u(X_\Delta)}{v(X_\Delta)},\qquad\text{for all }X\in B\cap\Omega.
$$
$$
\end{enumerate}
\end{lemma}
\section{Characterization of the uniform rectifiability}
\label{SUR}
In all this section, we assume that $\partial \Omega$ is uniformly rectifiable, and we plan to prove a corona decomposition of the uniformly rectifiable set which is ``Tolsa's $\alpha$-number compatible''.
Instead of a long explanation of the section, which will not be helpful anyway to any reader who is not already fully familiar with the corona decomposition (C3) in \cite{DS1} and the Tolsa $\alpha$-number (see \cite{Tolsa09}), we shall only state below the results proved in the section (the definition of all the notions and notation will be ultimately given in the section below).
\begin{lemma} \label{LcoronaG}
Let $\partial \Omega$ be a uniformly rectifiable set. Given any positive constants $0 < \varepsilon_1 < \varepsilon_0 < 1$, there exists a disjoint decomposition $\mathbb D_{\partial\Omega} = \mathcal G \cup \mathcal B$ such that
\begin{enumerate}[(i)]
\item The ``good'' cubes $Q\in \mathcal G$ are such that $\alpha_\sigma(Q) \leq \varepsilon_1$ and
\begin{equation}
\sup_{y \in 999\Delta_Q} \dist(y,P_Q) + \sup_{p\in P_Q \cap 999B_Q} \dist(p,\partial \Omega) \leq \varepsilon_1 \ell(Q).
\end{equation}
\item The collection $\mathcal G$ of ``good'' cubes can be further subdivided into a disjoint family $\mathcal G = \displaystyle \bigcup_{\mathcal S \in \mathfrak S} \mathcal S$ of {\bf coherent} regimes such that for any $\mathcal S\in \mathfrak S$, there is a hyperplane $P:=P_{\mathcal S}$ and a $2\varepsilon_0$-Lipschitz function $\mathfrak b_{\mathcal S}:= \mathfrak b$ on $P$ such that
\begin{equation} \label{newstatement}
\int_{P\cap \Pi(2B_Q)} \dist(\mathfrak b(p),P_Q) \, dp \leq C \ell(Q) \sigma(Q) \alpha_\sigma(Q) \qquad \text{ for } Q\in \mathcal S,
\end{equation}
where $C$ depends only on $n$.
\item The cubes in $\mathcal B$ (the ``bad'' cubes) and the maximal cubes $Q(\mathcal S)$ satisfy the Carleson packing condition
\begin{equation}
\sum_{\begin{subarray}{c} Q\in \mathcal B \\ Q \subset Q_0 \end{subarray}} \sigma(Q) + \sum_{\begin{subarray}{c} \mathcal S\in \mathfrak S \\ Q(\mathcal S) \subset Q_0 \end{subarray}} \sigma(Q(\mathcal S)) \leq C_{\varepsilon_0,\varepsilon_1} \sigma(Q_0) \qquad \text{ for all } Q_0\in \mathbb D_{\partial \Omega}.
\end{equation}
\end{enumerate}
\end{lemma}
In the above lemma, $\sigma$ is the Ahlfors regular measure for $\partial \Omega$ given in \eqref{defADR}, $\mathbb D_{\partial \Omega}$ is a dyadic decomposition of $\partial \Omega$, $\Pi:=\Pi_{\mathcal S}$ is the orthogonal projection on $P_{\mathcal S}$, $P_Q$ is the best approximating plane of $\partial \Omega$ around $Q$, and $\alpha_\sigma$ is the Tolsa $\alpha$-number for $\sigma$. The novelty, which is not similar to any of the corona decompositions that the authors are aware of, is \eqref{newstatement}, which quantifies the difference between $\partial \Omega$ and the approximating graph $\Gamma_{\mathcal S}$ in terms of $\alpha$-numbers.
Corona decompositions are a useful and popular tool in the recent literature pertaining to uniformly rectifiable sets, see for instance \cite{DS1}, \cite{HMMDuke}, \cite{GMT}, \cite{AGMT}, \cite{BH}, \cite{AHMMT}, \cite{BHHLN}, \cite{MT}, \cite{CHM}, \cite{MPT} to cite only a few.
\subsection{Dyadic decomposition} \label{SSdyadic}
We construct a dyadic system of pseudo-cubes on $\partial \Omega$. In the presence of the Ahlfors regularity property, such a construction appeared for instance in \cite{David88}, \cite{David91}, \cite{DS1} or \cite{DS2}. We shall use the very nice construction of Christ \cite{Ch}, which allows us to bypass the need of a measure on $\partial \Omega$. More exactly, one can check that Christ's construction of the dyadic sets does not require a measure, and as such the sets are independent of the measure on $\partial \Omega$.
There exist a universal constant $0<a_0<1$ and a collection $\mathbb{D}_{\partial \Omega} = \cup_{k \in \mathbb Z} \mathbb{D}_{k}$ of Borel subsets of $\partial \Omega$, with the following properties. We write
\[\mathbb{D}_{k}:=\{Q_{j}^k\subset \mathbb D_{\partial \Omega}: j\in \mathfrak{I}_k\}, \]
where $\mathfrak{I}_k$ denotes some index set depending on $k$, but sometimes, to lighten the notation, we shall forget about the indices and just write $Q \in \mathbb{D}_k$ and refer to $Q$ as a cube (or pseudo-cube) of generation $k$. Such cubes enjoy the following properties:
\begin{enumerate}[(i)]
\item $\partial \Omega =\cup_{j} Q_{j}^k \,\,$ for any $k \in \mathbb Z$.
\item If $m > k$ then either $Q_{i}^{m}\subseteq Q_{j}^{k}$ or
$Q_{i}^{m}\cap Q_{j}^{k}=\emptyset$.
\item $Q_i^m \cap Q_j^m=\emptyset$ if $i\neq j$.
\item Each pseudo-cube $Q\in\mathbb{D}_k$ has a ``center'' $x_Q\in \partial \Omega$ such that
\begin{equation}\label{cube-ball}
\Delta(x_Q,a_02^{-k})\subset Q \subset \Delta(x_Q,2^{-k}).
\end{equation}
\end{enumerate}
Let us make a few comments about these cubes. We decided to use a dyadic scaling (by opposition to a scaling where the ratio of the sizes between a pseudo-cube and its parent is, in average, $\varepsilon < \frac12$) because it is convenient.
The price to pay for forcing a dyadic scaling is that if $Q \in \mathbb{D}_{k+\ell}$ and $R$ is the cube of $\mathbb{D}_k$ that contains $Q$ (it is unique by ($ii$), and it is called an ancestor of $Q$), then $R$ is {\em not} necessarily strictly larger (as a set) than $Q$.
We also assumed that $\partial \Omega$ was unbounded, to avoid separating cases. If the boundary $\partial \Omega$ is bounded, then $\mathbb D_{\partial \Omega} := \bigcup_{k\geq k_0} \mathbb D_k$ where $k_0$ is such that $2^{-k_0-1} \leq \diam(\Omega) \leq 2^{-k_0}$, and we let the reader check that this variation doesn't change a single argument in the sequel.
If $\mu$ is any doubling measure on $\partial \Omega$ - that is if $\mu(2\Delta) \leq C_\mu \mu(\Delta)$ for any boundary ball $\Delta \subset \partial \Omega$ - then we have the following extra property:
\begin{enumerate}[(i)] \addtocounter{enumi}{4}
\item $\mu(\partial Q_i^k) = 0$ for all $i,k$.
\end{enumerate}
In our construction, (i) and (iii) force the $Q_i^k$ to be neither open nor closed. But this last property (v) means that taking the interior or the closure of $Q_i^k$ instead of $Q_i^k$ would not matter, since the boundary carries no mass.
Let us introduce some extra notation. When $E \subset \partial \Omega$ is a set, $\mathbb D_{\partial \Omega}(E)$ is the sub-collection of dyadic cubes that are contained in $E$. When $Q\in \mathbb D_{\partial \Omega}$, we write $k(Q)$ for the generation of $Q$ and $\ell(Q)$ for $2^{-k(Q)}$, which is roughly the diameter of $Q$ by \eqref{cube-ball}. We also use $B_Q \subset \mathbb R^n$ for the ball $B(x_Q,\ell(Q))$ and $\Delta_Q$ for the boundary ball $\Delta(x_Q,\ell(Q))$ that appears in \eqref{cube-ball}. For $\kappa \geq 1$, the dilatation $\kappa Q$ is
\begin{equation} \label{defkappaQ}
\kappa Q = \{x\in \partial \Omega, \, \dist(x,Q) \leq (\kappa - 1) \ell(Q)\},
\end{equation}
which means that $\kappa Q \subset \kappa \Delta_Q \subset (\kappa+1) Q$.
The dyadic decomposition of $\partial \Omega$ will be the one which is the most used. However, we also use dyadic cubes for other sets, for instance to construct Whitney regions, and we use the same construction and notation as the one for $\partial \Omega$. In particular, we will use dyadic cubes in $\mathbb R^n$ and in a hyperplane $P$ that still satisfy \eqref{cube-ball} for the universal constant $a_0$ - i.e. the dyadic cubes are not real cubes - and the definition \eqref{defkappaQ} holds even in those contexts.
\subsection{Tolsa's $\alpha$ numbers}
Tolsa's $\alpha$ numbers estimate how far a measure is from a flat measure, using Wasserstein distances. We denote by $\Xi$ the set of affine $(n-1)$-planes in $\mathbb R^n$, and for each plane $P\in \Xi$, we write $\mu_P$ for the restriction to $P$ of the $(n-1)$-dimensional Hausdorff measure, that is, $\mu_P$ is the Lebesgue measure on $P$. A flat measure is a measure $\mu$ that can be written $\mu = c\mu_P$ where $c>0$ and $P \in \Xi$; the set of flat measures is then denoted by $\mathcal F$.
\begin{definition}[local Wasserstein distance]
If $\mu$ and $\sigma$ are two $(n-1)$-Ahlfors regular measures on $\mathbb R^n$, and if $y \in \mathbb R^n$ and $s>0$, we define
\[\dist_{y,s}(\mu,\sigma) := s^{-n} \sup_{f\in Lip(y,s)} \left| \int f\, d\mu - \int f \, d\sigma \right|\]
where $Lip(y,s)$ is the set of $1$-Lipschitz functions that are supported in $B(y,s)$.
If $Q \in \mathbb D_{\partial \Omega}$, then we set $\dist_{Q}(\mu,\sigma) := \dist_{x_Q,10^3\ell(Q)}(\mu,\sigma)$ and $Lip(Q) := Lip(x_Q,10^3\ell(Q))$, where $x_Q$ is as in \eqref{cube-ball}. Moreover, if $\sigma$ is an Ahlfors regular measure on $\partial \Omega$, then we set
\[\alpha_\sigma(Q) := \inf_{\mu \in \mathcal F} \dist_Q(\mu,\sigma).\]
\end{definition}
Note that
\begin{equation}\label{eqalphaQbdd}
0\le\alpha_\sigma(Q)\le C \qquad\text{for all }Q\in\mathbb D_{\partial\Omega}
\end{equation}
where $C<\infty$ depends only on the Ahlfors constants of $\mu$ and $\sigma$.
The uniform rectifiability of $\partial \Omega$ is characterized by the fact that, for any $(n-1)$-Ahlfors regular measure $\sigma$ supported on $\partial \Omega$, and any $Q_0\in\mathbb D_{\partial\Omega}$, we have
\begin{equation}\label{defUR}
\sum_{Q\in \mathbb D_{\partial \Omega}(Q_0)} \alpha_\sigma(Q)^2 \sigma(Q) \leq C \sigma(Q_0) \approx \ell(Q_0)^{n-1}
\end{equation}
and, for any $\varepsilon>0$,
\begin{equation}\label{geomlem}
\sum_{\begin{subarray}{c} Q\in \mathbb D_{\partial \Omega}(Q_0)\\ \alpha_\sigma(Q) > \varepsilon \end{subarray}} \sigma(Q) \leq C_\varepsilon \sigma(Q_0) \approx \ell(Q_0)^{n-1}.
\end{equation}
For a proof of these results, see Theorem 1.2 in \cite{Tolsa09}.
It will be convenient to introduce the following notation. Given $Q\in \mathbb D_{\partial \Omega}$, the quantities $c_Q$, $P_Q$, and $\mu_Q$ are such that
\begin{equation}\label{defmuQ}
\mu_Q = c_Q \mu_{P_Q} \quad \text{ and } \quad \dist_Q(\sigma,\mu_Q) \leq 2 \alpha_\sigma(Q),
\end{equation}
that is $\mu_Q$ is a flat measure which well approximates $\sigma$ (as long as $\alpha_\sigma(Q)$ is small). So it means that
\begin{equation} \label{ftestinalpha}
\left| \int f\, d\sigma - \int f \, d\mu_Q \right| \leq 2(10^3\ell(Q))^{n} \alpha_\sigma(Q) \qquad \text{ for } f\in Lip(Q).
\end{equation}
Let us finish the subsection with the following simple result.
\begin{lemma} \label{Lalphatobeta}
There exists $C>1$ depending only on $C_\sigma$ and $n$ such that if $Q \in \mathbb D_{\partial \Omega}$ and $\varepsilon\in (0,C^{-n})$ verify $\alpha_\sigma(Q) \leq \varepsilon$, then
\begin{equation} \label{Lab1}
\sup_{y \in 999\Delta_Q} \dist(y,P_Q) + \sup_{p\in P_Q \cap 999B_Q} \dist(p,\partial \Omega) \leq C \varepsilon^{1/n} \ell(Q).
\end{equation}
\end{lemma}
\noindent {\em Proof: } Assume that $\alpha_\sigma(Q) \leq \varepsilon = 8000^{-n} C_\sigma^{-1} \eta ^{n}$ with $\eta \in (0,1)$.
For a given point $y\in 999\Delta_Q$, we set the function $f_1(z):= \max\{0,\eta \ell(Q) - |y-z|\} \in Lip(Q)$. Observe that
\[\int f_1 \, d\sigma \geq \frac{\eta\ell(Q)}{2} \, \sigma\br{\Delta\big(y,\tfrac{\eta\ell(Q)}2\big)} \geq C_\sigma^{-1} \Big(\frac{\eta \ell(Q)}2\Big)^n,\]
and thanks to \eqref{ftestinalpha}
\[\begin{split} 8000^{-n} C_\sigma^{-1} \eta^{n} & > \alpha_\sigma(Q) \geq \frac12 \dist_Q(\sigma,\mu_Q) \\
& \geq (2000\ell(Q))^{-n} \left| \int f_1 \, d\sigma - \int f_1 \, d\mu_Q \right|.
\end{split}\]
By combining the two inequalities above, we have
\[\left| \int f_1 \, d\sigma - \int f_1 \, d\mu_Q \right| \leq C_\sigma^{-1} \Big(\frac{\eta \ell(Q)}4\Big)^n \leq \frac12 \int f_1 \, d\sigma.\]
So necessarily, the support of $f_1$ intersects the support of $\mu_Q$, that is $ \dist(y,P_Q) \leq \eta \ell(Q)$, and the first part of \eqref{Lab1} is proved. But notice also that the same computations force the constant $c_Q$ in the flat measure $\mu_Q = c_Q \mu_{P_Q}$ to be larger than $(2c_{n-1}C_\sigma)^{-1}$, where $c_{n-1}$ is the volume of the $(n-1)$-dimensional unit ball. We now take a point $p\in P_Q \cap 999B_Q$ and construct $f_2(z):= \max\{0,\eta \ell(Q) - |p-z|\} \in Lip(Q)$. We have
\[\begin{split}
\left| \int f_2 \, d\sigma - \int f_2 \, d\mu_Q \right| & \leq 2(1000\ell(Q))^n \alpha_\sigma(Q) < C_\sigma^{-1} \Big(\frac{\eta \ell(Q)}4\Big)^n < \int f_2 \, d\mu_Q.
\end{split}\]
So necessarily, the support of $f_2$ intersects the support of $\sigma$, that is $ \dist(p,\partial \Omega) \leq \eta \ell(Q)$. The lemma follows.
$\square$
\subsection{Corona decomposition}
We first introduce the notion of coherent subset of $\mathbb D_{\partial \Omega}$.
\begin{definition}
Let $\mathcal S \subset \mathbb D_{\partial \Omega}$. We say that $\mathcal S$ is {\bf coherent} if
\begin{enumerate}[(a)]
\item $\mathcal S$ contains a unique maximal element $Q(\mathcal S)$, that is, $Q(\mathcal S)$ contains all the other elements of $\mathcal S$ as subsets.
\item If $Q\in \mathcal S$ and $Q \subset R \subset Q(\mathcal S)$, then $R\in \mathcal S$.
\item Given a cube $Q \in \mathcal S$, either all its children belong to $\mathcal S$ or none of them do.
\end{enumerate}
\end{definition}
The aim of the section is to prove the following corona decomposition for a uniformly rectifiable boundary $\partial \Omega$.
\begin{lemma} \label{Lcorona}
Let $\partial \Omega$ be a uniformly rectifiable set. Given any positive constants $\varepsilon_1 < \varepsilon_0 \in (0,1)$, there exists a disjoint decomposition $\mathbb D_{\partial\Omega} = \mathcal G \cup \mathcal B$ such that
\begin{enumerate}[(i)]
\item The ``good'' cubes $Q\in \mathcal G$ are such that $\alpha_\sigma(Q) \leq \varepsilon_1$ and
\begin{equation} \label{defP'Qb}
\sup_{y \in 999\Delta_Q} \dist(y,P_Q) + \sup_{p\in P_Q \cap 999B_Q} \dist(p,\partial \Omega) \leq \varepsilon_1 \ell(Q).
\end{equation}
\item The collection $\mathcal G$ of ``good'' cubes can be further subdivided into a disjoint family $\mathcal G = \displaystyle \bigcup_{\mathcal S \in \mathfrak S} \mathcal S$ of {\bf coherent} regimes that satisfy
\begin{equation} \label{angleS}
\Angle(P_Q,P_{Q'}) \leq \varepsilon_0 \qquad \text{ for all } \mathcal S \in \mathfrak S \text{ and } Q,Q'\in \mathcal S.
\end{equation}
\item The cubes in $\mathcal B$ (the ``bad'' cubes) and the maximal cubes $Q(\mathcal S)$ satisfy the Carleson packing condition
\begin{equation} \label{packingBS}
\sum_{\begin{subarray}{c} Q\in \mathcal B \\ Q \subset Q_0 \end{subarray}} \sigma(Q) + \sum_{\begin{subarray}{c} \mathcal S\in \mathfrak S \\ Q(\mathcal S) \subset Q_0 \end{subarray}} \sigma(Q(\mathcal S)) \leq C_{\varepsilon_0,\varepsilon_1} \sigma(Q_0) \qquad \text{ for all } Q_0\in \mathbb D_{\partial \Omega}.
\end{equation}
\end{enumerate}
\end{lemma}
\begin{remark}
What we secretly expect is, in addition to \eqref{angleS}, to also have a control on the constants $c_Q$ - defined in \eqref{defmuQ} - for the cubes $Q$ that belong to the same $\mathcal S$. For instance, we would like to have
\[|c_Q-c_{Q(\mathcal S)}| \leq \varepsilon_0.\]
Imposing this extra condition while keeping the number of $\mathcal S$ low should be doable, but we do not need it, so we avoided this complication.
\end{remark}
The difficult part in the above lemma is to prove that \eqref{angleS} holds while keeping the number of coherent regimes $\mathcal S$ small enough so that \eqref{packingBS} stays true. To avoid a long and painful proof, we shall prove Lemma \ref{Lcorona} with the following result as a startpoint.
\begin{lemma}[\cite{DS1}] \label{LcoronaDS}
Let $\partial \Omega$ be a uniformly rectifiable set. Given any positive constants $\varepsilon_3 < \varepsilon_2 \in (0,1)$, there exists a disjoint decomposition $\mathbb D_{\partial\Omega} = \mathcal G' \cup \mathcal B'$ such that
\begin{enumerate}[(i)]
\item The ``good'' cubes $Q\in \mathcal G'$ are such that there exists an affine plane $P'_Q \in \Xi$ such that
\begin{equation} \label{defP'Q}
\dist(x,P'_Q) \leq \varepsilon_3 \ell(Q) \qquad \text{ for } x\in 999\Delta_Q
\end{equation}
\item The collection $\mathcal G'$ of ``good'' cubes can be further subdivided into a disjoint family $\mathcal G' = \displaystyle \bigcup_{\mathcal S' \in \mathfrak S'} \mathcal S'$ of {\bf coherent} stopping time regimes that satisfy
\begin{equation} \label{angleSDS}
\Angle(P'_Q,P'_{Q(\mathcal S')}) \leq \varepsilon_2 \qquad \text{ for all } \mathcal S'\in \mathfrak S' \text{ and } Q\in \mathcal S'.
\end{equation}
\item The cubes in $\mathcal B'$ and the maximal cubes $Q(\mathcal S')$ satisfy the Carleson packing condition
\begin{equation} \label{packingBSDS}
\sum_{\begin{subarray}{c} Q\in \mathcal B' \\ Q \subset Q_0 \end{subarray}} \sigma(Q) + \sum_{\begin{subarray}{c}\mathcal S' \in \mathfrak S' \\ Q(\mathcal S') \subset Q_0 \end{subarray}} \sigma(Q(\mathcal S')) \leq C_{\varepsilon_2,\varepsilon_3} \sigma(Q_0) \qquad \text{ for all } Q_0\in \mathbb D_{\partial \Omega}.
\end{equation}
\end{enumerate}
\end{lemma}
The proof of Lemma \ref{LcoronaDS} is contained in Sections 6 to 11 of \cite{DS1}, and the statement that we gave is the combination of Lemma 7.1 and Lemma 7.4 in \cite{DS1}. Lemma \ref{Lcorona} might already be stated and proved in another article, and we apologize if it were the case. Moreover, the proof of Lemma \ref{Lcorona} is probably obvious to anyone that is a bit familiar with this tool. However, every corona decomposition has its own small differences, and we decided to write our own using only the results of David and Semmes as a prerequisite.
\noindent {\em Proof of Lemma \ref{Lcorona} from Lemma \ref{LcoronaDS}.}
We then pick $\varepsilon_1$ and $\varepsilon_0$ small such that $\varepsilon_1\ll \varepsilon_0 \ll 1$. We apply Lemma \ref{LcoronaDS} with the choices of $\varepsilon_2:= \varepsilon_0/2$ and $\varepsilon_3= \varepsilon_1$. Note that we can choose
\begin{equation} \label{PQP'Q}
P'_Q = P_Q \qquad \text{ when } Q \in \mathcal G' \text{ and } \alpha_\sigma(Q) \leq C^{-n} \varepsilon_1^{n}
\end{equation}
if $C>0$ is the constant from Lemma \ref{Lalphatobeta}.
Since we applied Lemma \ref{LcoronaDS}, we have a first disjoint decomposition $\mathbb D_{\partial \Omega} = \mathcal G' \cup \mathcal B'$ and a second decomposition $\mathcal G' = \bigcup \mathcal S'$ into coherent regimes which satisfy \eqref{defP'Q}, \eqref{angleSDS}, and \eqref{packingBSDS}.
We define $\mathcal G$ as
\[\mathcal G:= \mathcal G' \cap \{Q\in \mathbb D, \, \alpha_\sigma(Q) \leq C^{-n} \varepsilon_1^{n}\}\]
where $C$ is the constant in Lemma \ref{Lalphatobeta}. Of course, it means that $\mathcal B:= \mathcal B' \cup (\mathcal G'\setminus \mathcal G)$. The coherent regimes $\mathcal S'$ may not be contained in $\mathcal G$, that is $\mathcal S' \cap \mathcal G$ may not be a coherent regime anymore.
So we split further $\mathcal S' \cap \mathcal G$ into a disjoint union of (stopping time) coherent regimes $\{\mathcal S_{i}\}_{i\in I_{\mathcal S'}}$ that are maximal in the sense that the minimal cubes of $\mathcal S_{i}$ are those for which at least one child belongs to $\mathbb D \setminus (\mathcal S' \cap \mathcal G)$.
The collection $\{\mathcal S\}_{\mathcal S \in \mathfrak S}$ is then the collection of all the $\mathcal S_{i}$ for $i\in I_{\mathcal S'}$ and $\mathcal S' \in \mathfrak S'$.
It remains to check that the $\mathcal G$, $\mathcal B$ and $\{\mathcal S\}_{\mathcal S \in \mathfrak S}$ that we just built satisfy \eqref{angleS} and \eqref{packingBS}. For the former, we use the fact that a regime $\mathcal S$ is necessarily included in a $\mathcal S'$, so for any $Q\in \mathcal S$, we have
\begin{multline} \Angle(P_Q,P_{Q(\mathcal S)}) \leq \Angle(P_Q,P_{Q(\mathcal S')}) + \Angle(P_{Q(\mathcal S)},P_{Q(\mathcal S')}) \\
= \Angle(P'_Q,P'_{Q(\mathcal S')}) + \Angle(P'_{Q(\mathcal S)},P'_{Q(\mathcal S')}) \leq 2 \varepsilon_2 = \varepsilon_0\end{multline}
by \eqref{PQP'Q}, \eqref{angleSDS}, and our choice of $\varepsilon_2$. The fact that $\mathcal B$ satisfies the Carleson packing condition
\begin{equation} \label{packingB} \sum_{\begin{subarray}{c} Q\in \mathcal B \\ Q \subset Q_0 \end{subarray}} \sigma(Q) \leq C_{\varepsilon_0,\varepsilon_1} \sigma(Q_0) \qquad \text{ for all } Q_0\in \mathbb D_{\partial \Omega}
\end{equation}
is an immediate consequence of the definition of $\mathcal B$, \eqref{geomlem}, and \eqref{packingBSDS}. Finally, by the maximality of the coherent regimes $\mathcal S$, then either $Q(\mathcal S)$ is the maximal cube of a coherent regime from the collection $\{\mathcal S'\}_{\mathcal S'\in \mathfrak S'}$, or (at least) the parent or one sibling of $Q(\mathcal S_i)$ belongs to $\mathcal B$. Therefore, if $Q^*$ denotes the parent of a dyadic cube $Q$, then for any $Q_0\in \mathbb D_{\partial \Omega}$,
\[ \sum_{\begin{subarray}{c} \mathcal S\in \mathfrak S \\ Q(\mathcal S) \subset Q_0 \end{subarray}} \sigma(Q(\mathcal S)) \leq \sum_{\begin{subarray}{c} \mathcal S'\in \mathfrak S' \\ Q(\mathcal S') \subset Q_0 \end{subarray}} \sigma(Q(\mathcal S')) + \sum_{\begin{subarray}{c} Q\in \mathcal B \\ Q \subset Q_0 \end{subarray}} \sigma(Q^*) \lesssim \sigma(Q_0)\]
because of the Carleson packing conditions \eqref{packingBSDS} and \eqref{packingB}, and because $\sigma(Q^*) \approx \ell(Q)^{n-1} \approx \sigma(Q)$. The lemma follows.
$\square$
\subsection{The approximating Lipschitz graph} \label{SSLipschitz}
In this subsection, we show that each coherent regime given by the corona decomposition is well approximated by a Lipschitz graph. We follow the outline of Section 8 in \cite{DS1} except that we are a bit more careful about our construction in order to obtain Lemma \ref{Lbbalpha} below. That is, instead of just wanting the Lipschitz graph $\mathcal Gamma_\mathcal S$ to be close to $\partial \Omega$, we aim to prove that the Lipschitz graph is an approximation of $\partial \Omega$ at least as good as the best plane.
Pick $0 < \varepsilon_1 \ll \varepsilon_0 \ll 1$, and then construct the collection of coherent regimes $\mathfrak S$ given by Lemma \ref{Lcorona}. Take $\mathcal S$ to be either in $\mathfrak S$, or a coherent regime included in an element of $\mathfrak S$, and let it be fixed. Set $P := P_{Q(\mathcal S)}$ and define $\Pi$ as the orthogonal projection on $P$. Similarly, we write $P^\bot$ for the linear plane orthogonal to $P$ and $\Pi^\bot$ for the projection onto $P^\bot$. We shall also need the function $d$ on $P$: for $p\in P$, define
\begin{equation} \label{defdx}
d(p) := \inf_{Q\in \mathcal S} \{ \dist(p,\Pi(2B_Q)) + \ell(Q)\}.
\end{equation}
We want to construct a Lipschitz function $b: \, P \mapsto P^\bot$. First, we prove a small result. We claim that for $x,y\in \partial \Omega \cap 999B_{Q(\mathcal S)}$, we have
\begin{equation} \label{claimPibot}
|\Pi^\bot(x) - \Pi^\bot(y)| \leq 2 \varepsilon_0 |\Pi(y) - \Pi(x)| \qquad \text{ whenever } |x-y| > 10^{-3} d(\Pi(x)).
\end{equation}
Indeed, with such choices of $x$ and $y$, we can find $Q\in \mathcal S$ such that
\[0 < |x-y| \approx \dist(\Pi(x),\Pi(Q)) + \ell(Q)\]
and by taking an appropriate ancestor of $Q$, we find $Q^*$ such that $|x-y| \approx \ell(Q^*)$. Since $x,y\in 999B_{Q(\mathcal S)}$, we can always take $Q^* \subset Q(\mathcal S)$ - that is $Q^* \in \mathcal S$ thanks to the coherence of $\mathcal S$ - and $x,y\in 999B_{Q^*}$. Due to \eqref{defP'Qb}, we deduce that
\[\dist(x,P_{Q^*}) + \dist(y,P_{Q^*}) \leq \varepsilon_1 \ell(Q^*) \ll \varepsilon_0 |x-y|\]
if $\varepsilon_1/\varepsilon_0$ is sufficiently small. Since $\Angle(P_{Q^*},P) \leq \varepsilon_0$ by \eqref{angleS}, we conclude
\[|\Pi^\bot(x) - \Pi^\bot(y)| \leq \dist(x,P_{Q^*}) + \dist(y,P_{Q^*}) + \frac12 \varepsilon_0 |x-y| \leq \frac34 \varepsilon_0 |x-y| \leq \varepsilon_0 |\Pi(x) - \Pi(y)|\]
if $\varepsilon_0$ is small enough. The claim \eqref{claimPibot} follows.
Define the closed set
\begin{equation}\label{def Z}
Z = \{p\in P, \, d(p) = 0\}.
\end{equation}
The Lipschitz function $b$ will be defined by two cases.
\noindent{\bf Case $d(p) = 0$.}
That is, $p\in Z$. In this case, since $\partial \Omega$ is closed, there necessarily exists $x\in \partial \Omega$ such that $\Pi(x) = p$. Moreover, \eqref{claimPibot} shows that such $x$ is unique, that is $\Pi$ is a one to one map on $Z$, and we define
\begin{equation} \label{defAonZ}
b(p) := \Pi^\bot(\Pi^{-1}(p)) \qquad \text{ for } p\in Z.
\end{equation}
\noindent{\bf Case $d(p)>0$.} We partition $P \setminus Z$ with a union of dyadic cubes, in the spirit of a Whitney decomposition, as follows. Construct the collection $\mathcal W_P$ as the subset of the dyadic cubes of $P$ that are maximal for the property
\begin{equation} \label{prR1}
0 < 21 \ell(R) \leq \inf_{q\in 3R} d(q).
\end{equation}
By construction, $d(p) \approx d(q)$ whenever $p,q\in 3R\in \mathcal W_P$. Moreover, let us check that
\begin{equation} \label{prR3}
\ell(R_1)/\ell(R_2) \in \{1/2,1,2\} \quad \text{ whenever } R_1,R_2 \in \mathcal W_P \text{ are such that } 3R_1 \cap 3R_2 \neq \emptyset.
\end{equation}
Indeed, if $R \in \mathcal W_P$ and $S$ is such that $\ell(S) = \ell(R)$ and $3S \cap 3R \neq \emptyset$, then $3S \subset 9R$ and hence
\[20\ell(S) = 20\ell(R) \leq \inf_{p\in 3R} d(p) \leq \inf_{p\in 9R} d(p) + 6\ell(R) \leq \inf_{p\in 3S} d(p) + 6\ell(S).\]
So every child of $S$ has to satisfy \eqref{prR1}, which proves \eqref{prR3}.
By construction of $\mathcal W_P$, for each $R\in \mathcal W_P$, we can find $Q_R\in \mathcal S$ such that
\begin{multline} \label{prR2}
\dist(R,\Pi(Q_R)) \leq (2^6-2) \ell(R), \quad \ell(Q_R) \leq 2^5 \ell(R),\\
\text{ and either } Q_R =Q(\mathcal S) \text{ or } \ell(Q_R) = 2^5\ell(R) \approx \inf_{q\in 2R} d(q) \approx \sup_{q\in 2R} d(q).
\end{multline}
We want to associate each $R$ with an affine function $b_R: \, P \mapsto P^\bot$ such that the image of the function $\mathfrak b_R$ defined as $\mathfrak b_R(p) = (p,b_R(p))$ approximates $\partial \Omega$ well. First, we set
\begin{equation} \label{defbRout}
b_R \equiv 0 \quad \text{ when } Q_R = Q(\mathcal S).
\end{equation}
When $Q_R \neq Q(\mathcal S)$, we take $b_R$ such that $\mathfrak b_R$ verifies
\begin{equation} \label{defbR}
\int_{999\Delta_{Q(\mathcal S)}} |y - \mathfrak b_R(\Pi(y))| {\mathds 1}_{\Pi(y) \in 2R} \, d\sigma(y) := \min_{a} \int_{999\Delta_{Q(\mathcal S)}} |y - \mathfrak a(\Pi(y))| {\mathds 1}_{\Pi(y) \in 2R} \, d\sigma(y),
\end{equation}
where the minimum is taken over the affine functions $a:\, P \mapsto P^\bot$ and $\mathfrak a(p) := (p,a(p))$. The uniqueness of the minimum is not guaranteed, but it does not matter for us. The existence is guaranteed, because $R\subset \Pi(3B_{Q_R}) \subset P \cap 999B_{Q_R}$ by \eqref{prR2}, and hence \eqref{defP'Qb} entails that the graphs of the functions $a$ that almost realize the infimum are very close to the plane $P_{Q_R}$, which makes a small angle with $P$. The same argument shows that
\begin{equation} \label{prbR2}
\sup_{y \in 999\Delta_{Q_R}} |y - \mathfrak b_R(\Pi(y))| + \sup_{p\in \Pi(999B_{Q_R})} \dist(\mathfrak b_R(p),\partial \Omega) \leq C \varepsilon_1 \ell(Q_R)
\end{equation}
for a constant $C>0$ that depends only on $n$, and
\begin{equation} \label{prbR}
\text{$b_R$ is $1.1\varepsilon_0$-Lipschitz}
\end{equation}
if $0< \varepsilon_1 \ll \varepsilon_0 \ll 1$. We associate to the collection $\mathcal W_P$ a partition of unity $\{\varphi_R\}_{R\in \mathcal W_P}$ such that $\varphi_R \in C^\infty_0(2R)$, $|\nabla \varphi_R| \lesssim \ell(R)^{-1}$, and $\sum_R \varphi_R \equiv 1$ on $P \setminus Z$.
We then define
\begin{equation} \label{defAonZc}
b(p) := \sum_{R\in \mathcal W_P} \varphi_R(p) b_{R}(p) \qquad \text{ for } p\in P \setminus Z.
\end{equation}
Due to \eqref{prR3}, the sum in \eqref{defAonZc} is finite and thus the quantity $b(p)$ is actually well defined.
For $p\in P$, we define $\mathfrak b(p) := (p,b(p))$ to be the graph of $b$.
\begin{lemma} \label{LALip}
The function $b$ defined by \eqref{defAonZ} and \eqref{defAonZc} is $2\varepsilon_0$-Lipschitz and supported in $P\cap 4B_{Q(\mathcal S)}$.
\end{lemma}
\noindent {\em Proof: } Recall that the property \eqref{prR2} implies that $2R \subset P \cap \Pi(3B_{Q_R})$ as long as $Q_R \neq Q(\mathcal S)$.
So if $p\notin P\cap \Pi(3B_{Q(\mathcal S)})$ and $R\in \mathcal W_P$ is such that $p\in 2R$, we necessarily have $Q_R = Q(\mathcal S)$ and then $b_R(p) = 0$ by \eqref{defbRout}. We conclude that $b(p) = 0$ and thus that $b$ is supported in $P\cap \Pi(3B_{Q(\mathcal S)}) \subset P \cap 4B_{Q(\mathcal S)}$.
Now, we want to show that $b$ is Lipschitz. The fact that $b$ is Lipschitz on $Z$ is an immediate consequence from the definition \eqref{defAonZ} and \eqref{claimPibot}. Let us prove now that $b$ is Lipschitz on the interior of $2R_0$ for every $R_0\in \mathcal W_P$. Take $R_0 \in \mathcal W_P$ and $p\in 2R_0 \setminus \partial (2R_0)$. Then, since $\sum \nabla \varphi_R(p) = 0$, we have
\begin{equation} \label{LALip1}\begin{split}
|\nabla b(p)| & = \left| \sum_{\begin{subarray}{c} R\in \mathcal W_P \\ 2R \cap 2R_0 \neq \emptyset \end{subarray}} \varphi_R(p) \nabla b_{Q_R}(p) + \sum_{\begin{subarray}{c} R\in \mathcal W_P \\ 2R \cap 2R_0 \neq \emptyset \end{subarray}} b_{Q_R}(p) \nabla \varphi_R(p) \right| \\
& \leq \sup_{\begin{subarray}{c} R\in \mathcal W_P \\ 2R \cap 2R_0 \neq \emptyset \end{subarray}} |\nabla b_{Q_R}(p)| + \sum_{\begin{subarray}{c} R\in \mathcal W_P \\ 2R \cap 2R_0 \neq \emptyset \end{subarray}} |\nabla \varphi_R(p)| |b_{Q_R}(p) - b_{Q_{R_0}}(p)| \\
& \leq 1.1\varepsilon_0 + C \ell(R_0)^{-1} \sup_{\begin{subarray}{c} R\in \mathcal W_P \\ 2R \cap 2R_0 \neq \emptyset \end{subarray}} |b_{Q_R}(p) - b_{Q_{R_0}}(p)|
\end{split}\end{equation}
by \eqref{prbR} and \eqref{prR3}. We can assume that $p\in 2R_0 \subset P\cap 4B_{Q(\mathcal S)}$, because we have already shown that $b(p) = 0$ otherwise. So due to \eqref{prR2} and \eqref{prR3}, both $Q_R$ and $Q_{R_0}$ are close to $2R_0$, in the sense that
\[3R_0 \subset P \cap 999\Pi(B_{Q_R}),\]
so we can invoke \eqref{prbR2} to say that $\dist(\mathfrak b_{Q_R}(p),P_{Q_{R_0}}) \lesssim \varepsilon_1 \ell(Q_{R_0})$ and then
\begin{equation} \label{LALip2}
|b_{Q_R}(p) - b_{Q_{R_0}}(p)| \lesssim \varepsilon_1 \ell(Q_{R_0}).
\end{equation}
So if $\varepsilon_1 \ll \varepsilon_0$ is small enough, \eqref{LALip1} becomes $|\nabla b(p)| \leq 2\varepsilon_0$.
We proved that $b$ is Lipschitz on $Z$ and $P\setminus Z$, so it remains to check that $b$ is continuous at every point in $\partial Z$. Take $z\in \partial Z$ and set $x:= \mathfrak b(z) \in \partial \Omega$. Take also $p\in P\setminus Z$ such that $|p-z| \ll 1$. Due to \eqref{prbR2} and \eqref{LALip2}, we have the existence of $y\in \partial \Omega$ such that, for any $R\in \mathcal W_P$ satisfying $p\in 2R$, we have
\begin{equation} \label{LALip3}
|y - \mathfrak b_{Q_R}(p)| \lesssim \varepsilon_1 \ell(R) \lesssim \varepsilon_1 d(p) \leq \varepsilon_1 |p-z|
\end{equation}
by \eqref{prR1} and the fact that $q\to d(q)$ is 1-Lipschitz. The latter bound shows in particular that
\begin{equation} \label{LALip5}
|y - \mathfrak b(p)| \leq \varepsilon_0 |p-z|
\end{equation}
if $\varepsilon_1/\varepsilon_0$ is small enough. The bound \eqref{LALip5} also implies that $\Pi(x) \neq \Pi(y)$ and then $x \neq y$, and so \eqref{claimPibot} entails that
\begin{equation} \label{LALip4}
|b(z) - \Pi^\bot(y)| = |\Pi^\bot(x) - \Pi^\bot(y)| \leq 2\varepsilon_0|z - \Pi(y)|.
\end{equation}
The combination of \eqref{LALip5} and \eqref{LALip4} proves that the restriction of $b$ to $P\setminus Z$ has the limit $b(z)$ at the point $z\in \partial Z$. Since it is true for all $z\in \partial Z$, and since $b$ is already continuous (even Lipschitz) on $Z$ and $P\setminus Z$, we conclude that $b$ is continuous on $P$.
The lemma follows.
$\square$
We prove that the graph of $b$ is well approximated by the same plane as the ones that approximate $\partial \Omega$, as shown below.
\begin{lemma} \label{Lbbeps}
For $Q\in \mathcal S$, we have
\[ \sup_{p\in P \cap \Pi(2^8B_Q)} \Big[\dist(\mathfrak b(p),\partial \Omega) + \dist(\mathfrak b(p), P_{Q}) \Big] \lesssim \varepsilon_1 \ell(Q).\]
\end{lemma}
\noindent {\em Proof: }
Take $p\in \Pi(2^8B_Q)$. If $p\in Z$, then $\mathfrak b(p) \in \partial \Omega$, but since we also have \eqref{claimPibot}, we deduce $\mathfrak b(p)\in 2^9\Delta_Q$. The bound $\dist(\mathfrak b(p),P_Q) \leq C\varepsilon_1\ell(Q)$ is then just a consequence of \eqref{defP'Qb}.
Assume now that $p\in P\setminus Z$. We have $d(p) \leq 2^8\ell(Q)$ so any $R$ that verifies $p\in 2R$ is such that $21\ell(R) \leq d(p) \leq 2^8\ell(Q)$ by \eqref{prR1}, which implies $\ell(Q_R) \leq 2^9\ell(Q)$ by \eqref{prR2}.
Since $\mathfrak b(p)$ is a weighted average of the $\mathfrak b_R(p)$, the estimate \eqref{prbR2} on $\mathfrak b_R(p)$ gives that
\[\dist(\mathfrak b(p),\partial \Omega) \lesssim \varepsilon_1 \sup_{R: \, p\in 2R}\ell(Q_R) \lesssim \varepsilon_1 \ell(Q).\]
If $x\in \partial \Omega$ is such that $|\mathfrak b(p) - x| = \dist(\mathfrak b(p),\partial \Omega)$, then we have again by \eqref{claimPibot} that $x\in 2^9\Delta_Q$, so \eqref{defP'Qb} gives that $\dist(x,P_Q) \lesssim \varepsilon_1 \ell(Q)$. We conclude
\[\dist(\mathfrak b(p),P_Q) \leq |\mathfrak b(p) - x| + \dist(x,P_Q) \lesssim \varepsilon_1 \ell(Q)\]
as desired.
$\square$
We also need an $L^1$ version of the above lemma, with a better control in terms of $\alpha_\sigma(Q)$ (which is smaller than $\varepsilon_1$ when $Q\in \mathcal S$).
\begin{lemma} \label{Lbbalpha}
For $Q\in \mathcal S$, we have
\[\int_{P\cap \Pi(2B_{Q})} \dist(\mathfrak b(p), P_{Q}) \, dp \lesssim \ell(Q)^n \alpha_\sigma(Q).\]
\end{lemma}
\noindent {\em Proof: } The plane $P$ is the union of $Z$ and $P\setminus Z = \bigcup_{R\in \mathcal W_P} R$, so
\[\begin{split}
I & := \int_{P\cap \Pi(2B_{Q})} \dist(\mathfrak b(p), P_{Q}) \, dp \\
& = \int_{Z\cap \Pi(2B_{Q})} \dist(\mathfrak b(p), P_{Q}) \, dp + \int_{Z^c \cap \Pi(2B_{Q})}\dist(\mathfrak b(p), P_{Q}) \, dp := I_1 + I_2.
\end{split}\]
The term $I_1$ is easy, because $\mathfrak b(p) \in 4\Delta_Q \subset \partial \Omega$ by \eqref{claimPibot}, and so we have
\[I_1 \lesssim \int_{4\Delta_Q} \dist(y,P_Q) \, d\sigma(y).\]
We apply \eqref{ftestinalpha} with the test function
\[f(y):= \min\{\dist(y,\mathbb R^n \setminus 999B_{Q}), \dist(y,P_{Q})\}\]
which lies in $Lip(Q)$ and takes the value $0$ on $P_{Q}$ and $\dist(y,P_{Q})$ on $4\Delta_{Q}$, and we conclude that
\[I_1 \lesssim \int f \, d\sigma = \left| \int f \, d\sigma - \int f \, d\mu_{Q}\right| \lesssim \ell(Q)^{n} \alpha_\sigma(Q)\]
as desired.
We turn to the bound on $I_2$. We know that $\Angle(P_{Q},P) \leq \varepsilon_0$ so $P_{Q}$ is the graph of an affine function $a_{Q}: \, P \mapsto P^\bot$ with small Lipschitz constant. Therefore, we have
\[ I_2 \approx \int_{P\cap \Pi(2B_Q)} |b(p)- a_{Q}(p)| \, dp.\]
Let $\mathcal W_P(Q)$ be the subfamily of $\mathcal W_P$ of elements $R$ such that $2R$ intersects $\Pi(2B_{Q})$. The fact that $2R \cap \Pi(2B_{Q}) \neq \emptyset$ implies by \eqref{prR1} that $21 \ell(R) \leq \ell(Q)$. Consequently, $\ell(R) \leq 2^{-5} \ell(Q)$ because both $\ell(R)$ and $\ell(Q)$ are of the form $2^k$, and then $2R \subset \Pi(3B_{Q})$.
Assume first that $Q \varsubsetneq Q(\mathcal S)$, and check that this condition implies that $\ell(Q_R) \leq 2^5\ell(R) \leq \ell(Q) < \ell(Q(\mathcal S))$, hence $Q_R \neq Q(\mathcal S)$ for every $R\in \mathcal W_P(Q)$. So we have
\[\begin{split}
I_2 & = \int_{Z^c \cap \Pi(2B_{Q})} \left|\sum_{R\in \mathcal W_P(Q)} \varphi_R(p) (b_{R}(p) - a_{Q}(p))\right| \, dp \leq \sum_{R\in \mathcal W_P(Q)} \int_{2R} |b_{R}(p) - a_{Q}(p)| \, dp.
\end{split}\]
We want to estimate $\int_{2R} |b_{R}(p) - a_{Q}(p)| \, dp$, but now both $b_R$ and $a_{Q}$ are affine, so knowing $|b_{R}(p) - a_{Q}(p)|$ for $n$ different points $p\in 2R$ that are far from each other is enough. By \eqref{defP'Qb}, we know that $\Pi(\partial \Omega) \cap 2R$ contains many points all over $2R$, and by using those points to estimate the distance between $b_R$ and $a_{Q}$, we deduce that
\begin{multline*}
\int_{2R} |b_{R}(p) - a_{Q}(p)| \, dp \lesssim \int_{999\Delta_{Q(\mathcal S)}} |b_R(\Pi(y)) - a_{Q}(\Pi(y)) | {\mathds 1}_{\Pi(y) \in 2R} \, d\sigma(y) \\
\leq \int_{999\Delta_{Q(\mathcal S)}} |\Pi^{\bot}(y) - b_R(\Pi(y))| {\mathds 1}_{\Pi(y) \in 2R} \, d\sigma(y) + \int_{999\Delta_{Q(\mathcal S)}} |\Pi^\bot(y) - a_{Q}(\Pi(y)) | {\mathds 1}_{\Pi(y) \in 2R} \, d\sigma(y) \\
\lesssim \int_{999\Delta_{Q(\mathcal S)}} |\Pi^\bot(y) - a_{Q}(\Pi(y)) | {\mathds 1}_{\Pi(y) \in 2R} \, d\sigma(y)
\end{multline*}
by \eqref{defbR}, because $Q_R \neq Q(\mathcal S)$. Since the $2R$ are finitely overlapping, see \eqref{prR3}, the bound on $I_2$ becomes
\begin{equation} \label{boundI2}
I_2 \lesssim \int_{4\Delta_{Q}} |\Pi^\bot(y) - a_{Q}(\Pi(y)) | \, d\sigma(y) \lesssim \int_{4\Delta_{Q}} \dist(y,P_{Q}) \, d\sigma(y).
\end{equation}
We had the same bound on $I_1$, and with the same strategy, we can conclude that
\[I_2 \lesssim \int f \, d\sigma = \left| \int f \, d\sigma - \int f \, d\mu_{Q}\right| \lesssim \ell(Q)^{n} \alpha_\sigma(Q)\]
as desired.
If $Q = Q(\mathcal S)$, the same computations apply. It is possible to have some $R$ in $\mathcal W_P(Q)$ for which $Q_R = Q(\mathcal S)$ and thus $b_R \equiv 0$, but at the same time, we now have $a_Q \equiv 0$, so those $R$ verify $b_R - a_Q \equiv 0$ and do not have any contribution in the above bounds on $I_2$. Therefore, we also conclude that
\[I_2 \lesssim \ell(Q(\mathcal S))^{n} \alpha_\sigma(Q(\mathcal S)) = \ell(Q)^{n} \alpha_\sigma(Q).\]
The lemma follows.
$\square$
\section{Whitney regions for coherent regimes}
\label{SWhitney}
We associate the dyadic cubes of $\partial \Omega$ to Whitney regions in $\Omega$ and therefore associate the coherent family of dyadic cubes obtained in the corona decomposition to a subset of $\Omega$. The idea is similar to the construction found in \cite{HMMDuke}, but we need different properties than those in \cite{HMMDuke}, so we rewrite the construction.
This section will prove the following extension of Lemma \ref{LcoronaG}.
\begin{lemma} \label{lemWOG}
Let $\partial \Omega$ be a uniformly rectifiable set. We keep the notation from Lemma \ref{LcoronaG}, and we further have the existence of $K^{**}>0$ and a collection $\{\Psi_\mathcal S\}_{\mathcal S \in \mathfrak S}$ of functions such that
\begin{enumerate}[(a)]
\item $\Psi_\mathcal S$ are cut-off functions, that is $0 \leq \Psi_{\mathcal S}\leq 1$, and $|\nabla \Psi_\mathcal S| \leq 2\delta^{-1}$.
\item For any $\mathcal S \in \mathfrak S$, if $X \in \supp (1-\Psi_\mathcal S)$, then there exists $Q\in \mathcal S$ such that
\[ \ell(Q)/2 < \delta(X) = \dist(X,Q) \leq \ell(Q).\]
\item If $X \in \supp \Psi_{\mathcal S}$, then there exists $Q\in \mathcal S$ such that
\[ \ell(Q)/2^6 < \delta(X) = \dist(X,2^6\Delta_Q) \leq 2^6 \ell(Q).\]
\item For any $\mathcal S\in \mathfrak S$ and any $X\in \supp \Psi_\mathcal S$, we have
\begin{equation} \label{prWSa}
(1-2\epsilon_0) |X - \mathfrak b(\Pi(X))| \leq \delta(X) \leq (1+2\epsilon_0) |X - \mathfrak b(\Pi(X))|
\end{equation}
and, if $\Gamma_{\mathcal S}$ is the graph of $\mathcal S$,
\begin{equation} \label{prWSb}
(1-2\epsilon_0) \dist(X,\Gamma_{\mathcal S}) \leq \delta(X) \leq (1+3\epsilon_0) \dist(X,\Gamma_{\mathcal S}).
\end{equation}
\item There exists a collection of dyadic cubes $\{Q_i\}_{i\in I_\mathcal S}$ in $\mathbb D_{\partial \Omega}$ such that $\{2Q_i\}_{i\in I_\mathcal S}$ has an overlap of at most 2, and
\[ \Omega \cap (\supp \Psi_{\mathcal S}) \cap \supp (1-\Psi_{\mathcal S}) \subset \bigcup_{i\in I_\mathcal S} \Big\{ (K^{**})^{-1}\ell(Q_i) < \delta(X) = \dist(X,K^{**}\Delta_{Q_i}) \leq K^{**}\ell(Q_i)\Big\}.\]
In particular, $|\delta \nabla \Psi_{\mathcal S}| \in CM_\Omega(C)$ with a constant $C>0$ that depends only on $n$.
\end{enumerate}
\end{lemma}
\subsection{Whitney decomposition}
We divide $\Omega$ into Whitney regions. Usually, one constructs them with dyadic cubes of $\mathbb R^n$, but we prefer to construct them directly. We recall that $\delta(X):= \dist(X,\partial \Omega)$, and for $Q\in \mathbb D_{\partial \Omega}$, we define
\begin{equation} \label{defWO}
W_{\Omega}(Q) := \{X\in \Omega: \, \exists\, x\in Q \text{ such that } \ell(Q)/2 < \delta(X) = |X-x| \leq \ell(Q)\}.
\end{equation}
It is easy to see that the sets $\{W_\Omega(Q)\}_{Q\in \mathbb D_{\partial \Omega}}$ cover $\Omega$. The sets $W_\Omega(Q)$ are not necessarily disjoint, but we do not care, we are perfectly happy if $\{W_\Omega(Q)\}_{Q\in \mathbb D_{\partial \Omega}}$ is finitely overlapping, and we choose $W_\Omega(Q)$ small only because it will make our estimates easier. The sets $W_\Omega(Q)$ can be disconnected and have a bad boundary, but that is not an issue, since - contrary to \cite{HMMDuke} - we won't try to prove that the $W_\Omega(Q)$ are Chord-Arc Domains.
We also need fattened versions of $W_\Omega(Q)$, that we call $W_\Omega^*(Q)$ and $W_\Omega^{**}(Q)$, which are defined as
\begin{equation} \label{defWO*}
W_{\Omega}^*(Q) := \{X\in \Omega: \, \exists \, x\in 2^6\Delta_Q \text{ such that } 2^{-6}\ell(Q) < \delta(X) = |X-x| \leq 2^6\ell(Q)\}
\end{equation}
and
\begin{equation} \label{defWO**}
W_{\Omega}^{**}(Q) := \{X\in \Omega: \, \exists \, x\in K^{**}\Delta_Q \text{ such that } (K^{**})^{-1}\ell(Q) < \delta(X) = |X-x| \leq K^{**}\ell(Q)\}.
\end{equation}
The exact value of the constant $K^{**}$ does not matter. In Lemma \ref{LprPsiS}, we will choose it large enough to fit our purpose. The first properties of $W_\Omega(Q)$ and $W_\Omega^*(Q)$ are the ones that we expect and are easy to prove. We have
\begin{equation} \label{prWO1}
\Omega = \bigcup_{Q\in D_{\partial \Omega}} W_\Omega(Q),
\end{equation}
\begin{equation} \label{prWO2}
\diam(W^{*}_\Omega(Q)) \leq 2^7 \ell(Q),
\end{equation}
and
\begin{equation} \label{prWO4}
W^{*}_\Omega(Q) \subset 2^8 B_Q.
\end{equation}
We want $W_\Omega(Q)$ and $W_\Omega^*(Q)$ to be so that we can squeeze a cut-off function between the two sets, which is possible because
\begin{equation} \label{prWO3}
\dist(W_\Omega(Q), \mathbb R^n \setminus W^*_\Omega(Q)) \geq \frac14 \ell(Q).
\end{equation}
Indeed, if $X\in W_\Omega(Q)$ and $|X-Y| \leq \ell(Q)/4$, then $\ell(Q)/4 \leq \dist(Y,\partial \Omega) \leq 5\ell(Q)/4$ and for any $y \in \partial \Omega$ such that $|Y-y| = \delta(Y)$, we have
\[|y-x| \leq |y-Y| + |Y-X| + |X-x| \leq \frac54\ell(Q) + \frac14 \ell(Q) + \ell(Q) \leq 3\ell(Q),\]
so in particular, $y\in 2^5Q$, and thus $Y\in W^*_\Omega(Q)$. The claim \eqref{prWO3} follows.
\subsection{Coherent regions associated to coherent regimes}
As before, we pick $0 < \epsilon_1 \ll \epsilon_0 \ll 1$, and then construct the collection of coherent regimes $\mathfrak S$ given by Lemma \ref{Lcorona}. Let then $\mathcal S$ be either in $\mathfrak S$, or a coherent regime included in an element of $\mathfrak S$. For such $\mathcal S$, we define the regions
\begin{equation} \label{defWS}
W_{\Omega}(\mathcal S) := \bigcup_{Q\in \mathcal S} W_\Omega(Q) \quad \text{ and } \quad W_{\Omega}^*(\mathcal S) := \bigcup_{Q\in \mathcal S} W^*_\Omega(Q).
\end{equation}
Associated to the coherent regime $\mathcal S$, we have affine planes $P$ and $P^\bot$, the projections $\Pi$ and $\Pi^\bot$, a Lipschitz function $b: \, P \to P^\bot$, and $\mathfrak b(p)=(p,b(p))$ as in Subsection~\ref{SSLipschitz}. We also have the ``distance function'' $d(p)$ defined in \eqref{defdx}. We now define the Lipschitz graph
\begin{equation} \label{defGS}
\Gamma_{\mathcal S} := \{\mathfrak b(p), \, p\in P\} \subset \mathbb R^n.
\end{equation}
\begin{lemma} \label{LclaimPib}
If $X\in W^*_\Omega(\mathcal S)$ and $x\in \partial \Omega$ is such that $|X-x| = \delta(X)$, then
\begin{equation} \label{claimdPib}
(1-2\epsilon_0) \delta(X) \leq |X - \mathfrak b(\Pi(X))| \leq (1+2\epsilon_0) \delta(X),
\end{equation}
\begin{equation} \label{claimdGPib}
(1-2\epsilon_0) \dist(X,\Gamma_{\mathcal S}) \leq \delta(X) \leq (1+3\epsilon_0) \dist(X,\Gamma_{\mathcal S}),
\end{equation}
and
\begin{equation} \label{claimbxd}
|\mathfrak b(\Pi(X)) - x | \leq 2\epsilon_0 \delta(X).
\end{equation}
\end{lemma}
\noindent {\em Proof: }
Since $X\in W^*_\Omega(\mathcal S)$, there exists $Q\in \mathcal S$ such that $X\in W^*_\Omega(Q)$. Such $Q$ verifies $x\in 2^6\Delta_Q$ and
\[2^{-6} |X- x| \leq \ell(Q) \leq 2^6 |X-x|,\]
so $X \in 2^7 B_Q$ and $\Pi(X) \in \Pi(2^7 B_Q)$. Lemma \ref{Lbbeps} and \eqref{defP'Qb} entail that
\[\dist(x,P_Q) + \dist(\mathfrak b(\Pi(X)),P_Q) \leq C\epsilon_1 \ell(Q) \leq \frac18 \epsilon_0 |X-x| \]
if $\epsilon_1/\epsilon_0$ is small enough. Because the plane $P_Q$ makes a small angle with $P$, we deduce that
\begin{equation} \label{ccl48a}
|b(\Pi(X)) - \Pi^\bot(x)| \leq \frac14 \epsilon_0 |X-x|
\end{equation}
if $\epsilon_0$ is small enough. Define $\Pi_Q$ and $\Pi^\bot_Q$ as the projection onto $P_Q$ and $P_Q^\bot$. We have $|\Pi_Q(x) - x| \lesssim \epsilon_1 |X-x|$ thanks to \eqref{defP'Qb}. In addition, the projection $\Pi_Q(X)$ lies in $P_Q \cap 2^8 B_Q$, so using \eqref{defP'Qb} again gives the existence of $y\in \partial \Omega$ such that $|\Pi_Q(X) - y| \leq \epsilon_1\ell(Q) \lesssim \epsilon_1 |X-x|$. By definition of $x$, the point $y$ has to be further away from $X$ than $x$, so
\[\begin{split}
|X-x| & \leq |X-y| \leq |X - \Pi_Q(X) - x + \Pi_Q(x)| + |\Pi_Q(x) - x| + |\Pi_Q(X) - y| \\
& \leq |\Pi^\bot_Q(X)- \Pi^\bot_Q(x)| + C\epsilon_1|X-x|.
\end{split}\]
So one has $|\Pi^\bot_Q(X)-\Pi^\bot_Q(x)| \geq (1-C\epsilon_1) |X-x|$ and hence $|\Pi_Q(X)- \Pi_Q(x)| \leq C\sqrt{\epsilon_1}\,|X-x|$. Since $P_Q$ makes an angle at most $\epsilon_0$ with $P$, we conclude that
\begin{equation} \label{ccl48b}
|\Pi(X) - \Pi(x)| \leq \frac32 \epsilon_0|X-x|
\end{equation}
if $\epsilon_0$ and $\epsilon_1/\epsilon_0$ are small enough. The two bounds \eqref{ccl48a} and \eqref{ccl48b} easily prove \eqref{claimbxd}, and also prove \eqref{claimdPib} by writing
\[\begin{split}
\Big| |X - \mathfrak b(\Pi(X))| - |X-x| \Big| & \leq \Big| |\Pi^\bot(X) - b(\Pi(X))| - |\Pi^\bot(X)- \Pi^\bot(x)| \Big| + |\Pi(X) - \Pi(x)| \\
& \leq |\Pi^\bot(x) - b(\Pi(X))| + |\Pi(X) - \Pi(x)| \\
& \leq 2\epsilon_0|X-x|.
\end{split}\]
The bound \eqref{claimdGPib} is just a consequence of \eqref{claimdPib} and the fact that $\Gamma_{\mathcal S}$ is the graph of $b$, which is a $2\epsilon_0$-Lipschitz function with $\epsilon_0 \ll 1$.
The lemma follows.
$\square$
Let $\psi \in C^\infty_0(\mathbb R)$ be such that $0 \leq \psi \leq 1$, $\psi \equiv 1$ on $[0,1]$, $\psi \equiv 0$ on $[2,\infty)$ and $|\nabla \psi| \leq 2$. We set
\begin{equation} \label{defPsiS}
\Psi_{\mathcal S}(X) = {\mathds 1}_\Omega(X) \psi\Big(\frac{d(\Pi(X))}{3|X - \mathfrak b(\Pi(X))|}\Big) \psi\Big(\frac{|X - \mathfrak b(\Pi(X))|}{2\ell(Q(\mathcal S))}\Big).
\end{equation}
We want to prove the points $(b)$, $(c)$, and $(d)$ of Lemma \ref{lemWOG}, that is
\begin{lemma} \label{LprWS}
The function $\Psi_{\mathcal S}$ is constant equal to 1 on $W_\Omega(\mathcal S)$ and $\Omega \cap \supp \Psi_{\mathcal S} \subset W^*_\Omega(\mathcal S)$.
Consequently, for any $X\in \supp \Psi_\mathcal S $, we have \eqref{prWSa} and \eqref{prWSb} by Lemma \ref{LclaimPib}.
\end{lemma}
\begin{remark}
We know from its definition that $\Psi_\mathcal S \equiv 0$ on $\mathbb R^n \setminus \Omega$, but the support of $\Psi_\mathcal S$ can reach the boundary $\partial \Omega$. So if $\Omega \cap \supp \Psi_\mathcal S \subset W_\Omega^*(\mathcal S)$, then we actually have
\[\supp \Psi_{\mathcal S} \subset W_\Omega^*(\mathcal S) \cup \Big( \partial \Omega \cap \overline{W_\Omega^*(\mathcal S)} \Big).\]
\end{remark}
\noindent {\em Proof: } Take $Q\in \mathcal S$ and $X\in W_\Omega(Q)$, and pick $x\in Q$ such that $|X-x| = \delta(X)$. We want to show that $\Psi_{\mathcal S} (X) = 1$, i.e. that
\begin{equation} \label{414a}
d(\Pi(X)) \leq 3|X - \mathfrak b(\Pi(X))|
\end{equation}
and
\begin{equation} \label{414b}
|X - \mathfrak b(\Pi(X))| \leq 2\ell(Q(\mathcal S)).
\end{equation}
For \eqref{414b}, it suffices to notice that $|X - \mathfrak b(\Pi(X))| \leq 2 |X-x| \leq 2\ell(Q) \leq 2\ell(Q(\mathcal S))$ by \eqref{claimdPib} and by the definition of $x$ and $Q$. As for \eqref{414a}, observe that
$\abs{X-x}\le 2\epsilon_0\delta(X)+\abs{X-\mathfrak b(\Pi(X))}$ by the triangle inequality and \eqref{claimbxd},
and thus
\[d(\Pi(X)) \leq \ell(Q) \leq 2|X-x| \leq 3 |X - \mathfrak b(\Pi(X))|\]
by \eqref{claimdPib}.
It remains to verify that $\supp \Psi_{\mathcal S}$ is supported in $W^*_\Omega(\mathcal S)$, because \eqref{prWSa} and \eqref{prWSb} are then just \eqref{claimdPib} and \eqref{claimdGPib}.
So we pick $X\in \supp \Psi_{\mathcal S}$ which means in particular that
\begin{equation} \label{414c}
d(\Pi(X)) \leq 6|X - \mathfrak b(\Pi(X))|
\end{equation}
and
\begin{equation} \label{414d}
|X - \mathfrak b(\Pi(X))| \leq 4\ell(Q(\mathcal S)),
\end{equation}
and we want to show that $X\in W^*_\Omega(\mathcal S)$. By definition of $d(\Pi(X))$, there exists $Q \in \mathcal S$ such that
\[\dist(\Pi(X),\Pi(2B_Q)) + \ell(Q) = d(\Pi(X)) \leq 6 |X - \mathfrak b(\Pi(X))| \leq 24 \ell(Q(\mathcal S))\]
by \eqref{414c} and \eqref{414d}. Since $\mathcal S$ is coherent, by taking a suitable ancestor of $Q$, we can find $Q_X \in \mathcal S$ such that
\begin{equation} \label{414f1}
\frac14 |X - \mathfrak b(\Pi(X))| \leq \ell(Q_X) \leq 6 |X - \mathfrak b(\Pi(X))|
\end{equation}
and
\begin{equation} \label{414f2}
\Pi(X) \in 26 \Pi(B_{Q_X}).
\end{equation}
We want to prove that $X\in W_\Omega^*(Q_X)$. The combination of \eqref{414f2}, Lemma \ref{Lbbeps}, and \eqref{claimPibot} forces $\mathfrak b(\Pi(X)) \in 27B_{Q_X}$ when $\epsilon_0$ is small, and hence $X \in 31B_{Q_X}$ by \eqref{414f1}.
Let $x\in \partial \Omega$ such that $|X-x| = \delta(X)$. Since $X\in 31B_{Q_X}$, we have $x\in 2^6\Delta_{Q_X}$, and of course $|X-x| \leq 2^6\ell(Q_{X})$. So it remains to verify that $|X-x| \geq 2^{-6} \ell(Q_X)$.
On the one hand, thanks to \eqref{defP'Qb}, we know that $x$ lies close to $P_{Q_X}$, in the sense that $\dist(x,P_{Q_X}) \leq \epsilon_1 \ell(Q_X)$. On the other hand, if $P_{Q_X}$ is the graph of the function $a_{Q_X}: \, P \mapsto P^\bot$, we have
\[\begin{split}
\dist(X,P_{Q_X}) & \geq (1-\epsilon_0) |\Pi^\bot(X) - a_{Q_X}(\Pi(X))| \\
& \geq (1-\epsilon_0) \Big[ |X-\mathfrak b(\Pi(X))| - \dist(\mathfrak b(\Pi(X)),P_{Q_X}) \Big] \\
& \geq (1-\epsilon_0-C\epsilon_1) |X-\mathfrak b(\Pi(X))| \\
& \geq \frac16(1-\epsilon_0-C\epsilon_1) \ell(Q_X)
\end{split}\]
by Lemma \ref{Lbbeps} and \eqref{414f1}; that is $X$ is far from $P_{Q_X}$. Altogether, we deduce that
\[|X-x| \geq (1-C\epsilon_1) \dist(X,P_{Q_X}) \geq (1-\epsilon_0-C\epsilon_1) |X-\mathfrak b(\Pi(X))| \geq \frac18 \ell(Q_X)\]
if $\epsilon_0$ and $\epsilon_1$ are small. The lemma follows.
$\square$
We are left with the proof of point $(e)$ in Lemma \ref{lemWOG}, which is:
\begin{lemma} \label{LprPsiS}
There exists a collection of dyadic cubes $\{Q_i\}_{i\in I_\mathcal S}$ in $\mathbb D_{\partial \Omega}$ such that $\{2Q_i\}_{i\in I_\mathcal S}$ has an overlap of at most 2, and
\[ \Omega \cap (\supp \Psi_{\mathcal S}) \cap \supp (1-\Psi_{\mathcal S}) \subset \bigcup_i W^{**}_\Omega(Q_i).\]
\end{lemma}
\noindent {\em Proof: }
Observe that $(\supp \Psi_{\mathcal S}) \cap \supp (1-\Psi_{\mathcal S}) \subset E_1 \cup E_2$ where
\[E_1 := \{X\in W_\Omega^*(\mathcal S), \, 2\ell(Q(\mathcal S)) \leq |X- \mathfrak b(\Pi(X))| \leq 4\ell(Q(\mathcal S))\}\]
and
\[E_2 := \{X\in W_\Omega^*(\mathcal S), \, d(\Pi(X))/6 \leq |X- \mathfrak b(\Pi(X))| \leq d(\Pi(X))/3\}.
\]
Thanks to \eqref{prWSa}, the set $E_1$ is included in $W^{*}_\Omega(Q(\mathcal S))$.
For each $X\in E_2$, we construct the ball $B_X:= B(\mathfrak b(\Pi(X)),d(\Pi(X))/100) \subset \mathbb R^n$. The radius of $B_X$ is bounded uniformly by $\ell(Q(\mathcal S))/4$.
So by the Vitali lemma, we can find a non overlapping subfamily $\{B_{X_i}\}_{i\in I_2}$ such that $E_2 \subset \bigcup_{i\in I_2} 5B_{X_i}$.
We use \eqref{claimbxd} and \eqref{claimdPib} to find a point $x_i \in \frac12B_{X_i} \cap \partial \Omega$. We take $Q_i \in \mathbb D_{\partial \Omega}$ to be the unique dyadic cube such that such that $x_i \in Q_i$ and $\ell(Q_i) < d(\Pi(X_i))/400 \leq 2\ell(Q_i)$.
By construction, we have $2Q_i \subset B_{X_i}$, so the $\{2Q_i\}_{i\in I_2}$ are non-overlapping, and $5B_{X_i} \subset 100B_{Q_i}$.
It remains to check that $E_2 \subset \bigcup_i W^{**}_\Omega(Q_i)$. Take $X\in E_2$. From what we proved, there exists an $i\in I_2$ such that
\begin{equation} \label{423a}
|\Pi(X) - \Pi(X_i)| \leq |\mathfrak b(\Pi(X))- \mathfrak b(\Pi(X_i))| \leq d(\Pi(X_i))/20.
\end{equation}
Observe from the definition that $d$ is $1$-Lipschitz. Therefore,
\[|d(\Pi(X)) - d(\Pi(X_i))| \leq |\Pi(X) - \Pi(X_i)| \leq d(\Pi(X_i))/20\]
and
\begin{equation} \label{423b}
\frac{19}{20} d(\Pi(X_i)) \leq d(\Pi(X)) \leq \frac{21}{20} d(\Pi(X_i)).
\end{equation}
From \eqref{423a} and \eqref{423b}, we obtain
\[|X-x_i| \leq |X-\mathfrak b(\Pi(X))| + |\mathfrak b(\Pi(X)) - \mathfrak b(\Pi(X_i))| + |\mathfrak b(\Pi(X_i)) - x_i| \leq d(\Pi(X)) \leq 800\ell(Q_i)\]
and, from \eqref{prWSa} and \eqref{423b}, we get
\[\delta(X) \geq (1-2\epsilon_0)|X-\mathfrak b(\Pi(X))| \geq \frac17 d(\Pi(X)) \geq \frac18 d(\Pi(X_i)) \geq 50\ell(Q_i).\]
The last two computations show that $X\in W^{**}_\Omega(Q_i)$ if $K^{**} \geq 1601$. The lemma follows.
$\square$
\section{Replacement lemma and application to the smooth distance $D$}
\label{SDbeta}
As usual, let $0 < \epsilon_1 \ll \epsilon_0 \ll 1$, and then construct the collection of coherent regimes $\mathfrak S$ given by Lemma \ref{Lcorona}. We take then $\mathcal S$ to be either in $\mathfrak S$, or a coherent regime included in an element of $\mathfrak S$.
In Lemma \ref{Lbbalpha}, we started to show that the graph of $b$ behaves well with respect to the approximating planes $P_Q$, and we want to use the graph of $b$ as a substitute for $\partial \Omega$. Roughly speaking, the graph of the Lipschitz function $b$ is ``a good approximation of $\partial \Omega$ for the regime $\mathcal S$''. Let us explain what we mean by this. The Lipschitz graph $\Gamma_\mathcal S$ defined in \eqref{defGS}
is uniformly rectifiable, that is, $\Gamma_{\mathcal S}$ is well approximated by planes. And even better, we can easily construct explicit planes that approximate $\Gamma_{\mathcal S}$.
First, we equip $P$ with an Euclidean structure, which means that $P$ can be identified to $\mathbb R^{n-1}$. Similarly, we identify $P^\bot$ to $\mathbb R$, and of course, we choose $P$ and $P^\bot$ such that $\Pi^{\bot}(P) = \{0\}$ and $\Pi(P^{\bot}) = \{0\}$, and so $\mathbb R^n$ can be identified to $P \times P^\bot$.
We take a non-negative radial smooth function $\eta \in C^\infty_0(P,\mathbb R_+)$ which is supported in the unit ball and that satisfies $\int_{P} \eta dx = 1$. Even if $P$ depends on the regime $\mathcal S$, $P$ is identified to $\mathbb R^{n-1}$, so morally the smooth function $\eta$ is defined on $\mathbb R^{n-1}$ and does not depend on anything but the dimension $n$. For $t\neq 0$, we construct the approximation of identity by
\begin{equation} \label{defetat}
\eta_t(p) := \abs{t}^{1-n} \eta\Big(\frac{p}{\abs{t}}\Big),
\end{equation}
then the functions
\begin{equation} \label{defbt}
b^t := \eta_t * b, \quad \mathfrak b^t := \eta_t * \mathfrak b,
\end{equation}
and the planes
\begin{equation} \label{defLambda}
\Lambda(p,t):= \{(q,(q-p) \nabla b^t(p) + b^t(p)), \, q\in P\}.
\end{equation}
Notice that $\Lambda(p,t)$ is the tangent plane of the approximating graph $\set{\mathfrak b^t(p), \, p\in P}$ at $\mathfrak b^t(p)$.
What we actually want is flat measures, so we fix a radial function $\theta \in C^\infty(\mathbb R^n)$ such that $0\leq \theta \leq 1$, $\supp \theta \subset B(0,1)$, and $\theta \equiv 1$ on $B(0,\frac12)$. We set then
\begin{equation} \label{deftheta}
\theta_{p,t}(y) := \theta \left( \frac{\mathfrak b^t(p) - y}{t} \right)
\end{equation}
and
\begin{equation} \label{deflambda}
\lambda(p,t) := \dfrac{\displaystyle \int_{\partial \Omega} \theta_{p,t} \, d\sigma}{\displaystyle \int_{\Lambda(p,t)} \theta_{p,t} \, d\mu_{\Lambda(p,t)}} = c_\theta^{-1} \abs{t}^{1-n} \int_{\partial \Omega} \theta_{p,t} \, d\sigma
\end{equation}
where the second equality uses the fact that we centered $\theta_{p,t}$ at $\mathfrak b^t(p) \in \Lambda(p,t)$, and $c_\theta := \int_{\mathbb R^{n-1}} \theta(y) dy$. Note that the Ahlfors regularity of $\sigma$ implies that
\begin{equation} \label{lambda1}
\lambda(p,t) \approx 1,
\end{equation}
whenever $\mathfrak b^t(p)$ is close to $\partial \Omega$ - which is the case when $ d(p) \lesssim \abs{t} \lesssim \ell(Q(\mathcal S))$ - and with constants that depend only on $C_\sigma$ and $n$.
Finally, we introduce the flat measures
\begin{equation} \label{defmupr}
\mu_{p,t} := \lambda(p,t) \mu_{\Lambda(p,t)}.
\end{equation}
The flat measures $\mu_{p,t}$ are approximations of the Hausdorff measure on $\mathcal Gamma_{\mathcal S}$, and we shall show that the same explicit measures almost minimize the distance from $\sigma$ to flat measures, for the local Wasserstein distances $\dist_Q$ with $Q\in \mathcal S$.
\begin{lemma} \label{Lmupr}
For $Q\in \mathcal S$, $p\in \Pi(\frac32B_Q)$, and $\ell(Q)/4 \leq \abs{t} \leq \ell(Q)/2$, we have
\begin{equation} \label{Lmupr1}
\dist_Q(\sigma,\mu_{p,t}) \leq C \alpha_\sigma(Q),
\end{equation}
where $C>0$ depends only on $n$ and $C_\sigma$.
\end{lemma}
The lemma is not very surprising. The plane $\Lambda(p,t)$ is obtained by locally smoothing $\mathcal Gamma_{\mathcal S}$, which is composed of pieces of planes that approximate $\partial \Omega$.
\noindent {\em Proof: }
Thanks to the good approximation properties of the Lipschitz graph $\mathfrak b(p)$ that we obtain in Section~\ref{SSLipschitz}, this lemma can be proved similarly as Lemma 5.22 in \cite{DFM3}.
Let $Q\in \mathcal S$, $p\in \Pi(\frac32B_Q)$, and $t$ with $\ell(Q)/4 \leq \abs{t} \leq \ell(Q)/2$ be fixed. Denote $r=\abs{t}$. By Lemma \ref{Lcorona}, $\alpha_\sigma(Q)\le \epsilon_1$. Since we have chosen $\epsilon_1$ sufficiently small, Lemma \ref{Lalphatobeta} gives that
\begin{equation}\label{0PQ}
\sup_{y\in 999\Delta_Q}\dist(y,P_Q)\le C\epsilon_1^{1/n}\ell(Q)\le 10\ell(Q).
\end{equation}
Define a Lipschitz function $\Psi$ by
\[
\Psi(z):=\begin{cases}
\frac14 \qquad\qquad z\in B(x_Q,100\ell(Q)),\\
\frac{1}{3600\ell(Q)}\br{10^3\ell(Q)-\abs{z-x_Q}}_+ \quad\text{otherwise},
\end{cases}
\]
where $(f(z))_+:=\max\set{0,f(z)}$. Then set $f(z)=\Psi(z)\dist(z, P_Q)$. Observe that $\supp f\subset B(x_Q,10^3\ell(Q))$, and that $\abs{\nabla f(z)}\le \Psi(z)+\dist(z, P_Q)\abs{\nabla\Psi}\le 1$, because $\dist(z,P_Q)\le 10\ell(Q)+10^3\ell(Q)$ by \eqref{0PQ}. Hence $f\in Lip(Q)$. By using successively the facts that $f\geq 0$, $\int f \, d\mu_Q = 0$ and \eqref{ftestinalpha}, we have that
\begin{multline}\label{1PQ}
\int_{\mathbb Delta(x_Q,100\ell(Q))}\dist(z, P_Q)d\sigma(z)=4\int_{\mathbb Delta(x_Q,100\ell(Q))}\Psi(z)\dist(z,P_Q)d\sigma(z)\\
\le 4 \int f\, d\sigma = 4\int f(z)(d\sigma-d\mu_Q)\le C\ell(Q)^n\alpha_\sigma(Q).
\end{multline}
Now we estimate the distance from $\Lambda(p,t)$ to $P_Q$. Write
\[
\dist(\mathfrak b^t(p),P_Q)\le\int_{q\in B(p,r)\cap P}\eta_t(p-q)\dist(\mathfrak b(q),P_Q)dq.
\]
Notice that our choice of $p$ and $t$ ensures that
\begin{equation}\label{Bptsubset}
B(p,r)\cap P\subset\Pi(2B_Q).
\end{equation} So we have that
\begin{multline}\label{LambdaPQ1}
\dist(\mathfrak b^t(p),P_Q)\le r^{1-n}\norm{\eta}_\infty \int_{q\in\Pi(2B_Q)}\dist(\mathfrak b(q),P_Q)dq \\
\le Cr^{1-n}\norm{\eta}_\infty\ell(Q)^n\alpha_\sigma(Q)
\le C\ell(Q)\alpha_\sigma(Q),
\end{multline}
where we have used Lemma \ref{Lbbalpha}. We claim that
\begin{equation}\label{LambdaPQ2}
\dist(y,P_Q)\le C\alpha_\sigma(Q)\br{\abs{y-\mathfrak b^t(p)}+\ell(Q)} \qquad\text{for all }y\in \Lambda(p,t).
\end{equation}
Let $y=(q,(q-p) \nabla b^t(p) + b^t(p))\in\Lambda(p,t)$ be fixed. Denote by $\Pi_Q^\bot$ the orthogonal projection on the orthogonal complement of $P_Q$.
Then
\begin{equation}\label{LambdaPQ21}
\dist(y,P_Q)\le \abs{\Pi_Q^\bot\br{y-\mathfrak b^t(p)}}+\dist(\mathfrak b^t(p), P_Q).
\end{equation}
Also, $\Pi_Q^\bot(P_Q)$ is a single point $\xi_Q\in\mathbb R$. Denote $v:=y-\mathfrak b^t(p)=(q-p, (q-p)\nabla b^t(p))$. Let $\hat v^i=\hat v^i(p,t)=\partial_{p_i}\mathfrak b^t(p)$, $i=1,2,\dots,n-1$. Then $v=\sum_{i=1}^{n-1}(q_i-p_i)\hat v^i$. We estimate $\abs{\Pi_Q^\bot(\hat v^i)}$. By definition, we write
\begin{multline*}
\abs{\Pi_Q^\bot(\hat v^i)}=\abs{\Pi_Q^\bot(\partial_{p_i}\mathfrak b^t(p))}=\frac{1}{r}\abs{\Pi_Q^\bot((\partial_i\eta)_t*\mathfrak b(p))}
=\frac{1}{r}\abs{\int_{q\in B(p,r)\cap P}(\partial_i\eta)_t(p-q)\Pi_Q^\bot(\mathfrak b(q))dq}\\
=\frac{1}{r}\abs{\int_{q\in B(p,r)\cap P}(\partial_i\eta)_t(p-q)\br{\Pi_Q^\bot(\mathfrak b(q))-\xi_Q}dq},
\end{multline*}
where in the last equality we have used that $\int(\partial_i\eta)_t(x) dx=0$. Notice that $\abs{\Pi_Q^\bot(z)-\xi_Q}=\abs{\Pi_Q^\bot(z)-\Pi_Q^\bot(P_Q)}=\dist(z,P_Q)$, so we have that
\[
\abs{\Pi_Q^\bot(\hat v^i)}\le\frac{1}{r^n}\norm{\partial_i\eta}_\infty\int_{q\in B(p,r)\cap P}\dist(\mathfrak b(q),P_Q)dq\le \frac{C}{r^n}\ell(Q)^n\alpha_\sigma(Q)\le C\alpha_\sigma(Q)
\]
by \eqref{Bptsubset} and Lemma \ref{Lbbalpha}. This gives that \begin{equation}\label{PiQbotv}
\abs{\Pi_Q^\bot(v)}\le C\alpha_\sigma(Q)\abs{v}.
\end{equation} Then the claim \eqref{LambdaPQ2} follows from \eqref{LambdaPQ21} and \eqref{LambdaPQ1}.
Next we compare $c_Q$ (defined in \eqref{defmuQ}) and $\lambda(p,t)$, and claim that
\begin{equation}\label{lambdapt-cQ}
\abs{\lambda(p,t)-c_Q}\le C\alpha_\sigma(Q).
\end{equation}
We intend to apply \eqref{ftestinalpha} to the 1-Lipschitz function $|t|\,\theta_{p,t}/\norm{\theta}_{Lip}$. So we need to check that $\supp\theta_{p,t}\subset B(x_Q,10^3\ell(Q))$. By the construction of $\theta_{p,t}$, we have that $\supp\theta_{p,t}\subset B(\mathfrak b^t(p),r)$. By Lemma \ref{LALip} and the fact that $\epsilon_0$ has been chosen to be small,
\begin{equation}\label{eqbtb}
\abs{\mathfrak b^t(p)-\mathfrak b(p)}=\abs{b^t(p)-b(p)}=\abs{\int\eta_t(q)\br{b(q)-b(p)}dq}\le\norm{\nabla b}_\infty r\le 2\,\epsilon_0\,r<r.
\end{equation}
So $B(\mathfrak b^t(p),r)\subset B(\mathfrak b(p),2r)$. We show that
\begin{equation}\label{bbpxQ}
\abs{\mathfrak b(p)-x_Q}\le 10\ell(Q).
\end{equation}
Then the assumption $r\in [\ell(Q)/4,\ell(Q)/2]$ gives that $\supp\theta_{p,t}\subset B(\mathfrak b(p),2r)\subset B(x_Q,10^3\ell(Q))$, as desired.
To see \eqref{bbpxQ}, we recall that $p\in\Pi(\frac32 B_Q)$, and so $\abs{p-\Pi(x_Q)}\le 3\ell(Q)/2$. Let $x\in\partial\Omega$ be a point such that $\abs{\mathfrak b(p)-x}=\dist(\mathfrak b(p),\partial\Omega)\lesssim \epsilon_1\ell(Q)$, where the last inequality is due to Lemma \ref{Lbbeps}. Notice that by the definition \eqref{defdx}, $d(\Pi(x_Q))\le \ell(Q)$. So if $\abs{x-x_Q}\le 10^{-3}d(\Pi(x_Q))$, then $\abs{x-x_Q}\le 10^{-3}\ell(Q)$, and thus \[
\abs{\mathfrak b(p)-x_Q}\le \abs{\mathfrak b(p)-x}+\abs{x-x_Q}\le C\epsilon_1\ell(Q)+10^{-3}\ell(Q)\le 10\ell(Q),
\]
as desired. If $\abs{x-x_Q}> 10^{-3}d(\Pi(x_Q))$, then we can apply \eqref{claimPibot} to get that $\abs{\Pi^\bot(x)-\Pi^\bot(x_Q)}\le 2\epsilon_0\abs{\Pi(x)-\Pi(x_Q)}$. By the triangle inequality,
\(
\abs{\Pi(x)-\Pi(x_Q)}\le \abs{\Pi(x)-p}+\abs{p-\Pi(x_Q)}\le \br{C\epsilon_1+\frac32}\ell(Q)
\), and so $\abs{\Pi^\bot(x)-\Pi^\bot(x_Q)}\le 2\epsilon_0\br{\frac32+C\epsilon_1}\ell(Q)$. Hence, we still have that \[
\abs{\mathfrak b(p)-x_Q}\le \abs{\mathfrak b(p)-x}+\abs{x-x_Q}\le C\epsilon_1\ell(Q)+2\br{C\epsilon_1+3/2}\ell(Q)\le 10\ell(Q),
\]
which completes the proof of \eqref{bbpxQ}.
We have justified that $|t|\, \theta_{p,t}/\norm{\theta}_{Lip}\in Lip(Q)$, so we can apply \eqref{ftestinalpha} to this function and obtain that
\begin{equation}\label{AmuAsigma}
\abs{c_\theta r^{n-1}\lambda(p,t)-c_Q\int\theta_{p,t}d\mu_{P_Q}}
=\abs{\int \theta_{p,t}(d\sigma-d\mu_Q)}\le C\ell(Q)^{n-1}\alpha_\sigma(Q).
\end{equation}
We now estimate $A_\mu:=c_Q\int\theta_{p,t}(z)d\mu_{P_Q}(z)$. Denote by $\Pi_Q$ the orthogonal projection from $\Lambda(p,t)$ to $P_Q$; by \eqref{PiQbotv} this is an affine bijection, with a constant Jacobian $J_Q$ that satisfies
\begin{equation}\label{eqdetJQ}
\abs{\det(J_Q)-1}\le\abs{\sqrt{1- C\alpha_\sigma(Q)^2}-1}\le C\alpha_\sigma(Q).
\end{equation}
By a change of variables $z=\Pi_Q(y)$, we write
\begin{equation}\label{eqAmu}
A_\mu=c_Q\det(J_Q)\int_{y\in\Lambda(p,t)}\theta_{p,t}\br{\Pi_Q(y)}d\mu_{\Lambda(p,t)}(y).
\end{equation}
We compare $\int_{y\in\Lambda(p,t)}\theta_{p,t}\br{\Pi_Q(y)}d\mu_{\Lambda(p,t)}(y)$ and $\int_{y\in\Lambda(p,t)}\theta_{p,t}(y)d\mu_{\Lambda(p,t)}(y)$. For $y\in\Lambda(p,t)$, $\abs{\Pi_Q(y)-y}=\dist(y,P_Q)$. So by \eqref{LambdaPQ2}, for $y\in\Lambda(p,t)$
\begin{equation}\label{lambda-cQ1}
\abs{\theta_{p,t}\br{\Pi_Q(y)}-\theta_{p,t}(y)}\le\norm{\theta}_{Lip}r^{-1}\abs{\Pi_Q(y)-y}\le C\,r^{-1}\alpha_\sigma(Q)\br{\abs{y-\mathfrak b^t(p)}+\ell(Q)}.
\end{equation}
Moreover, the support property of $\theta_{p,t}$ implies that $\abs{\theta_{p,t}\br{\Pi_Q(y)}-\theta_{p,t}(y)}$ is not zero when either $y\in B(\mathfrak b^t(p),r)$ or $\Pi_Q(y)\in B(\mathfrak b^t(p),r)$. By the triangle inequality and \eqref{LambdaPQ2},
\[
\abs{y-\mathfrak b^t(p)}\le\abs{\Pi_Q(y)-\mathfrak b^t(p)}+C\alpha_\sigma(Q)\br{\abs{y-\mathfrak b^t(p)}+\ell(Q)}.
\]
Since $\alpha_\sigma(Q)\le \epsilon_1$ is sufficiently small, we get that when $\Pi_Q(y)\in B(\mathfrak b^t(p),r)$,
\(
\abs{y-\mathfrak b^t(p)}\le 2r+C\epsilon_1\ell(Q)\le 3\ell(Q)
\).
So by \eqref{lambda-cQ1} and the fact that $\supp\theta_{p,t}\subset B(\mathfrak b^t(p),r)$,
\begin{equation}\label{eqthetapt}
\abs{\int_{y\in\Lambda(p,t)}\br{\theta_{p,t}\br{\Pi_Q(y)}-\theta_{p,t}(y)}d\mu_{\Lambda(p,t)}(y)}\le C\,r^{n-1}\alpha_\sigma(Q).
\end{equation}
Recalling the definition of $c_\theta$, we have obtained that
$\abs{A_\mu-c_Q\det(J_Q)c_\theta\, r^{n-1}}\le C\,c_Q\,r^{n-1}\alpha_\sigma(Q)$.
By the triangle inequality, \eqref{eqdetJQ}, and the fact that $r \approx \ell(Q)$,
\[
\abs{A_\mu-c_Q\,c_\theta\,r^{n-1}}\le C\,c_Q\alpha_\sigma(Q)\ell(Q)^{n-1}.
\]
By this, the triangle inequality and \eqref{AmuAsigma},
\begin{equation}\label{lambda-cQ2}
c_\theta\,r^{n-1}\abs{\lambda(p,t)-c_Q}=\abs{c_\theta r^{n-1}\lambda(p,t)-c_Q\,c_\theta\,r^{n-1}}\le C(1+c_Q)\alpha_\sigma(Q)\ell(Q)^{n-1},
\end{equation}
which implies that $\abs{\lambda(p,t)-c_Q}\le\frac{1}{2}(1+c_Q)$ because $\alpha_\sigma(Q)$ is sufficiently small. But we know $\lambda(p,t)\approx 1$ by \eqref{lambda1}, so $c_Q\approx 1$ and then \eqref{lambda-cQ2} yields the desired estimate \eqref{lambdapt-cQ}.
Finally, we are ready to show that
\begin{equation}\label{muQmupt}
\dist_Q(\mu_Q,\mu_{p,t})\le C\alpha_\sigma(Q).
\end{equation}
Let $f\in Lip(Q)$. We have that
\[
\int f(z)d\mu_Q(z)=c_Q\int_{P_Q}f(z)d\mu_{P_Q}(z)=c_Q\det(J_Q)\int_{\Lambda(p,t)}f\br{\Pi_Q(y)}d\mu_{\Lambda(p,t)}(y).
\]
An argument similar to the one for \eqref{eqthetapt} gives that \[\abs{\int_{\Lambda(p,t)}\br{f\br{\Pi_Q(y)}-f(y)}d\mu_{\Lambda(p,t)}(y)}\le C\alpha_\sigma(Q)\ell(Q)^n.\]
So
\begin{multline*}
\abs{\int f\,d\mu_Q-\lambda(p,t)\int f\,d\mu_{\Lambda(p,t)}}\le
\abs{\int f\,d\mu_Q-c_Q\int f\,d\mu_{\Lambda(p,t)}}+\abs{\lambda(p,t)-c_Q}\abs{\int f\,d\mu_{\Lambda(p,t)}}\\
\le Cc_Q\det(J_Q)\alpha_\sigma(Q)\ell(Q)^n +c_Q\abs{\det(J_Q)-1}\abs{\int f\,d\mu_{\Lambda(p,t)}}+\abs{\lambda(p,t)-c_Q}\abs{\int f\,d\mu_{\Lambda(p,t)}}.
\end{multline*}
By \eqref{eqdetJQ}, \eqref{lambdapt-cQ}, and $c_Q\approx 1$,
\begin{equation}\label{eqfQfLambda}
\abs{\int f\,d\mu_Q-\lambda(p,t)\int f\,d\mu_{\Lambda(p,t)}}\le C\alpha_\sigma(Q)\ell(Q)^n,
\end{equation}
which proves \eqref{muQmupt}. Now \eqref{Lmupr1} follows from \eqref{muQmupt} and \eqref{ftestinalpha}.
\qed
We want to use the flat measures $\mu_{p,t}$ to estimate the smooth distance $D_\beta$ introduced in \eqref{IdefD}. But before that, we shall need to introduce
\begin{equation} \label{defalphak}
\alpha_\sigma(Q,k) := \alpha_\sigma(Q^{(k)}),
\end{equation}
where $Q^{(k)}$ is the unique ancestor of $Q$ such that $\ell(Q^{(k)}) = 2^k\ell(Q)$, and then for $\beta>0$,
\begin{equation} \label{defalphabeta}
\alpha_{\sigma,\beta}(Q) := \sum_{k\in \mathbb N} 2^{-k\beta} \alpha_\sigma(Q,k).
\end{equation}
The collection $\{\alpha_{\sigma,\beta}(Q)\}_Q$ is nice, because we have
\begin{equation} \label{pralphabeta}
\alpha_{\sigma,\beta}(Q^*) \approx \alpha_{\sigma,\beta}(Q)
\end{equation}
whenever $Q\in \mathbb D_{\partial \Omega}$ and $Q^*$ is the parent of $Q$, a property which is not satisfied by the $\{\alpha_\sigma(Q)\}_Q$. And of course the $\alpha_{\sigma,\beta}(Q)$'s still satisfy the Carleson packing condition.
\begin{lemma} \label{LalphabetaCM} Let $\partial \Omega$ be uniformly rectifiable and $\sigma$ be an Ahlfors regular measure satisfying \eqref{defADR}.
There exists a constant $C_{\sigma,\beta}$ that depends only on the constant in \eqref{defUR}, the Ahlfors regular constant $C_\sigma$, and $\beta$ such that, for any $Q_0 \in \mathbb D_{\partial \Omega}$,
\begin{equation} \label{pralphabeta2}
\sum_{Q\in \mathbb D_{\partial\Omega}(Q_0)} |\alpha_{\sigma,\beta}(Q)|^2 \sigma(Q) \leq C_{\sigma,\beta} \sigma(Q_0).
\end{equation}
\end{lemma}
\noindent {\em Proof: }
By Cauchy-Schwarz,
\[
\abs{\alpha_{\sigma,\beta}(Q)}^2\le \br{\sum_{k\in\mathbb N}2^{-\beta k}\alpha_{\sigma}(Q,k)^2}\br{\sum_{k\in\mathbb N}2^{-\beta k}}\le C\sum_{k\in\mathbb N}2^{-\beta k}\alpha_{\sigma}(Q,k)^2.
\]
Therefore,
\begin{multline}\label{alphabeta1}
\sum_{Q\in \mathbb D_{\partial\Omega}(Q_0)} |\alpha_{\sigma,\beta}(Q)|^2 \sigma(Q)\le C\sum_{Q\in\mathbb D_{\partial\Omega}(Q_0)}\sum_{k\in\mathbb N}2^{-\beta k}\alpha_{\sigma}(Q,k)^2\sigma(Q)\\
=C\sum_{k\in\mathbb N}2^{-\beta k}\sum_{Q\in\mathbb D_{\partial\Omega}(Q_0)}\alpha_{\sigma}(Q,k)^2\sigma(Q)
=: C\sum_{k\in\mathbb N}2^{-\beta k} \br{I_1+I_2},
\end{multline}
where $I_1$ is the sum over $Q\in\mathbb D_{\partial\Omega}(Q_0)$ such that $2^k\ell(Q)<\ell(Q_0)$, and $I_2$ is the rest. By \eqref{eqalphaQbdd} and Ahlfors regularity of $\partial\Omega$,
\[
I_2\le C\sum_{Q\in\mathbb D_{\partial\Omega}(Q_0), \, \ell(Q)\ge 2^{-k}\ell(Q_0)}\sigma(Q)\le C\sum_{j=0}^k\sum_{\ell(Q)=2^{-j}\ell(Q_0)}\sigma(Q)\le C\sum_{j=0}^k\ell(Q_0)^d\le C(k+1)\sigma(Q_0).
\]
For $I_1$, we observe that $\sigma(Q^{(k)})\approx 2^{kd}\sigma(Q)$, and that each $Q^{(k)}$ has at most $C2^{kd}$ descendants $Q$ such that $\ell(Q^{(k)}) = 2^k\ell(Q)$. Therefore,
\begin{multline*}
I_1\le C\sum_{Q\in\mathbb D_{\partial\Omega}(Q_0), \, \ell(Q)< 2^{-k}\ell(Q_0)}\alpha_\sigma(Q^{(k)})^2\sigma(Q^{(k)})2^{-kd}\\
\le C\sum_{Q^{(k)}\in\mathbb D_{\partial\Omega}(Q_0), \, \ell(Q^{(k)})
\le \ell(Q_0)}\alpha_\sigma(Q^{(k)})^2\sigma(Q^{(k)})=C\sum_{Q\in\mathbb D_{\partial\Omega}(Q_0)}\alpha_\sigma(Q)^2\sigma(Q)\le C\sigma(Q_0)
\end{multline*}
by \eqref{defUR}. Returning to \eqref{alphabeta1}, we have that
\[
\sum_{Q\in \mathbb D_{\partial\Omega}(Q_0)} |\alpha_{\sigma,\beta}(Q)|^2 \sigma(Q)\le C\sum_{k\in\mathbb N}2^{-\beta k}(k+1)\sigma(Q_0)\le C\sigma(Q_0),
\]
as desired.
$\square$
The quantities $\alpha_{\sigma,\beta}$ are convenient, because we can now obtain an analogue of Lemma \ref{Lmupr} where we don't need to pay too much attention on the choices of $p$ and $t$.
\begin{lemma} \label{LmuprG} Let $\beta >0$ and $K\geq 1$.
For $Q\in \mathcal S$, $p\in \Pi( K B_Q)$, and $\ell(Q)/K \leq \abs{t} \leq K\ell(Q)$, we have
\begin{equation} \label{Lmupr1G}
\dist_Q(\sigma,\mu_{p,t}) \leq C_{\beta,K} \alpha_{\sigma,\beta}(Q),
\end{equation}
where $C_{\beta,K}>0$ depends only on $n$, $C_\sigma$, $\beta$, and $K$.
\end{lemma}
\noindent {\em Proof: }
First, we prove that when $p\in \Pi(\frac32 B_Q)$ and $\ell(Q)/K \leq |t| \leq \ell(Q)/2$, we have
\begin{equation} \label{Lmupr2G}
\dist_Q(\sigma,\mu_{p,t}) \leq C_K \alpha_{\sigma}(Q).
\end{equation}
We set $t_j = 2^{-2-j}\ell(Q)$. We also take a dyadic cube $Q' \subset Q$ such that $\ell(Q')/4 \leq |t| \leq \ell(Q')/2$, and then we pick $p_0\in \Pi(\frac32 B_{Q'})$. By Lemma \ref{Lmupr}, we have that
\[ \dist_{Q}(\sigma,\mu_{p,t_0}) + \dist_{Q}(\sigma,\mu_{p_0,t_0}) \lesssim \alpha_\sigma(Q),\]
so $\dist_{Q}(\mu_{p,t_0},\mu_{p_0,t_0}) \lesssim \alpha_\sigma(Q)$ too. Consequently, the claim \eqref{Lmupr2G} is reduced to
\begin{equation} \label{Lmupr3G}
\dist_{Q}(\mu_{p_0,t},\mu_{p_0,t_0}) \leq C_K \alpha_\sigma(Q).
\end{equation}
For this latter bound, we decompose
\begin{equation} \label{Lmupr4G}
\dist_{Q}(\mu_{p_0,t},\mu_{p_0,t_0}) \leq \dist_{Q}(\mu_{p_0,t},\mu_{p_0,t_k}) + \sum_{j=0}^{k-1} \dist_{Q}(\mu_{p_0,t_j},\mu_{p_0,t_{j+1}}),
\end{equation}
where $k$ is chosen so that $t_k = \ell(Q')/2$, and $k\leq 1+\log_2(K)$ is bounded in terms of $K$. We look at $\dist_{Q}(\mu_{p_0,t},\mu_{p_0,t_k})$, but since we are dealing with two flat measures whose supports intersect $B_{Q'}$, Lemma A.5 in \cite{FenUR} shows that
\begin{equation} \label{Lmupr5G}
\dist_{Q}(\mu_{p_0,t},\mu_{p_0,t_k}) \lesssim \dist_{Q'} (\mu_{p_0,t},\mu_{p_0,t_k})
\end{equation}
and then Lemma \ref{Lmupr} and the fact that $\ell(Q') \approx_K \ell(Q)$ entail that
\begin{equation} \label{Lmupr6G}
\dist_{Q}(\mu_{p_0,t},\mu_{p_0,t_k}) \lesssim \dist_{Q'} (\mu_{p_0,t},\sigma) + \dist_{Q'} (\sigma,\mu_{p_0,t_k}) \lesssim \alpha_{\sigma}(Q') \leq C_K \alpha_{\sigma}(Q).
\end{equation}
A similar reasoning gives that
\begin{equation} \label{Lmupr7G}
\dist_{Q}(\mu_{p_0,t_j},\mu_{p_0,t_{j+1}}) \lesssim C_K \alpha_{\sigma}(Q)
\end{equation}
whenever $0\leq j \leq k-1$. The combination of \eqref{Lmupr4G}, \eqref{Lmupr6G}, and \eqref{Lmupr7G} shows the claim \eqref{Lmupr3G} and thus \eqref{Lmupr2G}.
In the general case, we pick the smallest ancestor $Q^*$ of $Q$ such that $p\in \Pi(\frac32B_{Q^*})$ and $|t| \leq \ell(Q^*)/2$, and we apply \eqref{Lmupr2G} to get
\[ \dist_Q(\sigma,\mu_{p,t}) \leq C_K \alpha_{\sigma}(Q^*).\]
The lemma follows then by simply observing that $\alpha_\sigma(Q^*) \lesssim \alpha_{\sigma,\beta}(Q)$.
$\square$
We need the constant
\begin{equation} \label{defcbeta}
c_\beta := \int_{\mathbb R^{n-1}} (1+|y|^2)^{-\frac{d+\beta}2} dy
\end{equation}
and the unit vector $N_{p,t}$ defined as the vector
\begin{equation} \label{defNpr}
N_{p,t}(X) := [\nabla \dist(.,\Lambda(p,t))](X)
\end{equation}
which is of course constant on the two connected components of $\mathbb R^n \setminus \Lambda(p,t)$. We are now ready to compare $D_\beta$ with the distance to $\Lambda(p,t)$.
\begin{lemma} \label{LestD}
Let $Q\in \mathcal S$, $X\in W_{\Omega}(Q)$, $p\in \Pi(2^5Q)$ and $2^{-5}\ell(Q) \leq |t| \leq 2^5 \ell(Q)$. We have
\begin{equation} \label{estD1}
|D^{-\beta}_\beta(X) - c_\beta \lambda(p,t) \dist(X,\Lambda(p,t))^{-\beta}| \leq C \ell(Q)^{-\beta} \alpha_{\sigma,\beta}(Q),
\end{equation}
and
\begin{equation} \label{estD2}
|\nabla [D^{-\beta}_\beta](X) + \beta c_\beta \lambda(p,t) \dist(X,\Lambda(p,t))^{-\beta-1} N_{p,t}(X)| \\ \leq C \ell(Q)^{-\beta-1} \alpha_{\sigma,\beta+1}(Q),
\end{equation}
where the constant $C>0$ depends only on $C_\sigma$ and $\beta$.
\end{lemma}
\noindent {\em Proof: }
Denote $r=\abs{t}$, and $d=n-1$.
By the definition of $W_\Omega(Q)$, $\dist(X,\partial\Omega)>\ell(Q)/2$ and $X\in 2B_Q$. We show that in addition,
\begin{equation}\label{eqXball}
X\in B(\mathfrak b^t(p),2^6\ell(Q)),
\end{equation}
and
\begin{equation}\label{eqXdist}
\dist(X,\Lambda(p,t)\cup\partial\Omega)>\frac{\ell(Q)}{20}.
\end{equation}
Since $X\in 2B_Q$, $\abs{\Pi(X)-p}\le (2^5+2) \ell(Q)$. Then $\abs{\mathfrak b(\Pi(X))-\mathfrak b(p)}\le (1+2\epsilon_0)(2^5+2)\ell(Q)$ because $\mathfrak b$ is the graph of a $2\epsilon_0$-Lipschitz function. Write
\[
\abs{X-\mathfrak b^t(p)}\le \abs{X-\mathfrak b(\Pi(X))}+\abs{\mathfrak b(\Pi(X))-\mathfrak b(p)}+\abs{\mathfrak b(p)-\mathfrak b^t(p)},
\]
then use \eqref{claimdPib} and \eqref{eqbtb} to get
\[\abs{X-\mathfrak b^t(p)}\le (1+2\epsilon_0)\br{\delta(X)+(2^5+2)\ell(Q)}+2\epsilon_0 r\le 2^6\ell(Q),\]
and thus \eqref{eqXball} follows. To see \eqref{eqXdist}, we only need to show that
$\dist(X,\Lambda(p,t))>\frac{\ell(Q)}{20}$. Notice that $(\nabla b^t(p),-1)$ is a normal vector of the plane $\Lambda(p,t)$, and that $\mathfrak b^t(p)\in\Lambda(p,t)$. So
\begin{multline}\label{distXLambda}
\dist(X,\Lambda(p,t))=\frac{\abs{\br{X-\mathfrak b^t(p)}\cdot\br{\nabla b^t(p),-1}}}{\abs{(\nabla b^t(p),-1)}}=\frac{\abs{(\Pi(X)-p)\cdot\nabla b^t(p)+\br{b^t(p)-\Pi^\bot(X)}}}{\sqrt{\abs{\nabla b^t(p)}^2+1}}\\
\ge \frac12\br{\abs{\Pi^\bot(X)-b^t(p)}-\abs{(\Pi(X)-p)\cdot\nabla b^t(p)}}\ge\frac12\abs{\Pi^\bot(X)-b^t(p)}-C\,2^5\ell(Q)\epsilon_0,
\end{multline}
by $\norm{\nabla b^t}_\infty\le C\epsilon_0$ (see \eqref{brbdd}). We have that $\abs{b(\Pi(X))-b(p)}\le 2\epsilon_0\abs{\Pi(X)-p}\le 2^7\epsilon_0\ell(Q)$, and that
\(
\abs{\Pi^\bot(X)-b(\Pi(X))}\ge\dist(X,\Gamma_{\mathcal S})\ge\frac{\delta(X)}{1+3\epsilon_0}\ge \frac{\ell(Q)}{2(1+3\epsilon_0)}
\)
by \eqref{claimdGPib}. So
\[
\abs{\Pi^\bot(X)-b^t(p)}\ge\abs{\Pi^\bot(X)-b(\Pi(X))}-\abs{b(\Pi(X))-b(p)}-\abs{b^t(p)-b(p)}\ge \frac{\ell(Q)}{5}
\]
by \eqref{eqbtb}. Then $\dist(X,\Lambda(p,t))>\frac{\ell(Q)}{20}$ follows from this and \eqref{distXLambda}.
Now we prove \eqref{estD1}. We intend to cut the integral $D_\beta^{-\beta}=\int_{\partial\Omega}\abs{X-y}^{-d-\beta}d\sigma(y)$ into pieces. So we introduce a cut-off function $\theta_0\in C_c^\infty(B(0,r/2))$, which is radial, ${\mathds 1}_{B(0,r/4)}\le\theta_0\le {\mathds 1}_{B(0,r/2)}$, and $\abs{\nabla\theta_0}\lesssim 1/r$. Then we set $\theta_k(y):=\theta_0(2^{-k}y)-\theta_0(2^{-k+1}y)$ for $k\ge 1$ and $y\in\mathbb R^n$, and define $\widetilde\theta_k(y)=\theta_k(y-\mathfrak b^t(p))$ for $k\in\mathbb N$. Denote $B_k=B(\mathfrak b^t(p),2^{k-1}r)$. We have that $\supp\widetilde\theta_0\subset B_0$, $\supp\widetilde\theta_k\subset B_k\setminus B_{k-2}$ for $k\ge 1$, and that
\[
\sum_{k\in\mathbb N}\widetilde\theta_k=1.
\]
Now we can write
\[D_\beta(X)^{-\beta}=\sum_{k\in\mathbb N}\int_{\partial\Omega}\abs{X-y}^{-d-\beta}\widetilde\theta_k(y)d\sigma(y)=:\sum_{k\in\mathbb N}\int_{\partial\Omega} f_k(y)d\sigma(y),\]
with $f_k(y)=\abs{X-y}^{-d-\beta}\widetilde\theta_k(y)$. We intend to compare $\int f_k(y)d\sigma(y)$ and $\int f_k(y)d\mu_{p,t}(y)$. Both integrals are well-defined because of \eqref{eqXdist}. Observe that
\begin{multline*}
\sum_{k\in\mathbb N}\int f_k(y)d\mu_{p,t}(y)=\lambda(p,t)\int_{\Lambda(p,t)}\abs{X-y}^{-d-\beta}d\mu_{\Lambda(p,t)}(y)\\
=\lambda(p,t)\int_{\mathbb R^d}\br{\dist(X,\Lambda(p,t))^2+\abs{y}^2}^{-(d+\beta)/2}dy=\lambda(p,t)c_\beta\dist(X,\Lambda(p,t))^{-\beta}
\end{multline*}
by a change of variables. So
\begin{equation}\label{Dbeta}
D^{-\beta}_\beta(X) - c_\beta \lambda(p,t)\dist(X,\Lambda(p,t))^{-\beta}=\sum_{k\in\mathbb N}\int f_k\,(d\sigma-d\mu_{p,t}).
\end{equation}
We are interested in the Lipschitz properties of $f_k$ because we intend to use Wasserstein distances.
We claim that
\begin{equation}\label{X-ylwbd}
\abs{X-y}\ge c2^kr \quad\text{when }y\in\partial\Omega\cup\Lambda(p,t) \text{ is such that } \widetilde\theta_k(y)\neq0,
\end{equation}
where $c=10^{-1}2^{-21}$.
In fact, by \eqref{eqXball} and the support properties of $\widetilde\theta_k$, if $k\ge 15$, then
\[
\abs{X-y}\ge 2^{k-3}r-2^6\ell(Q)\ge (2^{k-3}-2^{11})r\ge 2^{k-4}r \quad\text{for }y\in\supp\widetilde\theta_k.
\]
If $0\le k<15$, then by \eqref{eqXdist}, for $y\in\partial\Omega\cup\Lambda(p,t)$,
\[
\abs{X-y}\ge\dist(X,\partial\Omega\cup\Lambda(p,t))\ge\frac{\ell(Q)}{20}\ge \frac{2^{-6}r}{10}\ge \frac{2^{-21}}{10}2^kr.
\]
So \eqref{X-ylwbd} is justified. But $f_k$ is not a Lipschitz function in $\mathbb R^n$ because $y$ can get arbitrarily close to $X$ when $k$ is small. Set
\[
\widetilde f_k(y):=\max\set{\abs{X-y},c\,2^kr}^{-d-\beta}\widetilde\theta_k(y).
\]
Then by \eqref{X-ylwbd}, $\widetilde f_k(y)=f_k(y)$ for $y\in\partial\Omega\cup\Lambda(p,t)$, and therefore,
\begin{equation}\label{fkwtfk}
\int f_k\,(d\sigma-d\mu_{p,t})=\int \widetilde f_k\,(d\sigma-d\mu_{p,t}).
\end{equation}
The good thing about $\widetilde f_k$ is that it is Lipschitz. A direct computation shows that
$\norm{\widetilde f_k}_\infty\le C\br{2^kr}^{-d-\beta}$, and $\norm{\nabla \widetilde f_k}_{\infty}\le C(2^kr)^{-d-\beta-1}$. Moreover, $\widetilde f_k$ is supported on $B(\mathfrak b^t(p),2^{k-1}r)$, which is contained in $B(x_{Q^{(k)}},10^3\ell(Q^{(k)}))$. To see this, one can use \eqref{eqbtb}, \eqref{bbpxQ}, and $\abs{x_Q-x_{Q^{(k)}}}\le 2^{k-1}\ell(Q)$ to get that
\[
\abs{\mathfrak b^t(p)-x_{Q^{(k)}}}\le \br{2\epsilon_0+2^{k-1}}r+10\,2^5\ell(Q)+2^{k-1}\ell(Q)\le 2^5(2^k+11)\ell(Q)\le 10^32^k\ell(Q).
\]
Write
\begin{multline*}
\int \widetilde f_k\,(d\sigma-d\mu_{p,t})
=\int \widetilde f_k\,(d\sigma-d\mu_{Q^{(k)}})+\int \widetilde f_k\,(d\mu_Q-d\mu_{p,t})
+\sum_{j=1}^k\int \widetilde f_k\,(d\mu_{Q^{(j)}}-d\mu_{Q^{(j-1)}})\\
=:I+II+\sum_{j=1}^kIII_j.
\end{multline*}
By the definition \eqref{defmuQ} of $\mu_{Q^{(k)}}$ and properties of $\widetilde f_k$,
\(
\abs{I}\le C\br{2^kr}^{-\beta}\alpha_\sigma(Q,k)
\).
We then have $|II| \leq \br{2^kr}^{-\beta} \dist_{Q^{(k)}} (\mu_Q , \mu_{p,t})$, but because we are looking at the Wasserstein distance between two flat measures whose supports intersect $10B_Q$, Lemma A.5 in \cite{FenUR} shows that
\[\dist_{Q^{(k)}} (\mu_Q , \mu_{p,t}) \lesssim \dist_{Q} (\mu_Q , \mu_{p,t})\]
and thus
\[ |II| \lesssim \br{2^kr}^{-\beta} \dist_{Q} (\mu_Q , \mu_{p,t}) \leq \br{2^kr}^{-\beta} \Big( \dist_{Q} (\mu_Q, \sigma) + \dist_Q( \sigma , \mu_{p,t}) \Big) \lesssim \br{2^kr}^{-\beta} \alpha_{\sigma,\beta}(Q)\]
by Lemma \ref{LmuprG}. The terms $III_j$ can be bounded by a Wasserstein distance between planes, and similarly to $II$, we get
\[ |III_j| \lesssim \br{2^kr}^{-\beta} \dist_{Q^{(k)}} (\mu_{Q^{(j)}} , \mu_{Q^{(j-1)}}) \lesssim \br{2^kr}^{-\beta} \dist_{Q^{(j)}} (\mu_{Q^{(j)}} , \mu_{Q^{(j-1)}}) \lesssim \br{2^kr}^{-\beta} \alpha_\sigma(Q,j).\]
Altogether, we obtain that
\[
\abs{ \int \widetilde f_k\,(d\sigma-d\mu_{p,t})}\le C\br{2^kr}^{-\beta} \left( \alpha_{\sigma,\beta}(Q) + \sum_{j=0}^k\alpha_\sigma(Q,j) \right).
\]
Then by \eqref{fkwtfk} and \eqref{Dbeta},
\begin{multline*}
\abs{D^{-\beta}_\beta(X) - c_\beta \lambda(p,t)\dist(X,\Lambda(p,t))^{-\beta}}\le C\sum_{k\in\mathbb N}\br{2^kr}^{-\beta}\left( \alpha_{\sigma,\beta}(Q) + \sum_{j=0}^k\alpha_\sigma(Q,j) \right) \\
\le C\ell(Q)^{-\beta}\alpha_{\sigma,\beta}(Q),
\end{multline*}
which is \eqref{estD1}.
We claim that \eqref{estD2} can be established similarly to \eqref{estD1} as long as one expresses the left-hand side of \eqref{estD2} appropriately.
A direct computation shows that
\[\nabla(D_\beta^{-\beta})(X)=-(d+\beta)\int\abs{X-y}^{-d-\beta-2}(X-y)d\sigma(y).\]
On the other hand,
\begin{multline*}
\int\abs{X-y}^{-d-\beta-2}(X-y)d\mu_{\Lambda(p,t)}(y)= N_{p,t}(X)\int\abs{X-y}^{-d-\beta-2}(X-y)\cdot N_{p,t}(X)d\mu_{\Lambda(p,t)}(y)\\
=N_{p,t}(X)\int\abs{X-y}^{-d-\beta-2}\dist(X,\Lambda(p,t))d\mu_{\Lambda(p,t)}(y)=c_{\beta+2}\dist(X,\Lambda(p,t))^{-\beta-1}N_{p,t}(X).
\end{multline*}
By \cite{FenUR} (3.30), $(\beta+d)c_{\beta+2}=\beta c_\beta$ for all $\beta>0$. Hence
\begin{multline*}
\nabla [D^{-\beta}_\beta](X) + \beta c_\beta \lambda(p,t) \dist(X,\Lambda(p,t))^{-\beta-1} N_{p,t}(X)\\
=-(d+\beta)\int\abs{X-y}^{-d-\beta-2}(X-y)\br{d\sigma(y)-d\mu_{p,t}(y)}.
\end{multline*}
Now we set $f_k'(y)=\abs{X-y}^{-d-\beta-2}(X-y)\widetilde\theta_k(y)$. Using \eqref{eqXball} and \eqref{eqXdist}, we can see that $f_k'$ is Lipschitz on $\partial\Omega\cup\Lambda(p,t)$. Then we can play with measures as before to obtain \eqref{estD2}.
$\square$
\begin{corollary} \label{CestD}
Let $Q\in \mathcal S$, $X\in W_{\Omega}(Q)$, $p\in \Pi(2^5Q)$ and $2^{-5}\ell(Q) \leq |t| \leq 2^5 \ell(Q)$. We have
\begin{equation} \label{estD3}
\left|\dfrac{\nabla D_\beta(X)}{D_\beta(X)} - \dfrac{N_{p,t}(X)}{\dist(X,\Lambda(p,t))}\right| \leq C \ell(Q)^{-1} \alpha_{\sigma,\beta}(Q),
\end{equation}
where the constant $C>0$ still depends only on $C_\sigma$ and $\beta$.
\end{corollary}
\noindent {\em Proof: }
To lighten the notation, we denote by $\mathcal O_{CM}$ any quantity such that $$|\mathcal O_{CM}| \leq C \alpha_{\sigma,\beta}(Q)$$ for some constant $C$. Then by \eqref{estD2},
\begin{multline*}
\frac{\nabla D_\beta(X)}{D_\beta(X)} = - \frac1{\beta} \frac{\nabla[D_\beta^{-\beta}](X)}{D_\beta^{-\beta}(X)} \\
= - \frac1{\beta}\br{ \frac{-\beta c_\beta \lambda(p,t) \dist(X,\Lambda(p,t))^{-\beta-1}N_{p,t}(X)}{D_{\beta}^{-\beta}(X)} + \frac{\ell(Q)^{-\beta-1}\mathcal O_{CM}}{D_{\beta}^{-\beta}(X)}}.
\end{multline*}
Using $D_\beta(X)\approx\delta(X)\approx\ell(Q)$ and \eqref{estD1}, we can further write the above as
\[
\frac{\nabla D_\beta(X)}{D_\beta(X)} = \dfrac{N_{p,t}(X)}{\dist(X,\Lambda(p,t))} + \ell(Q)^{-1} \mathcal O_{CM},
\]
which implies the corollary.
$\square$
\section{The bi-Lipschitz change of variable $\rho_{\mathcal S}$}
\label{Srho}
The results in this section are similar or identical to --- and often even easier than --- the ones found in Sections 2, 3, and 4 of \cite{DFM3}. Many proofs will only be sketched and we will refer to the corresponding result in \cite{DFM3} for details.
As in the previous sections, we take $0<\epsilon_0 \ll \epsilon_1 \ll 1$ and we use Lemma \ref{Lcorona} with such $\epsilon_0,\epsilon_1$ to obtain a collection $\mathfrak S$ of coherent regimes. We then take $\mathcal S$ that either belongs to $\mathfrak S$, or is a coherent regime included in an element of $\mathfrak S$.
We keep the notations introduced in Sections \ref{SUR}, \ref{SWhitney}, and \ref{SDbeta}.
\subsection{Construction of $\rho_{\mathcal S}$}
In this section, the gradients are column vectors. The other notation is fairly transparent. A hyperplane $P$ is equipped with an orthonormal basis and $\nabla_p$ corresponds to the vector of the derivatives in each coordinate of $p$ in this basis; $\partial_t$ or $\partial_s$ are the derivatives with respect to $t$ or $s$, that are always explicitly written; $\nabla_{p,t}$ or $\nabla_{p,s}$ are the gradients in $\mathbb R^n$ seen as $P \times P^\bot$.
\begin{lemma} \label{LbrCM}
The quantities $\nabla_{p,t} b^t$ and $t \nabla_{p,t} \nabla_p b^t$ are bounded, that is, for any $t \neq0$ and any $p\in P$,
\begin{equation} \label{brbdd}
|\nabla_{p,t} b^t| + |t \nabla_{p,t} \nabla_p b^t| \leq C\epsilon_0.
\end{equation}
In addition, $|\partial_{t} b^t| + |t \nabla_{p,t} \nabla_p b^t| \in CM_{P\times (P^\bot \setminus \{0\})}$, that is, for any $r>0$ and any $p_0\in P$,
\begin{equation} \label{brCM}
\iint_{B(p_0,r)} \Big( |\partial_t b^t(p)|^2 + |t \nabla_{p,t} \nabla_p b^t(p)|^2 \Big) \frac{dt}{t} \, dp \leq C\epsilon_0^2 r^{n-1}.
\end{equation}
In both cases, the constant $C>0$ depends only on $n$ (and $\eta$).
\end{lemma}
\noindent {\em Proof: }
The result is well known and fairly easy. The boundedness is proven in Lemma 3.17 of \cite{DFM3}, while the Carleson bound is established in Lemma 4.11 of \cite{DFM3} (which is itself a simple application of the Littlewood-Paley theory found in \cite[Section I.6.3]{Stein93} to the bounded function $\nabla b$).
$\square$
Observe that the convention that we established shows that $(\nabla_p b^t(p))^T$ is an $(n-1)$-dimensional horizontal vector. We define the map $\rho_{\mathcal S}: P\times P^\bot \to P\times P^\bot$ as
\begin{equation} \label{defrhoS}
\rho_{\mathcal S}(p,t) := (p - t(\nabla b^t(p))^T, t + b^t(p))
\end{equation}
if $t\neq 0$ and $\rho_{\mathcal S}(p,0) = \mathfrak b(p)$.
Because the codimension of our boundary is 1 in our paper, contrary to \cite{DFM3} which stands in the context of domains with higher codimensional boundaries, our map is way easier than the one found in \cite{DFM3}. However, the present mapping is still different from the one found in \cite{KePiDrift}, and has the same weak and strong features as the change of variable in \cite{DFM3}. Note that the $i^{th}$ coordinate of $\rho_{\mathcal S}$, $1\leq i \leq n-1$, is
\begin{equation} \label{defrhoSi}
\rho_{\mathcal S}^i(p,t) := p_i - t\partial_{p_i} b^t(p).
\end{equation}
Note that $\rho_{\mathcal S}$ is continuous on $P\times P^\bot = \mathbb R^n$, because both $t\nabla b^t$ and $b^t-b$ converges (uniformly in $p \in P$) to $0$ as $t\to 0$. The map $\rho_{\mathcal S}$ is $C^\infty$ on $\mathbb R^n \setminus P$, and we compute the Jacobian $\Jac$ of $\rho_{\mathcal S}$ which is
\begin{equation} \label{defJacS}
\Jac(p,t) = \begin{pmatrix} I - t\partial_{p_i} \partial_{p_j} b^t(p) & \partial_{p_i} b^t(p) \\ - t \partial_t \partial_{p_j} b^t(p) - \partial_{p_j} b^t(p) & 1 + \partial_t b^t(p) \end{pmatrix},
\end{equation}
where $i$ and $j$ refer to the row and the column of the matrix, respectively. We define the approximation of the Jacobian $\Jac$ as
\begin{equation} \label{defJS}
J = \begin{pmatrix} I & \partial_{p_i} b^t(p) \\ - \partial_{p_j} b^t(p) & 1 \end{pmatrix} = \begin{pmatrix} I & \nabla_p b^t(p) \\ - (\nabla_p b^t(p))^T & 1 \end{pmatrix}.
\end{equation}
\begin{lemma} \label{LestonJ}
We have the following pointwise bounds:
\begin{enumerate}[(i)]
\item $\displaystyle \|J-I\| \lesssim |\nabla_p b^t| \lesssim \epsilon_0$, \smallskip
\item $\displaystyle \|\Jac - J\| \lesssim |\partial_t b^t| + |t\nabla_{p,t} \nabla_p b^t| \lesssim \epsilon_0$, \smallskip
\item $\displaystyle |\det(J)-1| \lesssim |\nabla_p b^t| \lesssim \epsilon_0$, \smallskip
\item $\displaystyle |\det(\Jac) - \det(J)| \lesssim |\partial_t b^t| + |t\nabla_{p,t} \nabla_p b^t|$, \smallskip
\item $\displaystyle \|(\Jac)^{-1} - J^{-1}\| \lesssim |\partial_t b^t| + |t\nabla_{p,t} \nabla_p b^t|$, \smallskip
\item $\displaystyle |\nabla_{p,t} \det(J)| + \||\nabla_{p,t} J^{-1}|\| \lesssim |\nabla_{p,t} \nabla_p b^t|$, \smallskip
\end{enumerate}
In each estimate, the constants depend only on $n$ and $\eta$.
\end{lemma}
\noindent {\em Proof: } Only a rapid proof is provided, and details are carried out in the proof of Lemmas 3.26, 4.12, 4.13, and 4.15 in \cite{DFM3}.
The items (i) and (ii) are direct consequences of \eqref{brbdd} and the definitions of $J$ and $\Jac$.
For items $(iii)$ and $(iv)$, we use the fact that the determinant is the sum of products of coefficients of the matrix. More precisely, the Leibniz formula states that
\begin{equation} \label{defdetM}
\det(M) := \sum_{\sigma \in S_n} \sgn(\sigma) \prod_{i=1}^n M_{i,\sigma(i)},
\end{equation}
where $S_n$ is the set of permutations of $\{1,\dots,n\}$ and $\sgn$ is the signature. So the difference between the determinants of two matrices $M_1$ and $M_2$ is a sum of products of coefficients of $M_1$ and $M_2-M_1$, and each product contains at least one coefficient of $M_2-M_1$. With this observation, $(iii)$ and $(iv)$ follow from $(i)$ and $(ii)$.
The items $(iii)$ and $(iv)$ show that both $\det(J)$ and $\det(\Jac)$ are close to $1$ --- say in $(1/2,2)$ --- as long as $\epsilon_0$ is small enough. This implies that
\begin{equation} \label{diff1/detJ}
\left|\frac1{\det(\Jac)} - \frac1{\det(J)}\right| = \left|\frac{\det(J) - \det(\Jac)}{\det(\Jac)\det(J)} \right| \lesssim |\partial_t b^t| + |t\nabla_{p,t} \nabla_p b^t|
\end{equation}
by $(iv)$. Cramer's rule states that each coefficient of $M^{-1}$ is the quotient of a linear combination of products of coefficients of $M$ over $\det(M)$. By applying Cramer's rule to $\Jac$ and $J$, \eqref{diff1/detJ}, and $(ii)$, we obtain $(v)$.
Finally, the bounds on $\nabla \det(J)$ and $\nabla J^{-1}$ are obtained by taking the gradient respectively in \eqref{defdetM} and in Cramer's rule.
$\square$
\begin{lemma} \label{LprrhoS}
For any $p\in P$ and $t\in P^\bot \setminus \{0\}$, we have
\begin{equation} \label{prrhoS1}
(1-C\epsilon_0) |t| \leq \dist(\rho_{\mathcal S}(p,t),\Gamma_{\mathcal S}) \leq |\rho_{\mathcal S}(p,t) - \mathfrak b(p)| \leq (1+C\epsilon_0) |t|
\end{equation}
and
\begin{equation} \label{prrhoS4}
|\rho_{\mathcal S}(p,t) - \mathfrak b(p) - (0,t)| \leq C\epsilon_0 |t|,
\end{equation}
where $C>0$ depends only on $n$ (and $\eta$).
\end{lemma}
\noindent {\em Proof: } The lemma is an analogue of Lemma 3.40 in \cite{DFM3}. But since the lemma is key to understand why $\rho_\mathcal S$ is a bi-Lipschitz change of variable, and since it is much easier in our case, we prove it carefully.
By definition of $\rho_{\mathcal S}$,
\[\rho_{\mathcal S}(p,t) - \mathfrak b(p) - (0,t) = (-t(\nabla b^t(p))^T, b^t(p) - b(p)).\]
So the mean value theorem applied to the continuous function $t \mapsto b^t(p)$ [recall that $b$ is Lipschitz and $b^t$ is the convolution of $b$ with a mollifier, so we even have a uniform convergence of $b^t$ to $b$] entails that
\begin{equation} \label{prrhoS3}
\begin{split}
|\rho_{\mathcal S}(p,t) - \mathfrak b(p) - (0,t)| & \leq |t\nabla b^t(p)| + |b^t(p) - b(p)| \\
& \leq |t\nabla b^t(p)| + |t| \sup_{s\in (0,|t|)} |\partial_s b^s(p)| \lesssim \epsilon_0|t|
\end{split}
\end{equation}
by \eqref{brbdd}. Therefore, \eqref{prrhoS4} is proven and we have
\begin{equation} \label{prrhoS2}
|\rho_{\mathcal S}(p,t) - \mathfrak b(p)| \leq (1+C\epsilon_0) |t|,
\end{equation}
which is the upper bound in \eqref{prrhoS1}. The middle bound of \eqref{prrhoS1} is immediate, since $\mathfrak b(p) \in \Gamma_{\mathcal S}$. It remains thus to prove the lower bound in \eqref{prrhoS1}.
Let $q\in P$ be such that $|\rho_{\mathcal S}(p,t) - \mathfrak b(q)| = \dist(\rho_{\mathcal S}(p,t), \Gamma_{\mathcal S})$. We know that
\[|\mathfrak b(q) - \mathfrak b(p)| \leq |\mathfrak b(q) - \rho_{\mathcal S}(p,t)| + |\rho_{\mathcal S}(p,t) - \mathfrak b(p)| \leq 2|\rho_{\mathcal S}(p,t) - \mathfrak b(p)| \leq 3|t|,\]
if $\epsilon_0\ll 1$ is small enough, hence $|q-p| \leq 3|t|$ too. So
\[\begin{split}
\dist(\rho_{\mathcal S}(p,t), \Gamma_{\mathcal S}) & = |\rho_{\mathcal S}(p,t) - \mathfrak b(q)| \geq |\mathfrak b(p) - \mathfrak b(q) + (0,t)| - |\rho_{\mathcal S}(p,t) - \mathfrak b(p) - (0,t)| \\
& \geq |b(p) - b(q) + t| - C\epsilon_0|t|
\end{split}\]
by \eqref{prrhoS4}. But by Lemma \ref{LALip}, the function $b$ is $2\epsilon_0$-Lipschitz, so we can continue with
\[\dist(\rho_{\mathcal S}(p,t), \Gamma_{\mathcal S}) \geq (1-C\epsilon_0)|t| - |b(p)-b(q)| \geq (1-C\epsilon_0) |t| - 2\epsilon_0 |p-q| \geq (1 - C'\epsilon_0) |t|. \]
The lemma follows.
$\square$
\begin{lemma}
The map $\rho_{\mathcal S}$ is a bi-Lipschitz change of variable that maps $P$ to $\Gamma_{\mathcal S}$.
\end{lemma}
\noindent {\em Proof: } See Theorem 3.53 in \cite{DFM3} for more details. We shall show that $\rho_{\mathcal S}$ is a bi-Lipschitz change of variable from $P \times (0,\infty)$ to
\[\Omega^+_{\mathcal S} := \{(p,t) \in P \times P^\bot, \, t>b(p)\}\]
and a similar argument also gives that $\rho_{\mathcal S}$ is a bi-Lipschitz change of variable from $P \times (-\infty,0)$ to
\[\Omega^-_{\mathcal S} := \{(p,t) \in P \times P^\bot, \, t<b(p)\}.\]
The lemma follows because we know that $\rho_{\mathcal S}$ is continuous on $P \times P^\bot$.
First, we know by the lower bound in \eqref{prrhoS1} that the range $\rho_{\mathcal S}(P\times (0,\infty))$ never intersects $\Gamma_{\mathcal S}$, so since $P\times (0,\infty)$ is connected and $\rho_{\mathcal S}$ is continuous, the set $\rho_{\mathcal S}(P\times (0,\infty))$ is included in either $\Omega^+_{\mathcal S}$ or $\Omega^-_{\mathcal S}$. A quick analysis of $\rho_{\mathcal S}$, for instance \eqref{prrhoS4}, shows that $\rho_{\mathcal S}(P\times (0,\infty)) \subset \Omega^+_{\mathcal S}$.
At any point $(p,t) \in P\times (0,\infty)$ the Jacobian of $\rho_{\mathcal S}$ is close to the identity, as shown by $(i)$ and $(ii)$ of Lemma \ref{LestonJ}. So $\rho_{\mathcal S}$ is a local diffeomorphism, and the inversion function theorem shows that there exists a neighborhood $V_{p,t} \subset P\times (0,\infty)$ of $(p,t)$ such that $\rho_{\mathcal S}$ is a bijection between $V_{p,t}$ and its range $\rho(V_{p,t})$, which is a neighborhood of $\rho_{\mathcal S}(p,t)$. Since the Jacobian is uniformly close to the identity, all the $\rho_{\mathcal S}:\, V_{p,t} \mapsto \rho(V_{p,t})$ are bi-Lipschitz maps with uniform Lipschitz constant.
If $z\in \Omega^+_{\mathcal S}$, we define the degree of the map $\rho_{\mathcal S}$ as
\[N(z) := \text{``number of points $(p,t) \in P\times (0,\infty)$ such that $\rho(p,t) = z$''} \in \mathbb N \cup \{+\infty\}.\]
We want to prove that $N(z)$ is constantly equal to 1. If this is true, then the lemma is proven and we can construct the inverse $\rho^{-1}$ locally by inverting the appropriate bijection $\rho_{\mathcal S}:\, V_{p,t} \mapsto \rho(V_{p,t})$.
We already know that the number of points that satisfy $\rho(p,t) = z$ is countable, because we can cover $P\times (0,\infty)$ by a countable union of the neighborhoods $V_{p,t}$ introduced before. Moreover, if $N(z) \geq v >0$, then we can find $v$ points $(p_i,t_i)\in P\times (0,\infty)$ such that $\rho_{\mathcal S}(p_i,t_i) = z$ and so $v$ disjoint neighborhoods $V_{p_i,t_i}$ of $(p_i,t_i)$. Consequently, each point $z'$ in the neighborhood $\bigcap_i \rho_{\mathcal S}(V_{p_i,t_i})$ of $z$ satisfies $N(z') \geq v$. This proves that $N$ is constant on any connected component, that is
\[ \text{$N$ is constant on $\Omega^+_{\mathcal S}$}.\]
It remains to prove that $N(z_0) = 1$ for one point $z_0$ in $\Omega^+_{\mathcal S}$. Take $p_0$ far from the support of $b$, for instance $\dist(p_0,\Pi(Q(\mathcal S))) \geq 99 \ell(Q(\mathcal S))$ and $t_0 = \ell(Q(\mathcal S))$. In this case, we have $\rho_{\mathcal S}(p_0,t_0) = (p_0,t_0)$ and $\dist(\rho_{\mathcal S}(p_0,t_0),\Gamma_{\mathcal S}) = t_0$. Let $(p_1,t_1) \in P \times (0,\infty)$ be such that $\rho_{\mathcal S}(p_1,t_1) = (p_0,t_0)$; the bound \eqref{prrhoS1} entails that $|t_1-t_0| \leq C\epsilon_1 |t_0| \leq \ell(Q(\mathcal S))$ and \eqref{prrhoS4} implies that $|p_1 - p_0| \leq C\epsilon_0 |t_1| \leq \ell(Q(\mathcal S))$. Those conditions force $p_1$ to stay far away from the support of $b$, which implies that $\rho_{\mathcal S}(p_1,t_1) = (p_1,t_1) = (p_0,t_0)$. We just proved that $N(p_0,t_0) = 1$, as desired.
$\square$
\subsection{Properties of the operator $L_\mathcal S$}
\begin{lemma} \label{LdefAS}
Let $L = - \mathop{\operatorname{div}} \mathcal A \nabla$ be a uniformly elliptic operator satisfying \eqref{defelliptic} and \eqref{defbounded} on $\Omega$. Construct on $\rho_{\mathcal S}^{-1}(\Omega)$ the operator $L_{\mathcal S} =-\mathop{\operatorname{div}} \mathcal A_{\mathcal S} \nabla$ with
\begin{equation} \label{defAS}
\mathcal A_{\mathcal S}(p,t) := \det(\Jac(p,t)) \Jac^{-T}(p,t) \mathcal A(\rho_{\mathcal S}(p,t)) \Jac^{-1}(p,t) \qquad \text{ for } (p,t) \in \rho_{\mathcal S}^{-1}(\Omega).
\end{equation}
Then $L_{\mathcal S}$ is the conjugate operator of $L$ by $\rho_{\mathcal S}$, that is, $u\circ \rho_{\mathcal S}$ is a weak solution to $L_{\mathcal S}(u\circ \rho_{\mathcal S}) = 0$ in $\rho_{\mathcal S}^{-1}(\Omega)$ if and only if $u$ is a weak solution to $Lu = 0$ in $\Omega$.
\end{lemma}
\noindent {\em Proof: } The map $\rho_{\mathcal S}$ is a bi-Lipschitz change of variable on $\mathbb R^n = P \times P^\bot$, so the construction \eqref{defAS} properly defines a matrix of coefficients in $L^\infty(\rho_{\mathcal S}^{-1}(\Omega))$.
Let $u$ be a weak solution to $Lu=0$ in $\Omega$. Then, for any $\varphi \in C^\infty_0(\rho_{\mathcal S}^{-1}(\Omega))$, we have
\[\begin{split}
\iint_{\mathbb R^n} \mathcal A_{\mathcal S} \nabla (u\circ \rho_{\mathcal S}) \cdot \nabla \varphi \, dp\, dt &
= \iint_{\mathbb R^n} \det(\Jac) \Jac^{-T} (\mathcal A\circ \rho_{\mathcal S}) \Jac^{-1} \nabla (u\circ \rho_{\mathcal S}) \cdot \nabla \varphi \, dp\, dt \\
& = \iint_{\mathbb R^n} \det(\Jac) (\mathcal A\circ \rho_{\mathcal S}) \Jac^{-1} \nabla (u\circ \rho_{\mathcal S}) \cdot \Jac^{-1} \nabla \varphi \, dp\, dt \\
& = \iint_{\mathbb R^n} \det(\Jac) (\mathcal A\circ \rho_{\mathcal S}) (\nabla u \circ \rho_{\mathcal S}) \cdot (\nabla [\varphi \circ \rho_{\mathcal S}^{-1}] \circ \rho_{\mathcal S})\, dp\, dt
\end{split}\]
because $\nabla (f\circ \rho_{\mathcal S})$ is equal to the matrix multiplication $\Jac (\nabla f \circ \rho_{\mathcal S})$ by definition of the Jacobian. Recall that $\det(\Jac) >0$, so doing the change of variable $X=\rho_{\mathcal S}(p,t)$ gives
\begin{equation} \label{defAS1}
\iint_{\mathbb R^n} \mathcal A_{\mathcal S} \nabla (u\circ \rho_{\mathcal S}) \cdot \nabla \varphi \, dp\, dt = \iint_{\mathbb R^n} \mathcal A \nabla u \cdot \nabla [\varphi \circ \rho_{\mathcal S}^{-1}] \, dX.
\end{equation}
The function $\varphi \circ \rho_{\mathcal S}^{-1}$ may not be smooth anymore, but is still compactly supported in $\Omega$ and in $W^{1,\infty}(\Omega) \subset W^{1,2}_{loc}(\Omega)$, so $\varphi \circ \rho_{\mathcal S}^{-1}$ is a valid test function for the weak solution $u$, and so the right-hand side of \eqref{defAS1} is 0. We conclude that
\[\iint_{\mathbb R^n} \mathcal A_{\mathcal S} \nabla (u\circ \rho_{\mathcal S}) \cdot \nabla \varphi \, dp\, dt = 0\]
for any $\varphi \in C^\infty_0(\rho_{\mathcal S}^{-1}(\Omega))$, hence $u\circ \rho_{\mathcal S}$ is a weak solution to $L_{\mathcal S}(u\circ \rho_{\mathcal S}) = 0$ in $\rho_{\mathcal S}^{-1}(\Omega)$.
The same reasoning shows that $u$ is a weak solution to $Lu=0$ in $\Omega$ whenever $u\circ \rho_{\mathcal S}$ is a weak solution to $L_{\mathcal S}(u\circ \rho_{\mathcal S}) = 0$ in $\rho_{\mathcal S}^{-1}(\Omega)$. The lemma follows.
$\square$
We want to say that $\mathcal A_{\mathcal S}$ satisfies the same Carleson-type condition as $\mathcal A \circ \rho_\mathcal S$. For instance, we would like the condition $\delta \nabla \mathcal A \in CM_{\Omega}$ --- which implies $(\delta\circ \rho_\mathcal S) \nabla (\mathcal A\circ \rho_\mathcal S) \in CM_{\rho_{\mathcal S}^{-1}(\Omega)}$ --- to give that $(\delta\circ \rho_\mathcal S) \nabla \mathcal A_{\mathcal S} \in CM_{\rho_{\mathcal S}^{-1}(\Omega)}$.
However, this is not true, for the simple reason that the Carleson estimates on $\Jac$ are related to the set $\mathbb R^n \setminus P$ while the ones on $\mathcal A \circ \rho_\mathcal S$ are linked to the domain $\rho_{\mathcal S}^{-1}(\Omega)$. Since $\mathcal A_\mathcal S$
is the product of these two objects, we only have Carleson estimates for $\mathcal A_{\mathcal S}$ in the areas of $\mathbb R^n$ where $\rho_{\mathcal S}(\partial \Omega)$ looks like $P$.
\begin{lemma} \label{LprAS}
Assume that the matrix function $\mathcal A$ defined on $\Omega$ satisfies \eqref{defelliptic} and \eqref{defbounded}, and can be decomposed as $\mathcal A=\mathcal B+\mathcal C$ where
\begin{equation} \label{prAS2}
|\delta \nabla \mathcal B| + |\mathcal C| \in CM_{\Omega}(M)
\end{equation}
Then the matrix $\mathcal A_{\mathcal S}$ constructed in \eqref{defAS} can also be decomposed as $\mathcal A_{\mathcal S} = \mathcal B_{\mathcal S} + \mathcal C_{\mathcal S}$ where $\mathcal B_{\mathcal S}$ satisfies \eqref{defelliptic} and \eqref{defbounded} with the constant $2C_{\mathcal A}$, $|t\nabla \mathcal B_\mathcal S|$ is uniformly bounded by $CC_{\mathcal A}$, and
\begin{equation} \label{prAS1}
(|t \nabla \mathcal B_{\mathcal S}| + |\mathcal C_{\mathcal S}|){\mathds 1}_{\rho^{-1}_{\mathcal S}(W^*_\Omega(\mathcal S))} \in CM_{\mathbb R^n \setminus P}(C(\varepsilon_0^2+M))
\end{equation}
for a constant $C$ that depends only on $n$ and the ellipticity constant $C_\mathcal A$.
\end{lemma}
\noindent {\em Proof: }
Let $\mathcal A = \mathcal B + \mathcal C$ as in the lemma. Without loss of generality, we can choose $\mathcal B$ to be a smooth average of $\mathcal A$ (see Lemma \ref{LellipB=ellipA}) and so $\mathcal B$ satisfies \eqref{defelliptic} and \eqref{defbounded} with the constant $C_{\mathcal A}$ and $|\delta \nabla \mathcal B| \leq CC_\mathcal A$. Define
\[\mathcal B_{\mathcal S} := \det(J) J^{-T} (\mathcal B \circ \rho_{\mathcal S}) J^{-1} \]
and of course $\mathcal C_{\mathcal S} := \mathcal A_{\mathcal S} - \mathcal B_{\mathcal S}$. First, Lemma \ref{LestonJ} shows that $\det(J)$ is close to $1$ and $J^{-1}$ is close to the identity, so $\mathcal B_{\mathcal S}$ satisfies \eqref{defelliptic} and \eqref{defbounded} with the constant $(1+C
$\square$
silon_0)C_\mathcal A \leq 2C_\mathcal A$.
Moreover, the same Lemma \ref{LestonJ} gives that $|\det(J)| + \|J^{-1}\| \leq 3$, $\|\Jac - I\| \leq 3$, and $\displaystyle |\nabla_{p,t} \det(J)| + \||\nabla_{p,t} J^{-1}|\| \lesssim |\nabla_{p,t} \nabla_p b^t|$, and hence
\[|\nabla \mathcal B_{\mathcal S}| \lesssim |(\nabla \mathcal B) \circ \rho_{\mathcal S}| + |\nabla_{p,t} \nabla_p b^t|,\]
and
\[\begin{split}
|\mathcal C_{\mathcal S}| & \lesssim |\det(\Jac) - \det(J)| + \|\Jac^{-1} - J^{-1}\| + |\mathcal C \circ \rho_{\mathcal S}| \\
& \lesssim |\partial_t b^t| + |t\nabla_{p,t} \nabla_p b^t| + |\mathcal C \circ \rho_{\mathcal S}|.
\end{split}\]
Lemma \ref{LbrCM} entails that $|t\nabla_{p,t} \nabla_p b^t| \lesssim \varepsilon_0 \leq 1 \leq C_{\mathcal A}$, so $\mathcal B_{\mathcal S}$ verifies $|t\nabla \mathcal B_{\mathcal S}| \lesssim C_\mathcal A$, and thus \eqref{prAS1} is the only statement we still have to prove. Lemma \ref{LbrCM} also implies that $|\partial_t b^t| + |t\nabla_{p,t} \nabla_p b^t| \in CM_{P\times (0,\infty)}(C\varepsilon_0^2)$. Therefore, it suffices to establish that
\begin{equation} \label{prAS3}
(|t\nabla \mathcal B \circ \rho_{\mathcal S}| + |\mathcal C \circ \rho_{\mathcal S}|) {\mathds 1}_{\rho^{-1}_{\mathcal S}(W^*_\Omega(\mathcal S))} \in CM_{P\times (0,\infty)}(CM).
\end{equation}
Take $p_0 \in P$ and $r_0>0$. We want to show that
\begin{equation} \label{prAS4}
\iint_{B(p_0,r_0) \cap \rho^{-1}_{\mathcal S}(W^*_\Omega(\mathcal S))} (|t\nabla \mathcal B \circ \rho_{\mathcal S}|^2 + |\mathcal C \circ \rho_{\mathcal S}|^2) \, \frac{dt}{t} \, dp \leq CM(r_0)^{n-1}.
\end{equation}
If $\rho_{\mathcal S}(B(p_0,r_0)) \cap W^*_{\Omega}(\mathcal S) = \emptyset$, the left hand side above is zero and there is nothing to prove. Otherwise, pick a point $X\in \rho_{\mathcal S}(B(p_0,r_0)) \cap W^*_{\Omega}(\mathcal S)$. The fact that $X\in \rho_{\mathcal S}(B(p_0,r_0))$ means that
\begin{equation} \label{prAS5}
|X-\mathfrak b(p_0)| \leq (1+C\varepsilon_0)r_0.
\end{equation}
since $\rho_{\mathcal S}(p_0) = \mathfrak b(p_0)$ and $\|\Jac - I\| \leq C\varepsilon_0$ by Lemma \ref{LestonJ}. Because $b$ is $2\varepsilon_0$-Lipschitz with $\varepsilon_0 \ll 1$, we deduce
\begin{equation} \label{prAS6}
|X-\mathfrak b(\Pi(X))| \leq (1+\varepsilon_0) |X-\mathfrak b(p_0)| \leq (1+2C\varepsilon_0)r_0.
\end{equation}
The fact that $X\in W^*_{\Omega}(\mathcal S)$ implies by \eqref{claimdPib} that
\begin{equation} \label{prAS7}
\delta(X) \leq (1+2\varepsilon_0)|X-\mathfrak b(\Pi(X))| \leq 2r_0
\end{equation}
thanks to \eqref{prAS6}.
Moreover, if $x \in \partial \Omega$ is such that $|X-x| = \delta(X)$,
\begin{multline} \label{prAS8}
|x-\mathfrak b(p_0)| \leq |x-\mathfrak b(\Pi(X))| + |\mathfrak b(\Pi(X)) - \mathfrak b(p_0)| \\ \leq 2\varepsilon_0\delta(X) + (1+\varepsilon_0) |\Pi(X) - p_0| \leq \frac12 r_0 + (1+\varepsilon_0) |X-\mathfrak b(p_0)| \leq 2r_0
\end{multline}
by using in order \eqref{claimbxd}, the fact that $b$ is $2\varepsilon_0$-Lipschitz, \eqref{prAS7}, and \eqref{prAS5}. Fix $X_0 \in \rho_{\mathcal S}(B(p_0,r_0)) \cap W^*_{\Omega}(\mathcal S)$ and $x_0\in\partial\Omega$ such that $|X_0 - x_0| = \delta(X_0)$. The inequalities \eqref{prAS5} and \eqref{prAS8} show that,
\[|X-x_0| \leq |X-\mathfrak b(p_0)| + |x_0 - \mathfrak b(p_0)| \leq 4r_0 \qquad \text{ for } X\in \rho_{\mathcal S}(B(p_0,r_0)) \cap W^*_{\Omega}(\mathcal S),\]
that is
\begin{equation} \label{prAS9}
\rho_{\mathcal S}(B(p_0,r_0)) \cap W^*_{\Omega}(\mathcal S) \subset B(x_0,4r_0).
\end{equation}
We are now ready to conclude. We make the change of variable $X = \rho_{\mathcal S}(p,s)$ in \eqref{prAS4}, and since $\rho_\mathcal S$ is a bi-Lipschitz change of variable that almost preserves the distances (because $\|\Jac - I\| \leq C\varepsilon_0 \ll 1$), we obtain
\[\begin{split}
\iint_{B(p_0,r_0) \cap \rho^{-1}_{\mathcal S}(W^*_\Omega(\mathcal S))} & (|t\nabla \mathcal B \circ \rho_{\mathcal S}|^2 + |\mathcal C \circ \rho_{\mathcal S}|^2) \, \frac{dt}{t} \, dp \\
& \leq 2 \iint_{B(x_0,4r_0) \cap W^*_\Omega(\mathcal S)} (|\dist(X,\Gamma_{\mathcal S}) \nabla \mathcal B|^2 + |\mathcal C|^2) \, \frac{dX}{\dist(X,\Gamma_{\mathcal S})} \\
& \leq 4 \iint_{B(x_0,4r_0)} (|\delta\nabla \mathcal B|^2 + |\mathcal C|^2) \, \frac{dX}{\delta(X)} \leq CM(r_0)^{n-1}
\end{split}\]
by using \eqref{claimdGPib} and then the fact that $|\delta\nabla \mathcal B| + |\mathcal C| \in CM_{\Omega}(M)$. The lemma follows.
$\square$
\subsection{Properties of the composition of the smooth distance by $\rho_\mathcal S$}
The change of variable $\rho_{\mathcal S}$ maps $P\times (P^\bot\setminus \{0\})$ to $\mathbb R^n \setminus \Gamma_{\mathcal S}$, so for any $X\in \mathbb R^n \setminus \Gamma_{\mathcal S}$, the quantities $N_{\rho^{-1}_{\mathcal S}(X)}(Y)$ and $\Lambda(\rho^{-1}_{\mathcal S}(X))$ make sense as $N_{p,t}(Y)$ and $\Lambda(p,t)$, respectively, where $(p,t) = \rho^{-1}_{\mathcal S}(X)$. With this in mind, we have the following result.
\begin{lemma} \label{LprDb}
For any $Q\in \mathcal S$, we have
\begin{equation} \label{prDb1}
\iint_{W_\Omega(Q)} \left| \frac{\nabla D_\beta(X)}{D_\beta(X)} - \frac{N_{\rho^{-1}_{\mathcal S}(X)}(X)}{\dist(X,\Lambda(\rho^{-1}_{\mathcal S}(X)))} \right|^2\, \delta(X) \, dX \leq C |\alpha_{\sigma,\beta}(Q)|^2 \sigma(Q),
\end{equation}
with a constant $C>0$ that depends only on $n$, $C_\sigma$, and $\beta$.
\end{lemma}
\noindent {\em Proof: }
The lemma is a consequence of Corollary \ref{CestD} and the definition of $\rho_{\mathcal S}$.
First, Lemma \ref{lemWOG} (d) entails that $W_\Omega(Q) \subset \mathbb R^n \setminus \Gamma_{\mathcal S}$, which means that the quantities $N_{\rho^{-1}_{\mathcal S}(X)}$ and $\Lambda(\rho^{-1}_{\mathcal S}(X))$ are well defined in \eqref{prDb1}. Let $X\in W_\Omega(Q)$ and set $(p,t) = \rho^{-1}_{\mathcal S}(X)$.
On one hand, Lemma \ref{LprrhoS} gives that
\[\dist(X,\Gamma_{\mathcal S}) \leq |X- \mathfrak b(p)| \leq (1+C\varepsilon_0) |t| \leq (1+C'\varepsilon_0) \dist(X,\Gamma_{\mathcal S})\]
and
\[|X - \mathfrak b(p) - (0,t)| \leq C\varepsilon_0|t|.\]
By projecting the left-hand side on $P$, the latter implies that
\[|\Pi(X) - p| \leq C\varepsilon_0|t|.\]
On the other hand, since $X\in W_\Omega(Q)$, Lemma \ref{LclaimPib} gives that
\[\dist(X,\Gamma_{\mathcal S}) \leq |X- \mathfrak b(\Pi(X))| \leq (1+2\varepsilon_0) \delta(X) \leq (1+C\varepsilon_0) \dist(X,\Gamma_{\mathcal S}),\]
and, if $x \in Q$ is such that $|X-x| = \delta(X)$,
then by \eqref{claimbxd},
\[|\mathfrak b(\Pi(X)) - x| \leq 2\varepsilon_0 \delta(X),\]
which implies that
\[|\Pi(X) - \Pi(x)| \leq 2\varepsilon_0 \delta(X).\]
Altogether, we have
\[ \delta(X) (1-C\varepsilon_0) \leq |t| \leq (1+C\varepsilon_0) \delta(X)\]
and
\[\dist(p,\Pi(Q)) \leq |p - \Pi(x)|\le\abs{p-\Pi(X)}+\abs{\Pi(X)-\Pi(x)} \leq C\varepsilon_0 \delta(X).\]
If we throw in the fact that $\delta(X) \in [\ell(Q)/2,\ell(Q)]$ by definition of $W_\Omega(Q)$, then we easily observe that $p$ and $t$ satisfy the assumptions of Corollary \ref{CestD}, and so
\[\left| \frac{\nabla D_\beta(X)}{D_\beta(X)} - \frac{N_{\rho^{-1}_{\mathcal S}(X)}(X)}{\dist(X,\Lambda(\rho^{-1}_{\mathcal S}(X)))} \right| \leq C \ell(Q)^{-1} \alpha_{\sigma,\beta}(Q) \qquad \text{ for } X\in W_\Omega(Q).\]
We conclude that
\[\begin{split}
\iint_{W_\Omega(Q)} \left| \frac{\nabla D_\beta(X)}{D_\beta(X)} - \frac{N_{\rho^{-1}_{\mathcal S}(X)}(X)}{\dist(X,\Lambda(\rho^{-1}_{\mathcal S}(X)))} \right|^2\, \delta(X) \, dX
& \leq C |W_\Omega(Q)| |\ell(Q)^{-1} \alpha_{\sigma,\beta}(Q)|^2 \ell(Q) \\
& \leq C |\alpha_{\sigma,\beta}(Q)|^2 \sigma(Q)
\end{split}\]
because $|W_\Omega(Q)| \approx \sigma(Q) \ell(Q)$ by \eqref{prWO2} and \eqref{defADR}. The lemma follows.
$\square$
\begin{lemma} \label{LprNpt}
We have
\begin{equation}\label{prNpt1}
\iint_{\rho_{\mathcal S}^{-1}(W_\Omega(\mathcal S))} \left| \frac{\nabla t}{t} - \frac{\Jac(p,t) N_{p,t}(\rho_{\mathcal S}(p,t))}{\dist(\rho_{\mathcal S}(p,t), \Lambda(p,t))}\right|^2 \abs{t} \, dt\, dp \leq C(\varepsilon_0)^2 \sigma(Q(\mathcal S))
\end{equation}
where $C>0$ depends only on $n$ (and $\eta$).
\end{lemma}
\noindent {\em Proof: }
From the definition, we can see that $\Lambda(p,t)$ is the affine plane that goes through the point $\mathfrak b^t(p)$ and whose directions are given by the vectors $(q,q \nabla b^t(p))$, that is $\Lambda(p,t)$ is the codimension 1 plane that goes through $\mathfrak b^t(p)$ and with upward unit normal vector
\[N_{p,t} = \frac{1}{|(-(\nabla b^t(p))^T,1)|} \begin{pmatrix} -\nabla b^t(p) \\ 1 \end{pmatrix} = \frac{1}{\sqrt{1+|\nabla b^t(p)|^2}} \begin{pmatrix} -\nabla b^t(p) \\ 1 \end{pmatrix} \]
The vector function $N_{p,t}(X)$ is just $+N_{p,t}$ or $-N_{p,t}$, depending whether $X$ lies above or below $\Lambda(p,t)$.
Observe that $\rho_{\mathcal S}(p,t) - \mathfrak b^t(p) = t(-(\nabla b^t(p))^T,1)$, which means that $\mathfrak b^t(p)$ is the projection of $\rho_{\mathcal S}(p,t)$ onto $\Lambda(p,t)$ and that
\[\dist(\rho_{\mathcal S}(p,t), \Lambda(p,t)) = |t| |(-(\nabla b^t(p))^T,1)| = |t| \sqrt{1+|\nabla b^t(p)|^2}.\]
Moreover, $\rho_{\mathcal S}(p,t)$ lies above $\Lambda(p,t)$ if $t>0$ and below otherwise, that is
\[N_{p,t}(\rho_{\mathcal S}(p,t)) = \sgn(t) N_{p,t} = \frac{\sgn(t)}{\sqrt{1+|\nabla b^t(p)|^2}} \begin{pmatrix} -\nabla b^t(p) \\ 1 \end{pmatrix}.\]
From all this, we deduce
\begin{equation}\label{prNpt2}
\begin{split}
\frac{J(p,t) N_{p,t}(\rho_{\mathcal S}(p,t))}{\dist(\rho_{\mathcal S}(p,t), \Lambda(p,t))} & = \frac{1}{t(1+|\nabla b^t(p)|^2)} \begin{pmatrix} I & \nabla b^t(p) \\ -(\nabla b^t(p))^T & 1 \end{pmatrix} \begin{pmatrix} -\nabla b^t(p) \\ 1 \end{pmatrix} \\
& = \frac{1}{t} \begin{pmatrix} 0_{\mathbb R^{n-1}} \\ 1 \end{pmatrix} = \frac{\nabla t}t.
\end{split}\end{equation}
Recall that $|\Jac - J| \lesssim |\partial_t b^t| + |t \nabla_{p,t} \nabla_p b^t|$. Together with \eqref{prNpt2}, we obtain that the left-hand side of \eqref{prNpt1} is equal to
\begin{multline}
I = \iint_{\rho^{-1}_{\mathcal S}(W_\Omega(\mathcal S))} \left| \frac{\Jac(p,t) - J(p,t)}{t(1+|\nabla b^t(p)|^2)} \begin{pmatrix} -\nabla b^t(p) \\ 1 \end{pmatrix} \right|^2 \abs{t} \, dt\, dp
\\ \lesssim \iint_{\rho_{\mathcal S}^{-1}(W_\Omega(\mathcal S))} (|\partial_t b^t|^2 + |t \nabla_{p,t} \nabla_p b^t|^2) \, \frac{dt}{\abs{t}} \, dp.
\end{multline}
Take $X_0 \in W_{\Omega}(\mathcal S)$, and notice that the set $W_{\Omega}(\mathcal S)$ is included in $B(\mathfrak b(\Pi(X_0)),4\ell(Q(\mathcal S)))$ by definition of $W_\Omega(\mathcal S)$ and by \eqref{claimbxd}. Since the Jacobian of $\rho_{\mathcal S}$ is close to the identity, $\rho_{\mathcal S}^{-1}$ almost preserves the distance, and hence $\rho^{-1}_{\mathcal S}(W_{\Omega}(\mathcal S)) \subset B(\Pi(X_0),5\ell(Q(\mathcal S)))$. We conclude that
\[I \lesssim \iint_{B(\Pi(X_0),5\ell(Q(\mathcal S)))} (|\partial_t b^t|^2 + |t \nabla_{p,t} \nabla_p b^t|^2) \, \frac{dt}{t} \, dp \lesssim (\varepsilon_0)^2 \ell(Q(\mathcal S))^{n-1} \lesssim (\varepsilon_0)^2 \sigma(Q(\mathcal S))\]
by Lemma \ref{LbrCM} and then \eqref{defADR}. The lemma follows.
$\square$
\section{The flat case.}
\label{Sflat}
In this section, we intend to prove an analogue of Theorem \ref{Main1} when the boundary is flat, that is when the domain is $\Omega_0:= \mathbb R^n_+$. This is our main argument on the PDE side (contrary to other sections which are devoted to geometric arguments) and the general case of Chord-Arc Domains is eventually brought back to this simpler case.
We shall bring a little bit of flexibility in the following manner. We will allow $\Omega$ to be different from $\mathbb R^n_+$, but we shall stay away from the parts where $\partial \Omega$ differs from $\partial\mathbb R^n_+$ with some cut-off functions. More exactly, we shall use cut-off functions $\phi$ that guarantee that $\delta(X) := \dist(X,\partial \Omega) \approx t$ whenever $X = (x,t) \in \supp \phi$. We shall simply use $\mathbb R^{n-1}$ for $\partial\mathbb R^n_+=\mathbb R^{n-1}\times\set{0}$. We start with the precise definition of the cut-off functions that we are allowing.
\begin{definition} \label{defcutoffboth}
We say that $\phi \in L^\infty(\Omega)$ is a cut-off function associated to both $\partial \Omega$ and $\mathbb R^{n-1}$ if $0 \leq \phi \leq 1$, and there is a constant $C_\phi \geq 1$ such that $|\nabla \phi| \leq C_\phi \delta^{-1}$,
\begin{equation} \label{t=dist}
(C_\phi)^{-1} |t| \leq \delta(X) \leq C_\phi |t| \qquad \text{ for all } X=(x,t) \in \supp \phi,
\end{equation}
and there exists a collection of dyadic cubes $\{Q_i\}_{i\in I_\phi}$ in $\mathbb D_{\partial \Omega}$ such that
\begin{equation} \label{Qioverlap}
\text{$\{Q_i\}_{i\in I_\phi}$ is finitely overlapping with an overlap of at most $C_\phi$,}
\end{equation}
and
\begin{equation} \label{1phiCM3}
\Omega \cap (\supp \phi) \cap \supp (1-\phi) \subset \bigcup_{i\in I_\phi} W_\Omega^{**}(Q_i).
\end{equation}
\end{definition}
The condition \eqref{t=dist} allows us to say that
\begin{multline} \label{BOtoBR}
\text{ if, for $x\in \partial \Omega$ and $r>0$, $B(x,r) \cap \supp \phi \neq \emptyset$,} \\
\text{ then there exists $y\in \mathbb R^{n-1}$ such that $B(x,r) \subset B(y,Cr)$;}
\end{multline}
so we can pass from Carleson measures in $\Omega$ to Carleson measure in $\mathbb R^n \setminus \mathbb R^{n-1}$. For instance, we have
\begin{equation} \label{CMOCMR} \begin{array}{c}
f \in CM_{\Omega}(M) \implies f\phi, \, f{\mathds 1}_{\supp \phi} \in CM_{\mathbb R^n \setminus \mathbb R^{n-1}}(C'_\phi M), \\
\delta \nabla g \in CM_{\Omega}(M) \implies t \phi \nabla g \in CM_{\mathbb R^n \setminus \mathbb R^{n-1}}(C'_\phi M).
\end{array} \end{equation}
and vice versa. The conditions \eqref{Qioverlap} and \eqref{1phiCM3} ensure that ${\mathds 1}_{(\supp \phi) \cap \supp (1-\phi)}$ (and hence $\delta \nabla \phi$) satisfies the Carleson measure condition on $\Omega$. So by \eqref{CMOCMR},
\begin{equation} \label{1phiCM}
|t \nabla \phi| + {\mathds 1}_{\supp \nabla \phi } + {\mathds 1}_{(\supp \phi) \cap \supp (1-\phi)} \in CM_{ \mathbb R^n \setminus \mathbb R^{n-1}}(C'_\phi).
\end{equation}
And if the support of $\phi$ is contained in a ball of radius $r$ centered on $\partial \Omega$, then
\begin{equation} \label{1phiCM2}
\iint_\Omega \big(|\nabla \phi|t + |t\nabla \phi|^2\big) \, \frac{dt}{t}\, dy \lesssim r^{n-1}.
\end{equation}
We are ready to state the main result of the section.
\begin{lemma} \label{lemflat}
Let $\Omega$ be a Chord-Arc Domain and let $L=-\mathop{\operatorname{div}} \mathcal A \nabla$ be a uniformly elliptic operator on $\Omega$, that is, $\mathcal A$ verifies \eqref{defelliptic} and \eqref{defbounded}. Assume that the $L^*$-elliptic measure $\omega_{L^*}\in A_\infty(\sigma)$, where $L^*$ is the adjoint operator of $L$, and $\sigma$ is an Ahlfors regular measure on $\partial\Omega$. Let $\phi$ be as in Definition \ref{defcutoffboth} and be supported in a ball $B:=B(x,r)$ centered on the boundary $\partial \Omega$. Assume that the coefficients $\mathcal A$ can be decomposed as $\mathcal A = \mathcal B + \mathcal C$ where
\begin{equation} \label{NB+CareCM}
(|t\nabla \mathcal B| + |\mathcal C|){\mathds 1}_{\supp \phi} \in CM_{\mathbb R^n \setminus \mathbb R^{n-1}}(M)\footnote{We actually only need the Carleson condition on the last column of $\mathcal B$ and $\mathcal C$ (instead of the full matrix).}.
\end{equation}
Then for any non-negative nontrivial weak solution $u$ to $Lu = 0$ in $2B \cap \Omega$ with zero trace on $\partial \Omega \cap 2B$, one has
\begin{equation} \label{flat1}
\iint_{\Omega} |t|\left|\frac{\nabla u}{u} - \frac{\nabla t}{t} \right|^2 \phi^2 \, dt\, dy = \iint_{\Omega} |t|\left|\nabla \ln\Big( \frac{u}{|t|} \Big) \right|^2 \phi^2 \, dt\, dy \leq C(1+M) r^{n-1},\end{equation}
where $C$ depends only on the dimension $n$, the elliptic constant $C_\mathcal A$, the 1-sided CAD constants of $\Omega$, the constant $C_\phi$ in Definition \ref{defcutoffboth}, and the intrinsic constants in $\omega_{L^*} \in A_\infty(\sigma)$.
\end{lemma}
The above lemma is the analogue of Theorem 2.21 from \cite{DFMGinfty} in our context, and part of our proof will follow the one from \cite{DFMGinfty} but a new argument is needed to treat the non-diagonal structure of $\mathcal A$.
We need $\omega_{L^*} \in A_\infty(\sigma)$ for the proof of the following intermediate lemma. Essentially, we need that the logarithm of the Poisson kernel lies in $BMO$. Let us state and prove it directly in the form that we need.
\begin{lemma} \label{lemlogk}
Let $\Omega$, $L$, $\phi$, $B:=B(x,r)$, and $u$ be as in Lemma \ref{lemflat}. Assume that $\omega_{L^*}\in A_\infty(\sigma)$ as in Lemma \ref{lemflat}. Then there exists $K:= K(u,B)$ such that
\[\iint_{\Omega} |\nabla \phi| \left|\ln\Big( \frac{K u}{|t|} \Big) \right| dt\, dy \leq C r^{n-1},\]
where $C$ depends only on $n$, $C_\mathcal A$, the 1-sided CAD constants of $\Omega$, the constant $C_\phi$ in Definition \ref{defcutoffboth}, and the intrinsic constants in $\omega_{L^*} \in A_\infty(\sigma)$.
\end{lemma}
\noindent {\em Proof of Lemma \ref{lemlogk}.} The first step is to replace $Ku/t$ by the elliptic measure. Take $X_0 \in B(x,r) \cap \Omega$ and $X_1\in \Omega \setminus B(x,4r)$ to be two corkscrew points for $x$ at the scale $r$.
If $G(Y,X)$ is the Green function associated to $L$ in $\Omega$ and $\{\omega^X_*\}_{X\in \Omega}$ is the elliptic measure associated to the adjoint $L^*$, the CFMS estimates (Lemma \ref{LCFMS}) entail, for $Y \in W_{\Omega}^{**}(Q) \cap B$, that
\[\frac{u(Y)}{u(X_0)} \approx \frac{G(Y,X_1)}{G(X_0,X_1)} \approx \frac{\ell(Q)}{r} \frac{\sigma(\Delta)}{\sigma(Q)} \frac{\omega_*^{X_1}(Q)}{\omega_*^{X_1}(\Delta)},\]
where $\Delta = B\cap \partial \Omega$.
Moreover, if $Y=(y,t)\in \supp \phi \cap W^{**}_\Omega(Q)$, then $\ell(Q) \approx |t|$ by \eqref{t=dist}. Altogether, we have
\begin{equation} \label{CFMSa}
\frac{u(Y)}{|t|} \approx \frac{u(X_0)}{r} \frac{\sigma(\Delta)}{\sigma(Q)} \frac{\omega_*^{X_1}(Q)}{\omega_*^{X_1}(\Delta)} \qquad \text{ for } Y=(y,t) \in \supp \phi \cap W_{\Omega}^{**}(Q).
\end{equation}
Set $K := r/u(X_0)$, and $I_\phi':=\set{i\in I_\phi: W_\Omega^{**}(Q_i) \text{ intersects }\supp\nabla\phi}$,
\begin{equation} \label{logk1}
\begin{split}
\iint_{\Omega} |\nabla \phi| \left|\ln\Big( \frac{K u}{|t|} \Big) \right| dt\, dy
& \lesssim \sum_{i\in I_\phi'} \ell(Q_i)^{-1} \int_{W_\Omega^{**}(Q_i)} \left|\ln\Big( \frac{K u}{|t|} \Big) \right| \, dt\, dy \\
& \lesssim \sum_{i\in I_\phi'} \sigma(Q_i) \left[ 1 + \left|\ln\Big(\frac{\sigma(\Delta)}{\sigma(Q_i)} \frac{\omega_*^{X_1}(Q_i)}{\omega_*^{X_1}(\Delta)} \Big) \right| \right]
\end{split}
\end{equation}
by \eqref{1phiCM3}, \eqref{CFMSa}, and the fact that $|W^{**}_\Omega(Q_i)| \approx \ell(Q_i) \sigma(Q_i)$.
The second step is to use the fact that $\omega^{X_1}_*$ is $A_\infty$-absolutely continuous with respect to $\sigma$. To that objective, we define for $k\in \mathbb Z$
\[\mathcal I_k := \Big\{i \in I_\phi', \, 2^{k} \leq \frac{\sigma(\Delta)}{\sigma(Q_i)} \frac{\omega_*^{X_1}(Q_i)}{\omega_*^{X_1}(\Delta)} \leq 2^{k+1}\Big\}\]
and then $E_k:= \bigcup_{i\in \mathcal I_k} Q_i$. Since the collection $\{Q_i\}_{i\in I_\phi}$ is finitely overlapping, due to \eqref{Qioverlap}, the bound \eqref{logk1} becomes
\begin{equation} \label{logk2}
\begin{split}
\iint_{\Omega} |\nabla \phi| \left|\ln\Big( \frac{K u}{|t|} \Big) \right| dt\, dy
& \lesssim \sum_{k\in \mathbb Z} (1+|k|) \sigma(E_k).
\end{split}
\end{equation}
We want thus to estimate $\sigma(E_k)$. Observe first that for any $i\in I_\phi'$, $W_\Omega^{**}(Q_i)$ intersects $\supp \phi \subset B$. Therefore $Q_i$ and $E_k$ have to be inside $\Delta^*:=C\Delta$ for a large $C$ depending only on the constant $K^{**}$ in \eqref{defWO**}. The finite overlapping \eqref{Qioverlap} also implies that
\[\frac{\sigma(\Delta^*)}{\sigma(E_k)} \frac{\omega_*^{X_1}(E_k)}{\omega_*^{X_1}(\Delta^*)} \approx 2^k.\]
For $k\geq 0$, we have
\begin{equation} \label{logk3}
\frac{\sigma(E_k)}{\sigma(\Delta^*)} \approx 2^{-k} \frac{\omega_*^{X_1}(E_k)}{\omega_*^{X_1}(\Delta^*)} \lesssim 2^{-k}.
\end{equation}
The elliptic measure $\omega_*^{X_1}$ is $A_\infty$-absolutely continuous with respect to $\sigma$ by assumption, so for $k\leq 0$, we use the characterization (iv) from Theorem 1.4.13 in \cite{KenigB} to deduce
\begin{equation} \label{logk4}
\frac{\sigma(E_k)}{\sigma(\Delta^*)} \lesssim \left(\frac{\omega_*^{X_1}(E_k)}{\omega_*^{X_1}(\Delta^*)}\right)^\theta \approx 2^{k\theta} \left(\frac{\sigma(E_k)}{\sigma(\Delta^*)}\right)^\theta \lesssim 2^{k\theta}
\end{equation}
for some $\theta\in (0,1)$ independent of $x$, $r$, and $k$. We reinject \eqref{logk3} and \eqref{logk4} in \eqref{logk2} to conclude that
\[\begin{split}
\iint_{\Omega} |\nabla \phi| \left|\ln\Big( \frac{K u}{|t|} \Big) \right| dt\, dy
& \lesssim \sigma(\Delta^*) \sum_{k\in \mathbb Z} (1+|k|) 2^{-|k|\theta} \lesssim \sigma(\Delta^*) \lesssim r^{n-1}
\end{split}\]
because $\sigma$ is Ahlfors regular. The lemma follows.
$\square$
\noindent {\em Proof of Lemma \ref{lemflat}.} The proof is divided in two parts: the first one treats the case where $\mathcal B_{i,n}=0$ for $i<n$,
and the second one shows that we can come back to the first case by a change of variable, by adapting the method presented in \cite{FenDKP}.
Observe that
$\phi$ can be decomposed as $\phi=\phi_+ + \phi_-$ where $\phi_+ = {\mathds 1}_{t>0} \phi$ and $\phi_- = {\mathds 1}_{t<0} \phi$. Both $\phi_+$ and $\phi_-$ are as in Definition \ref{defcutoffboth} with constant $C_\phi$. So it is enough to prove the lemma while assuming
\begin{equation} \label{phit>0}
\supp \phi \subset \{t\geq 0\} = \overline{\mathbb R^n_+}.
\end{equation}
The proof of the case $\supp \phi \subset \mathbb R^n_-$ is of course identical up to obvious changes.
\noindent {\bf Step 1:} Case where $\mathcal B_{i,n} = 0$ for $i<n$ on $\supp \phi$ and $\mathcal B$ satisfies \eqref{defelliptic} and \eqref{defbounded} with the same constant $C_\mathcal A$ as $\mathcal A$. If $b:= \mathcal B_{n,n}$, this assumption on $\mathcal B$ implies that
\begin{equation} \label{prBinS1}
\mathcal B \nabla t \cdot \nabla v \, \phi^2= b \, \partial_t v \, \phi^2.
\end{equation}
whenever $v\in W^{1,1}_{loc}(\Omega)$ and
\begin{equation} \label{Blambda}
(C_\mathcal A)^{-1} \leq b \leq C_\mathcal A.
\end{equation}
We want to prove \eqref{flat1} with the assumption \eqref{phit>0}, and for this, we intend to establish that
\begin{equation} \label{flat2}
T:= \iint_{\mathbb R^n_+} t\left| \nabla \ln\Big( \frac{u}{t} \Big) \right|^2 \phi^2 \, dt\, dy \lesssim T^{\frac12} r^{\frac{n-1}2} + r^{n-1},
\end{equation}
which implies the desired inequality \eqref{flat1} provided that $T$ is {\em a priori} finite. However, that is not necessarily the case, because some problems can occur when $t$ is close to 0.
So we take $\psi \in C^\infty(\mathbb R)$ such that $\psi(t) = 0$ when $t<1$, $\psi(t) = 1$ when $t\geq 2$, and $0 \leq \psi \leq 1$. We construct then $\psi_k(Y) = \psi(2^k\delta(Y))$ and $\phi_k = \phi \psi_k$.
It is not very hard to see that
\[\supp \nabla \psi_k := \{X\in \Omega, \, 2^{-k} \leq \delta(X) \leq 2^{1-k}\} \subset \bigcup_{Q\in \mathbb D_k} W_\Omega^{**}(Q)\]
and therefore that $\phi_k$ is as in Definition \ref{defcutoffboth} (with $C_{\phi_k} = C_\phi +1$). The quantity
\[T(k) := \iint_{\mathbb R^n_+} t \left| \nabla \ln\Big( \frac{u}{t} \Big) \right|^2 \phi_k^2 \, dt\, dy
= \iint_{\mathbb R^n_+} t \left| \frac{\nabla u}{u} - \frac{\nabla t}{t} \right|^2 \phi_k^2 \, dt\, dy\]
is finite, because $\phi_k$ is compactly supported in both $\Omega$ and $\mathbb R^{n}_+$ (the fact that $\nabla u/u$ is in $L^2_{loc}(\Omega)$ for a non-negative nontrivial solution to $Lu=0$ is a consequence of the Caccioppoli inequality and the Harnack inequality). So, we prove \eqref{flat2} for $T(k)$ instead of $T$, which implies $T(k) \lesssim r^{n-1}$ as we said, and take $k\to \infty$ to deduce \eqref{flat1}.
We are now ready for the core part of the proof, which can be seen as an elaborate integration by parts. Our previous discussion established that we (only) have to prove \eqref{flat2}, and that we can assume that $\phi$ is compactly supported in $\Omega \cap \mathbb R^{n}_+$.
We use the ellipticity of $\mathcal A$ and the boundedness of $b$ to write
\[\begin{split}
T & = \iint_{\mathbb R^n_+} t \left| \frac{\nabla u}{u} - \frac{\nabla t}{t}\right|^2 \phi^2 \, dt\, dy
\le C_{\mathcal A}^2\iint_{\mathbb R^n_+}\frac{\mathcal A}{b}\br{\frac{\nabla u}{u}-\frac{\nabla t}{t}}\cdot\br{\frac{\nabla u}{u}-\frac{\nabla t}{t}}\phi^2dtdy
\\
& = C_{\mathcal A}^2\br{ \iint_{\mathbb R^n_+} \frac{\mathcal A \nabla u}{bu} \cdot \left( \frac{\nabla u}{u} - \frac{\nabla t}{t} \right) \, t\phi^2 \, dt\, dy
- \iint_{\mathbb R^n_+} \frac{\mathcal A \nabla t}{bt} \cdot \nabla \ln \Big( \frac{u}{t}\Big) \, t\phi^2 \, dt\, dy} \\
& :=C_{\mathcal A}^2( T_1 + T_2).
\end{split}\]
We deal first with $T_2$. We use the fact that $\mathcal A = \mathcal B + \mathcal C$ and \eqref{prBinS1} to obtain
\[T_2 = - \iint_{\mathbb R^n_+} \partial_t \ln \Big( \frac{u}{t}\Big) \, \phi^2 \, dt\, dy
- \iint_{\mathbb R^n_+} \frac{\mathcal C}{b} \nabla t \cdot \nabla \ln \Big( \frac{u}{t}\Big) \, \phi^2 \, dt\, dy := T_{21} + T_{22}.\]
The term $T_{22}$ can be then bounded with the help of the Cauchy-Schwarz inequality as follows
\[T_{22} \leq \|b^{-1}\|_\infty \left( \iint_{\mathbb R^{n}_+} |\mathcal C|^2\phi^2 \, \frac{dt}{t} \, dy \right)^\frac12 \left( \iint_{\mathbb R^{n}_+} t \left|\nabla \ln \Big( \frac{u}{t}\Big)\right|^2 \, \phi^2 \, dt \, dy \right)^\frac12 \lesssim r^{\frac{n-1}2}T^{\frac12}\]
by \eqref{NB+CareCM}.
As for $T_{21}$, observe that multiplying by any constant $K$ inside the logarithm will not change the term (because we differentiate the logarithm). As a consequence, we have
\[\begin{split}
T_{21} & = - \iint_{\mathbb R^{n}_+} \partial_t \ln \Big( \frac{Ku}{t}\Big) \, \phi^2 \, dt\, dy = \iint_{\mathbb R^{n}_+} \ln \Big( \frac{Ku}{t}\Big) \, \partial_t [\phi^2] \, dt\, dy \\
& \leq \iint_{\mathbb R^{n}_+} |\nabla \phi| \left|\ln \Big( \frac{Ku}{t}\Big)\right| \, dt\, dy \lesssim r^{n-1}
\end{split}\]
by successively using integration by parts and Lemma \ref{lemlogk}.
We turn to $T_1$, and we want now to use the fact that $u$ is a weak solution to $Lu = 0$. So we notice that
\[\begin{split}
T_1 & = - \iint_{\mathbb R^{n}_+} \frac{\mathcal A}{b} \nabla u \cdot \nabla \Big( \frac{t}{u} \Big) \, \phi^2 \, dt\, dy \\
& = - \iint_{\mathbb R^{n}_+} \mathcal A \nabla u \cdot \nabla \Big( \frac{t\phi^2}{bu} \Big) \, dt\, dy + 2 \iint_{\mathbb R^{n}_+} \mathcal A \nabla u \cdot \nabla \phi \, \Big(\frac{t\phi}{bu} \Big) \, dt\, dy - \iint_{\mathbb R^n_+} \mathcal A \nabla u \cdot \nabla b \, \Big(\frac{t\phi^2}{b^2u} \Big) \, dt\, dy \\
& := - T_{11} + 2 T_{12} - T_{13}.
\end{split}\]
Since $\phi$ is compactly supported, we have that $u > \epsilon_\phi$ on $\supp \phi$ (by the Harnack inequality, see Lemma \ref{Harnack}) and $\nabla u\in L^2_{loc}(\Omega)$ (by the Caccioppoli inequality, see Lemma \ref{Caccio}). Therefore $t\phi^2/(bu)$ is a valid test function for the solution $u\in W^{1,2}_{loc}(\Omega)$ to $Lu=0$, and then $T_{11} = 0$.
As for $T_{12}$, we have
\[\begin{split}
T_{12} & = \iint_{\mathbb R^{n}_+} \frac{\mathcal A}{b} \left( \frac{\nabla u}{u} -\frac{\nabla t}{t} \right) \cdot \nabla \phi \, (t\phi) \, dt\, dy + \iint_{\mathbb R^{n}_+} \frac{\mathcal A}b \nabla t \cdot \nabla \phi \, \phi \, dt\, dy := T_{121} + T_{122}.
\end{split}\]
The term $T_{121}$ is similar to $T_{22}$. The boundedness of $\mathcal A/b$ and the Cauchy-Schwarz inequality infer that
\[T_{121} \leq \left( \iint_{\mathbb R^{n}_+} t|\nabla \phi|^2 \, \frac{dt}{t} \, dy \right)^\frac12 \left( \iint_{\mathbb R^{n}_+} t \left|\nabla \ln \Big( \frac{u}{t}\Big)\right|^2 \, \phi^2 \, dt \, dy \right)^\frac12 \lesssim r^{\frac{n-1}2}T^{\frac12}\]
by \eqref{1phiCM2}. The quantity $T_{122}$ is even easier since
\[T_{122} \lesssim \iint_{\mathbb R^{n}_+} |\nabla \phi| \, dt\, dy \lesssim r^{n-1},\]
again by \eqref{1phiCM2}. It remains to bound $T_{13}$. We start as for $T_{12}$ by writing
\[\begin{split}
T_{13} & = \iint_{\mathbb R^{n}_+} \mathcal A \left( \frac{\nabla u}{u} - \frac{\nabla t}{t} \right) \cdot \nabla b \, \frac{t\phi^2}{b^2} \, dt\, dy + \iint_{\mathbb R^{n}_+} \mathcal A \nabla t \cdot \nabla b \, \frac{\phi^2}{b^2} \, dt\, dy := T_{131} + T_{132}.
\end{split}\]
The term $T_{131}$ is like $T_{121}$, and by using $t\nabla b \in CM_{\mathbb R^n_+}$ instead of $t\nabla \phi \in CM_{\mathbb R^n_+}$, we obtain that $T_{131} \lesssim r^{(n-1)/2}T^{1/2}$. The term $T_{132}$ does not contain the solution $u$, but it is a bit harder than $T_{122}$ to deal with, because $\nabla b$ is not as nice as $\nabla \phi$. We use $\mathcal A = \mathcal B + \mathcal C$ and \eqref{prBinS1} to get
\[\begin{split}
T_{132} & = \iint_{\mathbb R^{n}_+} (\partial_t b) \, \frac{\phi^2}{b} \, dt\, dy + \iint_{\mathbb R^{n}_+} \mathcal C \nabla t \cdot \nabla b \, \frac{\phi^2}{b^2} \, dt\, dy := T_{1321} + T_{1322}.
\end{split}\]
We easily deal with $T_{1322}$ by using the Cauchy-Schwarz inequality as follows:
\[T_{1322} \leq \|b^{-1}\|_\infty^2 \left( \iint_{\mathbb R^{n}_+} |\mathcal C|^2 \phi^2 \, \frac{dt}{t} \, dy \right)^\frac12 \left( \iint_{\mathbb R^{n}_+} |t\nabla b|^2 \phi^2 \, \frac{dt}{t} \, dy\right)^\frac12 \lesssim r^{n-1}\]
by \eqref{NB+CareCM}. As last, observe that
\[T_{1321} = \iint_{\mathbb R^{n}_+} \partial_t [\ln(b)\phi^2] \, \, dt\, dy - \iint_{\mathbb R^{n}_+} \partial_t \phi \, \phi \ln(b) \, dt\, dy, \]
but the first integral in the right-hand side above is zero, so
\[|T_{1321}| \lesssim \|\ln(b)\|_\infty \iint_{\mathbb R^{n}_+} |\partial_t \phi| \, dt\, dy \lesssim r^{n-1}, \]
by \eqref{1phiCM2} and the fact that $b\approx 1$. The inequality \eqref{flat1} under the three assumptions \eqref{prBinS1}, \eqref{Blambda}, and \eqref{NB+CareCM} follows.
\noindent {\bf Step 2: We can assume that $\norm{t \abs{\nabla_y\mathcal B}}_\infty$ is as small as we want.}
We construct
\begin{equation} \label{defwtA}
\widetilde{\mathcal A} := \mathcal A \phi + (1-\phi) I,
\end{equation}
where $I$ is the identity matrix. Note that $\widetilde{\mathcal A}$ is elliptic with the same elliptic constant $C_\mathcal A$ as $\mathcal A$. We choose then a bump function $\theta \in C^\infty_0(\mathbb R^n)$ supported in $B(0,1/10)$, that is $0 \leq \theta \leq 1$ and $\iint_{\mathbb R^n} \theta \, dX = 1$. We construct $\theta_{y,t}(z,s) = t^{-n}\theta\big(\frac{z-y}{t},\frac{s-t}{t}\big)$, which satisfies $\iint_{\mathbb R^n} \theta_{y,t} = 1$, and then
\begin{equation} \label{defwtBbbb}
\widetilde{\mathcal B}(y,t) := \iint_{\mathbb R^n} \widetilde{\mathcal A} \, \theta_{y,Nt} \, dz\, ds.
\end{equation}
for a large $N$ to be fixed later to ensure that \eqref{defJacrho} below is invertible. Since $\widetilde{\mathcal B}$ is some average of $\widetilde{\mathcal A}$, then
\begin{equation} \label{BellipasA}
\text{$\widetilde{\mathcal B}$ is elliptic and bounded with the same constant $C_{\mathcal A}$ as $\widetilde{\mathcal A}$ and $\mathcal A$.}
\end{equation}
The construction is similar to the one done in Lemma \ref{LellipB=ellipA}, so we do not write the details again. Observe also that
\begin{equation} \label{NBissmall2}
|t \nabla_y \widetilde{\mathcal B}(y,t)| \lesssim \frac1{N} \|\widetilde{\mathcal A}\|_\infty \quad \text{ and } \quad |t\, \partial_t \widetilde{\mathcal B}(y,t)| \lesssim \|\widetilde{\mathcal A}\|_\infty.
\end{equation}
In addition, we have that
\[|\nabla \widetilde{\mathcal B}(y,t)| \lesssim t^{-n} \iint_{B_{Nt/10}(y,Nt)} \Big( |\nabla \mathcal B| \phi + |\nabla \phi| + \frac1t |\mathcal C| \phi \Big) dz\, ds,\]
and if $\widetilde {\mathcal C}$ denotes $(\mathcal A - \widetilde{\mathcal B}) {\mathds 1}_{\supp \phi}$, the Poincar\'e inequality entails that
\begin{multline*}\int_{\Delta(x,t)} \int_{t}^{3t} |\widetilde{\mathcal C}(z,s)|^2 \frac{ds}{s}\, dz
\\ \lesssim \int_{\Delta(x,2Nt)} \int_{t}^{9Nt} \Big( s^2 |\nabla \mathcal B|^2 \phi^2 + |\mathcal C|^2 \phi^2 + s^2 |\nabla \phi|^2 + |{\mathds 1}_{(\supp \phi) \cap \supp (1-\phi)}|^2 \Big) \frac{ds}{s}\, dz,
\end{multline*}
which means that $t|\nabla \widetilde{\mathcal B}| + |\widetilde{\mathcal C}| \in CM_{\mathbb R^n_+}$ by \eqref{NB+CareCM}, and \eqref{1phiCM}.
\noindent {\bf Step 3: The change of variable.} We write $\widetilde{\mathcal B}$ as the block matrix
\begin{equation} \label{defBi}
\widetilde{\mathcal B} = \begin{pmatrix} B_1 & B_2 \\ B_3 & b \end{pmatrix},
\end{equation}
where $b$ is the scalar function $\widetilde{\mathcal B}_{n,n}$, so $B_1$ is a matrix of order $n-1$, $B_2$ and $B_3$ are respectively a vertical and a horizontal vector of length $n-1$. We use $v$ for the horizontal vector $v = - (B_2)^T/b$, and we define
\begin{equation} \label{defrho12}
\rho(y,t) := (y+t v(y,t), t),
\end{equation}
which is a Lipschitz map from $\mathbb R^n_+$ to $\mathbb R^n_+$ (since $v$ and $t |\nabla v|$ are uniformly bounded, see \eqref{BellipasA} and \eqref{NBissmall2}), and we compute its Jacobian
\begin{equation} \label{defJacrho}
Jac_\rho := \begin{pmatrix} I + t\nabla_y v & 0 \\ v+t\partial_t v & 1 \end{pmatrix}.
\end{equation}
We can choose $N$ big enough in \eqref{NBissmall2} such that $Jac_\rho$ is invertible and even $\det(Jac_\rho) \geq 1/2$. Let $J_\rho$ be the matrix
\begin{equation} \label{defJrho}
J_\rho := \begin{pmatrix} I & 0 \\ v & 1 \end{pmatrix}.
\end{equation}
We easily have that
\begin{equation} \label{prJrho}
|Jac_\rho - J_\rho| + |\det(Jac_{\rho})^{-1} - 1| \lesssim |t\nabla v| \lesssim |t\nabla \widetilde {\mathcal B}| .
\end{equation}
We aim to use $\rho$ for a change of variable. If $u$ is a weak solution to $L=-\operatorname{div} \mathcal A \nabla$, then $u\circ \rho^{-1}$ is a solution to $L_\rho = -\operatorname{div} (\mathcal A_\rho \circ \rho^{-1}) \nabla$ where
\begin{equation}
\mathcal A_\rho = \det(Jac_\rho)^{-1} (Jac_\rho)^T \mathcal A \, Jac_\rho.
\end{equation}
We want to compute $\mathcal A_\rho$. To lighten the notation, we write $\mathcal O_{CM}$ for a scalar function, a vector, or a matrix which satisfies the Carleson measure condition with respect to $\mathbb R^{n}_+$, i.e. $\mathcal O_{CM}$ can change from one line to another as long as $\mathcal O_{CM} \in CM_{\mathbb R^n_+}$. So \eqref{prJrho} becomes
\begin{equation} \label{prJrho2}
Jac_\rho = J_\rho + \mathcal O_{CM} \quad \text{ and } \quad \det(Jac_\rho)^{-1} = 1 + \mathcal O_{CM}.
\end{equation}
Remember that by construction, the matrix $\mathcal A$ equals $\widetilde{\mathcal B} + \widetilde{\mathcal C} = \widetilde{\mathcal B} + \mathcal O_{CM}$ on $\supp \phi$, and that $Jac_\rho$ and $\mathcal A$ are uniformly bounded, so
\begin{equation}\label{eqBrho}
\begin{split}
({\mathds 1}_{\supp \phi}) \mathcal A_\rho
& = {\mathds 1}_{\supp \phi} \begin{pmatrix} I & v^T \\ 0 & 1 \end{pmatrix} \begin{pmatrix} B_1 & B_2 \\ B_3 & b \end{pmatrix} \begin{pmatrix} I & 0 \\ v & 1 \end{pmatrix} + \mathcal O_{CM} \\
& = {\mathds 1}_{\supp \phi} \begin{pmatrix} B_1 + v^TB_3 + B_2v + bvv^T & B_2+bv^T \\ B_3 +bv & b \end{pmatrix} + \mathcal O_{CM} \\
& = {\mathds 1}_{\supp \phi} \underbrace{\begin{pmatrix} b(B_1 + v^TB_3 + B_2v + bvv^T) & 0 \\ B_3-(B_2)^T & b \end{pmatrix}}_{:=\mathcal B_\rho} + \mathcal O_{CM}
\end{split}
\end{equation}
with our choices of $v$. We write $\mathcal C_\rho$ for $(\mathcal A_\rho - \mathcal B_\rho){\mathds 1}_{\supp \phi} = \mathcal O_{CM}$. The matrices $\mathcal B_\rho \circ \rho^{-1}$ and $\mathcal C_\rho \circ \rho^{-1}$ satisfy \eqref{NB+CareCM} (because the Carleson measure condition is stable under bi-Lipschitz transformations) and $\mathcal B_\rho \circ \rho^{-1}$ has the structure \eqref{prBinS1} as in Step 1. So Step 1 gives that
\begin{equation} \label{flat4}
\iint_{\mathbb R^{n}_+} s \left| \nabla \ln\Big( \frac{u\circ \rho^{-1}}{s} \Big) \right|^2 \phi^2\circ \rho^{-1} \, ds\, dz \lesssim r^{n-1}.\end{equation}
If $s$ (and $t$) is also used, by notation abuse, for the projection on the last coordinate, then
\[\begin{split}
\iint_{\mathbb R^{n}_+} t \left| \nabla \ln\Big( \frac{u}{t} \Big) \right|^2 \phi^2 \, dt\, dy
& = \iint_{\mathbb R^{n}_+} t \left| \frac{\nabla u}{u} - \frac{\nabla t}{t} \right|^2 \phi^2 \, dt\, dy \\
& = \iint_{\mathbb R^{n}_+} t \left| \frac{Jac_\rho \nabla (u\circ \rho^{-1}) \circ \rho}{u} - \frac{\nabla t}{t} \right|^2 \phi^2 \, dt\, dy \\
&\leq \iint_{\mathbb R^{n}_+} t \left| \frac{Jac_\rho \nabla (u\circ \rho^{-1}) \circ \rho}{u} - \frac{Jac_\rho(\nabla s)\circ \rho}{s\circ \rho} \right|^2 \phi^2 \, dt\, dy \\
& \hspace{4.5cm}
+ \iint_{\mathbb R^{n}_+} t \left| \frac{Jac_\rho(\nabla s)\circ \rho}{s\circ \rho} - \frac{\nabla t}{t} \right|^2 \phi^2 \, dt\, dy \\
& := I_1 + I_2.
\end{split}\]
Yet, $\rho$ is a bi-Lipschitz change of variable, so $Jac_\rho$ and $\det(Jac_\rho)^{-1}$ are uniformly bounded, and we have
\begin{multline}
I_1 \lesssim \iint_{\mathbb R^{n}_+} t \left| \frac{\nabla (u\circ \rho^{-1}) \circ \rho}{u} - \frac{(\nabla s)\circ \rho}{s\circ \rho} \right|^2 \phi^2 \, dt\, dy \\
\lesssim \iint_{\mathbb R^{n}_+} s \left|\frac{\nabla (u\circ \rho^{-1})}{u \circ \rho^{-1}} - \frac{\nabla s}{s}\right|^2 \phi^2\circ \rho^{-1} \, ds\, dz \\
= \iint_{\mathbb R^{n}_+} s \left| \nabla \ln\Big( \frac{u\circ \rho^{-1}}{s} \Big) \right|^2 \phi^2\circ \rho^{-1} \, ds\, dz \lesssim r^{n-1}
\end{multline}
by \eqref{flat4}. As for $I_2$, we simply observe that $s\circ \rho = t$ and
\[ Jac_\rho(\nabla s)\circ \rho = \nabla t\]
to deduce that $I_2 = 0$.
The lemma follows.
$\square$
\section{Proof of Theorem \ref{Main4}}\label{SecPfofThm4}
In this section we prove Theorem \ref{Main4}, using the same strategy as our proof of Theorem \ref{Main1}. As mentioned in the introduction, we shall explain how to change the 5-step sketch of proof given in Subsection~\ref{Sproof} to prove Theorem \ref{Main4}.
Fix a bounded solution $u$ of $Lu=0$ in $\Omega$ with $\norm{u}_{L^\infty(\Omega)}\le 1$ and a ball $B=B(x_0,r)$ centered on $\partial\Omega$ with radius $r$. By the same argument as Step 1 in Subsection~\ref{Sproof}, it suffices to show that there exists some constant $C\in(0,\infty)$ depending only on $n$, $M$ and the UR constants of $\partial\Omega$, such that
\begin{equation}
I := \sum_{Q\in\mathbb D_{\partial\Omega}(Q_0)}\iint_{W_\Omega(Q)}\abs{\nabla u(X)}^2\delta(X)dX\le C\sigma(Q_0)
\end{equation}
for any cube $Q_0\in\mathbb D_{\partial\Omega}$ that satisfies $Q_0\subset\frac87B\cap\partial\Omega$ and $\ell(Q_0)\le 2^{-8}r$.
Then observe that if $E\subset\Omega$ is a Whitney region, that is, $E\subset\frac74 B$ and $\diam(E)\le K\delta(E)$, then
\begin{equation}\label{eqWhitney}
\iint_E\abs{\nabla u}^2\delta\, dX\le C_K\diam(E)^{-1}\iint_{E^*}\abs{u}^2dX\le C_K\delta(E)^{n-1},
\end{equation}
by the Caccioppoli inequality and $\norm{u}_{L^\infty(\Omega)}\le 1$, where $E^*$ is an enlargement of $E$. This bound \eqref{eqWhitney} is the analogue of \eqref{Main1e}, and proves Step 2.
Step 3 is not modified. We pick $0 < \epsilon_1 \ll 1$ and we use the corona decomposition constructed in Section~\ref{SUR} to decompose $I$ as follows.
\[
I=\sum_{Q\in\mathcal B(Q_0)}\iint_{W_\Omega(Q)}\abs{\nabla u}^2\delta\, dX+\sum_{\mathcal S\in \mathfrak S(Q_0)}\iint_{W_\Omega(\mathcal S)}\abs{\nabla u}^2\delta\, dX=: I_1+\sum_{\mathcal S\in \mathfrak S(Q_0)} I_\mathcal S.
\]
By \eqref{eqWhitney} and \eqref{packingBS},
\[
I_1\le C\sum_{Q\in\mathcal B(Q_0)}\ell(Q)^{n-1}\le C\sigma(Q_0).
\]
Step 4 is significantly simpler for Theorem \ref{Main4}, because we do not need any estimate on the smooth distance $D_\beta$, but the spirit is the same. That is, by using the bi-Lipschitz map $\rho_\mathcal S$ constructed in Section~\ref{Srho}, $I_\mathcal S$ can be turned into an integral on $\mathbb R^n \setminus \mathbb R^{n-1}$, which can be estimated by an integration by parts argument. More precisely, for any fixed $\mathcal S\in\mathfrak S(Q_0)$,
\begin{multline*}
I_\mathcal S=\iint_{\rho_\mathcal S^{-1}(W_\Omega(\mathcal S))}\abs{(\nabla u)\circ\rho_\mathcal S(p,t)}^2\delta\circ\rho_\mathcal S(p,t)\det\Jac(p,t)dpdt\\
\le 2\iint\abs{\nabla( u\circ\rho_\mathcal S(p,t))}^2\dist(\rho_\mathcal S(p,t),\mathcal Gamma_{\mathcal S})\br{\Psi_{\mathcal S}\circ\rho_{\mathcal S}(p,t)}^2dpdt\\
\le 3\iint\abs{\nabla v(p,t)}^2\abs{t}\phi(p,t)^2dpdt, \qquad v=u\circ\rho_\mathcal S, \quad\phi=\Psi_\mathcal S\circ\rho_\mathcal S
\end{multline*}
by \eqref{claimdGPib}, Lemmata \ref{lemWOG} (d) and \ref{LestonJ}, as well as \eqref{prrhoS1}, for $\epsilon_0$ sufficiently small.
The fifth step consists roughly in proving the result in $\mathbb R^n \setminus \mathbb R^{n-1}$. The function $\phi$ is the same as the one used to prove Theorem \ref{Main1}, in particular it is a cutoff function associated to both $\rho_\mathcal S^{-1}(\partial\Omega)$ and $\mathbb R^{n-1}$ as defined in Definition \ref{defcutoffboth}, and it satisfies
\begin{equation}\label{eqsuppphi}
\supp \phi\subset \rho_{\mathcal S}^{-1}(W_\Omega^*(\mathcal S)) ,
\end{equation}
and
\begin{equation}\label{eqphi}
\iint\abs{\nabla\phi}dtdp+\iint\abs{\nabla\phi}^2tdtdp\lesssim\sigma(Q(\mathcal S)),
\end{equation}
where the implicit constant depends on $n$ and the AR constant in \eqref{defADR}.
Notice that $v=u\circ\rho_{\mathcal S}$ is a bounded solution of $L_{\mathcal S}=-\operatorname{div}\mathcal A_\mathcal S\nabla$ that satisfies $\norm{v}_{L^\infty}\le1$, where $\mathcal A_\mathcal S$ is defined in \eqref{defAS}. By Lemma \ref{LprAS}, $I_\mathcal S\le C\sigma(Q(\mathcal S))$ will follow from the following lemma, which is essentially a result in $\mathbb R^n \setminus \mathbb R^{n-1}$.
\begin{lemma}\label{LCarlesonflat}
Let $L=-\operatorname{div} \mathcal A \nabla$ be a uniformly elliptic operator on $\Omega_{\mathcal S}:=\rho^{-1}_{\mathcal S}(\Omega)$. Assume that the coefficients $\mathcal A$ can be decomposed as $\mathcal A = \mathcal B + \mathcal C$ where
\begin{equation} \label{B+CCMflat}
(|t\nabla \mathcal B| + |\mathcal C|){\mathds 1}_{\supp \phi} \in CM_{\mathbb R^n \setminus \mathbb R^{n-1}}(M),
\end{equation}
where $\phi=\Psi_\mathcal S\circ\rho_{\mathcal S}$ is as above.
Then for any solution $v$ of $Lv=0$ in $\rho^{-1}_{\mathcal S}(\Omega)$ that satisfies $\norm{v}_{L^\infty}\le 1$, there holds
\begin{equation} \label{vflat}
\iint_{ \mathbb R^n \setminus \mathbb R^{n-1}} \abs{\nabla v}^2\phi^2 { |t|}\,dtdy\leq C(1+M) \sigma(Q(\mathcal S)),
\end{equation}
where $C$ depends only on the dimension $n$, the elliptic constant $C_\mathcal A$, the AR constant of $\partial\Omega$, and the implicit constant in \eqref{eqphi}.
\end{lemma}
The proof of this lemma is similar to the proof of Lemma \ref{lemflat}, except that there is no need to invoke the CFMS estimates and $A_\infty$ as in Lemma \ref{lemlogk}, essentially because $v$ is bounded and we do not need information of $v$ on the boundary. For the same reason, with the properties of the cutoff function $\phi$ in mind, we can forget about the domain $\Omega_\mathcal S$, and in particular, we do not need the corkscrew and Harnack chain conditions in the proof.
\begin{proof}[Proof of Lemma \ref{LCarlesonflat}]
We can decompose $\phi = \phi \,{\mathds 1}_{t>0} + \phi \,{\mathds 1}_{t<0} := \phi_+ + \phi_-$ and prove the result for each of the functions $\phi_+$ and $\phi_- $, and since the proof is the same in both cases (up to a sign), we can restrain ourselves as in the proof of Lemma \ref{lemflat} to the case where $\phi = \phi {\mathds 1}_{t>0}$. By an approximation argument as in Step 1 of the proof of Lemma \ref{lemflat}, we can assume that $T:=\iint_{\mathbb R^n_+}\abs{\nabla v}^2t\phi^2dydt$ is finite, and that $\phi$ is compactly supported in $\Omega\cap\mathbb R^n_+$. We first assume that $\mathcal B$ has the special structure that
\begin{equation}\label{Bni}
\mathcal B_{n i}=0\qquad \text{for all } 1\le i\le n-1, \qquad \mathcal B_{nn}=b.
\end{equation}
Then for any $f\in W_0^{1,2}(\mathbb R^n_+)$,
\begin{equation}\label{eqtsol}
\iint\frac{\mathcal B}{b}\nabla f\cdot\nabla t\,dydt=\iint \partial_t f\,dydt=0.
\end{equation}
Using ellipticity of $\mathcal A$ and boundedness of $b$, we write
\begin{multline*}
T\le C_{\mathcal A}^2\iint\frac{\mathcal A}{b}\nabla v\cdot\nabla v\,\phi^2t\,dydt\\
=C_{\mathcal A}^2\Big\{\iint \mathcal A\nabla v\cdot\nabla\br{v\phi^2 b^{-1}t}dydt-\iint \mathcal A\nabla v\cdot\nabla\br{\phi^2b^{-1}}vt\,dydt-\iint \mathcal A\nabla v\cdot\nabla t\,v\phi^2b^{-1}dydt\Big\}\\
=-C_\mathcal A^2\Big\{\iint \mathcal A\nabla v\cdot\nabla\br{\phi^2b^{-1}}vt\,dydt+\iint \mathcal A\nabla v\cdot\nabla t\,v\phi^2b^{-1}dydt\Big\}=: -C_\mathcal A^2\br{T_1+T_2}
\end{multline*}
since $Lv=0$. We write $T_1$ as
\[
T_1=2\iint \mathcal A\nabla v\cdot\nabla\phi\, \phi b^{-1}vt \,dydt-\iint\mathcal A \nabla v\cdot\nabla b \,\phi^2 b^{-2}vt\,dydt=:T_{11}-T_{12}.
\]
By Cauchy-Schwarz and Young's inequalities, as well as the boundedness of $v$ and $b$,
\[
\abs{T_{11}}\le \frac{C_\mathcal A^{-2}}{6} T+ C\iint\abs{\nabla\phi}^2t\,dydt, \qquad \abs{T_{12}}\le \frac{C_\mathcal A^{-2}}8 T+ C\iint\abs{\nabla b}^2t\phi^2dtdy.
\]
So \eqref{eqphi} and \eqref{B+CCMflat}, as well as \eqref{eqsuppphi} give that
\begin{equation*}
\abs{T_1}\le \frac{C_\mathcal A^{-2}}{4} T + C\ell(Q(\mathcal S))^{n-1}.
\end{equation*}
For $T_2$, we write
\begin{multline*}
T_2=\frac12\iint\frac{\mathcal A}{b}\nabla\br{v^2\phi^2}\cdot\nabla t\,dydt -\iint \frac{\mathcal A}{b}\nabla\phi\cdot\nabla t\,v^2\phi\,dydt\\
=\frac12\iint\frac{\mathcal C}{b}\nabla\br{v^2\phi^2}\cdot\nabla t\,dydt -\iint \frac{\mathcal A}{b}\nabla\phi\cdot\nabla t\,v^2\phi\,dydt=:T_{21}+T_{22}
\end{multline*}
by writing $\mathcal A=\mathcal B+\mathcal C$ and applying \eqref{eqtsol}. For $T_{21}$, we use Cauchy-Schwarz and Young's inequalities, and get
\begin{multline*}
\abs{T_{21}}\le\abs{\iint \mathcal C\nabla v\cdot\nabla t\,v\phi^2b^{-1}dydt}+2\abs{\iint \mathcal C\nabla\phi\cdot\nabla t\,v^2\phi b^{-1}dydt}\\
\le \frac{C_\mathcal A^{-2}}{4}T+ C\iint\abs{\mathcal C}^2\phi^2t^{-1}dydt +\iint\abs{\nabla\phi^2}tdydt\le \frac{C_\mathcal A^{-2}}{4}T+ C\ell(Q(\mathcal S))^{n-1}
\end{multline*}
by the boundedness of $v$, \eqref{eqphi}, \eqref{B+CCMflat}, and \eqref{eqsuppphi}. The boundedness of the coefficients and $v$ implies that \[
\abs{T_{22}}\le C\iint\abs{\nabla\phi}dydt\le C\ell(Q(\mathcal S))^{n-1}
\]
by \eqref{eqphi}. Altogether, we have obtained that
$T\le \frac12 T+ C\ell(Q(\mathcal S))^{n-1}$, and thus the desired estimate \eqref{vflat} follows.
We claim that the lemma reduces to the case when \eqref{Bni} holds by almost the same argument as in Steps 2 and 3 in the proof of Lemma \ref{lemflat}. That is, we can assume that $\norm{\abs{\nabla_y\mathcal B}t}_\infty\lesssim\frac{C_\mathcal A}{N}$ with $N$ to be chosen to be sufficiently large, and then we do a change of variables, which produces the structure \eqref{Bni} in the conjugate operator. The only difference from the proof of Lemma \ref{lemflat} is that now we need to choose $v=- B_3/b$ in the bi-Lipschitz map $\rho$ defined in \eqref{defrho12} because we want $B_3+bv=0$ in \eqref{eqBrho}. We leave the details to the reader.
\end{proof}
\section{The converse} \label{Sconv}
In this section, we show
that $(v) \implies (i)$ in Theorem \ref{Main1}, that is, we establish that under certain conditions on the domain $\Omega$ and the operator $L$, the Carleson condition \eqref{Main2b} on the Green function implies that $\partial\Omega$ is uniformly rectifiable. More precisely, we prove the following.
\begin{theorem}\label{thm.conv}
Let $\Omega$ be a 1-sided Chord-Arc Domain (bounded or unbounded) and let $L=-\operatorname{div} \mathcal A \nabla$ be a uniformly elliptic operator which satisfies the weak DKP condition with constant $M\in(0,\infty)$ on $\Omega$. Let $X_0\in\Omega$, and when $\Omega$ is unbounded, $X_0$ can be $\infty$. We write $G^{X_0}$ for the Green function of $L$ with pole at $X_0$. Suppose that there exist $C\in(0,\infty)$ and $\beta>0$ such that for all balls $B$ centered at the boundary and such that $X_0 \notin 2B$, we have
\begin{equation} \label{Main1b'}
\iint_{\Omega \cap B} \left| \frac{\nabla G^{X_0}}{G^{X_0}} - \frac{\nabla D_\beta}{D_\beta} \right|^2 D_\beta \, dX \leq C \sigma(B\cap\partial\Omega).
\end{equation}
Then $\partial\Omega$ is uniformly rectifiable.
\end{theorem}
In \cite{DM2} Theorem 7.1, uniform rectifiability is obtained from some weak condition on the Green function, namely, $G^\infty$ being prevalently close to $D_\beta$. Following \cite{DM2}, we say that $G^\infty$ is prevalently close to $D_\beta$ if for each choice of $\epsilon>0$ and $M\ge 1$, the set $\mathcal{G}_{GD_\beta}(\epsilon,M)$ of pairs $(x,r)\in \partial\Omega\times(0,\infty)$ such that there exists a positive constant $c>0$, with
\[
\abs{D_\beta(X)-c\,G^\infty(X)}\le \epsilon r \quad \text{for }X\in\Omega\cap B(x,Mr),
\]
is {\it Carleson-prevalent}.
\begin{definition}[Carleson-prevalent]
We say that $\mathcal{G}\subset\partial\Omega\times(0,\infty)$ is a Carleson-prevalent set if there exists a constant $C\ge0$ such that for every $x\in\partial\Omega$ and $r>0$,
\[
\int_{y\in\partial\Omega\cap B(x,r)}\int_{0<t<r}{\mathds 1}_{\mathcal{G}^c}(y,t)\frac{d\sigma(y)dt}{t}\le C\,r^{n-1}.
\]
\end{definition}
One could say that our condition \eqref{Main1b'} is stronger than $G^\infty$ being prevalently close to $D_\beta$, and so the theorem follows from \cite{DM2}. But actually it is not so easy to link the two conditions directly. Nonetheless, we can use Chebyshev's inequality to derive a weak condition from \eqref{Main1b'}, which can be used as a replacement of $G^\infty$ being prevalently close to $D_\beta$ in the proof.
We will soon see that the condition on the operator in Theorem \ref{thm.conv} can be relaxed.
Again following \cite{DM2}, given an elliptic operator $L=-\operatorname{div}{\mathcal A \nabla}$, we say that $L$ is {\it locally sufficiently close to a constant coefficient elliptic operator} if for every choice of $\tau>0$ and $K\ge 1$, $\mathcal{G}_{cc}(\tau,K)$ is a Carleson prevalent set, where $\mathcal{G}_{cc}(\tau,K)$ is the set of pairs $(x,r)\in\partial\Omega\times(0,\infty)$ such that there is a constant matrix $\mathcal A_0=\mathcal A_0(x,r)$ such that
\[
\iint_{X\in W_K(x,r)}\abs{\mathcal A(X)-\mathcal A_0}dX\le\tau r^n,
\]
where
\begin{equation}\label{Wkxr}
W_K(x,r)=\set{X\in\Omega\cap B(x, Kr): \dist(X,\partial\Omega)\ge K^{-1}r}.
\end{equation}
We will actually prove Theorem \ref{thm.conv} for elliptic operators $L$ that are sufficiently close locally to a constant coefficient elliptic operator.
The first step of deriving weak conditions from the strong conditions on the operator and $G^\infty$ is the observation that for any integrable function $F$, if there is a constant $C\in(0,\infty)$ such that
\[
\iint_{B(x,r)\cap\Omega}\abs{F(Y)}dY\le C\,r^{n-1} \quad \text{for }x\in\partial\Omega, r>0,
\]
then for any $K\ge1$,
\begin{equation}\label{obs.avg}
\int_{y\in B(x,r)\cap\partial\Omega}\int_{0<t<r}\Yint\longdash_{W_K(y,t)}\abs{F(Y)}dY\,dt\,d\sigma(y)\le C\,K^{n-1}r^{n-1}
\end{equation}
for $x\in\partial\Omega$, $r>0$. This follows immediately from Fubini's theorem and the fact that $W_K(x,r)$ defined in \eqref{Wkxr} is a Whitney region which is away from the boundary.
\begin{lemma}\label{lem.Carlprev}
\begin{enumerate}
\item Let $L=-\mathop{\operatorname{div}} \mathcal A \nabla$ be a uniformly elliptic operator which satisfies the weak DKP condition with constant $M\in(0,\infty)$ on $\Omega$. Then $L$ is locally sufficiently close to a constant coefficient elliptic operator.
\item If $G^{X_0}$ satisfies \eqref{Main1b'} for all $B$ centered at the boundary and such that $X_0 \notin 2B$, then for every choice of $\epsilon>0$ and $K\ge1$, the set \begin{multline}\label{def.setG}
\mathcal{G}^{X_0}(\epsilon,K):= \Big\{(x,r)\in\partial\Omega\times(0,\infty): \, X_0 \notin B(x,2Kr) \text{ and } \\ \iint_{W_K(x,r)}\abs{\nabla\ln\br{\frac{G^{X_0}}{D_\beta}(X)}}^2D_\beta(X)dX\le \epsilon\,r^{n-1} \Big\}
\end{multline}
is Carleson-prevalent.
\end{enumerate}
\end{lemma}
\begin{proof}
Both results follow from the previous observation \eqref{obs.avg} and Chebyshev's inequality. In fact, for (1), we have $\mathcal A=\mathcal B+\mathcal C$ such that for any $x\in\partial\Omega$ and $r>0$,
\begin{equation}\label{eq9a1}
\int_{y\in B(x,r)\cap\Omega}\int_{0<t<r}\Yint\longdash_{W_K(y,t)}\br{\abs{\nabla\mathcal B}^2\delta+\abs{\mathcal C}^2\frac{1}{\delta}}dY\,dt\,d\sigma(y)\le M\,K^{n-1}r^{n-1}.
\end{equation}
By the Poincar\'e inequality, the left-hand side is bounded from below by
\begin{multline*}
c\int_{y\in B(x,r)\cap\Omega}\int_{0<t<r}\Yint\longdash_{W_K(y,t)}\br{\abs{\mathcal B-(\mathcal B)_{W_K(y,t)}}^2+\abs{\mathcal C}^2}dY(Kt)^{-1}dt\,d\sigma(y)\\
\ge\frac{c}2\int_{y\in B(x,r)\cap\Omega}\int_{0<t<r}\Yint\longdash_{W_K(y,t)}\br{\abs{\mathcal A-(\mathcal B)_{W_K(y,t)}}^2}dY(Kt)^{-1}dt\,d\sigma(y)\\
\ge \frac{c}2\frac{\tau}{K^{n+1}}\int_{y\in B(x,r)\cap\Omega}\int_{0<t<r}{\mathds 1}_{\mathcal{G}_{cc}(\tau,K)^c}(y,t)\frac{dt\,d\sigma(y)}{t},
\end{multline*}
where we have used the fact that $(\mathcal B)_{W_K(y,t)}$ is a constant matrix and the definition of the set $\mathcal{G}_{cc}(\tau,K)$. Combining with \eqref{eq9a1}, we have that
\[
\int_{y\in B(x,r)\cap\Omega}\int_{0<t<r}{\mathds 1}_{\mathcal{G}_{cc}(\tau,K)^c}(y,t)\frac{dt\,d\sigma(y)}{t}\le \frac{CMK^{2n}}{\tau}r^{n-1},
\]
which proves (1).
Now we justify (2). Let $\epsilon>0$, $K\ge1$ and $X_0\in\Omega$ be fixed, and let $B$ be a ball of radius $r$ centered at the boundary.
Our goal is to show that
\[
\int_{y\in B\cap\partial\Omega}\int_{0<t<r}{\mathds 1}_{\mathcal{G}^{X_0}(\epsilon,K)^c}(y,t)\frac{dt\,d\sigma(y)}{t}\le C_{\epsilon,K}\sigma(B\cap \partial\Omega).
\]
We discuss two cases. If $X_0 \notin 4KB$, then since $G^{X_0}$ satisfies \eqref{Main1b'} for the ball $2KB$, we have that
\[
\int_{y\in B\cap\partial\Omega}\int_{0<t<r}\Yint\longdash_{W_K(y,t)}\abs{\nabla\ln\br{\frac{G^{X_0}}{D_\beta}}(Y)}^2D_\beta(Y)dY dtd\sigma(y)\le CK^{n-1}r^{n-1}.
\]
Notice that the assumption $X_0 \notin 4KB$ guarantees that $X_0\notin B(y,2Kt)$ for all $y\in B\cap\partial\Omega$ and $0<t<r$. Therefore, if $(y,t)\in \mathcal{G}^{X_0}(\epsilon,K)^c$, then
\[\iint_{W_K(y,t)}\abs{\nabla\ln\br{\frac{G^{X_0}}{D_\beta}}(Y)}^2D_\beta(Y)dY> \epsilon\,r^{n-1}.\]
From this, it follows that
\begin{equation} \label{case1GeK}
\int_{y\in B\cap\partial\Omega}\int_{0<t<r}{\mathds 1}_{\mathcal{G}^{X_0}(\epsilon,K)^c}(y,t)\frac{dt\,d\sigma(y)}{t}\le \frac{CK^{2n-1}}{\epsilon}\sigma(B\cap \partial\Omega).
\end{equation}
Now let us deal with the case where $X_0 \in 4KB$. For $x\in B \cap \partial \Omega$, we define $B_x:= B(x,|x-X_0|/20K)$. Since $\{B_x\}_{x\in B}$ covers $B \cap \partial \Omega$, we can find a non-overlapping subcollection $\{B_{i}\}_{i\in I}$ such that $\{5B_{i}\}_{i\in I}$ covers $B \cap \partial \Omega$. We write $r_i>0$ for the radius of $B_i$ and we define
\[S:= (B\cap \partial \Omega) \times (0,r) \setminus \bigcup_{i\in I} (5B_i\cap \partial \Omega) \times (0,5r_i). \]
We have
\begin{multline*}
\int_{y\in B\cap\partial\Omega}\int_{0<t<r}{\mathds 1}_{\mathcal{G}^{X_0}(\epsilon,K)^c}(y,t)\frac{dt\,d\sigma(y)}{t} \leq \sum_{i\in I} \int_{y\in 5B_i\cap\partial\Omega}\int_{0<t<5r_i}{\mathds 1}_{\mathcal{G}^{X_0}(\epsilon,K)^c}(y,t)\frac{dt\,d\sigma(y)}{t} \\
+ \iint_{S} {\mathds 1}_{\mathcal{G}^{X_0}(\epsilon,K)^c}(y,t)\frac{dt\,d\sigma(y)}{t} =: T_1 + T_2.
\end{multline*}
Since $X_0 \notin 20KB_i$, we can apply \eqref{case1GeK}, and we have
\[
T_1 \leq C_{K,\epsilon} \sum_{i\in I} \sigma(5B_i \cap \partial \Omega) \lesssim \sum_{i\in I} \sigma(B_i \cap \partial \Omega) \leq \sigma(2B \cap \partial \Omega) \lesssim \sigma(B \cap \partial \Omega),
\]
because the balls $B_i$ are non-overlapping and included in $2B$. It remains to prove a similar bound on $T_2$. Remark first that
\[S \subset \{(y,t) \in \partial\Omega \times (0,r): \, |y-X_0|/100K < t\},\]
and therefore
\[
T_2 \leq \int_{0}^r \int_{y\in B(X_0,100Kt) \cap \partial \Omega} \frac{d\sigma(y)\, dt}{t} \leq C K^{n-1} r^{n-1} \lesssim \sigma(B \cap \partial \Omega).
\]
The lemma follows.
\end{proof}
Before we continue, we need to adapt Theorem 2.19 in \cite{DM2} to our situation, that is, we want to construct a positive solution in a domain which is the limit of a sequence of domains.
\begin{lemma}\label{lem.cvg}
Let $\Omega_k$ be a sequence of 1-sided Chord-Arc domains in $\mathbb R^n$ with uniform 1-sided CAD constants. Let $\partial \Omega_k$ be its Ahlfors regular boundary equipped with an Ahlfors regular measure $\sigma_k$ (such that the constant in \eqref{defADR} is uniform in $k$).
Assume that $0\in \partial \Omega_k$ and $\diam \Omega_k \geq 2^k$. Moreover, assume that $\partial \Omega_k$ and $\Omega_k$ converge to $E_\infty$ and $\Omega_\infty$ locally in the Hausdorff distance, that is, for any $j\in \mathbb N$, we have
\[\lim_{k\to \infty} d_{0,2^j}(E_\infty,\partial \Omega_k) = 0 \text{ and } \lim_{k\to \infty} d_{0,2^j}(\Omega_\infty,\Omega_k) = 0.\]
Here, for a couple of sets $(E,F)$, we define the Hausdorff distance
\[d_{0,2^j}(E,F):= \sup_{x\in E \cap B(0,2^j)} \dist(x,F) + \sup_{y\in F \cap B(0,2^j)} \dist(y,E).\]
Then $E_\infty = \partial \Omega_\infty$, $E_\infty$ is an unbounded $(n-1)$-Ahlfors regular set, $\Omega_\infty$ is a 1-sided Chord-Arc Domain. Moreover, if the Radon measure $\sigma$ is any weak-* limit of the $\sigma_k$, then $\sigma$ is an Ahlfors regular measure on $E_\infty = \partial \Omega_\infty$.
Let $Y_0$ be a corkscrew point of $\Omega_\infty$ for the boundary point $0$ at the scale 1. If $L_k = - \operatorname{div} A_k \nabla$ and $L_\infty = -\operatorname{div} A_\infty \nabla$ are operators --- in $\Omega_k$ and $\Omega_\infty$ respectively --- that satisfy
\[\lim_{k\to \infty} \|A_k - A_\infty\|_{L^1(B)} = 0 \qquad \text{ for any ball $B$ such that $2B \subset \Omega_\infty$},\]
and if $u_k$ are positive solutions in $\Omega_k \cap B(0,2^{k+1})$ to $L_k u_k = 0$ with $\Tr u_k=0$ on $\partial\Omega_k\cap B(0,2^{k+1})$, then the sequence of functions $v_k:= u_k/u_k(Y_0)$ converges, uniformly on every compact subset of $\Omega_\infty$, and in $W^{1,2}_{\loc}(\Omega_\infty)$, to $G^\infty$, the unique Green function with pole at infinity which verifies $G^\infty(Y_0) = 1$.
\end{lemma}
\begin{proof}
The geometric properties of $E_\infty$ and $\Omega_\infty$ can be derived verbatim as in the proof of Theorem 2.19 in \cite{DM2}. The uniform convergence of a subsequence of $v_k$ on any compact set $K\Subset\Omega_\infty$ follows from the standard argument of uniform boundedness of $\set{v_k}$ on $K$, and H\"older continuity of solutions. The Caccioppoli inequality would give the weak convergence of another subsequence of $v_k$ to some $v_\infty$ in $W_{\loc}^{1,2}(\Omega_\infty)$. This is enough to show that $v_\infty\in W_{\loc}^{1,2}(\Omega_\infty)\cap C(\overline{\Omega_\infty})$ is a weak solution of $L_\infty v_\infty=0$ in $\Omega_\infty$, as we can write
\[
\iint_{\Omega_\infty}A_\infty\nabla v_\infty\cdot\nabla\varphi dX=\iint_{\Omega_\infty}A_\infty(\nabla v_\infty-\nabla v_k)\cdot\nabla\varphi \,dX+\iint_{\Omega_\infty}(A_\infty-A_k)\nabla v_k\cdot\nabla\varphi \,dX
\]
for every $\varphi\in C_0^\infty(\Omega_\infty)$ and any $k$ sufficiently big so that $\supp\varphi\subset\Omega_k\cap B(0,2^{k+1})$. Therefore, $v_\infty=G^\infty$ is the Green function with pole at infinity for $L_\infty$ in $\Omega_\infty$ and normalized so that $G^\infty(Y_0)=1$.
That $v_k$ converges to $G^\infty$ (strongly) in $W_{\loc}^{1,2}(\Omega_\infty)$ needs more work, but we can directly copy the proof of Lemma 2.29 in \cite{DM2}. Roughly speaking, for any fixed ball $B$ with $4B\subset\Omega$, we would need to introduce an intermediate function $V_k$, which satisfies $L_kV_k=0$ in $B_\rho$ for some $\rho\in(r,2r)$, and $V_k=v_k$ on the sphere $\partial B_\rho$. We refer the readers to \cite{DS2} for the details.
\end{proof}
We shall need the following result on compactness of closed sets, which has been proved in \cite{DS3}.
\begin{lemma}[\cite{DS3} Lemma 8.2]\label{lem.DS3}
Let $\set{E_j}$ be a sequence of non-empty closed
subsets of $\mathbb R^n$, and suppose that there exists an $r>0$ such that $E_j\cap B(0,r)\neq\emptyset$ for all $j$. Then there is a subsequence of $\set{E_j}$ that converges to a nonempty closed subset $E$ of $\mathbb R^n$ locally in the Hausdorff distance.
\end{lemma}
Now we are ready to prove the main theorem of this section.
\begin{proof}[Proof of Theorem \ref{thm.conv}]
We prove that $\partial\Omega$ is uniformly rectifiable by showing that $\Omega_{\rm ext}$ satisfies the corkscrew condition (see Lemma \ref{lem.UReqv}). Following the proof of Theorem 7.1 in \cite{DM2}, it suffices to show that the set $\mathcal{G}_{CB}(c)$ is Carleson-prevalent for some $c>0$, where $\mathcal{G}_{CB}(c)$ is the set of pairs $(x,r)\in\partial\Omega\times(0,\infty)$ such that we can find $Z_1,Z_2\in B(x,r)$, that lie in different connected components of $\mathbb R^n\setminus\partial\Omega$, and such that $\dist(Z_i,\partial\Omega)\geq cr$ for $i=1,2$. To do that, we will rely on the fact that, on 1-sided CAD domains, if the elliptic measure is comparable to the surface measure, then the complement $\Omega_{\rm ext}$ satisfies the corkscrew condition, which is implied by the main result of \cite{HMMTZ}.
Thanks to Lemma \ref{lem.Carlprev}, for each choice of $\epsilon>0$ and $M\geq 1$, the sets $\mathcal{G}^{X_0}(\epsilon,M)$ and $\mathcal{G}_{cc}(\epsilon,M)$ are Carleson-prevalent. So it suffices to show that
\begin{equation}\label{eq9b1}
\mathcal{G}^{X_0}(\epsilon,M)\cap\mathcal{G}_{cc}(\epsilon,M)\subset\mathcal{G}_{CB}(c) \quad \text{for some }c>0,\ \epsilon>0, \text{ and }M\geq 1.
\end{equation}
We prove by contradiction. Assume that \eqref{eq9b1} is false, then for $c_k=\epsilon_k=M_k^{-1}=2^{-k}$, we can find a 1-sided NTA domain $\Omega_k$ bounded by an Ahlfors regular set $\partial\Omega_k$, a point $X_k\in\Omega_k$ (or $X_k\in\Omega_k\cup\set{\infty}$ when $\Omega$ is unbounded), an elliptic operator $L_k=-\operatorname{div}\mathcal A_k\nabla$ that is locally sufficiently close to a constant coefficient elliptic operator, and a pair $(x_k,r_k)\in\partial\Omega_k\times(0,\infty)$ for which
\[
(x_k,r_k)\in \mathcal{G}^{X_k}(\epsilon_k,M_k)\cap\mathcal{G}_{cc}(\epsilon_k,M_k)\setminus\mathcal{G}_{CB}(c_k).
\]
By translation and dilation invariance, we can assume that $x_k=0$ and $r_k=1$. Notice that $(0,1)\in\mathcal{G}^{X_k}(\epsilon_k,M_k)$ implies that $X_k\notin B(0,2^k)$, and in particular, $\diam(\Omega_k)\geq 2^k$, and $X_k$ tends to infinity as $k\to\infty$.
By Lemma \ref{lem.DS3}, we can
extract a subsequence so that $\Omega_k$ converges to a limit $\Omega_\infty$. By Lemma \ref{lem.cvg}, $\Omega_\infty$ is 1-sided NTA, $\partial\Omega_k$ converges to $\partial\Omega_\infty$ which is Ahlfors regular. Moreover, by Lemma \ref{lem.DS3}, we
can extract a further subsequence so that the Ahlfors regular measure $\sigma_k$ given on $\partial\Omega_k$ converges weakly
to an Ahlfors regular measure $\sigma$.
Since $(0,1)\in\mathcal{G}_{cc}(2^{-k},2^k)$, $\mathcal A_k$ converges to some constant matrix $\mathcal A_0$ in $L^1_{\loc}(\Omega_\infty)$.
Choose a corkscrew point $Y_0\in\Omega_\infty$ for some ball $B_0$ centered on $\partial\Omega_\infty$, and let $G_k=G_k^{X_k}$ be the Green function for $L_k$ in $\Omega_k$, normalized so that $G_k(Y_0)=1$. Since $L_kG_k=0$ in $\Omega_k\cap B(0,2^k)$, Lemma \ref{lem.cvg} asserts that $G_k$ converges to the Green function $G=G_\infty^\infty$ with pole at infinity for the constant-coefficient operator $L_0=-\operatorname{div} \mathcal A_0\nabla$, uniformly on compact sets of $\Omega_\infty$, and in $W_{\loc}^{1,2}(\Omega_\infty)$. Since $\sigma_k\rightharpoonup\sigma$, $D_k=D_{\beta,\sigma_k}$ converges to $D=D_{\beta,\sigma}$ uniformly on compact sets of $\Omega_\infty$, and so does $\nabla D_k$ to $\nabla D$. Since $(0,1)\in\mathcal{G}^{X_k}(2^{-k},2^k)$,
\begin{equation}\label{eq9b2}
\iint_{W_{2^k}(0,1)}\abs{\frac{\nabla G_k}{G_k}-\frac{\nabla D_k}{D_k}}^2D_k(X)dX\le 2^{-k} \qquad \text{for all } k\in\mathbb{Z}_+,
\end{equation}
where $W_{2^k}(0,1)$ is the Whitney region defined as in \eqref{Wkxr} for $\Omega_k$. Fix any compact set $K\Subset\Omega_\infty$. We claim that
\begin{equation}\label{eq9b3}
\lim_{k\to\infty} \iint_{K}\abs{\frac{\nabla G_k}{G_k}-\frac{\nabla D_k}{D_k}}^2D_k(X)dX=\iint_{K}\abs{\frac{\nabla G}{G}-\frac{\nabla D}{D}}^2D(X)dX.
\end{equation}
In fact, since $G$ is a positive solution of $L_0 G=0$ in $\Omega_\infty$ with $G(Y_0)=1$, the Harnack inequality implies that $G\geq c_0$ on $K$ for some $c_0>0$. Then the uniform convergence of $G_k$ to $G$ on $K$ implies that for $k$ large enough, $\set{G_k^{-1}}$ is uniformly bounded on $K$, and so $G_k^{-1}$ converges uniformly to $G^{-1}$ on $K$. Then \eqref{eq9b3} follows from the fact that $\nabla G_k$ converges to $\nabla G$ in $L^2(K)$, the uniform convergence of $G_k^{-1}$ to $G^{-1}$ on $K$, and the uniform convergences of $\nabla D_k$ and $D_k^{-1}$ to $\nabla D$ and $D^{-1}$.
Now by \eqref{eq9b2} and \eqref{eq9b3}, we get that
\[\iint_K\abs{\nabla\ln\br{\frac{G}{D}}(X)}^2D(X)dX=0,\]
and so $G=CD_{\beta,\sigma}$ in $\Omega_\infty$. We can copy the proof of Theorem 7.1 of \cite{DM2} verbatim from now on to conclude that this leads to a contradiction. Roughly speaking, $G=CD_{\beta,\sigma}$ would imply that the elliptic measure $\omega^\infty$ for $L_0$, with a pole at $\infty$, is comparable to $\mathcal H_{|\partial\Omega_\infty}^{n-1}$. Then by \cite{HMMTZ} Theorem 1.6 one can conclude that $\partial\Omega_\infty$ is uniformly rectifiable, and hence $\mathbb R^n\setminus\overline\Omega_\infty$ satisfies the corkscrew condition, which contradicts the assumption that $(0,1)=(x_k,r_k)\notin\mathcal{G}_{CB}(c_k)$.
\end{proof}
\section{Assuming that $\Omega$ is semi-uniform is not sufficient.} \label{Scount}
In this subsection, we will give an example of domain where the harmonic measure on $\partial \Omega$ is $A_\infty$-absolutely continuous with respect to the $(n-1)$-dimensional Hausdorff measure, but where Theorem \ref{Main1} fails. It is known that the harmonic measure is $A_\infty$-absolute continuous with respect to the surface measure whenever the domain $\Omega$ is semi-uniform and its boundary is $(n-1)$-Ahlfors regular and uniformly rectifiable (see \cite[Theorem III]{Azzam}). The notion of semi-uniform domain is given by the next definition.
\begin{definition}[{\bf Semi-uniform domains}] \label{defSUD}
We say that $\Omega$ is semi-uniform if it satisfies the corkscrew condition (see Definition \ref{def1.cork}) and if for every $\Lambda\geq 1$, there exists $C_\Lambda>0$ such that for any $\rho>1$ and every pair of points $(X,x) \in \Omega \times \partial \Omega$ such that $\abs{X-x}<\Lambda\rho$, there exists a Harnack chain of length bounded by $C_\Lambda$ linking $X$ to one of the corkscrew points for $x$ at scale $\rho$.
\end{definition}
Semi-uniform domains were first introduced by Aikawa and Hirata in \cite{AiHi} using {\it cigar curves}. The two definitions of semi-uniform domains are known to be equivalent, see for instance, \cite{Azzam} Theorem 2.3.
Our counterexample is constructed in $\mathbb R^2$ for simplicity but can easily be extended to any dimension.
\begin{figure}
\caption{The domain $\Omega$}
\label{fig1}
\end{figure}
Our domain (see Figure \ref{fig1}) will be
$$\Omega := \mathbb R^2 \setminus \bigcup_{k\in \mathbb Z} \Big\{(x,t)\in \mathbb R^2, |x-2k|+|t| < \frac12\Big\}.$$
Note that $\partial \Omega$ is uniformly rectifiable, but the domain contains two parts ($\Omega \cap \mathbb R^2_+$ and $\Omega \cap \mathbb R^2_-$) which are not well connected to each other, that is, this domain does not satisfy the Harnack Chain Condition (see Definition \ref{def1.hc}). We let the reader check that the domain is still semi-uniform.
Due to the lack of Harnack chains, the space $\Omega$ does not have a unique - up to constant - Green function with pole at $\infty$. If we take the pole at $t \to -\infty$, then we can construct a positive function $G$ which will be bounded on $\Omega \cap\mathbb R^2_+$, and we shall prove that this is incompatible with our estimate \eqref{Main2d} that says that $\frac{\partial_t G}{G}$ is ``close'' to $\frac1t$ when $t$ is large enough.
\subsection{Construction of $G$}
The goal now will be to construct a positive function in $\Omega$, which is morally the Green function with pole at $t=-\infty$. We could have used the usual approach, that is taking the limit when $n$ goes to infinity of - for instance - $G(X,X_n)/G(X_0,X_n)$ in the right sense, where $G$ is the Green function on $\Omega$ for the Laplacian, and $X_n := (1,n)$. However, the authors had difficulty proving the 2-periodicity in $x$ of the limit and didn't know where to find the right properties in the literature (as our domains are unbounded). So we decided to make the construction from scratch.
We want to work with the Sobolev space
\begin{multline*}
W = \Big\{u\in W^{1,2}_{loc}(\overline{\Omega}), \, u(x,t) = u(x+2,t) \text{ and} \, u(x,-t) = u(x,t) \text{ for $(x,t)\in \Omega$}, \\
\, \iint_{S_0} |\nabla u(x,t)|^2 dx \, dt < +\infty\Big\}.
\end{multline*}
Here and in the sequel $S_k$ is the strip $\Omega \cap \Big( [k,k+1) \times \mathbb R \Big)$.
Note that due to the 2-periodicity in $x$ and the symmetry, the function $u\in W$ is defined on $\mathbb R^2$ as soon as $u$ is defined on any of the sets $S_k$. We will also need
\[W^+:= \{u_{|\Omega \cap \mathbb R^2_+}, \, u \in W\} \text{ and } W_0:= \{u\in W, \, \Tr(u) = 0 \text{ on } \partial \Omega\}.\]
We let the reader check that the quantity
\[\|u\|_W := \left( \iint_{S_0} |\nabla u(x,t)|^2 dx \, dt \right)^\frac12\]
is a norm on the space $W_0$, and the couple $(W_0,\|.\|_W)$ is a Hilbert space.
The bilinear form
\[a(u,v):= \iint_{S_0} \nabla u \cdot \nabla v \, dt \, dx\]
is continuous and coercive on $W_0$, so for any $k\in \mathbb N$, there exists $\widetilde G_k \in W_0$ such that
\begin{equation} \label{defwtGk}
a(\widetilde G_k, v ) = \iint_{S_0} \nabla \widetilde G_k \cdot \nabla v \, dx \, dt = 2^{-k} \int_0^1 \int_{-2^{k+1}}^{-2^k} v(x,t) \, dt\, dx \qquad \text{ for } v\in W_0.
\end{equation}
The first key observation is:
\begin{proposition} \label{prwtGk}
$\widetilde G_k \in W_0$ is a positive weak solution to $-\Delta u = 0$ in $\Omega \cap \{t>-2^k\}$.
\end{proposition}
\noindent {\em Proof: } The fact that $\widetilde G_k$ is nonnegative is a classical result that relies on the fact that $u\in W_0 \implies |u|\in W_0$ and the bilinear form $a(u,v)$ is coercive. See for instance \cite{DFMprelim}, (10.18)--(10.20).
In order to prove that $\widetilde G_k$ is a solution in $\Omega \cap \{t>-2^k\}$, take $\phi \in C^\infty_0(\Omega \cap \{t>-2^k\})$. For $j\in\mathbb{Z}$, let $\phi_j$ be the only symmetric and 2-periodic function in $x$ such that $\phi_j = \phi$ on $S_j$. Observe that $\phi_j$ is necessarily continuous, and so $\phi_j$ lies in $W_0$. Thus
\begin{multline*}
\iint_\Omega \nabla \widetilde G_k \cdot \nabla \phi \, dx\, dt
= \sum_{j\in \mathbb Z} \iint_{S_j} \nabla \widetilde G_k \cdot \nabla \phi \, dx\, dt
= \sum_{j\in \mathbb Z} \iint_{S_j} \nabla \widetilde G_k \cdot \nabla \phi_j \, dx\, dt \\
= \sum_{j\in \mathbb Z} \iint_{S_0} \nabla \widetilde G_k \cdot \nabla \phi_j \, dx\, dt = 0
\end{multline*}
by \eqref{defwtGk}, since $\phi_j =\phi \equiv 0$ on $\{t\leq -2^k\}$ for all $j\in\mathbb Z$.
Since $\widetilde G_k$ is a solution, which is nonnegative and not identically equal to 0 (otherwise \eqref{defwtGk} would be false), the Harnack inequality (Lemma \ref{Harnack}) entails that $\widetilde G_k$ is positive. The proposition follows.
$\square$
Let $X_0:= (1,0) \in \Omega$. From the above proposition, $\widetilde G_k(X_0) >0$ so we can define
\begin{equation} \label{defGk}
G_k(X) := \frac{\widetilde G_k(X)}{\widetilde G_k(X_0)}.
\end{equation}
\begin{proposition} \label{prGk}
For each $k\in \mathbb N$, the function $G_k(X) \in W_0$ is a positive weak solution to $-\Delta u = 0$ in $\Omega \cap \{t>-2^k\}$. Moreover, we have the following properties:
\begin{enumerate}[(i)]
\item for any compact set $K\Subset \overline{\Omega}$, there exist $k:=k(K)$ and $C_K:=C(K)$ such that $G_j(X) \leq C_K$ for all $j\geq k$ and $X\in K$, and $\{G_j\}_{j\geq k}$ is equicontinuous on $K$;
\item there exists $C>0$ such that
\[ \iint_{\Omega \cap ([-2,2] \times [-1,1])} |\nabla G_k(x,t)|^2 dx\, dt \leq C \qquad \text{ for all } k\in \mathbb N;\]
\item there exists $C>0$ such that
\[\|G_k\|_{W^+}^2 := \iint_{S_0 \cap \mathbb R^2_+} |\nabla G_k|^2 dx\, dt \leq C \qquad \text{ for all } k\in \mathbb N.\]
\end{enumerate}
\end{proposition}
\noindent {\em Proof: }
The fact that $G_k$ is a positive weak solution is given by Proposition \ref{prwtGk}. So it remains to prove (i), (ii) and (iii).
We start with (i). Since $G_k$ is a weak solution in $\Omega_0 := \Omega \cap [(-4,4) \times (-2,2)]$ when $k\geq 1$, and since $\Omega_0$ is a Chord Arc Domain, we can invoke the classical elliptic theory and we can show that there exists $C>0$ such that
\[ \sup_{\Omega \cap ([-2,2] \times [-1,1])} G_k \leq C G_k(1,0) = C \quad\text{for all }k\geq 1,\]
see for instance Lemma 15.14 in \cite{DFMprelim2}. By the 2-periodicity of $G_k$, it means that
\[ \sup_{k\geq 1} \sup_{\Omega \cap ( \mathbb R \times [-1,1])} G_k \leq C,\]
and then since we can link any point of a compact $K \Subset \overline{\Omega}$ back to $\Omega \cap ( \mathbb R \times [-1,1])$ with a Harnack chain (the length of the chain depends on $K$), we have
\[ \sup_{j\geq k} \sup_{K} G_j \leq C_K,\]
whenever $G_j$ is a solution in the interior of $K$, which is bound to happen if $j\geq k(K)$ is large enough.
The functions $G_k$ are also H\"older continuous up to the boundary in the areas where they are solutions, so $\{G_j\}_{j\geq k}$ is equicontinuous on $K$ as long as $k$ is large enough so that $K \subset \overline{\Omega} \cap \set{t>-2^k}$.
Point (ii) is a consequence of the Caccioppoli inequality at the boundary. We only need to prove the bound when $k\geq 2$, since all the $G_k$ are already in $W_0$ by construction. We have by the Caccioppoli inequality at the boundary (see for instance Lemma 11.15 in \cite{DFMprelim2}) that
\begin{multline*}
\iint_{\Omega \cap ([-2,2] \times [-1,1])} |\nabla G_k(x,t)|^2 dx\, dt \lesssim \iint_{\Omega \cap ([-4,4] \times [-2,2])} |G_k(x,t)|^2 dx\, dt \\
\lesssim \sup_{\Omega \cap ([-4,4] \times [-2,2])} |G_k(x,t)|^2 \lesssim 1.
\end{multline*}
Point (iii) is one of our key arguments. We define $W_0^+$ as the subspace of $W^+$ that contains the functions with zero trace on $\partial (\Omega \cap \mathbb R^2_+)$.
Since $G_k \in W_0$, its restriction $(G_k)_{|\Omega \cap \mathbb R^2_+}$ is of course in $W^+$. Moreover, $G_k$ is a solution to $-\Delta u = 0$ in $\Omega \cap \mathbb R^2_+$. We can invoke the uniqueness in Lax-Milgram theorem (see Lemma 12.2 in \cite{DFMprelim2}, but adapted to our periodic function spaces $W_0^+$ and $W^+$) to get that $G_k$ is the only weak solution to $-\Delta u = 0$ in $\Omega \cap \mathbb R^2_+$ for which the trace on $\partial (\Omega \cap \mathbb R^2_+)$ is $(G_k)_{|\partial (\Omega \cap \mathbb R^2_+)}$. Moreover,
\[\|G_k\|_{W^+} \leq C \|(G_k)_{|\partial (\Omega \cap \mathbb R^2_+)}\|_{H^{1/2}_{\partial\Omega_+}},\]
where $H^{1/2}_{\partial\Omega_+}$ is the space of traces on $\partial \Omega_+:= \partial (\Omega \cap \mathbb R^2_+)$ for the symmetric 2-periodic functions defined as
\begin{multline*}
H^{1/2}_{\partial\Omega_+} := \Big\{ f\colon \partial \Omega_+ \to \mathbb R \text{ measurable such that $f$ is symmetric and $2$-periodic in $x$,} \\
\text{ and } \|f\|_{H^{1/2}_{\partial\Omega_+}}:= \left( \int_{\partial\Omega_+ \cap S_0} \int_{\partial\Omega_+ \cap S_0} \frac{|f(x)-f(y)|^2}{|x-y|^{3/2}} d\mathcal H^1(x) \, d\mathcal H^1(y)\right)^\frac12 < +\infty \Big\}.
\end{multline*}
So in particular, we have by a classical argument that
\[\|(G_k)_{|\partial (\Omega \cap \mathbb R^2_+)}\|^2_{H^{1/2}_{\partial\Omega_+}} \leq C \iint_{\Omega \cap ([-2,2] \times [-1,1])} |\nabla G_k(x,t)|^2 dx\, dt.\]
We conclude that
\[ \iint_{S_0 \cap \mathbb R^2_+} |\nabla G_k|^2 dx\, dt \lesssim \iint_{\Omega \cap ([-2,2] \times [-1,1])} |\nabla G_k(x,t)|^2 dx\, dt \lesssim 1\]
by (ii). Point (iii) follows.
$\square$
\begin{proposition}
There exists a symmetric (in $x$), 2-periodic (in $x$), positive weak solution $G\in W_{\loc}^{1,2}(\Omega)\cap C(\overline\Omega)$ to $-\Delta G = 0$ in $\Omega$ such that $G=0$ on $\partial\Omega$ and $G(X_0) = 1$ and
\begin{equation} \label{GinW+} \iint_{S_0 \cap \mathbb R^2_+} |\nabla G|^2 dx\, dt < +\infty.
\end{equation}
\end{proposition}
\noindent {\em Proof: } We invoke the Arzel\`a-Ascoli theorem - whose conditions are satisfied thanks to Proposition \ref{prGk} (i) - to extract a subsequence of $G_k$ that converges uniformly on any compact to a continuous function $G$. The fact that $G$ is non-negative, symmetric, 2-periodic, and satisfies $G(X_0) =1$ is immediate from the fact that all the $G_k$ are already like this. The functions $G_k$ converge to $G$ in $W^{1,2}_{loc}(\overline{\Omega})$ thanks to the Caccioppoli inequality, and then by using the weak convergence of $G_k$ to $G$ in $W^{1,2}_{loc}(\overline{\Omega})$, we can easily prove that $G$ is a solution to $-\Delta u = 0$ in $\Omega$ (hence $G$ is positive by the Harnack inequality, since it was already non-negative). The convergence of $G_k$ to $G$ in $W^{1,2}_{loc}(\overline{\Omega})$ also allows the uniform bound on $\|G_k\|_{W^+}$ given by Proposition \ref{prGk} (iii) to be transmitted to $G$, hence \eqref{GinW+} holds. The proposition follows.
$\square$
\subsection{$G$ fails the estimate given in Theorem \ref{Main1}}
\begin{lemma} \label{lemdtGinW}
$\partial_t G$ is harmonic in $\Omega$, that is, it is a solution of $-\Delta u=0$ in $\Omega$, and
we have
\[\int_1^\infty \int_{0}^1 |\nabla \partial_t G|^2 dx\, dt < +\infty.\]
\end{lemma}
\noindent {\em Proof: }
Morally, we want to prove that if $G$ is a solution (to $-\Delta u = 0$), then $\nabla G \in W^{1,2}$, which is a fairly classical regularity result. The difficulty in our case is that the domain in consideration is unbounded.
Since $G$ is a harmonic function (solution of the Laplacian), the function $g(x):= G(x,1)$ is smooth. We can prove the bound
\[\int_1^\infty \int_{0}^1 |\nabla \partial_x G|^2 dx\, dt \lesssim \int_0^1 |g'(x)|^2 dx + \int_0^1 |g''(x)|^2 dx + \int_1^\infty \int_{0}^1 |\nabla G|^2 dx\, dt < +\infty \]
by adapting the proof of Proposition 7.3 in \cite{DFMpert-reg} to our simpler context (and invoking \eqref{GinW+} and $g \in C^\infty(\mathbb R)$ to have the finiteness of the considered quantities). In order to have the derivative on the $t$-derivative, it is then enough to observe
\begin{multline*} \int_1^\infty \int_{0}^1 |\nabla \partial_t G|^2 dx\, dt \lesssim \int_1^\infty \int_{0}^1 |\partial_x \partial_t G|^2 dx\, dt + \int_1^\infty \int_{0}^1 |\partial_t \partial_t G|^2 dx\, dt \\
= \int_1^\infty \int_{0}^1 |\partial_t \partial_x G|^2 dx\, dt + \int_1^\infty \int_{0}^1 |\partial_x \partial_x G|^2 dx\, dt \\
\lesssim \int_1^\infty \int_{0}^1 |\nabla \partial_x G|^2 dx\, dt < +\infty,
\end{multline*}
where we use the fact that $G$ is a solution to $-\Delta u = 0$ - i.e. $\partial_t \partial_t G = - \partial_x \partial_x G$ - for the second line. The lemma follows.
$\square$
We will also need a maximum principle, given by
\begin{lemma} \label{maxprinciple}
If $u$ is a symmetric (in $x$), 2-periodic (in $x$) harmonic function in $\mathbb R \times (t_0,\infty)$ that satisfies
\begin{equation} \label{uinWmp}
\int_{t_0}^\infty \int_{0}^1 |\nabla u|^2 dx\, dt < +\infty,
\end{equation}
then $u$ has a trace - denoted by $\Tr_{t_0} u$ - on $\mathbb R\times \{t_0\}$ and
\[\inf_{y\in (0,1)} (\Tr_{t_0} u)(y) \leq u(x,t) \leq \sup_{y\in (0,1)} (\Tr_{t_0} u)(y) \qquad \text{ for all } x\in \mathbb R, \, t>t_0.\]
\end{lemma}
\noindent {\em Proof: }
The existence of the trace - in the space $W^{\frac12,2}(\mathbb R \times \{t_0\})$ - is common knowledge. The proof of Lemma 12.8 in \cite{DFMprelim2} (for instance) can be easily adapted to prove our case.
$\square$
\begin{lemma} \label{lemGeq1}
There exists $C\geq 1$ such that
\[C^{-1} \leq G(x,t) \leq C \qquad \text{ for } x\in \mathbb R, \, t\geq 1.\]
\end{lemma}
\noindent {\em Proof: }
Since $G(1,0) = G(X_0) = 1$ and $G$ is a positive solution, the Harnack inequality implies that $C^{-1} \leq G(x,1) \leq C$ for $x\in [0,1]$. Since $G$ is symmetric and 2-periodic in $x$, we have $C^{-1} \leq G(x,1) \leq C$ for $x\in \mathbb R$. We conclude with the maximum principle (Lemma \ref{maxprinciple}), since the bound \eqref{uinWmp} is given by \eqref{GinW+}.
$\square$
\begin{lemma} \label{lemdtG<c/t}
For every $c>0$, there exists $t_0\geq 1$ such that
\[\partial_t G(x,t) \leq \frac{c}{t} \text{ for all $x\in \mathbb R$, $t\geq t_0$}.\]
\end{lemma}
\noindent {\em Proof: } Let $x$ be fixed. Since $G$ is symmetric and 2-periodic in $x$, we can assume without loss of generality that $x\in (0,1)$. Then recall that $\partial_t G$ is a weak solution in $\Omega$, so in particular, we have the Moser estimate and the Caccioppoli inequality, which give
\begin{multline} \label{eqbdddtG}
\sup_{y\in \mathbb R, \, s>4} s|\partial_t G(y,s)| \lesssim \sup_{y\in \mathbb R, \, s>4} s \left(\fint_{s/2}^{2s} \fint_{y-s}^{y+s} |\nabla G(z,r)|^2 dz \, dr\right)^\frac12 \lesssim \sup_{y\in \mathbb R, \, s>1} G \lesssim 1.
\end{multline}
by Lemma \ref{lemGeq1}. Moreover, $\partial_t G$ is H\"older continuous, that is,
\begin{multline} \label{eqHolderdtG}
\sup_{y\in (0,1)} |\partial_t G(x,t) - \partial_t G(y,t)|
\leq Ct^{-\alpha}\br{\fint_{(1-t)/2}^{(1+t)/2}\fint_{t/2}^{3t/2}\abs{\partial_t G(y,s)}^2dsdy}^{1/2}\\
\leq C t^{-\alpha} \sup_{y\in \mathbb R, \, s>t/2} |\partial_t G(y,s)| \leq C' t^{-\alpha - 1} \qquad \text{ for } t\geq 8
\end{multline}
by \eqref{eqbdddtG}.
We pick $t_0 \geq 8$ such that $2C'(t_0)^{-\alpha} \leq c$. Assume by contradiction that there exist $x\in (0,1)$ and $t\geq t_0$ such that $\partial_t G(x,t) \geq c/t$, then
\begin{multline*}
\inf_{y\in \mathbb R} \partial_t G(y,t) = \inf_{y\in (0,1)} \partial_t G(y,t) \geq \partial_t G(x,t) - \sup_{y\in (0,1)} |\partial_t G(x,t) - \partial_t G(y,t)| \\
\geq \frac{c - C't^{-\alpha}}{t} \geq \frac{c}{2t}
\end{multline*}
by our choice of $t_0$. Since $\partial_t G$ is a solution that satisfies \eqref{uinWmp} - see Lemma \ref{lemdtGinW} - the maximum principle given by Lemma \ref{maxprinciple} entails that
\[\partial_t G(y,s) \geq \frac{c}{2t} \qquad \text{ for } y \in \mathbb R, \, s>t,\]
which implies
\[\int_0^1 \int_t^{\infty} |\nabla G(y,s)|^2 ds \, dy = +\infty,\]
which is in contradiction with \eqref{GinW+}. We conclude that for every $x\in (0,1)$ and $t\geq t_0$, we necessarily have $\partial_t G \leq c/t$. The lemma follows.
$\square$
\begin{lemma} \label{lemt>t0}
For any $\beta >0$, there exist $t_0 \geq 1$ and $\epsilon>0$ such that
\begin{equation} \label{dtG/G-dtD/D>e/t}
\left| \frac{ \partial_t G(x,t)}{G(x,t)} - \frac{\partial_t D_\beta(x,t)}{D_\beta(x,t)} \right| \geq \frac{\epsilon}{t} \qquad \text{ for } x\in \mathbb R, \, t\geq t_0.
\end{equation}
\end{lemma}
\noindent {\em Proof: }
The set $\partial \Omega$ is $(n-1)$-Ahlfors regular, so \eqref{equivD} gives the equivalence $D_\beta(X) \approx \dist(X,\partial \Omega)$ for $X\in \Omega$, and hence the existence of $C_1>0$ (depending on $\beta$ and $n$) such that
\begin{equation} \label{DbDb+2=t}
(C_1)^{-1} t \leq D_\beta(x,t) \leq C_1 D_{\beta+2}(x,t) \leq (C_1)^2 t \qquad \text{ for } x\in \mathbb R, \, t\geq 1.
\end{equation}
Check then that
\[\partial_t D_\beta(x,t) = \frac{d+\beta}{\beta} D_\beta^{1+\beta}(x,t) \int_{(y,s) \in \partial \Omega} |(x,t)-(y,s)|^{-d-\beta-2} (t-s) \, d\sigma(y,s) \]
In particular, since $s\leq \frac12$ whenever $(y,s) \in \partial \Omega$, we have, for $(x,t) \in \mathbb R \times [1,\infty)$, that
\begin{multline*}
\partial_t D_\beta(x,t) \geq \Big( t - \frac12 \Big) \frac{n+\beta-1}{\beta} D^{1+\beta}_\beta(x,t) \int_{(y,s) \in \partial \Omega} |(x,t)-(y,s)|^{-n-\beta-1} \, d\sigma(y,s) \\
\geq \frac t2 \frac{n+\beta-1}{\beta} D^{1+\beta}_\beta(x,t) D^{-\beta-2}_{\beta+2}(x,t) \geq c_{\beta,n}
\end{multline*}
for some $c_{\beta,n}>0$, by \eqref{DbDb+2=t}. In conclusion, using \eqref{DbDb+2=t} again, we have the existence of $c_1>0$ such that
\begin{equation} \label{NDb/D>c/t}
\frac{\partial_t D_\beta(x,t)}{D_{\beta}(x,t)} \geq \frac{c_1}{t} \qquad \text{ for } x\in \mathbb R, \, t\geq 1.
\end{equation}
Let $C_2$ be the constant in Lemma \ref{lemGeq1}. Thanks to Lemma \ref{lemdtG<c/t}, there exists $t_0\geq 1$ such that $\partial_t G(x,t) \leq c_1/(2C_2t)$ for any $x\in \mathbb R$ and $t\geq t_0$, which means that
\begin{equation} \label{NG/G<c/t}
\frac{\partial_t G(x,t)}{G(x,t)} \leq \frac{c_1}{2t} \qquad \text{ for } x\in \mathbb R, \, t\geq t_0.
\end{equation}
The combination of \eqref{NDb/D>c/t} and \eqref{NG/G<c/t} gives \eqref{dtG/G-dtD/D>e/t} for $\epsilon = c_1/2$.
$\square$
\begin{lemma} \label{counterexample}
The positive solution $G$ does not satisfy \eqref{Main1b}, proving that assuming that $\Omega$ is semi-uniform is not sufficient for Theorem \ref{Main1}.
\end{lemma}
\noindent {\em Proof: }
Let $B_r$ be the ball of radius $r$ centered at $(0,\frac12) \in \partial \Omega$, and take $r\geq 2t_0$, where $t_0\geq 1$ is the value from Lemma \ref{lemt>t0}. We have
\begin{multline*}
\iint_{\Omega \cap B_r} \left| \frac{\nabla G}{G} - \frac{\nabla D_\beta}{D_\beta} \right|^2 D_\beta \, dx\, dt \geq \iint_{B_r \cap \{t\geq t_0\}} \left| \frac{\nabla G}{G} - \frac{\nabla D_\beta}{D_\beta} \right|^2 D_\beta \, dx\, dt \\
\geq C^{-1}\epsilon^2\iint_{B_r \cap \{t\geq t_0\}} \frac{dx\,dt}{t}
\end{multline*}
by \eqref{dtG/G-dtD/D>e/t} and \eqref{equivD}. We conclude that
\[\frac{1}{\sigma(B_r)} \iint_{\Omega \cap B_r} \left| \frac{\nabla G}{G} - \frac{\nabla D_\beta}{D_\beta} \right|^2 D_\beta \, dx\, dt \gtrsim \ln\Big(\frac{r}{t_0} \Big) \rightarrow +\infty \text{ as } r\to \infty,\]
which means that $G$ does not satisfy \eqref{Main1b}. The lemma follows.
$\square$
\end{document} |
\begin{document}
\title{ Extension of Irreducibility results on Generalised Laguerre Polynomials $L_n^{(-1-n-s)}(x)$}
\author[Nair]{Saranya G. Nair}
\address{Department of Mathematics\\
BITS Pilani, K K Birla Goa Campus, Goa- 403726}
\email{saranyan@goa.bits-pilani.ac.in}
\author[Shorey]{T. N. Shorey}
\address{National Institute of Advanced Studies, IISc Campus\\
Bangalore, 560012}
\email{shorey@math.iitb.ac.in}
\thanks{2010 Mathematics Subject Classification: Primary 11A41, 11B25, 11N05, 11N13, 11C08, 11Z05.\\
Keywords: Irreducibility, Laguerre Polynomials, Primes, Newton Polygons.}
\begin{abstract} We consider the irreducibility of Generalised Laguerre Polynomials
for negative integral values given by $L_n^{(-1-n-s)}(x)=\displaystyle\sum_{j=0}^{n}\binom{n-j+s}{n-j}\frac{x^j}{j!}.$ For different values of $s,$ this family gives polynomials which are of great interest. It was proved earlier that for $0 \leq s \leq 60,$ these polynomials are irreducible over $\mathbb{Q}.$ In this paper we improve this result up to $s \leq 88.$
\end{abstract}
\maketitle
\pagenumbering{arabic}
\pagestyle{myheadings}
\markright{Extension of Irreducibility results on Generalised Laguerre Polynomials $L_n^{(-1-n-s)}(x)$}
\markleft{ Nair and Shorey}
\section{{\bf Introduction}}
For a positive integer $n$ and real number $\alpha,$ the Generalised Laguerre Polynomial (GLP) is defined as
\begin{align}\label{lag}
L_n^{(\alphapha)}(x)=\displaystyle\sum_{j=0}^{n}\frac{(n+\alpha)(n-1+\alpha)\cdots (j+1+\alpha)}{j!(n-j)!}(-x)^j.
\end{align}
These polynomials were discovered around 1880 and they have been extensively studied in various branches of mathematics and mathematical physics. The algebraic properties of GLP were first studied by Schur \cite{Sch1},\cite{Sch2} where he established the irreducibility of $L_n^{(\alpha)}(x)$ for $\alpha \in \{0,1,-n-1\},$ gave a formula for the discriminant $\Delta_n^{(\alpha)}$ of $ \mathcal{L}_n^{(\alpha)}(x)=n!L_n^{(\alpha)}(x)$ by \begin{align*}
\Delta_n^{(\alpha)}=\displaystyle\prod_{j=1}^{n}j^j(\alpha+j)^{j-1}
\end{align*}
and calculated their associated Galois groups. For an account of results obtained on GLP, we refer to \cite{Haj},\cite{NaSh}.
Let $f(x) \in \mathbb{Q}[x]$ with $\deg f=n$. By irreducibility of a polynomial, we shall always mean its irreducibility over $\mathbb{Q}.$ We observe that if a polynomial of degree $n$ has a factor of degree $k <n,$ then it has a factor of degree $n-k.$ {\it Therefore given a polynomial of degree $n$, we always consider factors of degree $k$ where $1 \leq k \leq \frac{n}{2}$}. If the argument $\alpha$ of \eqref{lag} is a negative integer, we see that the constant term of $L_n^{(\alpha)}(x)$ vanishes if and only if $n \geq |\alpha|=-\alpha$ and then $L_n^{(\alpha)}(x)$ is reducible. Therefore we assume that $\alpha \leq -n-1.$ We write $\alpha=-n-s-1$ where $s$ is a non-negative integer. We have
\begin{align}
L_n^{(-n-s-1)}(x)=\displaystyle\sum_{j=0}^{n}(-1)^n\frac{(n+s-j)!}{(n-j)! s!}\frac{x^j}{j!}.
\end{align}
Borrowing the notation from \cite{SinSho}, we consider the following polynomial
\begin{align*}
g(x):=g(x,n,s)=(-1)^nL_n^{(-n-s-1)}(x)=\displaystyle\sum_{j=0}^{n}\binom{n+s-j}{n-j}\frac{x^j}{j!}=\displaystyle\sum_{j=0}^{n}b_j\frac{x^j}{j!}
\end{align*}
where $b_j=\binom{n+s-j}{n-j}$ for $0 \leq j \leq n.$ Thus $b_n=1,b_0=\binom{n+s}{s}=\frac{(n+1)\cdots (n+s)}{s!}$. We observe that $g(x)$ is irreducible if and only if $L_n^{(-n-s-1)}(x)$ is irreducible. The aim of this paper is to discuss the irreducibility of $g(x)$. We consider more general polynomial
\begin{align*}
G(x):=G(x,n,s)=\displaystyle\sum_{j=0}^{n}a_jb_j\frac{x^j}{j!}
\end{align*}
such that $a_j \in \mathbb{Z}$ for $0 \leq j \leq n$ with $|a_0|=|a_n|=1.$ If $a_j=1$ for $0 \leq j \leq n,$ we have $G(x)=g(x).$ We write
\begin{align*}
g_1(x):=n! g(x) = n! \displaystyle\sum_{j=0}^{n}\binom{n+s-j}{n-j}\frac{x^j}{j!}
\end{align*} and
\begin{align*}
G_1(x)=n! G(x)
\end{align*}so that $g_1$ and $G_1$ are monic polynomials with integer coefficients of degree $n.$ The irreducibility of $g_1(x)$ and $G_1(x)$ implies the irreducibility of $g(x)$ and $G(x)$ respectively. We begin with the following result by Sinha and Shorey \cite{SinSho} on $G(x)$.
\betagin{lemma}\lambdabel{Lemma 1}
Let $s \leq 92.$ Then $G(x)=G(x,n,s)$ has no factor of degree $k \geq 2$ except when $(n,k,s) \in \{ (4,2,7),(4,2,23),(9,2,19),(9,2,47),(16,2,14),(16,2,34),(16,2,89),\\(9,3,47), (16,3,19),(10,5,4)\}. $
\end{lemma}
We re-state Lemma 1.1 as follows:\\
Let $s \leq 92.$ Assume that $G(x)$ has a factor of degree greater than or equal to $2$. Then
$(n,k,s) \in \{ (4,2,7),(4,2,23),(9,2,19),(9,2,47),(16,2,14),(16,2,34),(16,2,89),(9,3,47),\\ (16,3,19),(10,5,4)\}.$ We check that $g(x)$ is irreducible when $n \in \{ 4,9,10,16\}.$ Therefore by Lemma \ref{Lemma 1} with $G(x)=g(x)$ we have
\betagin{lemma}\lambdabel{Lemma2}
Let $n \geq 3$ and $s \leq 92$. Then $g_1(x)$ is either irreducible or a linear factor times an irreducible polynomial.
\end{lemma}
The irreducibility of $g_1(x)$ was proved by Schur \cite{Sch1} for $s=0,$ by Hajir \cite{Haj06} for $s=1$, by Sell \cite{Sell} for $s=2$ and by Hajir \cite{Haj} for $3 \leq s \leq 8.$ We shall prove
\betagin{theorem}\lambdabel{thm1}
$g_1(x)$ is irreducible for $9 \leq s \leq 88.$
\end{theorem}
Nair and Shorey \cite{NaSh15b} and Jindal, Laishram and Sarma \cite{jin} already proved the irreducibility of $g_1(x)$ in the ranges $9 \leq s \leq 22$ and $23 \leq s \leq 60$ respectively. But our proof of Theorem \ref{thm1} is new. The proofs of \cite{NaSh15b} and \cite{jin} depend on the method of Hajir in \cite{Haj} whereas our proof depends on Lemma \ref{Lemma2} which is a direct consequence of Lemma \ref{Lemma 1}. We could not cover the cases of $89 \leq s \leq 92$ due to computational limitations.
At this point, we pause with a digression concerning notation.
In order to dispel possible confusion in the reader concerning notation, it is worth pointing out that the notation $L_n^{<s>}(x)$ (resp., $\mathcal{L}_n^{<s>}(x)$) was used by the authors in \cite{Haj}, \cite{jin} and \cite{NaSh} to denote the polynomial $g(x, n, s)$ (resp., $g_1(x, n, s)$).
\section{Preliminaries}
From now onwards we shall assume that $s \geq 9.$ For a real number $\alphapha,$ we write $\left[ \alphapha\right]$ to be the largest integer not exceeding $\alphapha.$
Let $f(x) =\displaystyle\sum_{j=0}^{m}d_jx^j \in \mathbb{Z}[x]$ with $d_0d_m \neq 0$ and let $p$ be a prime. For an integer $x,$ let $\nu(x)=\nu_p(x)$ be the highest power of $p$ dividing $x$ and we write $\nu(0)=\infty.$ Let $S$ be the following set of
points in the extended plane
$$S =\{(0,\nu(d_m)),(1,\nu(d_{m-1})),(2,\nu(d_{m-2})),\ldots,(m,\nu(d_0))\}.$$
Consider the lower edges along the convex hull of these points. The left most endpoint
is $(0,\nu(d_m))$ and the right most endpoint is $(m,\nu(d_0))$. The endpoints of each edge
belong to $S$ and the slopes of the edges increase strictly from left to right. The polygonal path formed by these edges is called the Newton polygon
of $f(x)$ with respect to the prime $p$ and we denote it by $NP_p(f)$. The endpoints of the
edges of $NP_p(f)$ are called the vertices of $NP_p(f)$.
We begin with a very useful result, due to Filaseta \cite{Fil}, giving a criterion on the factorisation of a polynomial in terms of the maximum slope of the edges of its Newton polygon.
\betagin{lemma}\lambdabel{newton1}
Let $l, k,m$ be integers with $m \geq 2k > 2l \geq 0$. Suppose $h(x) =\displaystyle\sum_{j=0}^{m}b_jx^j \in
\mathbb{Z}[x] $ and $p$ be a prime such that $ p \nmid b_m$ and $p\mid b_j $ for $0 \leq j \leq m-l-1$ and the
right most edge of $ NP_p(h)$ has slope $ <\frac{1}{k}$. Then for any integers $a_0, a_1,\ldots, a_m$
with $p\nmid a_0a_m$, the polynomial $f(x) =\displaystyle\sum_{j=0}^{m} a_jb_jx^j$ cannot have a factor with degree in $[l + 1, k]$.
\end{lemma}
The next result is Lemma 4.2 of \cite{ShTi10} with $a=0.$
\betagin{lemma}\lambdabel{ShTi}
Let $a_0,a_1,\cdots,a_n$ denote arbitrary integers and $$h(x)=\displaystyle\sum_{j=0}^{n}a_j\frac{x^j}{j!}.$$ Assume that $h(x)$ has a factor of degree $k \geq 1.$ Suppose that there
exists a prime $p > k $ such that $p$ divides $n(n-1) \cdots (n-k+1).$ Then $p$ divides $a_0a_n.$
\end{lemma}
For a positive integer $l$ and a prime $p,$ let $\nu_p(l)$ be the maximal power of $p$ dividing $l.$
\betagin{lemma}\lambdabel{order1}
Let $p$ be a prime. For any integer $l\geq 1$, write $l$ in base $p$ as $l=l_tp^t+l_{t-1}p^{t-1}+\dots+l_1p+l_0$ where $0\leq l_i\leq p-1$ for $0\leq i \leq t$ and $l_t >0$. Then
\begin{align*}
\nu_p(l!)=\frac{l-\sigma_p(l)}{p-1}
\end{align*}
where $\sigma_p(l)=l_t+l_{t-1}+\dots+l_1+l_0$.
\end{lemma}
This is due to Legendre. For a proof, see \cite[Ch.17, p 263]{Hasse}. As a consequence we have
\begin{align}\label{eq2}
\nu_p\left(\binom{m}{t}\right)= \frac{\sigma_p(t)+\sigma_p(m-t)-\sigma_p(m)}{p-1}.
\end{align}
\begin{lemma}\label{Lemma 6}
Assume that $g_1(x)$ is a linear factor times an irreducible polynomial. Let $p$ be a prime dividing $n$ and $ s < p^2.$ Then \begin{align*}
d+ \left[\frac{s}{p}\right] \geq p
\end{align*}
where $d \equiv \frac{n}{p} \pmod p$ for $ 1 \leq d <p.$
\end{lemma}
The assertion of Lemma \ref{Lemma 6} was proved in \cite[Corollary 3.2]{jin} under the assumption of $p$ dividing $n_1$ where
\betagin{align}\lambdabel{defn of n}
n=n_0\cdot n_1 \ {\rm with} \ \gcd(n_0,n_1)=1
\end{align}
and
\betagin{align}\lambdabel{n_1}
n_1=\displaystyle \mbox{$\mathfrak p$}rod_{p| \gcd (n,\binom{n+s}{s})}p^{\text{ord}_p(n)}.
\end{align} Therefore $n_0$ is the largest divisor of $n$ which is coprime to $\binom{n+s}{s}.$ Thus the assumption $p$ dividing $n_1$ in \cite[Corollary 3.2]{jin} is replaced by $p$ dividing $n$ in Lemma \ref{Lemma 6} when $g_1(x)$ is linear factor times an irreducible polynomial.
\begin{proof}
We apply Lemma \ref{ShTi} with $h(x)=g_1(x)$ and $k=1$ to conclude that \begin{align} \label{eq1}
p\,\Big|\,\frac{(n+1)\cdots (n+s)}{s!}.
\end{align}
If $\nu_p(n)>s,$ then $\nu_p\left( \frac{(n+1)\cdots (n+s)}{s!}\right) \leq \nu_p\left(\frac{s!}{s!}\right) \leq 0$ contradicting \eqref{eq1}. Therefore $\nu_p(n) \leq s < p^2$ and hence $ 1 \leq \nu_p(n) <2.$ We write,
\betagin{align*}
n=pD \ \text{where} \ \gcd(D,p)=1
\end{align*} and $s=ps_1+s_0$ where $1 \leq s_1 <p, 0 \leq s_0 <p$. Then $n+s=p(D+s_1)+s_0$ which implies that $\sigma_p(n+s)=\sigma_p(D+s_1)+s_0$. Now we argue as in \cite[Lemma 3.1]{jin} for deriving from \eqref{eq2} and \eqref{eq1} that
\begin{align*}
1 \leq \nu_p\left(\binom{n+s}{s}\right)= &\frac{\sigma_p(n)+\sigma_p(s)-\sigma_p(n+s)}{p-1}\\
=& \frac{\sigma_p(D)+s_1+s_0-\sigma_p(D+s_1)-s_0}{p-1}\\
=&\frac{\sigma_p(D)+s_1-\sigma_p(D+s_1)}{p-1}\\
=& \nu_p\left(\binom{D+s_1}{s_1}\right)\\
=& \nu_p \left(\frac{(D+1)\cdots (D+s_1)}{s_1!}\right)\\
=& \nu_p((D+1) \cdots (D+s_1))\\
=& \nu_p(D+j) \ \text{for \ exactly one} \ j \ \text{with} \ 1 \leq j \leq s_1
\end{align*}
since $s_1 <p.$ Hence $D+\left[ \frac{s}{p}\right]=D+s_1 \geq D+j \equiv 0 $ (mod $p$). Since $\gcd(D,p)=1$, we observe from \eqref{eq2} that $D=\frac{n}{p} \equiv d$ (mod $p$) where $ 1 \leq d <p$ by looking at the $p$-adic representation of $n.$ Hence $ d+ \left[ \frac{s}{p}\right] \geq p$ where $d \equiv \frac{n}{p}$ (mod $p$) for $1 \leq d <p.$
\end{proof}
Next, we prove
\betagin{lemma}\lambdabel{Lemma 7}
Assume that $g_1(x)$ is linear factor times an irreducible polynomial. Then for $n \leq 127$ and $s \leq 103,$ $g_1(x)$ is irreducible.
\end{lemma}
This result is proved in \cite[Lemma 2.10]{NaSh15b} without the assumption that $g_1(x)$ is linear factor times an irreducible polynomial. But Lemma \ref{Lemma 7} suffices for our purpose. We give here a proof of this particular case, as it involves considerably less computations.
\betagin{proof}
Let $n \leq 127$ and $s \leq 103.$ Since $g_1(x)$ is linear factor times an irreducible polynomial, we see that $n_0=1.$ Assume that $n \geq s+2.$ Then $\mbox{$\mathfrak m$}ax( \frac{n+s}{2},n-1)=n-1$ and we derive from \cite[Lemma 4.1]{Haj} that $g_1(x)$ is irreducible if $n$ is prime. Now we check that $g_1(x)$ is irreducible for all pairs $(n,s)$ with $n$ composite and $n \geq s+2.$ Next we assume that $n <s+2.$ Then $\mbox{$\mathfrak m$}ax( \frac{n+s}{2},n-1)=\frac{n+s}{2}$. We determine all pairs $(n,s)$ such that $n <s+2$ and there exists a prime $p$ satisfying $\frac{n+s}{2} < p \leq n$. Then $g_1(x)$ is irreducible for all these pairs $(n,s)$ by \cite[Lemma 4.1]{Haj}. For the remaining pairs $(n,s)$ with $n <s+2,$ we check that $g_1(x)$ is irreducible.
\end{proof}
We close this section by stating the following result which is Lemma 3.6 from \cite{jin} with $i=1$.
\betagin{lemma} \lambdabel{Lemma factor1}
Let $p|n(s+1)$ and $\nu_p\left(\binom{n+s}{s}\right)=u.$ Then $g_1(x)$ cannot have a factor of degree $1$ if any of the following conditions holds:
\betagin{itemize}
\item[(i)] $u=0$
\item[(ii)] $u >0$, $ p>2$ and $
\max \left\{\frac{u+1}{p}, \frac{\nu_p(n+s-z_0)-\nu_p(n)}{z_0+1}\right\} < 1$,
where $z_0 \equiv n+s$ (mod $p$) with $1 \leq z_0 <p.$
\end{itemize}
\end{lemma}
\section{Proof of Theorem \ref{thm1}}
For $c> 1$ and $s \geq c^2$, we consider the following set given by
\begin{align*}
H_{s,c}=\{n \in \mathbb{N} : n >127 \ \text{and for } p |n, \ p^{\nu_p(n)} \leq s \ \text{and if} \ p > sc^{-1} \ \text{then} \ d+\left[\frac{s}{p}\right] \geq p \}
\end{align*}
where $1 \leq d <p$ and $d \equiv \frac{n}{p}$ (mod $p$). Since $p \geq sc^{-1} \geq \sqrt{s},$ we derive from Lemmas \ref{Lemma 6} and \ref{Lemma 7} that it suffices to prove the irreducibility of $g_1(x)=g_1(x,n,s)$ with $n \in H_{s,c}.$ We partition $H_{s,c}$ as $H_{s,c,1}$ and $H_{s,c,2}$ given by
\betagin{align*}
H_{s,c,1}=\{ n \in H_{s,c} \ \text{such that} \ P(n) \leq [sc^{-1}]\}
\end{align*}
and
\betagin{align*}
H_{s,c,2}=\{ n \in H_{s,c} \ \text{such that} \ P(n) > [sc^{-1}]\}.
\end{align*}
Let $9 \leq s \leq 88$. By taking $ c \in \{3,3.42,5.5,7.7\}$ we compute $H_{s,c,1}$ and $H_{s,c,2}$ and hence $H_{s,c}$ for $ 9 \leq s \leq 88.$ We give some details regarding the computations of $H_{s,c}$ below. For example, for $s=80,$ and $ c=7.7$, the cardinality of $H_{s,c}$, $|H_{s,c}|=1538$ and for $s=85,$ and $ c=7.7$, the cardinality of $H_{s,c}$, $|H_{s,c}|=2466.$ The following table gives the $c$ values which we have chosen for each $s$ to compute the set $H_{s,c}$.
\betagin{center}
\betagin{tabular}{|c|c|}
\hline
$s$ & $c$ \\
\hline
$9 \leq s \leq 11$ & $3$ \\
\hline
$12 \leq s \leq 35$ & $3.42$ \\
\hline
$36 \leq s \leq 60$ & $5.5$ \\
\hline
$61 \leq s \leq 88$ & $7.7$\\
\hline
\end{tabular}
\end{center}
For each $n \in H_{s,c}$, we apply Lemma \ref{Lemma factor1} to derive that $g_1(x)=g(x,n,s)$ is irreducible except for $(n,s) \in T$ where
\betagin{align*}
T=&\{(272,17),(144,21),(144,23),(144,25),(144,26),(312,26),(600,26),(216,29),(216,31),\\& (720,31),(240,35), (1440,35),(288,40),(288,41),(216,42),(216,44),(216,47),(288,47),\\& (288,48),(216,49),(144,51),(288,51),(144,53),(216,53),(288,53),(4320,55),(216,59),\\&(216, 63), (288, 63),(432, 63), (672, 63),(180,71),(192,71),(216,71),(216,79),(576,79),\\&(144, 80), (192, 80),(216, 80), (320, 80), (432, 80), (576,
80), (720, 80), (4320, 80)\}
\end{align*}
We use the \texttt{IrreduciblePolynomialQ} command in Mathematica to check that $g_1(x)$ is irreducible for all these values of $(n,s) \in T.$
\betagin{remark}
It is not necessary to compute $H_{s,c}$ for all values of $s$. For a fixed $c$, if $[sc^{-1}]=[(s+1)c^{-1}]$ and $P(s+1) \leq [sc^{-1}]$, then $H_{s,c}=H_{s+1,c}$. The assertion follows from the definitions of $H_{s,c,1}$ and $H_{s,c,2}$. Therefore $H_{s,c}=H_{s+1,c}$ for $s \in \{19,27,29,34\}$ with $c=3.42$, $s \in \{39,41,47,49,53,55,59\}$ with $c=5.5$ and $s \in \{62,69,71,74,79,83,87\}$ with $c=7.7$.
\end{remark}
\betagin{remark}
We can take $c=5.5$ or $c=7.7$ according as $s$ at least $36$ or $s \geq 60$ respectively. But we cannot take $c$ more than $3.42\frac{s}{s-1}$ without sharpening \cite[Corollary 6]{NaSh15b}. Consequently we get sets $H_{s,c}$ of smaller size when $s \geq 36$ and this reduces the computations.
\end{remark}
\end{document} |
\begin{document}
\title[A remark on the extension of $L^{2}$ holomorphic functions]
{A remark on the extension of $L^{2}$ holomorphic functions}
\author{Qi'an Guan}
\address{Qi'an Guan: School of
Mathematical Sciences, Peking University, Beijing 100871, China.}
\email{guanqian@math.pku.edu.cn}
\thanks{The author was partially supported by NSFC}
\subjclass[2010]{32D15, 32E10, 32L10, 32U05, 32W05}
\keywords{plurisubharmonic
functions, holomorphic functions, $L^2$ extension}
\date{}
\dedicatory{}
\commby{}
\begin{abstract}
In this note, we answer a question on the extension of $L^{2}$ holomorphic functions posed by Ohsawa.
\end{abstract}
\maketitle
\section{an answer to a question posed by Ohsawa}
In \cite{Ohsawa2017}, Ohsawa gave a survey on a recent ``remarkable'' progress (c.f. \cite{B-L,Bl_inv,G-ZhouL2_CR,G-ZhouL2_ann,G-ZhouL2_Sci,G-Zhou-ZhuL2_CR})
around the famous Ohsawa-Takegoshi $L^{2}$ extension theorem \cite{O-T}.
After that,
Ohsawa recalled the following consequence of the main result in \cite{G-ZhouL2_ann},
and presented a shorter proof based on a general criterion for the extendibility in \cite{Ohsawa2017}.
\begin{Theorem}
\label{coro:GZ-domain}(\cite{G-ZhouL2_ann}, see also Theorem 0.1 in \cite{Ohsawa2017})
Let $D\subset\mathbb{C}^n$ be a pseudoconvex domain, and let $\varphi$ be a plurisubharmonic
function on $D$ and $H=\{z_n=0\}$. Then for any holomorphic
function $f$ on $H$ satisfying
$$\int_{H}|f|^{2}e^{-\varphi}dV_{H}<\infty,$$
there exists a holomorphic function $F$ on $D$ satisfying $F = f$ on $H$ and
\begin{eqnarray*}
\int_{D}|F|^{2}e^{-\varphi-(1+\varepsilon)\log(1+|z_{n}|^{2})}dV_{D}
\leq\frac{\pi}{\varepsilon}\int_{H}|f|^{2}e^{-\varphi}dV_{H}.
\end{eqnarray*}
\end{Theorem}
In \cite{Ohsawa2017},
considering general plurisubharmonic function $\psi(z_{n})$ instead of $(1+\varepsilon)\log(1+|z_{n}|^{2})$
in Theorem \ref{coro:GZ-domain},
Ohsawa posed the following question on the extension of $L^{2}$ holomorphic functions.
\begin{Question}
\label{Q:ohsawa2017}
Given a subharmonic function $\psi$ on $\mathbb{C}$ such that $\int_{\mathbb{C}}e^{-\psi}<+\infty$,
for any subharmonic function $\varphi$ on $\mathbb{C}$,
can one find a holomorphic function $f$ on $\mathbb{C}$ satisfying $f(0)=1$,
and
$$\int_{\mathbb{C}}|f|^{2}e^{-\varphi-\psi}\leq e^{-\varphi(0)}\int_{\mathbb{C}}e^{-\psi}?$$
\end{Question}
When $\psi$ does not depend on $\arg z$, Sha Yao gave a positive answer to Question \ref{Q:ohsawa2017} in her Ph.D. thesis
by using the main result in \cite{G-ZhouL2_ann}.
In the present article,
we give the following (negative) answer to Question \ref{Q:ohsawa2017}.
\begin{Theorem}
\label{t:ohsawa2017}
There exist subharmonic functions $\psi$ and $\varphi$ on $\mathbb{C}$ satisfying
$(1)$ $\int_{\mathbb{C}}e^{-\psi}<+\infty$;
$(2)$ $\varphi(0)\in(-\infty,+\infty)$;
$(3)$ for any holomorphic function $f$ on $\mathbb{C}$ satisfying $f(0)=1$,
$\int_{\mathbb{C}}|f|^{2}e^{-\varphi-\psi}=+\infty$ holds.
\end{Theorem}
\section{Proof of Theorem \ref{t:ohsawa2017}}
Let $\psi=2\max\{c_{1}\log|z-1|,c_{2}\log|z-1|\}$ and $\varphi=2(1-c_{1})\log|z-1|$,
where $c_{1}\in(\frac{1}{2},1)$ and $c_{2}\in(1,\frac{3}{2})$.
We prove Theorem \ref{t:ohsawa2017} by contradiction:
if not, then there exists a holomorphic function $f$ on $\mathbb{C}$ satisfying $f(0)=1$,
and
\begin{equation}
\label{equ:ohsawa1}
\int_{\mathbb{C}}|f|^{2}e^{-\varphi-\psi}<+\infty.
\end{equation}
Note that $(\psi+\varphi)|_{\{|z-1|<1\}}=2\log|z-1|$,
then inequality \eqref{equ:ohsawa1} implies that $f(1)=0$.
Note that $\psi+\varphi-2(1-c_{1}+c_{2})\log|z|$ is bounded near $\infty$,
then inequality \eqref{equ:ohsawa1} implies that $f$ is a polynomial.
Furthermore, it follows from $1-c_{1}+c_{2}<2$ and inequality \eqref{equ:ohsawa1} that the degree of $f$ must be $0$,
which contradicts $f(1)=0$.
This proves the present theorem.
{\em Acknowledgements}. The author would like to thank Professor Takeo Ohsawa for giving a series of talks at Peking University and for sharing his recent work.
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Nonclassical properties of a particle in a finite range trap: the $f$-deformed quantum oscillator approach}
\author[]{M. Davoudi Darareh \corref{cor1}}
\ead{m.davoudi@sci.ui.ac.ir}
\author[]{M. Bagheri Harouni}
\ead{m-bagheri@phys.ui.ac.ir}
\address{Department of Physics,
Faculty of Science, University of Isfahan, Hezar Jerib, Isfahan,
81746-73441, Iran} \cortext[cor1]{Corresponding author. Tel.:+98
311 7932435 ; fax: +98 311 7932409
E-mail address: m.davoudi@sci.ui.ac.ir (M. Davoudi Darareh)}
\begin{abstract}
A particle bounded in a potential with finite range is described
by using an $f$-deformed quantum oscillator approach. Finite range
of this potential can be considered as a controllable deformation
parameter. The non-classical quantum statistical properties of
this deformed oscillator can be manipulated by nonlinearities
associated to the finite range.
\end{abstract}
\begin{keyword}
Modified P\"{o}schl-Teller like coherent state \sep nonclassical property \sep
$f$-deformed quantum oscillator
\PACS 03.65.Fd, 03.65.Ge, 42.50.Dv, 42.50.Ar
\end{keyword}
\end{frontmatter}
\section{Introduction}
The quantum harmonic oscillator, its associated coherent states
and their generalizations~\cite{klauder skag perelomov ali} play
an important role in various theoretical and experimental fields
of modern physics, including quantum optics and atom optics.
Motivations for these generalizations have arisen from symmetry
considerations~\cite{klauder 1963}, dynamics~\cite{neito simmons
1978} and algebraic
aspects~\cite{jimbo,biden macfar}. \\
\indent The quantum groups approach~\cite{jimbo} for generalizing
the notion of quantum harmonic oscillator and its realizations in
physical systems, by providing an algebraic method, has given the
possibility of extending the creation and annihilation operators
of the usual quantum oscillator to introduce the deformed
oscillator. In a very general important case, the associated
algebra of this deformed oscillator may be viewed as a
deformation of classical Lie algebra by a generic function $f$,
the so-called $f$-deformation function, depending nonlinearly on
the number of excitation quanta and some deformation parameters.
The corresponding oscillator is called an $f$-deformed
oscillator~\cite{manko 1 manko 55}. In contrast to the usual
quantum harmonic oscillator, $f$-deformed oscillators do not have
equally-spaced energy spectrum. Furthermore, it has been known
that the most of nonlinear generalizations of some physical
models, such as those considered in~\cite{crnug mar mik}, are only
particular cases of $f$-deformed models. Thus, it is reasonable
that $f$-deformed oscillators exhibit strongly various
nonclassical properties~\cite{manko 1 manko 55,roy,filho}, such as
the sub-Poissonian statistics, squeezing and the quantum
interference effects, displaying the striking consequences of the
superposition principle of quantum mechanics. In addition,
$f$-deformed models depend on one or more deformation parameters
which should permit more flexibility and more ability for
manipulating the model~\cite{davoudi,katriel solomon 49}. An
important question in the $f$-deformed model is the physical
meaning of its deformation parameters. The $q$-deformed
oscillator~\cite{biden macfar}, as a special kind of $f$-deformed
oscillators with only one deformation parameter $q$, has been
extensively applied in describing physical models, such as
vibrational and rotational spectra of molecules~\cite{chang yan
bonats argy ray bonatsos raychev rou smi}. The appearance of
various nonclassical features induced by a $q$-deformation
relevant to some specific nonlinearity is also
studied~\cite{katriel solomon
artoni zang birman}. \\
\indent Based on the above-mentioned considerations, $f$-deformed
quantum oscillators and their associated coherent states, such as
$f$-coherent states ~\cite{manko 1 manko 55} or nonlinear coherent
states~\cite{filho}, can be appropriately established in
attempting to describe certain physical phenomena where their
effects could be modelled through a deformation on their
dynamical algebra with respect to conventional or usual
counterparts. This approach has been accomplished, for instance, in the
study of the stationary states of the center-of-mass motion of an ion in the harmonic trap~\cite{filho} and under
effects associated with the curvature of physical space~\cite{mahdifar
vogel}, the influence of the spatial confinement on the
center-of-mass motion of an exciton in a quantum dot~\cite{bageri
41}, the influence of atomic collisions and the finite number of
atoms in a Bose-Einstein condensate on controlled manipulation of
the nonclassical properties of radiation field~\cite{davoudi},
some nonlinear processes in high intensity photon beam~\cite{manko 1 manko
55}, intensity-dependent atom-field interaction in absence and in presence of nonlinear quantum
dissipation in a micromaser~\cite{naderi 32 naderi 39} and finally, incorporating the effects of interactions among
the particles in the framework of the $q$-deformed algebra~\cite{scarfone
41}. \\
\indent It is shown that the trapped systems provide a powerful tool
for preparation and manipulation of
nonclassical states~\cite{liebfried}, quantum
computations~\cite{bennett} and quantum communications~\cite{braunstein
loock}. Improved experimental techniques have caused precise measurements on realistic trapping systems, for example,
trapped ion-laser systems~\cite{meekhof monroe}, trapped gas of
atoms~\cite{anderson} and electron-hole carriers confined in a
quantum well and quantum dot~\cite{harrison}. A study of
confined quantum systems using the Wood-Saxon
potential~\cite{costa} and the $q$-analogue harmonic oscillator
trap~\cite{sharma sharma}, are some efforts which can be used to explain some
experimentally observed deviations from the results predicted by
calculations based on the harmonic oscillator model. \\
\indent A realistic case in any experimental setup is that the
dimension of the trap is finite and the realistic trapping
potential is not the harmonic oscillator potential extending to
infinity. Thus, the realistic confining potential becomes flat near the edges of the trap and
can be simulated by the tanh-shaped potential $V(x)=D\,\tanh^2(x/\delta)$,
so-called the modified (or hyperbolic) P\"{o}schl-Teller(MPT)
potential~\cite{mpt potential}. The MPT potential presents
discrete (or bound) and continuum (or scattering) states. The
dynamical symmetry algebra associated with the bound part of the
spectrum is $su(2)$ algebra ~\cite{frank isacker} while for the
complete spectra is $su(1,1)$ algebra ~\cite{arias gomez lemus}.
The MPT potential has been used very widely in many branches of
physics, such as, atom optics~\cite{wang}, molecular physics~\cite{frank wolf lemus bernal} and
nanostructure physics~\cite{harrison}.\\
\indent Constructing coherent states for systems with discrete and continuous spectrum~\cite{gazeau klauder} and for various
kinds of confining potentials~\cite{antoine gazeau klauder} have become a very important tool in the
study of some quantum systems. The P\"{o}schl-Teller(PT) potentials,
including trigonometric PT(TPT) and MPT potentials with
discrete infinite and finite dimensional bound states respectively, because of their relations to several
other trapping potentials are of crucial importance.
Some types of the coherent states for the MPT potential have been constructed. The minimum-uncertainty coherent states
formalism ~\cite{neito 2}, the Klauder-Perelomov
approach~\cite{klauder skag perelomov ali} by realization of
lowering and raising operators in terms of the physical variable $u=\tanh(x/\delta)$ by means of
factorizations~\cite{cruz kuru negro} and applying one kind
generalized deformed oscillator algebra with a selected deformed commutation relation~\cite{daskaloyannis
91}, are some attempts for this purpose. \\
\indent In the present paper, we intend to investigate the
nonlinear effects appeared due to finite dimension of the
trapping potential on producing new nonclassical quantum
statistical properties using the
$f$-deformed quantum oscillator approach. For this aim, it
will be shown that the finite range of the trapping potential
leads to the $f$-deformation of the usual harmonic potential with the well depth $D$
as a controllable physical deformation
parameter. Then, the $f$-deformed bound coherent states~\cite{recamier
jauregui} for the above-mentioned MPT quantum oscillator are
introduced and their nonclassical properties are examined. We think that by this $f$-deformed quantum oscillator approach
the problem of trapped ion-laser system and trapped gas of atoms, such as a Bose-Einstein condensate, in a realistic trap
can be studied analytically. \\
\indent The paper is organized as follows. In section \ref{f deformed}, we
introduce the $f$-deformed quantum oscillator equivalent to the
MPT oscillator and obtain the associated ladder operators. In
section \ref{f coherent states}, we construct the $f$-deformed
bound coherent states of the MPT quantum oscillator and examine
its resolution of identity. Section \ref{statistics} is devoted to the study of the influence of the
finite range potential on producing and manipulating the
nonclassical properties, including the sub-Poissonian statistics and squeezing character. Finally, the summary and conclusions are presented in
section \ref{conclusion}.
\section{MPT Hamiltonian as an $f$-deformed quantum
oscillator}\label{f deformed}
In this section, we will consider a
bounded particle inside the MPT potential, called the MPT
oscillator, and we will associate to this system an $f$-deformed
quantum oscillator. By using this mathematical model, we try to
investigate physical deformation parameters in the model, to
manipulate the nonlinearities related to the finite range effects
on this system. For this purpose, we first give the bound energy
eigenvalues for the MPT potential. Then, by comparing it with the
energy spectrum of the general $f$-deformed quantum oscillator, we
will obtain the deformed annihilation and
creation operators.\\
\indent Let us consider the MPT potential energy
\begin{equation}\label{mpt potential}
V(x)=D\,\tanh^2\left(\frac{x}{\delta}\right),
\end{equation}
where $D$ is the depth of the well, $\delta$ determines the range
of the potential and $x$ gives the relative distance from the
equilibrium position. The well depth, $D$, can be defined as
$D=\frac{1}{2}m\omega^2\delta^2$, with mass of the particle $m$
and angular frequency $\omega$ of the harmonic oscillator, so
that, in the limiting case $D\rightarrow \infty$ (or
$\delta\rightarrow \infty$), but keeping the product $m\omega^2$
finite, the MPT potential energy reduces to harmonic potential
energy, $\lim_{D\rightarrow \infty}
V(x)=\frac{1}{2}m\omega^2x^2$. Figure 1 depicts the MPT potential
for three different values of the well depth $D$. Harmonic
potential limit by increasing $D$ is clear from this figure.
Solving the Schr\"{o}dinger equation, the energy eigenvalues for
the MPT potential are obtained as~\cite{landau}
\begin{equation}\label{mpt energy 1}
E_n=D-\frac{\hbar^2\omega^2}{4D}(s-n)^2, \quad \quad n=0, 1,
2, \cdots, [s]
\end{equation}
in which $s=(\sqrt{1+(\frac{4D}{\hbar\omega})^2}-1)/2$, and $[s]$
stands for the closest integer to $s$ that is smaller than $s$.
The MPT oscillator quantum number $n$ can not be larger than the
maximum number of bound states $[s]$, because of the dissociation
condition $s-n\geq 0$. Consequently, the total number of bound
states is $[s]+1$. We should note that for integer $s$, the final bound
state and the total number of bound states will be $s-1$ and $s$,
respectively. Also, for every small value of the well depth $D$,
we always have at least one bound state for the MPT oscillator, i.e., the ground state. By introducing a dimensionless parameter
$N=\frac{4D}{\hbar\omega}=\frac{2m\omega\delta^2}{\hbar}$, the total number of bound
states is obtained from $[(\sqrt{1+N^2}-1)/2]+1$. For
integer $s$, a simple relation $N=2\sqrt{s(s+1)}$ will connect $N$
to the total number of bound states, i.e., $s$. The
bound energy spectrum in equation (\ref{mpt energy 1}) can be
rewritten as
\begin{equation}\label{mpt energy 2}
E_n=\hbar\omega[-\frac{n^2}{N}+(\sqrt{1+\frac{1}{N^2}}-\frac{1}{N})n+\frac{1}{2}(\sqrt{1+\frac{1}{N^2}}-\frac{1}{N})].
\end{equation}
The relation (\ref{mpt energy 2}) shows a nonlinear dependence on
the quantum number $n$, so that, different energy levels are not
equally spaced. It is clear that, in the limit $D\rightarrow
\infty$ (or $N\rightarrow \infty$), the energy spectrum for the
quantum harmonic oscillator will be obtained, i.e.,
$E_n=\hbar\omega(n+\frac{1}{2})$. In contrast with some confined
systems such as a particle bounded in an infinite and finite
square well potentials, by decreasing the size of the confinement
parameter, i.e., the finite range $\delta$ of the MPT oscillator,
energy eigenvalues decrease.\\ \indent A quantity that has a
close connection to experimental information is the energy level
spacing, $E_{n+1}-E_{n}$, where it corresponds to the transition
frequency between two adjacent energy levels. Furthermore, by this
quantity one can theoretically explore an algebraic
representation for the quantum mechanical potentials with discrete
spectrum~\cite{wunsche}. Based upon above considerations, a useful
illustration for the effects of the deformation parameter $D$ on
the nonlinear behavior of the deformed oscillator, can be
investigated by introducing the delta parameter $\Delta_n$ as
\begin{equation}\label{delta param}
\Delta_n=\frac{E_{n+1}-E_{n}}{\hbar\omega}-1
\end{equation}
\noindent which measures the amount of deviation of the adjacent
energy level spacing of the deformed oscillator with respect to
the non-deformed or harmonic oscillator. Substituting from
equation (\ref{mpt energy 2}) in equation (\ref{delta param}) we
can obtain the delta parameter $\Delta_n$ for the MPT potential
\begin{equation}\label{delta param 2}
\Delta_n=-\frac{2}{N}n+\sqrt{1+\frac{1}{N^2}}-\frac{2}{N}-1
\end{equation}
\indent On the other
hand, the $f$-deformed quantum oscillator \cite{manko 1 manko 55},
as a nonlinear oscillator with a specific kind of nonlinearity, is
characterized by the following deformed dynamical variables
$\hat{A}$ and $\hat{A}^\dag$
\begin{eqnarray}\label{fd}
\hat{A}&=&\hat{a}f(\hat{n})=f(\hat{n}+1)\hat{a},\nonumber\\
\hat{A}^\dag&=&f(\hat{n})\hat{a}^\dag=\hat{a}^\dag f(\hat{n}+1), \quad
\quad
\hat{n}=
\hat{a}^\dag\hat{a},
\end{eqnarray}
\noindent where $\hat{a}$ and $\hat{a}^\dag$ are usual boson
annihilation and creation operators $([\hat{a},
\hat{a}^\dag]=1)$, respectively. The real deformation function
$f(\hat{n})$ is a nonlinear operator-valued function of the
harmonic number operator $\hat{n}$, where it introduces some
nonlinearities to the system. From equation (\ref{fd}), it
follows that the $f$-deformed operators $\hat{A}$,
$\hat{A}^\dag$ and $\hat{n}$ satisfy the following closed
algebra
\begin{eqnarray}\label{algebrafd}
&[\hat{A}, \hat{A}^\dag]=&(\hat{n}+1)f^2(\hat{n}+1)-\hat{n}f^2(\hat{n}),\nonumber\\
&[\hat{n}, \hat{A}]=&-\hat{A}, \quad \quad
[\hat{n}, \hat{A}^{\dag}]=\hat{A}^{\dag}.
\end{eqnarray}
\noindent The above-mentioned algebra, represents a deformed
Heisenberg-Weyl algebra whose nature depends on the nonlinear
deformation function $f(\hat{n})$.
An $f$-deformed oscillator is a nonlinear system
characterized by a Hamiltonian of the harmonic oscillator
form
\begin{equation}\label{hamiltf}
\hat{H}=\frac{\hbar\omega}{2}(\hat{A}^\dag\hat{A}+\hat{A}\hat{A}^\dag).
\end{equation}
Using equation (\ref{fd}) and the number state representation
$\hat{n}|n \rangle=n|n \rangle$, the eigenvalues of the
Hamiltonian (\ref{hamiltf}) can be written as
\begin{equation}\label{energyf}
E_n=\frac{\hbar\omega}{2}[(n+1)f^2(n+1)+nf^2(n)].
\end{equation}
\indent It is worth noting that in the limiting case $f(n)\rightarrow 1$, the deformed
algebra (\ref{algebrafd}) and the deformed energy eigenvalues
(\ref{energyf}) will
reduce to the conventional Heisenberg-Weyl
algebra and the harmonic oscillator spectrum, respectively.\\
\indent Comparing the bound energy spectrum of the MPT oscillator,
equation (\ref{mpt energy 2}), and the energy spectrum of an
$f$-deformed oscillator, equation (\ref{energyf}), we obtain
the corresponding deformation function for the MPT oscillator as
\begin{equation}\label{f}
f^2(\hat{n})=\sqrt{1+\frac{1}{N^2}}-\frac{\hat{n}}{N}.
\end{equation}
Furthermore, the ladder operators of the bound eigenstates of
the MPT Hamiltonian can be written in terms of the
conventional operators $\hat{a}$ and $\hat{a}^\dag$ as
follows
\begin{equation}\label{a mpt}
\hat{A}=\hat{a}\sqrt{\sqrt{1+\frac{1}{N^2}}-\frac{\hat{n}}{N}},
\quad \quad
\hat{A}^\dag=\sqrt{\sqrt{1+\frac{1}{N^2}}-\frac{\hat{n}}{N}}\hat{a}^\dag.
\end{equation}
\noindent These two operators satisfy the deformed Heisenberg-Weyl commutation relation
\begin{equation}\label{algebra mpt}
[\hat{A},
\hat{A}^\dag]=\sqrt{1+\frac{1}{N^2}}-\frac{2\hat{n}+1}{N},
\end{equation}
\noindent and they act upon the quantum number states $|n\rangle$, corresponding to the
energy eigenvalues $E_n$ given in equation (\ref{mpt energy
2}), as
\begin{eqnarray}\label{def ladder}
\hat{A}|n\rangle&=&f(n)\sqrt{n}|n-1 \rangle, \nonumber\\
\hat{A}^{\dag}|n\rangle&=&f(n+1)\sqrt{n+1}|n+1 \rangle.
\end{eqnarray}
\indent The commutation relation (\ref{algebra mpt}), can be
identified with the usual $su(2)$ commutation relations by
introducing the set of transformations
\begin{equation}\label{transform j}
\hat{A} \rightarrow \frac{\hat{J}_+}{\sqrt{N}}, \quad
\hat{A}^{\dag} \rightarrow \frac{\hat{J}_-}{\sqrt{N}}, \quad
\hat{n} \rightarrow \frac{\sqrt{1+N^2}-1}{2}-\hat{J}_0,
\end{equation}
\noindent where $\hat{J}_\mu$ satisfy the usual angular momentum
relations~\cite{rose}. The $f$-deformed commutation relation
(\ref{algebra mpt}) in a special case of large but finite value of $N$, which corresponds to the small deformation,
can lead to a maths-type $q$-deformed commutation
relation~\cite{arik}, i.e., $\hat{A}\hat{A}^{\dag}-q\hat{A}^{\dag}\hat{A}=1$, with
$q=1-\frac{2}{N}=1-\frac{\hbar\omega}{2D}$. The harmonic oscillator
limit corresponds to $D\rightarrow \infty$, in which case $q\rightarrow
1$. This result confirms a correspondence between the
$q$-deformed oscillators and finite range potentials, which
is studied elsewhere~\cite{balles civ reb monat das kok}.\\
\indent It is evident that, herein, we have focused our attention on the quantum states
of the MPT Hamiltonian which exhibit bound oscillations with
finite range. The remaining states, i.e., the scattering states or energy continuum eigenstates, have
non-evident boundary conditions. From a physical point of
view, it means that the excitation energies of this confined
system in the MPT potential energy are small compared with
the well depth potential energy $D$, such that, only the
vibrational modes dominate and the scattering or continuum
states should be neglected. Some important physical systems
with such circumstances are vibrational excitations of
molecular systems \cite{carrington choi moore},
trapped ions or atoms~\cite{song hai luo} and the electron-hole
carriers confined in a quantum well~\cite{harrison}.\\
\section{$f$-Deformed bound coherent states}\label{f coherent states}
In the context of the $f$-deformed quantum oscillator
approach, we introduce the $f$-deformed bound coherent states $|\alpha,f\rangle$ for the MPT
oscillator as a coherent superposition of all bound energy
eigenstates of the MPT Hamiltonian as below
\begin{equation}\label{alfa f}
|\alpha,f\rangle=C_f\sum_{n=0}^{[s]}\frac{\alpha^n}{\sqrt{n!}f(n)!}|n\rangle,
\quad
C_f=\left(\sum_{n=0}^{[s]}\frac{|\alpha|^{2n}}{n!(f(n)!)^2}\right)^{-1/2},
\end{equation}
\noindent so that $\hat{n}|n\rangle=n|n\rangle$, and $f(n)!=f(n)f(n-1)\cdots f(0)$, where $f(n)$ is
obtained in equation (\ref{f}). Since the sum in the
equation (\ref{alfa f}) is finite, the states $|\alpha,f\rangle$,
similar to the Klauder-Perelomov coherent states~\cite{klauder skag perelomov ali}, are not eigenstates of the
annihilation operator $\hat{A}$. From equations (\ref{def ladder})
and (\ref{alfa f}), we arrive at
\begin{equation}\label{a alfa f}
\hat{A}|\alpha,f\rangle=\alpha|\alpha,f\rangle-\frac{C_f\alpha^{[s]+1}}{\sqrt{[s]!}f([s])!}|[s]\rangle.
\end{equation}
As is clear from this equation, these states cannot be considered right-hand eigenstates of the annihilation
operator $\hat A$. This property is a common characteristic of all coherent states that are defined in a
finite-dimensional basis~\cite{recamier jauregui,buzek 1}.\\
\indent The ensemble of the $f$-deformed bound coherent states
$|\alpha,f\rangle$ labelled by the complex number $\alpha$
form an overcomplete set with the resolution of the identity
\begin{equation}\label{resolution}
\int d^2\alpha|\alpha,f\rangle
m_f(|\alpha|)\langle\alpha,f|=\sum_{n=0}^{[s]}
|n\rangle\langle n|=\hat{\textbf{1}},
\end{equation}
\noindent where $m_f(|\alpha|)$ is the proper measure for this family
of the bound coherent states. Substituting from equation (\ref{alfa f}) in equation
(\ref{resolution}) and using integral relation $\int _{0}^{\infty}K_\nu (t)t^{\mu-1}dt=
2^{\mu-2}\Gamma(\frac{\mu-\nu}{2})\Gamma(\frac{\mu+\nu}{2})$
for the modified Bessel function $K_\nu (t)$ of the second kind and of the
order $\nu$, we obtain the suitable choice for the measure
function as
\begin{equation}\label{measure}
m_f(|\alpha|)=\frac{K_\nu
(|\alpha|)}{2^l \pi |\alpha|^\nu C_f^2(|\alpha|)},
\end{equation}
\noindent where $\nu=(1+\gamma)n-\eta$, $l=(1-\gamma)n+\eta+1$ and
$\gamma=\frac{1}{N}$, $\eta=\sqrt{1+\frac{1}{N^2}}$.
\indent In contrast to the Gazeau-Klauder coherent
states~\cite{gazeau klauder}, the $f$-deformed coherent
states, such as introduced in equation (\ref{alfa f}), do not generally
have the temporal stability~\cite{manko 1 manko 55}. But it
is possible to introduce a notion of temporally stable
$f$-deformed coherent states~\cite{roknizadeh tavassoly}.
\section{Quantum statistical properties of the MPT
oscillator}\label{statistics}
\subsection{Sub-Poissonian statistics}
In order to determine the quantum statistics of the MPT quantum oscillator, we consider
Mandel parameter $Q$ defined by~\cite{mandel}
\begin{equation}\label{mandel param1}
Q=\frac{\langle\hat{n}^2\rangle-\langle\hat{n}\rangle^2}{\langle\hat{n}\rangle}-1.
\end{equation}
The sub-Poissonian statistics (antibunching effect), as an
important nonclassical property, exists whenever $Q <0$. When
$Q >0$, the state of the system is called super-Poissonian (bunching
effect). The state with $Q =0$ is called Poissonian.
Calculating the Mandel parameter $Q$ in equation (\ref{mandel
param1}) over the $f$-deformed bound coherent states $|\alpha, f\rangle$ defined in
equation (\ref{alfa f}), we can describe the
finite-range dependence of the Mandel parameter. Figure 2 shows the parameter $Q$ for
four different values of $|\alpha|$, i.e., $|\alpha|=3,\,
4,\,5,\,7$. As is seen, for every one of the values of $|\alpha|$, the Mandel parameter $Q$ exhibits the
sub-Poissonian statistics at certain range of $D$ or the dimensionless
parameter $N=\frac{4D}{\hbar\omega}$, where this range is
determined by the value of $|\alpha|$. The bigger the
parameter $|\alpha|$ is, the later the Mandel parameter
tends to the Poissonian statistics. As expected, with further increasing values
of $D$ or $N$, the Mandel parameter $Q$ finally stabilizes at
an asymptotical zero value, corresponding to the Poissonian
statistics associated to the canonical harmonic oscillator
coherent states. In the limit $N\rightarrow
0$ (or $D\rightarrow 0$) and for all values of $|\alpha|$,
the Mandel parameter becomes $Q=-1$, which is reasonable
because, in this limit, only the ground state is supported by the potential.
\subsection{Quadrature squeezing}
As another important nonclassical property, we examine the
quadrature squeezing of the MPT quantum oscillator. For this
purpose, we consider quadrature operators $\hat{q}_{\varphi}$ and
$\hat{p}_{\varphi}$ defined as ~\cite{dodonov}
\begin{equation}\label{quadr operat}
\hat{q}_{\varphi}=\frac{1}{\sqrt{2}}(\hat{a}e^{-i\varphi}+\hat{a}^\dag
e^{i\varphi}), \quad \quad
\hat{p}_{\varphi}=\frac{i}{\sqrt{2}}(\hat{a}^{\dag}e^{i\varphi}-\hat{a}e^{-i\varphi}),
\end{equation}
\noindent satisfying the commutation relation
$[\hat{q}_{\varphi},\hat{p}_{\varphi}]=i$. One can define the
invariant squeezing coefficient $S$ as the difference between
the minimal value (with respect to the phase $\varphi$) of
the variances of each quadratures and the mean value $1/2$ of
these variances in the coherent or vacuum state. Simple
calculations result in the formula
\begin{equation}\label{}
S=\langle\hat a^{\dag}\hat a\rangle-|\langle \hat
a\rangle|^2-|\langle\hat a^2 \rangle-\langle\hat a\rangle^2|,
\end{equation}
\noindent so that the condition of squeezing is $S<0$.
Calculating the squeezing parameter $S$
over the $f$-deformed bound coherent states in equation
(\ref{alfa f}), we examine the squeezed character of these states. In figure 3, we
have plotted the parameter $S$ with respect to the dimensionless
deformation parameter $N=\frac{4D}{\hbar\omega}$ for three
different values of $|\alpha|$, namely $|\alpha|=0.5,\, 1,\,
1.3$. As is seen, the states $|\alpha, f\rangle$ exhibit squeezing for
certain values of $|\alpha|$. Furthermore, the
squeezing character of the states $|\alpha, f\rangle$ tends
to zero as $N$ or the well depth $D$ of the MPT potential
approaches infinity, in accordance with the coherent states of
the quantum harmonic oscillator. In the limit $N\rightarrow
0$ (or $D\rightarrow 0$), this plot shows the quadrature
squeezing $S=0$, which is in agreement with the only ground
state supported by the potential in this limit.
\section{Conclusions}\label{conclusion}
In this paper, we have introduced an algebraic approach based
on the $f$-deformed quantum oscillator for considering a
particle in the real confining potential which has finite trap
dimension, in contrast to the harmonic oscillator potential
extending to infinity. Proposed confining model potential is
the modified P\"{o}schl-Teller potential. We have shown that
the effects of the finite trap dimension in this model
potential can be considered as a natural deformation in
the quantum harmonic oscillator algebra. This quantum
deformation approach makes possible analytical study of a wide category of
realistic bound quantum systems algebraically. It is shown
that the nonlinear behavior resulted from this finite range
effects can lead to generate and manipulate some important nonclassical
properties for this deformed quantum oscillator. We have
obtained that the presented $f$-deformed bound coherent
states of the modified P\"{o}schl-Teller potential can
exhibit the sub-Poissonian statistics and quadrature
squeezing in definite domain of the trap dimension or well depth $D$ of this potential.
In the large but finite value for the well depth $D$,
i.e., small deformation, a $q$-deformed oscillator with $q=1-\hbar\omega/(2D)$ will
result. In the limit $D\rightarrow \infty$, the harmonic
oscillator counterpart is obtained.\\ \indent
Based on the
approach in this paper, we can obtain exact solutions for
realistic confined physical systems such as, trapped ion-laser system (in
progress), Bose-Einstein condensate and confined carriers in
nano-structures.\\ \\
{\bf Acknowledgments}\\
The authors wish to thank The Office of Graduate Studies and Research Vice President of The University of Isfahan for their support.
{\bf Figure captions}\\
Fig. 1. Plots of the MPT potential for three different values of
the well depth $D$, $D=1$(solid curve), $D=2$(dashed curve) and
$D\rightarrow \infty$(dotted curve).
Fig. 2. Plots of the Mandel parameter $Q$ versus the dimensionless
deformation parameter $N$ for $|\alpha|=3$(solid curve),
$|\alpha|=4$(dashed curve), $|\alpha|=5$(dotted curve) and
$|\alpha|=7$(dash-dotted curve).
Fig. 3. Plots of the invariant squeezing coefficient $S$ versus
the dimensionless deformation parameter $N$ for
$|\alpha|=0.5$(solid curve), $|\alpha|=1$(dashed curve) and
$|\alpha|=1.3$(dotted curve).
\end{document} |
\begin{document}
\title{Model-Free Quantum Control with Reinforcement Learning}
\author{V. V. Sivak}
\email{vladimir.sivak@yale.edu}
\affiliation{Department of Applied Physics, Yale University, New Haven, CT 06520, USA}
\author{A. Eickbusch}
\affiliation{Department of Applied Physics, Yale University, New Haven, CT 06520, USA}
\author{H. Liu}
\affiliation{Department of Applied Physics, Yale University, New Haven, CT 06520, USA}
\author{B. Royer}
\affiliation{Department of Physics, Yale University, New Haven, CT 06520, USA}
\author{I. Tsioutsios}
\affiliation{Department of Applied Physics, Yale University, New Haven, CT 06520, USA}
\author{M. H. Devoret}
\affiliation{Department of Applied Physics, Yale University, New Haven, CT 06520, USA}
\begin{abstract}
Model bias is an inherent limitation of the current dominant approach to optimal quantum control, which relies on a system simulation for optimization of control policies. To overcome this limitation, we propose a circuit-based approach for training a reinforcement learning agent on quantum control tasks in a model-free way. Given a continuously parameterized control circuit, the agent learns its parameters through trial-and-error interaction with the quantum system, using measurement outcomes as the only source of information about the quantum state. Focusing on control of a harmonic oscillator coupled to an ancilla qubit, we show how to reward the learning agent using measurements of experimentally available observables. We train the agent to prepare various non-classical states using both unitary control and control with adaptive measurement-based quantum feedback, and to execute logical gates on encoded qubits.
This approach significantly outperforms widely used model-free methods in terms of sample efficiency.
Our numerical work is of immediate relevance to superconducting circuits and trapped ions platforms where such training can be implemented in experiment, allowing complete elimination of model bias and the adaptation of quantum control policies to the specific system in which they are deployed.
\end{abstract}
\maketitle
\section{Introduction}
Quantum control theory addresses a problem of optimally implementing a desired quantum operation using external controls. The design of experimental control policies is currently dominated by {\sl simulation-based} optimal control theory methods with favorable convergence properties thanks to the availability of analytic gradients \cite{Khaneja2005, Caneva2011, DeFouquieres2011} or automatic differentiation \cite{Leung2017, Abdelhafez2019}. However, it is important to acknowledge that simulation-based methods can only be as good as the underlying models used in the simulation. Empirically, model bias leads to a significant degradation of performance of the quantum control policies, when optimized in simulation and then tested in experiment \cite{Kelly2014, Chen2016, Rol2017, Werninghaus2021}. A practical model-free alternative to simulation-based methods in quantum control is thus desirable.
The idea of using model-free optimization in quantum control can be traced back to the pioneering proposal in 1992 of laser pulse shaping for molecular control with a genetic algorithm \cite{Judson1992}. Only in recent years has the controllability of quantum systems and the duty cycle of optimization feedback loops reached sufficient levels to allow for the experimental implementation of such ideas. The few existing demonstrations are based on model-free optimization algorithms such as Nelder-Mead simplex search \cite{Kelly2014, Chen2016, Rol2017}, evolutionary strategies \cite{Werninghaus2021} and particle swarm optimization \cite{Lumino2018}.
At the same time, deep reinforcement learning (RL) \cite{Sutton2017, Francois-Lavet2018} emerged as not only a powerful optimization technique but also a tool for discovering adaptive decision-making policies. In this framework, learning proceeds by trial-and-error, without access to the model generating the dynamics and its gradients.
Being intrinsically free of model bias, it is an attractive alternative to traditional simulation-based approaches in quantum control. In a variety of domains, deep reinforcement learning has recently produced spectacular results, such as beating world champions in board games \cite{Silver2016, Silver2018}, reaching human-level performance in sophisticated computer games \cite{Mnih2015, Vinyals2019}, and controlling robotic locomotion \cite{Levine2015, Haarnoja2018b}.
Applying model-free RL to quantum control implies direct interaction of the learning agent with the controlled quantum system, which presents a number of unique challenges not typically encountered in classical environments. Quantum systems have large continuous state spaces that are only partially observable to the agent through measurements. For example, a pure qubit state can be described as a point on a Bloch sphere, but a projective measurement of a qubit observable yields a random {\it binary}$\,$ outcome. Qubits are often used as ancillary systems to control harmonic oscillators, in which case the underlying state space is formally infinite-dimensional. Learning quantum control of such systems is akin to learning to drive a car with a single sensor that provides binary-valued feedback.
The question arises: can classical model-free RL agents efficiently handle such ``quantum-observable'' environments?
The previous applications of RL to quantum control
\cite{Chen2014, Bukov2018, Bukov2018a, Zhang2019, Porotti2019, An2020, August2018, Haug2020, Kuo2021, Wang2019a, Borah2021, Dalgaard2019, Niu2019, An2019, Fosel2018, Andreasson2019, Nautrup2019, DomingoColomer2020, Xu2019, Schuff2020}, which we survey in Section~\ref{sec:related work},
relied on a number of simplifying assumptions rendering the quantum control problem more tractable for the agent, but severely limiting their experimental feasibility.
These approaches provide the agent with the knowledge of a quantum state, or rely on fidelity as a measure of optimization progress. Such requirements are at odds with the fundamental properties of quantum environments, stochasticity and minimalistic observability.
Trying to meet these requirements in realistic experiments leads to large sample size, e.g. $10^7$ measurements to learn a single-qubit gate with only 16 parameters, as recently demonstrated in Ref.~\cite{Baum2021} using a quantum-state-aware agent that relied on tomography to obtain the quantum state.
Other model-free approaches that view quantum control as a standard cost function optimization problem \cite{Judson1992, Kelly2014, Chen2016, Rol2017, Werninghaus2021} are subject to similar limitations. Scaling such methods beyond one or two-qubit applications is prohibitively expensive from a practical point of view.
In this paper, we develop a framework for model-free learning of quantum control policies which is explicitly tailored to the stochasticity and minimalistic quantum observability. It does not rely on restrictive assumptions, such as a model of the system's dynamics, knowledge of a quantum state, or access to fidelity.
By framing quantum control as a Quantum-Observable Markov decision process (QOMDP) \cite{Barry2014}, we consider each stochastic experimental realization as an episode of interaction of the learning agent with a controlled quantum system, after which the agent receives a binary-valued reward through a projective measurement. Instead of utilizing averaging, every such episode is performed with a {\it different} control policy, which is being continually updated by a small amount within a trust region with the help of the reward signal. This novel policy space exploration strategy leads to excellent sample efficiency on challenging high-dimensional tasks, significantly outperforming widely used model-free methods.
To illustrate our approach with specific examples, we focus on the quantum control of a harmonic oscillator. Harmonic oscillators are ubiquitous physical systems, realized, for instance, as the motional degrees of freedom of trapped ions \cite{Leibfried2003, Bruzewicz2019} or electromagnetic modes in superconducting circuits \cite{Krantz2019a, Blais2020}. They are primitives for bosonic quantum error correction \cite{Ofek2016a, Campagne-Ibarcq2019, Hu2019a} and quantum sensing \cite{Wang2019b}. Universal quantum control of an oscillator is typically realized by coupling it to an ancillary nonlinear system, such as a qubit, with state-of-the-art fidelities in the $0.9-0.99$ range in circuit quantum electrodynamics (QED) \cite{Heeres2017, Eickbusch2021, Kudra2021} and trapped ions \cite{Fluhmann2019a}. In such a quantum environment, ancilla measurements with binary outcomes are the agent's only source of information about the quantum state in the vast unobservable Hilbert space and the only source of rewards guiding the learning algorithm.
For an oscillator-qubit system, we demonstrate learning of both unitary control and control with adaptive measurement-based quantum feedback. These types of control are special instances of a modular circuit-based framework, in which the quantum operation executed on a system is represented as a sequence of continuously parameterized {\it control circuits}, whose parameters are learned in-situ with the help of a {\it reward circuit}.
We show how to construct task-specific reward circuits that implement an experimentally feasible dichotomic positive operator-valued measure (POVM) on the oscillator, and how to use its outcomes as reward bits in the classical training loop.
We train the agent to prepare various non-classical oscillator states, such as Fock states, Gottesman-Kitaev-Preskill (GKP) states \cite{Gottesman2001}, Schr\"{o}dinger cat states, and binomial code states \cite{Michael2016}, and to execute gates on logical qubits encoded in an oscillator.
Although our demonstration is based on a simulated environment producing mock measurement outcomes, the RL agent that we developed (code available at \cite{Sivak2021}) is compatible with real-world experiments.
\section{Related work\label{sec:related work}}
In recent years, multiple theoretical proposals have emerged around applying reinforcement learning to quantum control problems such as quantum state preparation \cite{Chen2014, Bukov2018, Bukov2018a, Zhang2019, Porotti2019, An2020, August2018, Haug2020, Kuo2021} and feedback stabilization \cite{Wang2019a, Borah2021}, the construction of quantum gates \cite{Dalgaard2019, Niu2019, An2019}, design of quantum error correction protocols \cite{Fosel2018, Andreasson2019, Nautrup2019, DomingoColomer2020}, and control-enhanced quantum sensing \cite{Xu2019, Schuff2020}.
These proposals formulate the control problem in a way that avoids directly facing quantum observability and makes it more tractable for the RL agent.
In simulated environments, this is possible, for example, by providing the agent with full knowledge of the system's quantum state, which supplies enough information for decision making \cite{Chen2014, Zhang2019, Porotti2019, An2020, Haug2020, Schuff2020, Wang2019a, Fosel2018, Xu2019}. Moreover, in the simulation the distance to the target state or operation is known at every step of the quantum trajectory, and it can be used to construct a steady reward signal to guide the learning algorithm \cite{Zhang2019, Porotti2019, An2020, Xu2019}, thereby alleviating the well-known delayed reward assignment problem \cite{Sutton2017, Francois-Lavet2018}.
Taking RL a step closer towards quantum observability, some works only provide the agent with access to fidelities and expectation values of physical observables in different parts of the training pipeline \cite{Bukov2018, August2018, Garcia-Saez2019, Wauters2020, Kuo2021}, which would still require a prohibitive amount of averaging in an experiment, a problem exacerbated by the iterative nature of the training process.
Under these various simplifications, there are positive indications \cite{Zhang2019, Dalgaard2019} that RL is able to match the performance of traditional gradient-based methods, albeit in situations where the agent or the learning algorithm has access to expensive or unrealistic resources.
Therefore, such RL proposals are not compatible with efficient training in experiment, which is required in order to eliminate model bias from quantum control.
To address this challenge, it is necessary to develop agents that learn directly from stochastic measurement outcomes or from low-sample estimators of physical observables. Initial steps towards this goal were studied in \cite{Bukov2018a, Bilkis2020, Borah2021}.
\section{Reinforcement learning approach to quantum control}
\subsection{Markov decision process \label{MDP section}}
We begin by introducing several concepts from the field of artificial intelligence (AI). An intelligent {\it agent} is any device that can be viewed as perceiving its {\it environment} through sensors and acting upon that environment with actuators \cite{Russel}.
In reinforcement learning (RL) \cite{Sutton2017, Francois-Lavet2018}, a sub-field of AI, the interaction of the agent with its environment is usually described with a powerful framework of Markov decision processes (MDP).
In the MDP framework, the agent-environment interaction proceeds in {\it episodes} consisting of a sequence of discrete {\it time-steps}. At every time-step $t$ the agent receives an {\it observation} $\,o_{t}\in{\cal O}$ containing some information about the current environment {\it state} $s_t\in{\cal S}$, and acts on the environment with an {\it action} $a_{t}\in{\cal A}$. This action induces a transition of the environment to a new state $s_{t+1}$ according to a Markov transition function ${\cal T}(s_{t+1}|s_{t},a_{t})$. The agent selects actions according to a {\it policy} $\pi(a_t|h_t)$, which in general can depend on the {\it history} $h_t=o_{0:t}$ of all past observations made in the current episode. In the {\it partially observable} environment, observations are issued according to an {\it observation function} $O(o_{t}|s_{t})$ and carry only a limited information about the state. In the special case of a {\it fully-observable} environment, the observation $o_t = s_t$ is a sufficient statistic of the past, which allows to restrict the policy to a mapping from states to actions $\pi(a_t|s_t)$. Environments can be further categorized as {\it discrete} or {\it continuous} according to the structure of the state space $\cal S$, and as {\it deterministic} or {\it stochastic} according to the structure of the transition function $\cal T$. Likewise, policies can be categorized as discrete or continuous according to the structure of the actions space $\cal A$, and as deterministic or stochastic.
The agent is guided through the learning process by a {\it reward} signal $r_{t}\in{\cal R}$. The reward is issued to the agent after each action, but it cannot be used by the agent to decide on the next action. Instead, it is used by the {\it learning algorithm} to improve the policy.
The reward signal is designed by a human supervisor according to the final goal, and it must indicate how good the new environment state is after the applied action.
Importantly, it is possible to specify the reward signal for achieving a final goal without knowing what the optimal actions are, which is a major difference between reinforcement learning and more widely appreciated supervised learning.
The goal of the learning algorithm is to find a policy $\pi$ that maximizes the agent's {\it utility function} $J$, which in RL is taken to be the expectation $J=\mathbb{E}_\pi[R]$ of the reward accumulated during the episode, also known as a {\it return} $R=\sum_{t}r_{t}$.
Even from this brief description it is clear that learning environments vary vastly in complexity from ``simple'' discrete fully-observable deterministic environments, such as a Rubik's cube, to ``difficult'' continuous partially-observable stochastic environments, such as those of self-driving cars. Where does quantum control land on this spectrum?
\begin{figure}
\caption{\label{fig1}}
\end{figure}
\subsection{Quantum control as quantum-observable Markov decision process \label{QOMDP}}
To explain how quantum control can be viewed as a sequential decision problem, for concreteness we specialize the discussion to a typical circuit QED \cite{Blais2020} experimental setup, depicted in Fig.~\ref{fig1}, although our framework is independent of the physical platform. The agent is a program implemented in a classical computer controlling the quantum system. The quantum environment of the agent consists of a quantum harmonic oscillator, realized as an electromagnetic mode of the superconducting resonator, and an ancilla qubit, realized as the two lowest energy levels of a transmon \cite{Koch2007}. Note the difference in the use of the term ``environment'', which in quantum physics refers to a dissipative bath coupled to a quantum system, while in our RL context it refers to the quantum system itself, which is the environment of the agent.
It is convenient to abstract away the exact details of the control hardware and adopt the circuit model of quantum control. According to such operational definition, the agent interacts with the environment by executing a parameterized control circuit in discrete steps, as illustrated in Fig.~\ref{fig1}. On each step $t$, the agent receives an observation $o_t$, and produces the action-vector $a_{t}$ of parameters of the control circuit to apply in the next time step. The agent-environment interaction proceeds for $T$ steps, comprising an episode.
Compared to the typical classical partially-observable MDPs (POMDPs), there are two significant complications in the quantum case: (i) the quantum environment is minimally observable to the agent through projective ancilla measurements, i.e. the observations $o_{t}$ carry no more than $1$ bit of information, and (ii) the observation causes a random discontinuous jump in the underlying environment state.
While in principle classical POMDPs could have such properties, they arise more naturally in the quantum case.
Historically, RL was benchmarked sometimes in stochastic but always richly observable environments, and it is therefore an open question whether existing RL algorithms are well suited for quantum environments with properties (i)-(ii). There is also a fundamental question of whether classical agents can efficiently, in the algorithmic complexity sense, learn compressed representations of the latent quantum states producing the observations, and if such representations are necessary for learning quantum control policies. Recognizing some of these difficulties, Ref.~\cite{Barry2014} introduced ``Quantum-Observable Markov Decision Process'' (QOMDP), a term we will adopt to describe our quantum control framework.
We use the Monte Carlo wave-function method \cite{Molmer1993} to simulate the quantum environment of the agent. For the environment consisting of an oscillator coupled to ancilla qubit and isolated from the dissipative bath, the most general QOMDP has the following specifications:
{\bf 1.} State space is the joint Hilbert space of the oscillator-qubit system, which in our simulation corresponds to ${\cal S}=\{|s\rangle\in \mathbb{C}^{2}\otimes\mathbb{C}^{N}, \, \langle s|s\rangle=1 \}$, with $N=100$ being the oscillator Hilbert space truncation in the photon number basis.
{\bf 2.} Observation space ${\cal O}=\{-1,+1\}$ is a set of possible measurement outcomes of the qubit $\sigma_{z}$ operator.
If the control circuit contains a qubit measurement, the observation function is given by the Born rule of probabilities. If the control circuit does not contain a measurement, the observation is a constant which we take to be $o_t=+1$. We refer to the former as measurement-based feedback control and the latter as unitary control.
In other approaches \cite{Chen2014, Zhang2019, Porotti2019, An2020, Haug2020, Schuff2020, Wang2019a, Fosel2018, Xu2019}, an observation is a quantum state itself $o_t=|s_t\rangle$, which is not naturally compatible with real-world experiments. It could be obtained through quantum state tomography \cite{Baum2021}, but this would result in exponential scaling of the training sample complexity with system size.
{\bf 3.} Action space ${\cal A}=\mathbb{R}^{|\cal A|}$ is the space of parameters $a$ of the control circuit. It generates the set $\{{\cal K}[{a}]\}$ of continuously parameterized Kraus maps. If the control circuit contains a qubit measurement, then each map ${\cal K}[{a}]$ consists of two Kraus operators $K_{\pm}[{a}]$ satisfying the completeness relation $K_{+}^{\dagger}[a]K_{+}[a]+K_{-}^{\dagger}[a]K_{-}[a]=I$ and corresponding to observations $\pm1$. If the control circuit does not contain a measurement, then the map consists of a single unitary operator $K_0[a]$.
{\bf 4.} State transitions happen deterministically according to $|s_{t+1}\rangle = K_0[{a_t}]|s_t\rangle$ if the control circuit does not contain a measurement, and otherwise stochastically according to $|s_{t+1}\rangle = K_\pm[{a_t}]|s_t\rangle/\sqrt{p_\pm}$ with probabilities $p_{\pm}=\langle s_t| K_{\pm}^{\dagger}[a_t]K_{\pm}[a_t]|s_t\rangle$.
In this paper, we do not consider the coupling of a quantum system to a dissipative bath, but it can be incorporated into the QOMDP by expanding the Kraus maps to include uncontrolled quantum jumps of the state $|s_t\rangle$ induced by the bath. This would lead to more complicated dynamics, but since the quantum state and its transitions are hidden from the agent, nothing would change in the RL framework.
In the traditional simulation-based approach to quantum control, the model for ${\cal K}[a]$ is specified, for example through the system's Hamiltonian and Schr\"{o}dinger equation, allowing for gradient-based optimization of the cost function \cite{Khaneja2005, Caneva2011, DeFouquieres2011, Leung2017, Abdelhafez2019}.
In contrast, in our approach the Kraus map ${\cal K}[a]$ is not modeled. Instead, the experimental apparatus implements ${\cal K}[a]$ exactly. In this case, the optimization proceeds at a higher level by trial-and-error learning of the patterns in the action-reward relationship. This ensures that the learned control sequence is free of model bias.
In practice, common contributions to model bias come from frequency- and power-dependent pulse distortions in the control lines \cite{Jerger2019,Rol2020}, higher order nonlinearities, coupling to spurious modes, etc.
Simulation-based approaches often attempt to compensate for model bias by introducing additional terms in the cost function, such as penalties for pulse power and bandwidth, weighted with somewhat arbitrarily chosen coefficients, or finding policies that are first-order insensitive to deviations in system parameters \cite{Propson2021}. In contrast, our RL agent will learn the relevant constraints automatically, since it optimizes the true unbiased objective incorporated into the reward.
As shown in Fig.~\ref{fig1}, the reward in our approach is produced by following the training episode with the reward circuit. This circuit realizes a dichotomic POVM on the oscillator, whose binary outcome probabilistically indicates whether the applied action sequence implements the desired quantum operation. Since the agent's goal is to maximize the expectation $J = \mathbb{E}[R]$, we require that in the state preparation QOMDPs the reward circuit is designed to satisfy the condition
\begin{align}
\underset{|\psi\rangle}{\operatorname{argmax}}\;\mathbb{E}[R]=|\psi_{\rm target}\rangle, \label{condition}
\end{align}
where expectation is taken with respect to the sampling noise in reward measurements when the state $|\psi\rangle|g\rangle$ is supplied at the input to the reward circuit.
In circuit QED, dichotomic POVMs are realized through a unitary operation on the oscillator-qubit system followed by a projective qubit measurement in the $\sigma_{z}$ basis.
Since the reward measurement in general will disrupt the quantum state, we only apply the reward circuit at the end of the episode, and use the reward $r_{t<T}=0$ at all intermediate time-steps. Hence, from now on we will omit the time-step index and refer to the reward as simply $R\equiv r_T$.
Such delayed rewards are known to be particularly challenging for RL agents, because they need to make multiple action decisions during the episode, while the reward only informs whether the complete sequence of actions was successful but does not provide feedback on the individual actions.
A common choice of reward $R$ in other approaches \cite{Chen2014, Bukov2018, Zhang2019, An2020, August2018, Haug2020, Kuo2021, Dalgaard2019, Niu2019, An2019, Baum2021} is the fidelity of the executed quantum operation. The fidelity oracle, often assumed freely available, would translate into time-consuming averaging in experiments involving quantum systems with high-dimensional Hilbert space, and is therefore prohibitively expensive from a practical point of view.
Clearly, quantum control is a ``difficult'' decision process according to a rough categorization outlined in Section~\ref{MDP section}.
One may compare it to driving a car blind with a single sensor that provides binary-valued feedback instead of a rich visual picture of the surroundings.
In the following Section, we describe our approach to solving QOMDPs through policy gradient RL.
\subsection{Solving quantum control through policy gradient reinforcement learning}
The solution to a POMDP is a policy $\pi(a_{t}|h_{t})$ which assigns a probability distribution over actions to each possible history $h_{t}=o_{0:t}$ that the agent might see. In large problems, it is unfeasible to represent the policy as a lookup table, and instead it is convenient to parameterize it using a powerful function approximator such as a deep neural network \cite{Mnih,Mnih2015,Silver2016}.
As an additional benefit, this representation allows the learning agent to generalize via parameter sharing to histories it has never encountered during training.
We will refer to such neural network policies as $\pi_{\theta}$ where $\theta$ represents the network parameters. It is advantageous to adopt recurrent network architectures, such as the Long Short-Term Memory (LSTM) \cite{Hochreiter1997}, in problems with variable-length inputs. In this work, we use neural networks with an LSTM layer and several fully connected layers.
The output of the policy network is the mean $\mu_\theta[h_t]$ and diagonal covariance $\sigma^2_\theta[h_t]$ of the multivariate Gaussian distribution from which the action $a_t$ is sampled on every time-step, as depicted in Fig.~\ref{fig1}. The stochasticity of the policy during the training ensures a balance between exploration of new actions and exploitation of the current best estimate $\mu_\theta$ of the optimal action. Typically, as training progresses, the agent learns to reduce the entropy of the stochastic policy and eventually converges to a near-deterministic policy. After the training is finished, the deterministic policy is obtained by choosing the optimal action $\mu_\theta$.
In application to QOMDPs, such a stochastic action space exploration strategy means that every experimental run is performed with a different policy candidate which is evaluated with a binary reward measurement. Instead of spending the sample budget on increasing the evaluation accuracy for any given policy candidate through averaging, our strategy is to spend this budget on evaluating more policy candidates albeit with the minimal accuracy. Such strategy is explicitly tailored to the stochasticity and minimalistic observability of quantum environments, and is conceptually rather different from widely used model-free optimization methods that crucially rely on averaging to suppress noise in the cost function, as we further discuss in Appendix~\ref{NM}.
Policy gradient reinforcement learning \cite{Sutton2017, Francois-Lavet2018} provides a set of tools for learning the policy parameters $\theta$ guided by the reward signal. Even though the binary-valued reward $R$ is a non-differentiable random variable sampled from episodic interactions with the environment, its expectation $J$ depends on the policy parameters $\theta$ and it is therefore differentiable. The basic working principle of the policy gradient algorithms is to construct an empirical estimator $g_{k}$ of the gradient of performance measure $\nabla_{\theta}J(\pi_{\theta})|_{\theta=\theta_{k}}$ based on a batch of $B$ episodes of experience collected in the environment following the current stochastic policy $\pi_{\theta_{k}}$, and then perform a gradient ascent step on the policy parameters $\theta_{k+1}=\theta_{k}+\alpha g_{k}$, where $\alpha$ is the {\it learning rate}. This data collection and the subsequent policy update comprises a single {\it epoch} of training.
Various policy gradient RL algorithms differ in their construction of the gradient estimator. In this work, we use the Proximal Policy Optimization algorithm (PPO) \cite{Schulman2017} whose brief summary is included in the Supplementary Material \cite{SuppMat}. PPO was developed to cure sudden performance collapses often observed when using high-dimensional neural network policies. It achieves this by discouraging large policy updates (hence ``proximal''), inspired by ideas from trust region optimization. The stability of PPO is essential in stochastic environments, motivating our choice of this algorithm for solving QOMDPs.
As described above, the learning process is a guided search in the policy space, where the guiding signal is the reward assigned to each attempted action sequence. Since in the state preparation QOMDP the goal is to approach arbitrarily close to the target state that resides in a {\it continuous} state space, it is tempting to think that the guiding signal needs to be of high resolution, i.e. assign different rewards to policies of different qualities, with reward difference being indicative of the quality difference.
This condition is certainly satisfied by using fidelity as a reward \cite{Chen2014, Bukov2018, Zhang2019, An2020, August2018, Haug2020, Kuo2021, Dalgaard2019, Niu2019, An2019, Baum2021}. In contrast, our reward-circuit-based approach breaks this condition but promises high experimental sample efficiency by virtue of not having to perform expensive fidelity estimation. However, it is not obvious that stochastic $\pm 1$ outcomes of the reward circuits are sufficient to navigate a continuous policy space and converge at all, not to mention reaching a high fidelity. For example, consider that for two policies with fidelities ${\cal F}_1>{\cal F}_2$, in our approach it is possible to receive the rewards $R_1=-1<R_2=+1$ due to the measurement sampling noise, leading to the incorrect contribution to policy gradient.
By probabilistically comparing multiple policy candidates and performing small updates within the trust region, our proximal policy optimization is able to successfully cope with such a highly stochastic learning problem.
The next Section is devoted to empirically proving that our approach indeed leads to stable learning convergence, i.e. that the agent's performance gradually improves to a desired level and does not collapse or stagnate. We demonstrate this by training the agent to solve challenging state preparation instances.
We also provide a simple introductory example illustrating the basic principles of our approach in Appendix~\ref{sec:qubit_flip_demo}.
\section{Results\label{results}}
Currently, direct pulse shaping with GRAPE (gradient ascent pulse engineering) is a dominant approach to quantum state preparation in circuit QED \cite{Heeres2017, Wang2019b, Hu2019a}. Nevertheless, a modular approach based on repetitive application of a parameterized control circuit has several advantages \cite{Eickbusch2021, Kudra2021}. Firstly, thanks to a reduced number of parameters, the modular approach is less likely to overfit and can generalize better under small environment perturbations. In addition, each gate in the module can be individually tested and calibrated. Finally, the modular approach is physically motivated and more interpretable, leading to a better understanding of the solution.
Our RL approach is compatible with any parameterized control circuit, including piece-wise constant parameterization used in the direct pulse-shaping. In this work, for concreteness, we made a particular choice of a control circuit based on the universal gate set consisting of the Selective Number-Dependent Arbitrary Phase gate ${\rm SNAP}(\varphi)$ and displacement $D(\alpha)$ \cite{Krastanov2015}:
\begin{align}
{\rm SNAP}(\varphi) & = \sum_{n=0}^\infty e^{i\varphi_n}|n\rangle \langle n|, \label{snap} \\
D(\alpha) & = \exp(\alpha a^\dagger - \alpha^* a). \label{displacement}
\end{align}
In practice, this gate set has been realized in the strong dispersive limit of circuit QED \cite{Heeres2015, Kudra2021}. Displacements $D(\alpha)$ are implemented with resonant driving of the oscillator, while the Berry phases $\varphi_n$ in the ${\rm SNAP}(\varphi)$ gate are created by driving the qubit resonantly with the $|g\rangle |n\rangle \leftrightarrow |e\rangle |n\rangle$ transition.
Recently, it was demonstrated that SNAP can be made first-order path-independent with respect to ancilla qubit decay \cite{Reinhold2020, Ma2020}. Furthermore, a linear scaling of the circuit depth $T$ with the state size $\langle n \rangle$ can be achieved for this approach \cite{Fosel2020}, while many interesting experimentally achievable states can be prepared with just $T\sim5$. Inspired by this finding, we parameterize our open-loop control circuit as $D^\dagger(\alpha)\,{\rm SNAP}(\varphi)\,D(\alpha)$, see Fig.~\ref{fig2}(a).
In the following Sections \ref{sec:Fock states}-\ref{sec:arbitrary states} our aim is to demonstrate that model-free RL is feasible, i.e. the learning converges to high-fidelity protocols in a realistic number of training episodes. To isolate the learning aspect of the problem, in Sections~\ref{sec:Fock states}-\ref{sec:arbitrary states} we use perfect gate implementations acting on the Hilbert space as intended by Eqs.~\eqref{snap}-\eqref{displacement}. However, the major power of the model-free paradigm is the ability to utilize available controls even when they do not produce the expected effect, tailoring the learned actions to the unique control imperfections present in the system. We focus on this aspect in Section~\ref{sec:imperfect SNAP} by training the agent with an imperfectly implemented SNAP. Moreover, the advantage of model-free RL compared to other model-free optimization methods is that it can efficiently solve problems requiring adaptive decision-making \cite{Silver2016, Silver2018, Mnih2015, Vinyals2019, Levine2015, Haarnoja2018b}. We leverage this advantage of RL in Section \ref{sec:imperfect SNAP} to learn adaptive measurement-based quantum feedback strategies compensating for imperfect SNAP implementation. Finally, in Appendix~\ref{gates} we demonstrate learning of gates for logical qubits encoded in an oscillator.
\subsection{Preparation of oscillator Fock states \label{sec:Fock states}}
One central question in our RL approach is how to assign a reward $R$ to the agent by performing a measurement on the prepared state $|s_T\rangle$. To satisfy Eq.~\eqref{condition}, it is sufficient to design the reward circuit in such a way that $\mathbb{E}[R]=f({\cal F})$, where $f$ is any monotonically increasing function of fidelity ${\cal F}$ to the target state. Although this is not necessary, we found it to be a useful guiding principle.
For example, the most efficient choice is to generate $R$ as an outcome of a measurement with POVM $\{\Omega_{\rm target},I-\Omega_{\rm target}\}$ where $\Omega_{\rm target}=|\psi\rangle\langle\psi|_{{\rm target}}$ is the target projector. This POVM maximizes the distinguishability of the target state from all other states \cite{Kliesch2021}. We will refer to such reward as target projector reward. If the measurement outcomes associated to this POVM are $\pm1$, then reward will satisfy $\mathbb{E}[R]=2{\cal F}-1$.
In the strong dispersive limit of circuit QED \cite{Schuster2007}, a dichotomic POVM measurement required for the target projector reward can be routinely realized for an important class of non-classical states known as Fock states $|n\rangle$ that are eigenstates of the photon number operator.
To learn preparation of such states, we use the ``Fock reward circuit'' shown in Fig.~\ref{fig2}(a).
All reward circuits considered in this work contain two ancilla measurements. If the SNAP is ideal as in Eq.~\eqref{snap}, the qubit will remain in $|g\rangle$ after the control sequence, and the outcome of the first measurement will always be $m_1=1$, which is the case in Sections~\ref{sec:Fock states}-\ref{sec:arbitrary states} and in Appendix~\ref{gates}. However, in a real experimental setup, residual entanglement between the qubit and oscillator can remain. Therefore, in general the first measurement serves to disentangle them. The second measurement with outcome $m_2$ is used to produce the reward. In the Fock reward circuit, this is done according to the rule $R=-m_2$.
The training episodes begin with the oscillator in vacuum $|\psi_0\rangle=|0\rangle$ and the ancilla qubit in the ground state $|g\rangle$. Episodes follow the general template shown in Fig.~\ref{fig1}, in which the control circuit is applied for $T=5$ time-steps, followed by the Fock reward circuit. The SNAP gate is truncated at $\Phi=15$ levels, leading to the $(15+2)$-dimensional parameterization of the control circuit, and amounting to 85 real parameters for the full control sequence. In our approach, the choice of the circuit depth $T$ and the action space dimension $|{\cal A}|=\Phi+2$ needs to be made in advance, which requires some prior understanding of the problem complexity. In this example, we chose $T=5$ and $\Phi=15$ for all Fock states $|1\rangle,\dots,|10\rangle$ to ensure a fair comparison of the convergence speed, but, in principle, the states with lower $n$ can be prepared with shorter sequences \cite{Krastanov2015, Heeres2015}.
An automated method for selecting the circuit depth was proposed in Ref.~\cite{Fosel2020}, and it can be utilized here to make an educated guess of $T$.
\begin{figure}
\caption{}% TODO: caption text appears to have been lost in extraction — restore
\label{fig2}
\end{figure}
The action-vectors are sampled from the Gaussian distribution produced by the deep neural network with one LSTM layer and two fully-connected layers, representing the stochastic policy.
The neural network input is only the ``clock'' observation (one-hot encoding of the step index $t$), since there are no measurement outcomes in the unitary control circuit.
The agent is trained for $4\cdot 10^3$ epochs with batches of $B=10^3$ episodes per epoch. This amounts to a sample size of $M_{\rm tot}=4\cdot 10^6$ experimental runs. The total time budget of the training is split between (i) experience collection, (ii) optimization of the neural network, and (iii) communication and instruments re-initialization.
We estimate that with the help of active oscillator reset \cite{Pfaff2017} the experience collection time in experiment can be as short as 10 minutes in total for such training (assuming a $150\,\mathrm{\mu s}$ duty cycle per episode).
Our neural network is implemented with TensorFlow \cite{Abadi2016} on an NVIDIA Tesla V100 graphics processing unit (GPU). The total time spent updating the neural network parameters is about 10 minutes for such training.
The real experimental implementation will likely be limited by instrument re-initialization \cite{Werninghaus2021}. This time budget puts our proposal within the reach of current technology.
Throughout this manuscript, we use the fidelity $\cal F$ only as an evaluation metric to benchmark the agent, and it is not used anywhere in the training loop. If desired, in experiment the training epochs can be periodically interleaved with evaluation epochs to perform fidelity estimation \cite{Flammia2011, DaSilva2011} for the deterministic version of the current stochastic policy. Other metrics can also be used to monitor the training progress without interruption, such as the return and entropy of the stochastic policy.
The agent benchmarking results for this QOMDP are shown in Fig.~\ref{fig2}(b).
They indicate that our stochastic action space exploration strategy is not only able to converge, but also yields high-fidelity solutions within a realistic number of experimental runs. The agent was able to reach ${\cal F}>0.99$ for all Fock states, and ${\cal F}>0.999$ for Fock state $|1\rangle$.
Such stable convergence in a stochastic setting is possible with proximal policy optimization because after every epoch the policy distribution only changes by a small amount within a trust region. This working principle is in stark contrast with popular optimization algorithms such as the Nelder-Mead (NM) simplex search \cite{Kelly2014, Chen2016, Rol2017} or simulated annealing (SA) \cite{Baum2021}, where each update of the simplex (in NM) or the state (in SA) can result in a drastically different policy. As a result, both these approaches perform poorly on high-dimensional problems with stochastic cost function, as shown in Appendix~\ref{NM} and summarized in Fig.~\ref{fig2}(c). When allowed the same total number of experimental runs $M_{\rm tot}=4\cdot 10^6$ as in Fig.~\ref{fig2}(b), NM is only able to find solutions with ${\cal F}>0.99$ for Fock states $|1\rangle$ and $|2\rangle$ and SA only for Fock state $|1\rangle$.
Despite its low resolution, target projector reward represents the most informative POVM from the perspective of state certification \cite{Kliesch2021}, and results in efficient learning of state preparation protocols. However, for most target states it will be unfeasible to experimentally implement such POVM in a trustworthy way. Recall that in circuit QED any dichotomic POVM on the oscillator is implemented with a unitary operation on the oscillator-qubit system and a subsequent qubit measurement in $\sigma_{z}$ basis. The trustworthiness requirement implies that this unitary operation can be independently calibrated to high accuracy, because errors in its implementation can bias the reward circuit and, as a result, bias the learning objective of the agent. For example, in the Fock reward circuit in Fig.~\ref{fig2}(a) the unitary is a simple photon-number-selective qubit flip whose calibration is relatively straightforward.
Therefore, we consider Fock reward as a {\sl feasible and trustworthy} instance of the target projector reward.
In a more general case, when a target projector reward is unfeasible to implement, consider the following probabilistic measurement strategy. Let $\{\Omega_{k}\}$ be a parameterized set of POVM elements that can be realized in a trustworthy way. To implement a reward measurement, in each episode we first sample the parameter $k$ from some probability distribution $P(k)$, and then implement a dichotomic POVM $\{\Omega_{k},I-\Omega_{k}\}$ with associated reward $R=\pm R_k$. One can view such a reward scheme as probabilistically testing different properties of the prepared state, instead of testing directly whether it is equal to the target state. The scale $R_k$ of the binary reward is chosen according to the importance of each such property. Note that in such reward scheme the expectation in Eq.~\eqref{condition} is taken with respect to both the sampling of POVMs and the shot noise in measurement outcomes.
In the following Sections~\ref{sec:Stabilizer states}-\ref{sec:arbitrary states}, we consider examples of such probabilistic reward measurement schemes, with further examples relevant for other physical systems included in Appendix~\ref{sec:other systems}.
\subsection{Preparation of stabilizer states \label{sec:Stabilizer states}}
The class of stabilizer states is of particular interest for quantum error correction \cite{Nielsen2010}. A state is a stabilizer state if it is a unique joint eigenvalue-1 eigenstate of a commutative stabilizer group.
To demonstrate learning stabilizer state preparation in an oscillator, we train the agent to prepare a grid state, also known as the Gottesman-Kitaev-Preskill (GKP) state \cite{Gottesman2001}. Grid states were originally introduced for encoding a 2D qubit subspace into an infinite-dimensional Hilbert space of an oscillator for bosonic quantum error correction, and were subsequently recognized to be valuable resources for various other quantum applications. In particular, the 1D version of the grid state which we consider here, can be used for sensing both real and imaginary parts of a displacement simultaneously \cite{Duivenvoorden2017, Noh2020}.
\begin{figure}
\caption{}% TODO: caption text appears to have been lost in extraction — restore
\label{fig3}
\end{figure}
An infinite-energy 1D grid state is a Dirac comb $|\psi^{\rm GKP}_0\rangle \propto \sum_{t\in \mathbb{Z}} D(t\sqrt{\pi})|0_x\rangle$, where $|0_x\rangle$ is a position eigenstate located at $x=0$.
The generators of a stabilizer group for such a state are $S_{x,0}=D(\sqrt{\pi})$ and $S_{p,0}=D(i\sqrt{\pi})$. The finite-energy version of this state $|\psi^{\rm GKP}_\Delta\rangle$ can be obtained with generators $S_{x,\Delta}=E_\Delta S_{x, 0} E_\Delta^{-1}$ and $S_{p,\Delta}=E_\Delta S_{p, 0} E_\Delta^{-1}$, where $E_\Delta=\exp(-\Delta^2 a^\dagger a)$ is the envelope operator, and $\Delta$ determines the degree of squeezing in the peaks of the Dirac comb and the extent of the grid envelope.
To learn preparation of such a GKP state, consider a probabilistic reward measurement scheme based on a set $\{\Omega_{k}\}$ with $k=x,p$ of POVM elements which are the projectors onto the $+1$ eigenspaces of stabilizer generators $S_{x/p,\Delta}$. The direction of the stabilizer displacement (along $x$ or $p$ quadrature) is sampled uniformly, and the scale of reward is $R_k=1$ for each direction. In this scheme, there is no simple relation between $\mathbb{E}[R]$ and ${\cal F}$, but the condition \eqref{condition} is satisfied. In contrast, for a multi-qubit system with a finite stabilizer group it is possible to construct a scheme in which the expectation of reward is a monotonic function of fidelity by sampling uniformly from the full stabilizer group, see Appendix~\ref{sec:other systems}.
The infinite-energy stabilizers $S_{x/p,0}$ are unitary and can be measured in the oscillator-qubit system with the standard phase estimation circuit \cite{Terhal2016}, as was experimentally demonstrated with trapped ions \cite{Fluhmann2019} and superconducting circuits \cite{Campagne-Ibarcq2020}. On the other hand, the finite-energy stabilizers $S_{x/p,\Delta}$ are not unitary nor Hermitian. Recently, an approximate circuit for generalized measurement of $S_{x/p,\Delta}$ was proposed \cite{Royer2020,DeNeeve2020} and realized with trapped ions \cite{DeNeeve2020}. Our stabilizer reward circuit, shown in Fig.~\ref{fig3}(a), is based on these proposals. The measurement outcome $m_2$, obtained in this circuit, is administered as a reward $R=m_2$. Since this circuit only approximates the desired POVM, such reward will only approximately satisfy $\mathbb{E}[R]=(\langle S_{x,\Delta}\rangle + \langle S_{p,\Delta}\rangle)/2$ and fulfill the condition \eqref{condition}. Nevertheless, the agent that strives to maximize such a reward will learn to prepare an approximate $|\psi^{\rm GKP}_\Delta\rangle$ state.
After choosing the reward circuit, we need to properly constrain the control circuit. Grid states have a large photon number variance $\sqrt{{\rm var}(n)}\approx \langle n\rangle \approx 1/(2\Delta^2)$, hence preparation of such states requires a large SNAP truncation $\Phi$. However, increasing the action space dimension $|{\cal A}|=\Phi+2$ can result in less stable and efficient learning. As a compromise, we choose $\Phi=30$ and $T=9$, amounting to 288 real parameters for the full control sequence.
The agent benchmarking results for this QOMDP are shown in Fig.~\ref{fig3}(b), with average stabilizer value as the evaluation metric [measured with the approximate circuit from Fig.~\ref{fig3}(a)]. For a perfect policy, the stabilizers would saturate to $+1$, but it is increasingly difficult to satisfy this requirement for target states with smaller $\Delta$ due to a limited SNAP truncation and circuit depth. Nevertheless, our agent successfully copes with this task.
Example Wigner functions of the states prepared by the agent after 10,000 epochs of training are shown as insets.
Learning state preparation with a probabilistic reward measurement scheme is generally less efficient than with target projector reward because individual reward bits carry only partial information about the state. However, in principle, if stabilizer measurements can be realized in a quantum non-demolition way, this opens a possibility of acquiring the values of multiple commuting stabilizers after every episode, and thereby increasing the signal-to-noise ratio (SNR) of the reward signal.
Reward circuits in Sections~\ref{sec:Fock states}-\ref{sec:Stabilizer states} are designed for special classes of states. Next, we consider how to construct a reward circuit applicable to arbitrary states.
\subsection{Preparation of arbitrary states \label{sec:arbitrary states}}
In the general case, we aim to construct an unbiased estimator of fidelity $\cal F$ based on a measurement scheme which is (i) tomographically complete, (ii) feasible to implement in a given experimental platform, and (iii) trustworthy. The requirement (i) in combination with universality of the control circuit is necessary to guarantee that arbitrary states can in principle be prepared with our approach. However, it is not by itself sufficient, and needs to be supplemented with requirements (ii) and (iii) to ensure practical feasibility.
In the strong dispersive limit of circuit QED, the Wigner tomography is a canonical example satisfying all three requirements above \cite{Vlastakis2013}. Wigner function is defined on the oscillator phase space with coordinates $\alpha\in\mathbb{C}$, and is given as the expectation value of the ``displaced parity'' operator $W(\alpha)=\frac{2}{\pi}\langle \Pi_\alpha\rangle$, where $\Pi_\alpha=D(\alpha) \Pi D^\dagger(\alpha)$, and $\Pi=e^{i\pi a^\dagger a}$ is the photon number parity. Hence, for the probabilistic reward measurement scheme based on the Wigner function, we consider a continuously parameterized set of POVM elements $\{\Omega_\alpha\}$, where $\Omega_\alpha = (I + \Pi_\alpha)/2$ is a projector onto +1 (even) eigenspace of the displaced parity operator.
\begin{figure}
\caption{\label{fig4}}
\end{figure}
Next, we need to determine the probability distribution $P(\alpha)$ according to which the POVMs are sampled from the set $\{\Omega_\alpha\}$ for reward evaluation. To this end, we derive the estimator of fidelity based on the Monte Carlo importance sampling of the phase space:
\begin{align}
{\cal F} & = \pi\int d^{2}\alpha\,W(\alpha)W_{{\rm target}}(\alpha) \label{integral} \\
&=2 \underset{\alpha\sim P}{\mathbb{E}}\, \underset{\psi}{\mathbb{E}} \left[ \frac{1}{P(\alpha)} \Pi_\alpha W_{{\rm target}}(\alpha)\right],
\label{fidelity_estimator_wigner}
\end{align}
where points $\alpha$ are sampled according to an arbitrary probability distribution $P(\alpha)$ which is nonzero where $W_{{\rm target}}(\alpha)\neq0$.
The estimator \eqref{fidelity_estimator_wigner} leads to the following scheme, dubbed ``Wigner reward'': first, the phase space point $\alpha$ is generated with rejection sampling, as illustrated in Fig.~\ref{fig4}(b), and then the displaced parity $\Pi_\alpha$ is measured, corresponding to the reward circuit shown in Fig.~\ref{fig4}(a). Reward is then assigned according to the rule $R = R_\alpha m_2$, where $R_\alpha=\frac{2c}{P(\alpha)}W_{{\rm target}}(\alpha)$ is chosen to reflect the importance of a sampled phase space point, and $c>0$ is an arbitrary scaling factor. Such reward satisfies $\mathbb{E}[R] = c\,{\cal F}$ according to Eq.~\eqref{fidelity_estimator_wigner}, but only requires a single binary tomography measurement per policy candidate.
The estimator \eqref{fidelity_estimator_wigner} is unbiased for any $P(\alpha)$, but its variance can be reduced by choosing $P(\alpha)$ optimally. The lowest variance is achieved with $P(\alpha)\propto |W_{{\rm target}}(\alpha)|$, as shown in Appendix~\ref{sec: variance}. Such a choice also helps to stabilize the learning algorithm, since it conveniently leads to rewards $R=m_2\, {\rm sgn}\,W_{{\rm target}}(\alpha)$ of equal magnitude $|R|=1$, where we made a proper choice of the scaling factor $c$.
We investigate the agent's performance with Wigner reward circuit for (i) preparation of the Schr{\"o}dinger cat state $|\psi_{\rm target}\rangle\propto|\beta\rangle + |-\beta\rangle$ with $\beta=2$ in $T=5$ steps, shown in Fig.~\ref{fig4}(c), and (ii) preparation of the binomial code state $|\psi_{\rm target}\rangle \propto \sqrt{3}|3\rangle + |9\rangle$ \cite{Michael2016} in $T=8$ steps, shown in Fig.~\ref{fig4}(d). In contrast to target projector and stabilizer rewards that asymptotically lead to reward of $+1$ for optimal policy, Wigner reward remains stochastic even under the optimal policy. Since in this case it is impossible to find the policy that would systematically produce a reward of $+1$, for some states the agent converges to policies of intermediate fidelity (green). To increase the SNR of the Wigner reward, we evaluate every policy candidate with reward circuits corresponding to $1,10,100$ different phase space points, doing a single measurement per point and averaging the obtained measurement outcomes to generate the reward $R$. The results show that increased reward SNR allows to reach higher fidelity, albeit at the expense of increased sample size. We expect that in the limit of infinite averaging the training would proceed as if the fidelity $\cal F$ was directly available to be used as reward (blue).
We observe notable variations in convergence speed and saturation fidelity depending on the choice of hyperparameters, which is typical of reinforcement learning. A lot of progress has been made in developing robust RL algorithms applicable to a variety of tasks without extensive problem-specific hyperparameter tuning \cite{Mnih2015, Silver2018}, but this still remains a major open problem in the field. The list of hyperparameters used in all our training examples can be found in the Supplementary Material \cite{SuppMat}. Even with the optimal choice of hyperparameters, there is no rigorous guarantee of convergence -- a problem plaguing all heuristic optimization methods in non-convex spaces. In the presented examples, we plot learning trajectories corresponding to several random seeds to demonstrate that the probability of getting stuck with a suboptimal solution is small.
This demonstration shows that arbitrary state preparation is in principle possible with our approach, as long as a tomographically complete reward measurement scheme is available in a given physical system. In Appendix~\ref{sec:other systems}, we provide fidelity estimators based on the characteristic function, enabling training for arbitrary state preparation in trapped ions and multi-qubit systems.
\begin{figure*}
\caption{\label{fig5}}
\end{figure*}
Examples considered in Sections~\ref{sec:Fock states}--\ref{sec:arbitrary states} already demonstrate the model-free aspect of our approach despite the perfect gate implementations in the underlying simulation of the quantum state evolution. In the following example, we demonstrate this aspect more explicitly by training the agent on a system with imperfect SNAP. In addition, the next example highlights the potential of RL for measurement-based feedback control.
\subsection{Learning adaptive quantum feedback with imperfect controls \label{sec:imperfect SNAP}}
Many quantum control experiments with circuit QED systems claim decoherence-limited fidelity \cite{Heeres2015,Heeres2017}. The effect of decoherence on the quantum operation can be decreased by reducing the execution time. However, this would involve controls with wider spectrum and larger amplitude, pushing the system to the limits where model assumptions are no longer valid. Therefore, such experiments are decoherence-limited instead of model-bias-limited only \emph{by choice}. Recent experiments that push quantum control towards faster implementation \cite{Eickbusch2021, Kudra2021} reveal that significant parts of the error budget cannot be accounted for by common and well-understood theoretical models, making the problem of model bias explicit. Model-free optimization will become an indispensable tool to achieve higher experimental fidelity despite the inability to capture the full complexity of a quantum system with a simple model.
To provide an example of this effect, we consider again a SNAP-displacement control sequence. In the oscillator-qubit system with dispersive coupling $H_c/h=\frac{1}{2}\chi\, a^\dagger a\,\sigma_z$, the Berry phases $\varphi_n$ in \eqref{snap} are created through photon-number-selective qubit rotations:
\begin{align}
{\rm SNAP}(\varphi) = \sum_n |n\rangle \langle n| \otimes R_{\pi-\varphi_n}(\pi)R_0(\pi),
\end{align}
where $R_\phi(\vartheta)=\exp(-i\frac{\vartheta}{2}[\cos\phi\, \sigma_x+\sin\phi\, \sigma_y])$.
Note that this operation, if implemented perfectly, would return the qubit to the ground state, and hence it can be considered as an operation on the oscillator alone, as defined in \eqref{snap}.
Such an implementation relies on the ability to selectively address number-split qubit transitions, which requires pulses of long duration $\tau\gg1/\chi$. In practice, it is desirable to keep the pulses short to reduce the probability of ancilla relaxation during the gate. However, shorter pulses of wider bandwidth would drive unintended transitions, as illustrated in Fig.~\ref{fig5}(b), leading to imperfect implementation of the SNAP gate: in addition to accumulating incorrect Berry phases for different levels, this will generally leave the qubit and oscillator entangled. Such imperfections are notoriously difficult to calibrate out or precisely account for at the pulse or sequence construction level, which presents a good testbed for our model-free learning paradigm.
We demonstrate that our approach leads to high-fidelity protocols even in the case $\tau<1/\chi$ far from theoretically optimal regime, where the sequences produced assuming ideal SNAP yield poor fidelity due to severe model bias.
We begin by illustrating in Fig.~\ref{fig5}(a) the degradation of performance of the policies optimized for preparation of Fock state $|3\rangle$ using the open-loop control circuit from Fig.~\ref{fig2}(a) with an ideal SNAP (blue), when tested with a finite-duration gate ${\rm SNAP}_\tau$ (red, pink) whose details are included in the Supplementary Material \cite{SuppMat}. Achieving extremely high fidelity (blue) requires delicate adjustment of the control parameters, but this fine-tuning is futile when the remaining infidelity is smaller than the performance gap due to model bias, shown with arrows in Fig.~\ref{fig2}(a) and a priori unknown.
As seen by testing on the $\chi\tau=3.4$ case (red), any progress that the optimizer made after 300 epochs was due to overfitting to the model of the ideal SNAP. As depicted with a spectrum in Fig.~\ref{fig5}(b), the qubit pulse of such duration is still reasonably selective (and is close to the experimental choice $\chi\tau\approx 4$ in \cite{Heeres2015}), but it already requires a much more sophisticated modeling of the SNAP implementation in order to not limit the experimental performance. In the partially selective case $\chi\tau=0.4$ (pink) the performance is drastically worse. Note that sequences optimized with any other simulation-based approach assuming ideal SNAP, such as \cite{Krastanov2015,Fosel2020}, would exhibit a similar degradation.
One way to recover higher fidelity is through a detailed modeling of the composite qubit pulse in the SNAP \cite{Kudra2021}, although such approach will still contain residual model bias. An alternative approach, which comes at the expense of reduced success rate, is to perform a verification ancilla measurement and post-selection, leading to a control circuit shown in Fig.~\ref{fig5}(c). Post-selecting on a qubit measured in $|g\rangle$ in all time steps (history $h_T=11111$) significantly boosts the fidelity of a biased policy from $0.9$ to $0.97$ in the case $\chi\tau=3.4$, but does not lead to any improvement in the extreme case $\chi\tau=0.4$. The post-selected fidelity is still lower than with the ideal SNAP, because such a scheme only compensates for qubit under- or over-rotation, and not for the incorrect Berry phases. Additionally, the trajectories corresponding to other measurement histories have extremely poor fidelities because only the history $h_T=11111$ was observed during the optimization with an ideal SNAP.
However, in principle, if the qubit is projected to $|e\rangle$ by the measurement, the desired state evolution can still be recovered using adaptive quantum feedback.
Experimental Fock state preparation with quantum feedback was demonstrated in the pioneering work in cavity QED \cite{Sayrin2011}.
In our context, a general policy in the adaptive setting is a binary decision tree, equivalent to $2^{T-1}$ distinct parameter settings for every possible measurement history. There exist model-based methods for construction of such a tree \cite{Shen2017}, but they are not applicable in the cases dominated by a-priori unknown control errors. An RL agent, on the other hand, can discover such a tree in a model-free way. Even though our policies are represented with neural networks, they can be easily converted to a decision tree representation which is more advantageous for low-latency inference in real-world experimental implementation.
To this end, we train a new agent with a feedback-based control circuit that directly incorporates a finite-duration imperfect gate ${\rm SNAP}_\tau$, shown in Fig.~\ref{fig5}(c), mimicking training in an experiment. We use Fock reward circuit, shown in Fig.~\ref{fig2}(a), in which $m_1=1$ in all episodes despite the imperfect SNAP because of the qubit reset operation.
Since the control circuit contains a measurement, the agent will be able to dynamically adapt its actions during the episode depending on the received outcomes $o_t$.
As shown with the green curves in Fig.~\ref{fig5}(a), the agent successfully learns adaptive strategies of high fidelity even in the extreme case $\chi\tau=0.4$. This indicates that RL is not only good for fine-tuning or ``last-mile'' optimization, but is a valuable tool for the domains where model-based quantum control is not applicable, e.g. due to absence of reliable models or prohibitive memory requirements for simulation of a large Hilbert space.
To further analyze the agent's strategy, we select the best-performing random seed for the case $\chi\tau=0.4$ after 25,000 epochs of training and visualize the resulting state evolution in Fig.~\ref{fig5}(d). The average fidelity of such policy is ${\cal F} = 0.974$. There are 5 high-probability branches, all of which yield ${\cal F}>0.9$, and further post-selection of history $h_T=1\overline{1}111$ will boost the fidelity to ${\cal F}>0.999$. We observe that fidelity reduces in the branches with more ``$-1$'' measurement outcomes (top to bottom), because, being less probable, such branches receive less attention from the agent during the training. As shown in Fig.~\ref{fig5}(e) top panel, the agent chooses to focus only on a small number of branches (5 out of $2^5$) and ensure that they lead to high-fidelity states. This is in contrast to the protocol optimized with the ideal SNAP and tested with ${\rm SNAP}_\tau$ (bottom panel), which, as a result of model bias, performs poorly and has relatively uniform probability of all histories (of course, such protocol would produce only history $11111$ if it was applied with an ideal SNAP).
It is noteworthy that in the two most probable branches in Fig.~\ref{fig5}(e) the agent actually finishes preparing the state in just $3$ steps, and in the remaining time chooses to simply idle instead of further entangling the qubit with the oscillator and subjecting itself to additional measurement uncertainty. In the other branches, this extra time is used to catch up after previously receiving undesired measurement outcomes. This indeed seems to be an intelligent strategy for such a problem, which serves as a positive indication that this agent will be able to cope with incoherent errors by shortening the effective sequence length.
We emphasize that even though for this numerical demonstration of model-free learning we had to build a specific model of the finite-duration SNAP, the agent is completely agnostic to it by construction. The only input that the agent receives is binary measurement outcomes, whose source is a black box to the agent.
Effectively, in this demonstration the model bias comes from the mismatch between ideal and finite-duration SNAP.
We also tested the agent against other types of model bias: we added independent random static offsets to the Berry phases and qubit rotation angles, and found that the agent performs equally well in this situation.
\section{Discussion \label{sec:Discussion}}
As empirically demonstrated in Section~\ref{results}, our stochastic policy optimization is stable and leads to high sample efficiency.
Starting from a random initial policy, learning the preparation of high-fidelity Fock states (with target projector reward) and GKP states (with stabilizer reward) required $10^6$--$10^7$ experimental runs, and learning with Wigner reward required $10^7$--$10^8$ runs.
Although seemingly large, this sample size compares favorably with the number of measurements required to merely tomographically verify the states of similar quality in experiments, e.g. $3\cdot 10^6$ for Fock states \cite{Heeres2017} and $2\cdot 10^7$ for GKP states \cite{Campagne-Ibarcq2020}.
Exactly quantifying the sample complexity of heuristic learning algorithms remains difficult. However, we can qualitatively establish the general trends.
A natural question to ask is whether our approach will scale favorably with increased (i) target state complexity, (ii) action space, (iii) sequence length.
{\bf (i) Target state complexity.} Sample efficiency of learning the control policy is affected by multiple interacting factors, but among the most important is the variance of the fidelity estimator used for the reward assignment. Variance of the estimator in Eq.~\eqref{fidelity_estimator_wigner} with $P(\alpha)\propto |W_{\rm target}(\alpha)|$ is given by ${\rm Var} = 4(1+{\delta}_{\rm target})^2 - {\cal F}^2$, where ${\delta}_{\rm target}=\int |W_{\rm target}(\alpha)|d\alpha-1$ is one measure of the state non-classicality known as the Wigner negativity \cite{Kenfack2004} (see Appendix~\ref{sec: variance} for the derivation). This result leads to a simple lower bound on the sample complexity of learning the state preparation policy that reaches the fidelity $\cal F$ to the desired target state
\begin{align}
M > \frac{4(1+{\delta}_{\rm target})^2 - {\cal F}^2}{(1-{\cal F})^2}. \label{bouund}
\end{align}
This expression bounds the number of measurements $M$ required for resolving the fidelity $\cal F$ of a fixed policy with standard error of the mean comparable to the infidelity.
The task of the RL agent is more complicated, since it needs to not only resolve the fidelity of the current policy, but at the same time learn how to improve it. Therefore, this bound is not tight, and the practical overhead depends on the choice of control parameterization, learning algorithm and its hyperparameters.
However, the bound \eqref{bouund} clearly indicates that learning the preparation of larger non-classical states is increasingly difficult, as one would expect, and the difficulty can be quantified according to the Wigner negativity of the state.
This is a fundamental limitation on the learning efficiency with the Wigner reward, which can only be overcome by designing a reward scheme that takes advantage of the special structure of the target state and available trustworthy state manipulation tools, as we did, for instance, for Fock states and GKP states.
The Wigner negativity of Fock states grows as $\sqrt{n}$ \cite{Kenfack2004}, where $n$ is the photon number, which would result in $O(n)$ scaling of the bound \eqref{bouund}. In contrast, target projector reward, of which Fock reward is a special case, has target-state-independent variance ${\rm Var}={\cal F}(1-{\cal F})$ leading to a bound $M> {\cal F}/(1-{\cal F})$ which does not increase with the photon number.
How such reward design can be optimized in general is a matter that we leave for further investigation.
{\bf (ii) Action space.} The overhead on top of Eq.~\eqref{bouund} is determined, among other factors, by the choice of the control circuit. In the case of SNAP and displacement, the action space dimension $|{\cal A}|=\Phi+2$ has to grow with the target state size to ensure individual control of the phases of involved oscillator levels. This might be problematic, since the performance of RL (or any other approach) usually declines on high-dimensional tasks, as evidenced, for instance, by studies of robotic locomotion with different numbers of controllable joints \cite{Schulman2015, Duan2016}.
However, the sample complexity is not a simple function of $|\cal A|$, as can be inferred from Fig.~\ref{fig2}(b) where we use the same $|{\cal A}|=17$ for all Fock states. For lower Fock states, the agent quickly learns to disregard the irrelevant action dimensions because their contribution to policy gradient averages to zero. In contrast, for higher Fock states it needs to discover the pattern of relations between \emph{all} action dimensions across different time-steps, and thus the learning is slower. Note that on the same problem a much stronger degradation is observed when using the Nelder-Mead approach or simulated annealing, see Fig.~\ref{fig2}(c).
{\bf (iii) Sequence length.} Tackling decision-making problems with long-term dependencies (i.e. $T\gg1$) is what made RL popular in the first place, as exemplified by various game-playing agents \cite{Silver2016, Silver2018, Mnih2015, Vinyals2019}. In quantum control, the temporal structure of the control sequences can be exploited by adopting recurrent neural network architectures, such as the LSTM used in our work. Recently, machine learning for sequential data has significantly advanced with the invention of the Transformer models \cite{Vaswani2017} which use attention mechanisms to ensure that the gradients do not decay with the sequence depth $T$. Machine learning innovations such as this will undoubtedly find applications in quantum control.
As can be seen above, there are some aspects of scalability that are not specific to quantum control, but are common in any control task. The generality of the model-free reinforcement learning framework makes it possible to transfer the solutions to such challenges, found in other domains, to quantum control problems.
Let us now return to the discussion of other factors influencing the sample efficiency. As we briefly alluded to previously, the overhead on top of Eq.~\eqref{bouund} depends on the learning algorithm and its hyperparameters.
Model-free RL is known to be less sample efficient than gradient-based methods, typically requiring millions of training episodes \cite{Francois-Lavet2018}. This is especially true for \emph{on-policy} RL algorithms, such as PPO, since they discard the training data after each policy update. In contrast, \emph{off-policy} methods keep old experiences in the replay buffer and learn from them even after the current policy has long diverged from the old policy under which the data was collected, typically resulting in better sample efficiency. Our pick of PPO was motivated by its simplicity and stability in the stochastic setting, but it is worth exploring an actively expanding collection of RL algorithms \cite{Francois-Lavet2018}, and understanding which are most suitable for quantum-observable environments.
The sample efficiency of model-free RL in the quantum control setting can be further improved by utilizing the strength of conventional simulation-based methods. A straightforward way to achieve this would be through supervised pre-training of the agent's policy in the simulation. Such pre-training would provide a better initial point for the agent subsequently re-trained in the real-world setting. Our preliminary numerical experiments show that this indeed provides significant speedups.
The proposals discussed above resolve the bias-variance trade-off in favor of complete bias elimination, necessarily sacrificing sample efficiency. In this respect, model-free learning is a swing in the opposite direction from the traditional approach in physics of constructing sparse physically-interpretable models with very few parameters which can be calibrated in experiment. Building on the insights from machine learning community, model bias can in principle be strongly reduced (not eliminated) by learning a richly parameterized model, either physically motivated \cite{Krastanov2019a,Krastanov2020} or neural-network-based \cite{Flurin2018,Banchi2018}, from direct interaction with a quantum system. The learned model can then be used to optimize the control policy with simulation-based (not necessarily RL) methods. Another promising alternative is to use model-based reinforcement learning techniques \cite{Plaat2020}, where the agent can plan the actions by virtually interacting with its learned model of the environment while refining both the model and the policy using real-world interactions. Finally, in addition to adopting existing RL algorithms, a worthwhile direction is to design new algorithms tailored to the specifics of quantum-observable environments.
\section{Conclusion}
Addressing the problem of model bias as an inherent limitation of the dominant simulation-based approach to quantum control, we claim that end-to-end model-free reinforcement learning is not only a feasible alternative, but is also a powerful tool which will extend the capabilities of quantum control to domains where simulation-based methods are not applicable.
By focusing on control of a harmonic oscillator in the circuit QED architecture, we explored various aspects of learning under the conditions of quantum uncertainty and scarce observability. Our policy exploration strategy is explicitly tailored to these features of the quantum learning environments. We demonstrated stable learning directly from stochastic binary measurement outcomes, instead of relying on averaging to eliminate stochasticity as is done in other model-free quantum control optimization methods. With multiple numerical experiments, we confirmed that such strategy leads to high fidelity and sample efficiency on challenging control tasks that include both the unitary control and control with adaptive measurement-based quantum feedback. The RL agent that we developed can be directly applied in real-world experiments with various physical systems.
\section{Acknowledgment\label{sec:acknowledgement}}
We acknowledge a helpful discussion with Thomas F\"{o}sel. We thank the anonymous reviewers for their comments which encouraged us to make several additions.
We thank Yale Center for Research Computing for providing compute resources.
This research is supported by ARO under Grant No. W911NF-18-1-0212.
\appendix
\section{Educational example \label{sec:qubit_flip_demo}}
In this Appendix, we analyze a deliberately simple problem with a purpose of illustrating in detail various components and stages of the learning process.
\textbf{Problem setting.} Consider a qubit state preparation problem, in which the initial state is $|g\rangle$ and the target state is $|e\rangle$. Such state preparation can be achieved with a unitary rotation gate parameterized as $U(a)=\exp(-i\pi a\sigma_{x})$, where the optimal solution $a=0.5$ is known in advance. We will let the agent discover this solution in a model-free way, without knowing which unitary is actually applied. The training episodes consist of a single time-step in which the agent produces an action $a\in\mathbb{R}^{1}$, leading to execution of control circuit $U(a)$, and then collects a reward with a simple reward circuit consisting of a $\sigma_{z}$ measurement, as shown in the inset of Fig.~\ref{fig6}(a). The resulting measurement outcome $m\in\{-1,1\}$ is used to issue a reward $R=-m$, which is maximized in the target state $|e\rangle$, hence satisfying \eqref{condition}.
\begin{figure}
\caption{\label{fig6}}
\end{figure}
\textbf{Actor and critic.} In every training episode, the action $a$ is sampled according to the probability distribution specified by the policy.
Policy $\pi_{\theta}(a)$ is parameterized with learnable parameters $\theta$.
In this problem, it is convenient to choose a simple Gaussian policy
\begin{align}
\pi_{\theta}(a)=\frac{1}{\sqrt{2\pi\sigma^{2}}}\exp\bigg[-\frac{(a-\mu)^{2}}{2\sigma^{2}}\bigg],
\label{stochastic policy}
\end{align}
whose learnable parameters are $\theta=\{\mu,\sigma^2\}$. The policy defines how the agent interacts
with the environment, and it is often referred to as \emph{actor}. Another important component of PPO is the value function $V_{\theta'}$, or \emph{critic}, which helps the agent assess the value of the environment state, see Supplementary Material \cite{SuppMat}. In this example, the value function can be chosen as a simple baseline $V_{\theta'}=b$ with learnable parameters $\theta'=\{b\}$.
During the training process, parameters $\{\mu,\sigma^2,b\}$ are iteratively updated
according to the PPO algorithm.
\textbf{Training process.} The training process, illustrated in Fig.~\ref{fig6}, is split into $50$ epochs. Within each epoch $k$ the parameters of the policy remain fixed, and the agent collects
a batch of $B=30$ episodes of experience, behaving stochastically according to the current policy $\pi_{\theta_{k}}(a)$. Fig.~\ref{fig6}(a) shows the policy distribution for a selected set of epochs, and the actions that the agent tried in the episodes of corresponding epoch. The initial policy is widely distributed to ensure that the agent can adequately explore the action space. Since initially most of the actions do not lead to high-fidelity states, the agent is very likely to receive negative rewards, as shown in Fig.~\ref{fig6}(b). After every epoch the parameters of the stochastic policy \eqref{stochastic policy} are updated
$\theta_{k}\to\theta_{k+1}$ in a way that utilizes the information contained in the reward signal. Controlled by the learning rate, these updates result in gradually shifting the probability density of the stochastic policy towards more promising actions, as seen in Fig.~\ref{fig6}(a). After iterating in this manner for several epochs, the policy becomes localized near the correct value of the action, which leads to a significantly increased fraction of positive rewards. In the initial stage the best progress is achieved by rapidly learning the parameter $\mu$. However, to achieve high fidelity it is necessary to localize $\mu$ more finely, and thus in the later stages the agent shrinks the variance
$\sigma^2$ of the policy. Eventually there are almost no episodes with negative reward, meaning that the agent has achieved good performance.
\textbf{Complications.} This simple example illustrates how learning proceeds in our approach. More realistic examples contained in Section~\ref{results} follow the same basic principles. Additional complications arise from the following considerations:
(i) Typically the action space ${\cal A}$ is high-dimensional. In such case the Gaussian policy distribution is defined on $\mathbb{R}^{|{\cal A}|}$ instead of $\mathbb{R}$.
(ii) The agent can receive a nontrivial observation $o$, for instance a qubit measurement outcome, which requires incorporating adaptive measurement-based feedback into the policy. In such case, the policy distribution $\pi_{\theta}(a|o)$ is conditioned on the observation. In case of a Gaussian policy, this is achieved by making the mean and variance be parameterized functions of the observation $\{\mu,\sigma^2\}=\{\mu_{\theta}(o),\sigma^2_{\theta}(o)\}$. In our work, these functions are chosen to be neural networks.
(iii) The episodes typically consist of multiple time-steps. In such case, the policy distribution $\pi_{\theta}(a|t;h_t)$ is conditioned on the time-step index $t$ and on the history of observations $h_t=o_{0:t}$ received up to the current time-step. For notational simplicity we usually treat the time dependence as implicit and denote the policy as $\pi_{\theta}(a|h_t)$.
\section{Alternative model-free approaches \label{NM}}
\subsection{Qualitative comparison of action space exploration strategies}
\begin{figure}
\caption{\label{fig7}}
\end{figure}
It is instructive to compare the action space exploration strategy of our RL agent to widely used model-free methods. For this comparison, we focus on the Nelder-Mead (NM) simplex search used in many quantum control experiments \cite{Kelly2014, Chen2016, Rol2017}.
NM and other model-free methods that view quantum control as a standard cost function optimization problem, explore the action space by evaluating the cost function for a set of policy candidates, and using this evaluation to inform the selection of the next candidate. In NM, the latter step is done by choosing a new vertex of the simplex, as illustrated in Fig.~\ref{fig7}(a). The effectiveness of such approach relies on the ability to reliably approximate the cost function landscape by only sampling it at a small subset of points. In general, this is difficult to achieve in high-dimensional action spaces or when the cost function is stochastic. Therefore, such approach requires spending a large part of the sample budget on averaging, which limits the number of policies that it can explore under the constraint of a fixed total sample size of $M_{\rm tot}$ experimental runs.
On the other hand, in our RL approach every experimental run (episode) is performed with a slightly different policy. These random policy candidates are assigned a stochastic score of $\pm 1$, resulting from the reward measurement outcome. Even though the value of the ``cost function'' is not known to any satisfying accuracy for any of the policy candidates, the acquired information is sufficient to stochastically move the Gaussian distribution of policy candidates towards a more promising region of the action space, as illustrated in Fig.~\ref{fig7}(b).
In contrast to NM that crucially relies on averaging, our RL agent spends the sample budget to effectively explore a much larger part of the action space.
To confirm this intuition, we quantitatively compare the RL agent to widely used model-free approaches, Nelder-Mead (NM) simplex search and simulated annealing (SA), on the task of Fock state preparation when constrained to the same total sample size of $M_{\rm tot}=4\cdot 10^6$. The results of this comparison are shown in Fig.~\ref{fig2}(c), revealing that RL indeed significantly outperforms its model-free alternatives in terms of sample efficiency, especially when the effective problem dimension increases, i.e. for higher photon numbers $n$. In the following Sections, we describe the numerical experiments with NM and SA, performed using their SciPy~1.4.1 implementation \cite{2020SciPy-NMeth}.
\subsection{Nelder-Mead simplex search}
To ensure a fair comparison of NM with RL, we perform hyperparameter tuning for NM, and display the best of 6 independent optimization runs for each problem setting. Given the simplicity of the NM heuristic with its small number of hyperparameters, we believe that the performed tuning is exhaustive and that no further significant improvements are possible.
First, we study the performance of NM when it is given direct access to fidelity on the task of Fock state preparation. We initialize the control circuits with random parameters whose magnitude is swept to optimize the NM performance, as it is known to be sensitive to the simplex initialization. We find that the optimal initialization is similar to that in RL, and corresponds to random initial circuits that do not significantly deviate the oscillator state from vacuum. With this choice, the convergence of NM is shown in Fig.~\ref{fig7}(c). It exhibits fast degradation with increasing photon number $n$.
Next, we study the performance of NM in the presence of measurement sampling noise. We constrain NM to the same total sample size $M_{\rm tot}=4\cdot 10^6$ as used for RL, and optimally split the sample budget between algorithm iterations and averages per iteration to maximize the final performance. The convergence of NM with 2000 averages per iteration is shown in Fig.~\ref{fig7}(d), and can be directly compared to the RL results in Fig.~\ref{fig2}(b), clearly showing the advantage of RL in stochastic setting.
\subsection{Simulated annealing}
We use simulated annealing with Cauchy-Lorentz visiting distribution and without local search on accepted locations, which is a similar version to the recent experiment \cite{Baum2021}. We performed extensive tuning of hyperparameters, including the magnitude of the randomly initialized control circuit parameters, parameters of the visiting distribution, as well as initial and final temperatures. The optimization results with the best choice of hyperparameters are shown in Fig.~\ref{fig8}, where for each optimization trajectory we only display the best fidelity of every 100 consecutive iterations to reduce the plot clutter resulting from the periodic restarts of the annealing.
With direct access to fidelity, as shown in Fig.~\ref{fig8}(a), the convergence of SA is similar to NM, and is significantly slower than the RL agent even when the agent does not have access to fidelity. Next, we replace the fidelity with its estimator based on 1000 runs of the Fock reward circuit. This number of runs per cost function evaluation is tuned to achieve the highest performance under the constrained total sample size of $M_{\rm tot}=4\cdot 10^6$. In such stochastic setting, the performance of SA drops significantly, as shown in Fig.~\ref{fig8}(b), and is worse than that of both NM and RL.
\begin{figure}
\caption{\label{fig8}}
\end{figure}
\section{Variance of the fidelity estimator \label{sec: variance}}
Variance of the estimator \eqref{fidelity_estimator_wigner} is given by
\begin{align}
{\rm Var} =& \underset{\alpha\sim P}{\mathbb{E}}\,\underset{\psi}{\mathbb{E}}\bigg[\bigg(\frac{2}{P(\alpha)}\Pi_{\alpha}\,W_{{\rm target}}(\alpha)\bigg)^{2}\bigg]\nonumber\\
&-\bigg(\underset{\alpha\sim P}{\mathbb{E}}\,\underset{\psi}{\mathbb{E}}\bigg[\frac{2}{P(\alpha)}\Pi_{\alpha}\,W_{{\rm target}}(\alpha)\bigg]\bigg)^{2}\\
= &\int\frac{4}{P(\alpha)}W_{{\rm target}}^{2}(\alpha)d\alpha-{\cal F}^{2}, \label{variance}
\end{align}
where we made the simplifications $\Pi_{\alpha}^{2}=1$ and $\underset{\alpha\sim P}{\mathbb{E}}\big[...\big]=\int\big[...\big]P(\alpha)d\alpha$.
We now use variational calculus to find $P(\alpha)$ that minimizes \eqref{variance} with the constraint $\int P(\alpha)d\alpha=1$. The variational derivative is given by
\begin{align}
\delta({\rm Var})=\int\bigg[c-\frac{4}{P^{2}(\alpha)}W_{{\rm target}}^{2}(\alpha)\bigg]\delta P(\alpha)d\alpha,
\end{align}
where $c$ is the Lagrange multiplier for the constraint. From this we find that the optimal sampling distribution satisfies
$P(\alpha)\propto|W_{{\rm target}}(\alpha)|$
and the minimal variance is
\begin{align}
{\rm min}\{{\rm Var}\} = 4\left(\int |W_{{\rm target}}(\alpha)|d\alpha\right)^2-{\cal F}^{2}.
\end{align}
We considered the sampling problem in which $N_m=1$ parity measurement is done per phase space point, and in such setting we found an optimal sampling distribution independent of the state that is being characterized -- a rather convenient property for the online training, since the actual prepared state is not known (only the target state is known).
We can consider a different problem, in which both $W(\alpha)$ and $W_{\rm target}(\alpha)$ are known, and where the goal is to compute the fidelity integral \eqref{integral} through Monte Carlo phase space sampling.
This can be relevant, for instance, in a simulation, as an alternative to computing the integral through the Riemann sum.
In such setting, the optimal condition for the variance is modified to $P(\alpha)\propto|W(\alpha)W_{{\rm target}}(\alpha)|$. If, in addition, the fidelity is known in advance to be close to 1, i.e. $W(\alpha)\approx W_{{\rm target}}(\alpha)$, then the optimal sampling distribution becomes $P(\alpha)\propto W_{{\rm target}}^{2}(\alpha)$. The latter does not depend on the state that is being characterized, and therefore it can also be used in the online setting, as was proposed in \cite{Flammia2011, DaSilva2011}. However, such sampling distribution is going to be optimal only in the limit $N_m\gg1$.
In general, consider fidelity estimation based on $N_\alpha$ phase space points and $N_m$ parity measurements per point, such that the total number of measurements $N=N_\alpha N_m$ is fixed. Under this condition, the optimal choice is $N_\alpha=N$, $N_m=1$ (adopted in this work), in which case the distribution $P(\alpha)\propto|W_{{\rm target}}(\alpha)|$ is optimal. However, due to various hardware constraints, e.g. small memory of the FPGA controller, in some experiments it might be preferred to limit $N_\alpha=C$ and compensate for it by accumulating multiple measurements in each phase space point, i.e. $N_m=N/C\gg1$. Under such constraints, the optimal sampling corresponds to $P(\alpha)\propto W_{{\rm target}}^2(\alpha)$.
\section{Other reward measurement schemes \label{sec:other systems}}
In this Appendix, we describe how our approach can be adapted to control of other physical systems, focusing specifically on the design of probabilistic reward measurement schemes.
\subsection{State preparation in trapped ions}
Universal control of a motional state of a trapped ion can be achieved by utilizing the ion's internal electronic levels as an ancilla qubit \cite{Leibfried2003, Bruzewicz2019}. Control policies are typically produced with GRAPE, but modular constructions also exist \cite{Kneer1998}. Regardless of the control circuit parameterization, our RL approach can be used for model-free learning of its parameters. Here, we propose a reward circuit that can be used for such learning in trapped ions, based on the characteristic function.
The symmetric characteristic function of a continuous-variable system is defined as $C(\alpha)=\langle D(\alpha) \rangle$ \cite{Haroche2006}. It is equal to the 2D Fourier transform of the Wigner function, and is therefore tomographically complete and can be used to construct the fidelity estimator similar to Eq.~\eqref{fidelity_estimator_wigner}:
\begin{align}
{\cal F} &=\frac{1}{\pi}\int d^{2}\alpha\,C(\alpha)\,C^*_{{\rm target}}(\alpha)\\
&=\frac{1}{\pi}\underset{\alpha\sim P}{\mathbb{E}}\,\underset{\psi}{\mathbb{E}}\bigg[\frac{1}{P(\alpha)}D({\alpha})\,C^*_{{\rm target}}(\alpha)\bigg],
\label{fidelity_estimator_characteristic_function}
\end{align}
where $P(\alpha)$ is the phase space sampling distribution. In trapped ions the characteristic function can be measured with phase estimation of the unitary displacement operator \cite{Fluhmann2019,Fluhmann2019a}.
For simplicity, we focus on symmetric states whose characteristic function is real (e.g. Fock states, GKP states), although the procedure can be generalized to asymmetric states. In this case, the reward circuit is similar to Wigner reward, and is shown in Fig.~\ref{fig9}.
The conditional displacement gate $CD(\alpha)$, required for such a reward circuit, is typically called ``internal state dependent force'' in trapped ions community. Note that it was also recently realized in circuit QED \cite{Campagne-Ibarcq2020, Eickbusch2021}.
\begin{figure}
\caption{Reward circuit for learning preparation of arbitrary symmetric states of a continuous-variable system, based on the characteristic function. \label{fig9}}
\end{figure}
\subsection{Multi-qubit systems}
Universal control of a system of $n$ qubits with Hilbert space of dimension $d=2^n$ can be achieved with various choices of control circuits that can be tailored to the specific physical layout of the device. We refer to the literature on variational quantum algorithms for more details \cite{Cerezo2020}. Here, we focus instead on the reward measurement schemes. There exists a large body of work on quantum state certification in the multi-qubit systems \cite{Kliesch2021}. Our RL approach greatly benefits from this work, since state certification protocols can be directly converted into probabilistic reward measurement schemes for the state preparation control problem. Moreover, some state certification protocols are directly linked to fidelity estimation, which allows to construct reward measurement schemes satisfying the condition $\mathbb{E}[R]=f({\cal F})$, where $f$ is a monotonically increasing function of fidelity. Here, we propose a stabilizer reward built on the stabilizer state certification protocol \cite{Kliesch2021} and a reward for preparation of arbitrary $n$-qubit states based on the characteristic function.
\subsubsection{Stabilizer states}
Consider a stabilizer group ${\cal S}=\{I,S_{1},...,S_{d-1}\}$ and a corresponding parameterized set of POVM elements $\{\Omega_{k}\}$ which consists of projectors $\Omega_{k}=\frac{1}{2}(I+S_{k})$ onto the +1 eigenspace of each stabilizer, except for the trivial stabilizer $I$. We sample the parameter $k=1,...,d-1$ uniformly with probabilities $P({k})=\frac{1}{d-1}$, and with the associated identical reward scale $R_{k}=1$. The reward of $\pm1$ is issued based on the stabilizer measurement outcome. A straightforward calculation shows that in this case the expectation of reward satisfies $\mathbb{E}[R]=\frac{2^{n}{\cal F}-1}{2^{n}-1}$, and therefore it also automatically satisfies the condition \eqref{condition}. Note the difference from the GKP state preparation example considered in Section~\ref{sec:Stabilizer states}, where the stabilizer group was infinite and we considered sampling of only the generators of this group, which does not lead to a simple connection between $\mathbb{E}[R]$ and $\cal F$.
\subsubsection{Arbitrary states}
Stabilizer reward is only applicable to a restricted family of states. To construct a reward measurement scheme applicable to arbitrary states, we need to choose a tomographically complete set of POVM elements. The simplest such scheme is based on the Pauli group, where the fidelity estimator can be constructed based on the measurements of $d^2$ possible $n$-fold tensor-products $G_k$ of single-qubit Pauli operators \cite{Flammia2011}. Instead of sampling points $\alpha$ in the continuous phase space, in this case we sample indexes $k$ of the Pauli operators from a discrete set $\{k=1,...,d^2\}$ with probability distribution $P(k)$. Denoting the characteristic function as $C(k)=\langle G_k \rangle$, we obtain an estimator
\begin{align}
{\cal F} &= \frac{1}{d}\sum_k C(k) C_{\rm target}(k)\\
&=\frac{1}{d}\, \underset{k\sim P}{\mathbb{E}}\, \underset{\psi}{\mathbb{E}} \left[ \frac{1}{P(k)} G_k \,C_{\rm target}(k)\right].
\end{align}
Given the estimator above, the reward circuit consists of a measurement of the sampled Pauli operator.
\section{Learning gates for encoded qubits\label{gates}}
The tools demonstrated for quantum state preparation in Section~\ref{results} are applicable for learning more general quantum operations that map an input subspace of the state space to the target output subspace. For example, consider a qubit encoded in oscillator states $\{|\pm Z_L\rangle\}$ which serve as logical $Z$ eigenstates. Learning a gate $U_{\rm target}$ on this logical qubit amounts to finding an operation that simultaneously implements the state transfers $|\pm Z_L\rangle \to U_{\rm target} |\pm Z_L\rangle$ and which extends to logical qubit subspace by linearity. However, the reward circuits introduced in Section~\ref{results} will result in the final state equal to the target up to an arbitrary phase factor, hence it is insufficient to only use the set $\{|\pm Z_L\rangle\}$ during the training. To constrain the phase factor, we extend this set to include all cardinal points $\{|\pm X_L\rangle, |\pm Y_L\rangle, |\pm Z_L\rangle\}$ on the logical Bloch sphere.
The training process for a gate is a straightforward generalization of the training for state preparation depicted in Fig.~\ref{fig1}, as summarized below:
{\bf 1.} Sample initial state $|\psi_0\rangle \in \{|\pm X_L\rangle, |\pm Y_L\rangle, |\pm Z_L\rangle\}$. Start the episode by preparing this state.
{\bf 2.} Run the episode by applying $T$ steps of the control circuit, resulting in a state $|\psi_T\rangle$.
{\bf 3.} Apply a reward circuit to state $|\psi_T\rangle$, with the target state given by $|\psi_{\rm target}\rangle = U_{\rm target}|\psi_0\rangle$.
\begin{figure}
\caption{\label{fig10}}
\end{figure}
Here, we demonstrate learning of logical gates for the Fock encoding with $|+Z\rangle = |0\rangle$ and $|-Z\rangle=|1\rangle$, and for the GKP encoding with $\Delta=0.3$. In these numerical experiments, we sample a new initial state every epoch, and use the same state for all batch members within the epoch (preparation of the initial states can be learned beforehand). We use ideal SNAP-displacement control circuit, as shown in Fig.~2(a), and a Wigner reward circuit as shown in Fig.~4(a) with a single phase space point and a single measurement per policy candidate. The choice of training hyperparameters is summarized in the Supplementary Material \cite{SuppMat}.
The training results are displayed in Figure~\ref{fig10}, for the Hadamard $H$ and Pauli $X$ gates on the Fock qubit, and a non-Clifford $\sqrt{H}$ gate on the GKP qubit. We use average gate fidelity \cite{Nielsen2002} as an evaluation metric. These results show that stable convergence is achieved in such QOMDP despite an additional source of randomness due to the sampling of initial states. The total number of experimental realizations used by the agent is $10^6$, $2\cdot 10^6$ and $4\cdot 10^6$ for the $H$, $X$ and $\sqrt{H}$ gates respectively.
In the future work, error amplification technique based on gate repetitions, such as randomized benchmarking (RB), can be incorporated to increase the SNR of the reward, similarly to how it is done in other quantum control demonstrations \cite{Kelly2014, Werninghaus2021}. However, this technique could be modified in the spirit of our approach, to use a single experimental realization of a random RB sequence as one episode, instead of averaging them to suppress the stochasticity of the cost function.
\appendix
\title{Supplementary Material \\[1ex] \large
``Model-Free Quantum Control with Reinforcement Learning''}
\author{V. V. Sivak}
\affiliation{Department of Applied Physics, Yale University, New Haven, CT 06520, USA}
\author{A. Eickbusch}
\affiliation{Department of Applied Physics, Yale University, New Haven, CT 06520, USA}
\author{H. Liu}
\affiliation{Department of Applied Physics, Yale University, New Haven, CT 06520, USA}
\author{B. Royer}
\affiliation{Department of Physics, Yale University, New Haven, CT 06520, USA}
\author{I. Tsioutsios}
\affiliation{Department of Applied Physics, Yale University, New Haven, CT 06520, USA}
\author{M. H. Devoret}
\affiliation{Department of Applied Physics, Yale University, New Haven, CT 06520, USA}
\maketitle
\onecolumngrid
\renewcommand{\thesection}{S\arabic{section}}
\renewcommand{\thetable}{S\arabic{table}}
\renewcommand{\thefigure}{S\arabic{figure}}
\renewcommand{\theequation}{S\arabic{equation}}
\setcounter{equation}{0}
\setcounter{figure}{0}
\section{Introduction to Proximal Policy Optimization}
In this section, we present a brief derivation of the Proximal Policy Optimization algorithm (PPO) \cite{Schulman2017SM}. For simplicity, we consider a fully-observable MDP, but the results can be generalized to partially-observable MDPs.
The performance measure $J$ in reinforcement learning is the expected return $R(\tau)$ of the trajectory (episode) $\tau$ generated while following the policy $\pi_{\theta}$:
\begin{equation}
J(\pi_{\theta})=\underset{\tau\sim\pi_{\theta}}{\mathbb{E}}[R(\tau)]=\sum_{\tau}R(\tau)P(\tau|\theta), \label{eq:performance measure}
\end{equation}
where the probability $P(\tau|\theta)$ of the trajectory $\tau$ under the policy $\pi_{\theta}$ is given by the product over all time steps of the conditional probabilities $\pi_{\theta}(a_{t}|s_{t})$ of choosing the action $a_t$ in the state $s_t$, and ${\cal T}(s_{t+1}|s_{t},a_{t})$ of environment transition $s_t\to s_{t+1}$ given this choice of action:
\begin{equation}
P(\tau|\theta)=\prod_t {\cal T}(s_{t+1}|s_{t},a_{t})\pi_{\theta}(a_{t}|s_{t}).
\end{equation}
With this expression substituted in Eq.~\eqref{eq:performance measure}, we can compute the gradient of the performance measure
\begin{align}
\nabla_{\theta}J(\pi_{\theta}) & =\sum_{\tau}R(\tau)\nabla_{\theta}P(\tau|\theta)=\sum_{\tau}R(\tau)P(\tau|\theta)\nabla_{\theta}\log P(\tau|\theta)\\
& =\underset{\tau\sim\pi_{\theta}}{\mathbb{E}}\big[R(\tau)\nabla_{\theta}\log P(\tau|\theta)\big]=\sum_{t}\underset{\tau\sim\pi_{\theta}}{\mathbb{E}}\bigg[R(\tau)\nabla_{\theta}\log\pi_{\theta}(a_{t}|s_{t})\bigg]. \label{gradient}
\end{align}
Note that the environment transition function ${\cal T}(s_{t+1}|s_{t},a_{t})$ has dropped out because it is independent of the policy parameters -- a crucial feature enabling model-free learning. The performance gradient \eqref{gradient} has a natural form of the sum of policy gradients over all time-steps of the trajectory, weighted by the return of the trajectory $R(\tau)$. Such gradient will increase the probabilities of actions that caused high return in the past experiences, and decrease the probabilities of actions that caused low return. However, as explained in detail in \cite{Schulman2015SM}, such weighting with $R(\tau)$ is sub-optimal in terms of the estimator variance. For instance, it propagates the influence of the rewards received prior to applying a given action $a_t$ on the score of this action, which indeed seems counter-intuitive. A better weighting can be obtained by replacing the full trajectory return $R(\tau)$ with the partial return $R(\tau; s_t, a_t)$ accumulated in trajectory $\tau$ after visiting the state $s_t$ and taking the action $a_t$. Such replacement preserves the unbiased nature of the estimator, but allows to reduce its variance. Further improvement can be obtained by subtracting the state-dependent baseline $b(s_t)$, which helps to ensure that good (relative to the baseline) actions have positive weight, while bad actions have negative weight. The baseline can be any function that only depends on the state $s_t$, but the optimal baseline would satisfy the condition $b(s_t)={\mathbb{E}_{\tau\sim\pi_{\theta}}}[R(\tau;s_t)]$, where $R(\tau;s_t)$ is the partial return accumulated in trajectory $\tau$ after visiting the state $s_t$ and averaged over all possible actions in that state. In practice, since the optimal baseline is not known in advance, it is represented with a {\it value} neural network $V_{\theta'}(s_{t})$ whose parameters $\theta'$ are learned concurrently with parameters $\theta$ of the policy network. 
Incorporating these improvements leads to the following weighting factor for the policy gradients in Eq.~\eqref{gradient}, known as the empirical advantage function
\begin{align}
A(\tau; s_t, a_t)=R(\tau; s_t, a_t)-V_{\theta'}(s_{t}). \label{advantage}
\end{align}
When using the advantage estimator \eqref{advantage} in place of the empirical return $R(\tau)$ in \eqref{gradient}, the performance gradient becomes
\begin{align}
\nabla_{\theta}J(\pi_{\theta})=\nabla_{\theta}\sum_{t}L_{t}^{PG},\qquad L_{t}^{PG}=\hat{\mathbb{E}}\left[A(\tau;s_t, a_t)\log\pi_{\theta}(a_{t}|s_{t})\right],
\end{align}
where $L_{t}^{PG}$ is the per-time-step policy-gradient loss that can be used with automatic differentiation, and $\hat{\mathbb{E}}[...]$ is an empirical average over a finite batch of $B$ trajectories.
In the actor-critic methods \cite{Sutton2017SM}, which PPO also belongs to, the value function $V_{\theta'}$ (critic) is learned concurrently with the policy $\pi_{\theta}$ (actor) to predict the partial return $R(\tau; s_t)$. Typically, this is achieved with a simple quadratic loss $L_{t}^{V}=[R(\tau;s_t)-V_{\theta'}(s_{t})]^{2}$. Policy-gradient loss $L_{t}^{PG}$ and value-function loss $L_{t}^{V}$ are combined to compute the total gradient which is passed on to the optimizer.
What was described so far is the basic working principle of the REINFORCE algorithm \cite{Sutton1999SM}. A convenient feature of the simple policy gradient is that we can use the first order optimizers such as stochastic gradient descent (SGD) or Adam \cite{Kingma2014SM} to minimize the loss function. However, it was found that such policy optimization in a high dimensional space of neural network parameters is often unstable -- there can be drastic jumps in the policy performance even with the small changes of the parameters $\theta$ of the policy. The trust-region policy optimization algorithm (TRPO) \cite{Schulman2015aSM}, which is a precursor to PPO, attempted to cure this issue by using an expensive second order optimization within the trust region where the Kullback–Leibler divergence between the old and the new policy is constrained. PPO emerged as an attempt to get the best of both worlds: efficiency of the first order optimizers and guarantee that the policy will not make any catastrophic jumps. It achieves this by constructing a special (although very simple) loss function that does not incentivize the optimizer to deviate the new policy far from the old one.
To derive the PPO loss function, we first rewrite the per-time-step gradient of $L_t^{PG}$ using importance sampling
\begin{equation}
\underset{\tau\sim\pi_{\theta}}{\hat{\mathbb{E}}}\big[\frac{\nabla_{\theta}\pi_{\theta}(a_{t}|s_{t})}{\pi_{\theta}(a_{t}|s_{t})}A(\tau; s_t,a_t)\big]=\underset{\tau\sim\pi_{{\rm old}}}{\hat{\mathbb{E}}}\big[\frac{\pi_{\theta}(a_{t}|s_{t})}{\pi_{\theta_{{\rm old}}}(a_{t}|s_{t})}\frac{\nabla_{\theta}\pi_{\theta}(a_{t}|s_{t})}{\pi_{\theta}(a_{t}|s_{t})}A(\tau;s_t,a_t)\big]=\underset{\tau\sim\pi_{{\rm old}}}{\hat{\mathbb{E}}}\big[\frac{\nabla_{\theta}\pi_{\theta}(a_{t}|s_{t})}{\pi_{\theta_{{\rm old}}}(a_{t}|s_{t})}A(\tau;s_t,a_t)\big],
\end{equation}
which leads to the per-time-step loss contribution $L_{t}=\hat{\mathbb{E}}\big[A(\tau;s_t,a_t)\frac{\pi_{\theta}(a_{t}|s_{t})}{\pi_{\theta_{{\rm old}}}(a_{t}|s_{t})}\big]$.
If a small change in the policy parameters $\theta$ causes policy to differ significantly from the old policy with parameters $\theta_{\rm old}$, the importance ratio
$\frac{\pi_{\theta}(a_{t}|s_{t})}{\pi_{\theta_{{\rm old}}}(a_{t}|s_{t})}$ will deviate significantly from 1. PPO simply clips the importance ratio to the range $(1-\epsilon,1+\epsilon)$ (where typically $\epsilon\sim0.2$), leading to the new per-time-step loss
\begin{equation}
L_{t}^{PPO}=\hat{\mathbb{E}}\left[{\rm min}\bigg(\frac{\pi_{\theta}(a_{t}|s_{t})}{\pi_{\theta_{{\rm old}}}(a_{t}|s_{t})}A(\tau;s_t,a_t),\,{\rm clip}\bigg[\frac{\pi_{\theta}(a_{t}|s_{t})}{\pi_{\theta_{{\rm old}}}(a_{t}|s_{t})},\,1-\epsilon,\,1+\epsilon\bigg]A(\tau;s_t,a_t)\bigg)\right],
\end{equation}
replacing the $L_t^{PG}$ loss. With such modification, if a certain policy update $\theta_{{\rm old}}\to\theta$ attempts to reduce the loss by making the importance ratio deviate significantly from 1, it will not be able to achieve this because the importance ratio will be clipped and the loss will not benefit from such an update. Therefore, importance ratio clipping removes the incentive for such changes, although it does not strictly guarantee that they will not happen. Empirically, this leads to significantly improved stability of the training, which is especially relevant in stochastic environments.
\section{Implementation of the training environment}
We realize a custom training environment following TensorFlow Agents interface \cite{Hafner2017SM}. TF-Agents is an open-source library for reinforcement learning which provides reliable implementations of several popular algorithms including PPO. The TensorFlow implementation of both the custom training environment and the agent was efficiently accelerated with the graphics processing unit NVIDIA Tesla V100. Given the computational demand of our task (tuning hyperparameters, exploring different fidelity estimators, training for different quantum states, collecting statistics over multiple random seeds, etc), the GPU acceleration of the simulation can be acknowledged as the most significant factor in the success of this numerical project.
As an example, in the training for Fock state preparation described in Section IV A, the wall clock time of the quantum simulation alone (excluding the neural network update time) is 13 minutes in total for 4000 epochs consisting of 1000 episodes each.
Several most significant factors contributing to the numerical complexity of the project:
\begin{enumerate}
\item The Hilbert space of the oscillator is truncated at $N=100$ states in the photon number basis, and in product with the ancilla qubit this leads to $200$-dimensional vectors representing the quantum states (for the GKP states, the truncation is increased to $N=200$). The operators on the joint Hilbert space are $200\times200$ complex-valued matrices represented in the single-precision floating point format to speed up the computation.
\item At each time step $t=1,...,T$ of the trajectory, the agent predicts a {\it new} parameterization of the control circuit, and thus the operators cannot be pre-computed and stored in memory and instead need to be computed on-the-fly. For the displacement operators $D(\alpha)$, instead of performing expensive matrix exponentiation at every time-step, we implement this subroutine efficiently using the Baker–Campbell–Hausdorff (BCH) formula for matrix exponential, see Section~\ref{BCH}. For the finite-duration $\rm SNAP_\tau(\varphi)$ gate, we use a closed-form approximate model vectorizable on the GPU, instead of time-domain integration of the Schrödinger equation, see Section~\ref{SNAP}.
\item Model-free reinforcement learning, and especially policy-gradient algorithms, is known to have poor sample efficiency, which stems from the need to collect new training dataset after each update of the policy. In our state preparation examples, each training requires tens of millions of episodes. We implemented an efficient vectorized quantum trajectory simulator on the GPU, which allows to collect batches of $B\sim 1000$ episodes in parallel. In addition to unitary dynamics, it allows to simulate asynchronous quantum jumps (not used in this work).
\end{enumerate}
\subsection{Efficient implementation of the displacement operator \label{BCH}}
In addition to utilizing GPU acceleration and vectorization for the batch, we can further take advantage of the structure of the displacement operator $D(\alpha)=\exp(\alpha a^\dagger -\alpha^* a)$ to customize the matrix exponentiation routine. We first rewrite $D(\alpha)$ using the position $x=(a+a^\dagger)/\sqrt{2}$ and momentum $p=i(a^\dagger-a)/\sqrt{2}$ Hermitian operators:
\begin{align}
D(\alpha) = e^{-i\sqrt{2}{\rm Re}(\alpha)p + i\sqrt{2}{\rm Im}(\alpha) x}.
\end{align}
This expression can be further transformed using the BCH formula $e^{A+B}=e^{A}e^{B}e^{-\frac{1}{2}[A,B]}$, which is exact in this case because both $x$ and $p$ commute with their commutator $[x,p]=i$:
\begin{align}
D(\alpha)=e^{i\sqrt{2}{\rm Im}(\alpha)x}e^{-i\sqrt{2}{\rm Re}(\alpha)p}e^{-i{\rm Im}(\alpha){\rm Re}(\alpha)}.
\end{align}
Given this form, $D(\alpha)$ can be computed efficiently by pre-diagonalizing $x$ and $p$ operators in the beginning of the training as
\begin{align}
x=U_{x}\,{\rm diag}(x)\,U_{x}^{\dagger}\qquad p=U_{p}\,{\rm diag}(p)\,U_{p}^{\dagger},
\end{align}
and then performing only element-wise diagonal matrix exponentiation at each time step
\begin{align}
D(\alpha)=U_{x}\,e^{i\sqrt{2}{\rm Im}(\alpha){\rm diag}(x)}\,U_{x}^{\dagger}\,U_{p}\,e^{-i\sqrt{2}{\rm Re}(\alpha){\rm diag}(p)}\,U_{p}^{\dagger}\,e^{-i{\rm Im}(\alpha){\rm Re}(\alpha)}.
\end{align}
The complexity of this algorithm is $O(N^{3})$ due to matrix multiplication, which is similar to the complexity of the general matrix exponentiation, but the pre-factor is $10-100$ times smaller, see Fig.~\ref{fig_S1} for comparison.
\begin{figure}
\caption{\label{fig_S1}}
\end{figure}
\subsection{Simulation of the finite-duration SNAP \label{SNAP}}
To simulate the effect of the SNAP gate in the real experiment, we need to build an approximate model of the partially-selective qubit pulse, but it is important to realize that the agent is not aware of it by construction. We require that this model captures the essence of the finite-duration pulse and that it can be efficiently implemented numerically, but it does not necessarily have to be very accurate. In general, the problem of driving the qubit with arbitrary time-dependent field is not analytically solvable. The formal solution involves the time-ordered exponential, which can be numerically evaluated with expensive time-domain integration of the Schrödinger equation assuming a certain pulse shape, but this is not an efficient solution for our purpose. Instead, we would like to obtain a closed-form model for the unitary ${\rm SNAP}_{\tau}(\varphi)$ which can be vectorized on the GPU.
The perfect SNAP gate is equivalent to
\begin{equation}
{\rm SNAP}(\varphi)=\sum_{n}|n\rangle\langle n|\otimes R_{\pi-\varphi_{n}}(\pi)R_{0}(\pi)\label{eq:snap with ideal selective pulse}
\end{equation}
where $R_{\phi}(\vartheta)=\exp\big(-i\frac{\vartheta}{2}(\cos\phi\,\sigma_{x}+\sin\phi\,\sigma_{y})\big)$
is the qubit rotation operator. Such decomposition is inspired by
the availability of well-controlled selective qubit rotations in the
strong dispersive limit of circuit QED.
In general, given that the dispersive coupling $H_{c}/h=\frac12\chi a^{\dagger}a \sigma_z$
preserves the oscillator photon number, driving the qubit with arbitrary
near-resonant time-dependent pulse of duration $\tau$ leads to the
joint unitary gate $U=\sum_{n}|n\rangle\langle n|\otimes R_{\phi_{n}}(\vartheta_{n})$
where the gate parameters $\{(\phi_{n},\vartheta_{n})\}_{n=0}^{\infty}$
depend on the pulse composition which in turn depends on the action produced by the agent.
In experiment, the controlled mapping
from parameters of the pulse waveform to parameters of the unitary
can be achieved in two limiting cases of long $\chi\tau\gg1$ selective
pulses or short $\chi\tau\ll1$ un-selective pulses. The intermediate
case $\chi\tau\sim1$ is hard to treat analytically because of the
absence of small parameters, and hard to simulate with
high accuracy because of the sensitivity to experimental distortions
(unlike in the limiting cases where distortions could be calibrated
out).
To practically implement the SNAP truncated at $\Phi$ levels, we use
$\Phi$ carrier frequency components $f_{k}=f_{q}-k\chi$ with $k=0,...,\Phi-1$
in the composite qubit pulse, where $f_{q}$ is the qubit frequency.
For simplicity, we assume that pulse components have rectangular envelopes with amplitudes $\Omega_{k}$ and phases $\delta_{k}$.
Such a composite pulse is described by the drive Hamiltonian $H_{d}/h=\frac{1}{2}({\rm Re}[\Omega(t)] \sigma_x + {\rm Im}[\Omega(t)] \sigma_y)$, where $\Omega(t)=\sum_{k=0}^{\Phi-1} \Omega_k e^{2\pi\chi k t i+\delta_k i}$.
After performing the unitary transformation on the total Hamiltonian $H=H_c+H_d$ to eliminate the dispersive term $H_c$,
we obtain the following time-dependent Hamiltonian describing the system evolution during the pulse
\begin{align}
H(t)=\sum_{n=0}^\infty|n\rangle\langle n|\otimes\sum_{k=0}^{\Phi-1}\frac{\Omega_{k}}{2}\bigg[\cos(\Delta_{kn}t+\delta_{k})\sigma_{x}+\sin(\Delta_{kn}t+\delta_{k})\sigma_{y}\bigg],
\end{align}
where $\Delta_{kn}=2\pi\chi(k-n)$. In this sum, the terms with $k=n$
are resonant and thus non-rotating, while all other terms correspond
to detuned driving of transition $n$ with the pulse component $k$
and are thus rotating.
To obtain a simple closed-form model of the unitary gate $U={\cal T}\exp(-i\int_0^\tau H(t)dt)$ implemented by such pulse, we use the first-order rotating wave approximation (RWA) and replace the time-dependent Hamiltonian $H(t)$ with a constant time-averaged Hamiltonian $\overline{H}=\frac{1}{\tau}\int_0^\tau H(t)dt$. Effectively, this removes the time-ordering operation in the unitary $U=\exp(-i\int_0^\tau H(t)dt)$, leading to
\begin{equation}
U=\sum_{n=0}^{\infty}|n\rangle\langle n|\otimes\exp\bigg\{-i\sum_{k=0}^{\Phi-1}\frac{\Omega_{k}\tau}{2}\bigg[\frac{\sin(\Delta_{kn}\tau+\delta_{k})-\sin(\delta_{k})}{\Delta_{kn}\tau}\sigma_{x}-\frac{\cos(\Delta_{kn}\tau+\delta_{k})-\cos(\delta_{k})}{\Delta_{kn}\tau}\sigma_{y}\bigg]\bigg\}. \label{eq:pulse model}
\end{equation}
This is not a parametric approximation in some small parameter, but it captures the essential effect of the pulse. In particular, it will lead to the leftover entanglement between the qubit and the oscillator after the SNAP gate.
In the limit $\chi\tau\gg1$ the unitary \eqref{eq:pulse model} simplifies to the selective qubit rotation where each number-split transition is only affected by the resonant pulse component
\begin{equation}
U_{\chi\tau\gg1}=\sum_{n=0}^{\infty}|n\rangle\langle n|\otimes\exp\bigg(-i\frac{\vartheta_{n}}{2}(\cos\phi_{n}\sigma_{x}+\sin\phi_{n}\sigma_{y})\bigg),\label{eq:selective}
\end{equation}
where $\vartheta_{k}=\Omega_{k}\tau/2$, $\phi_{k}=\delta_{k}$ for $k=0,...,\Phi-1$ and $\vartheta_{k}=\phi_{k}=0$ for $k\ge\Phi$
is a simple mapping from the parameters of the pulse $\{(\delta_{k},\Omega_{k})\}_{k=0}^{\Phi-1}$
to the parameters of the unitary $\{(\phi_{k},\vartheta_{k})\}_{k=0}^{\infty}$.
In the short-time limit $\chi\tau\ll1$ the unitary \eqref{eq:pulse model}
yields the un-selective qubit rotation
\begin{align}
U_{\chi\tau\ll1}=I\otimes\exp\bigg(-i\sum_{k=0}^{\Phi-1}\vartheta_{k}[\cos\phi_{k}\sigma_{x}+\sin\phi_{k}\sigma_{y}]\bigg).
\end{align}
We use the unitary \eqref{eq:pulse model} to interpolate between
these two limits and to build a partially-selective ${\rm SNAP}_{\tau}(\varphi)$
gate in the following way. In this gate, the first qubit pulse $R_{0}(\pi)$
can always be done in the fast un-selective manner, and thus we simulate
it as a perfect rotation. The second pulse depends on the action parameters
$\{\varphi_{k}\}_{k=0}^{\Phi-1}$ produced by the agent. We map the action
component $\varphi_{k}$ to the corresponding pulse component assuming
that the pulse is perfectly selective as in \eqref{eq:selective},
$\delta_{k}=\pi-\varphi_{k}$ and $\Omega_{k}=2\pi/\tau$, but end
up applying the partially selective unitary \eqref{eq:pulse model}
with these parameters. Such an approximate model allows us to roughly capture
the expected degradation of performance that the protocol would exhibit
in the real experiment, in contrast to other ad-hoc simulations of
control imperfections, such as, for instance, injecting random static
offsets in the qubit rotation matrix.
\section{Simulation parameters and training hyperparameters}
The simulation parameters and training hyperparameters used for state preparation examples in the main text are summarized in Table~\ref{hyperparameters}, and those used for gates on encoded qubits in Appendix E are summarized in Table~\ref{gates hyperparameters}.
\begin{table}
\begin{centering}
\renewcommand{\arraystretch}{1.3}
\begin{tabular}{|c|c|c|c|c|c|}
\hline
Target state & fock1--10 & cat2 & bin1 & gkp &
\begin{minipage}[t]{0.10\columnwidth}
fock3\\
(adaptive)
\end{minipage}
\tabularnewline
\hline
Epochs & $4\cdot10^3$ &
\begin{minipage}[t]{0.10\columnwidth}
$2\cdot10^4$,\\ $4\cdot10^3$,\\ $10^3$
\end{minipage} &
\begin{minipage}[t]{0.10\columnwidth}
$2\cdot10^4$,\\ $1\cdot10^4$,\\ $4\cdot10^3$
\end{minipage} & $10^4$ & $2.5\cdot10^4$
\tabularnewline
\hline
Episodes per epoch & $10^3$ & $10^3$ & 500 & $10^3$ & $10^3$
\tabularnewline
\hline
Learning rate schedule &
\begin{minipage}[t]{0.16\columnwidth}
$10^{-3}$, epoch $<500$\\
$10^{-4}$, epoch $\ge500$
\end{minipage} & $10^{-3}$ & $10^{-3}$ & $10^{-3}$ &
\begin{minipage}[t]{0.17\columnwidth}
$10^{-3}$, epoch $<1000$\\
$10^{-4}$, epoch $\ge1000$
\end{minipage}
\tabularnewline
\hline
Gradient norm clipping & 1 & 1 & 1 & 1 & 1\tabularnewline
\hline
Importance ratio clipping & $1\pm0.1$ & $1\pm0.1$ & $1\pm0.2$ & $1\pm0.25$ & $1\pm0.1$\tabularnewline
\hline
\begin{minipage}[t]{0.20\columnwidth}
Policy \& value networks hidden layers
\end{minipage}
& \begin{minipage}[t]{0.10\columnwidth}
LSTM(16)\\
Dense(100)\\
Dense(50)
\end{minipage}
& \begin{minipage}[t]{0.10\columnwidth}
LSTM(12)
\end{minipage} &
\begin{minipage}[t]{0.10\columnwidth}
LSTM(12)\\
Dense(50)
\end{minipage}
& \begin{minipage}[t]{0.10\columnwidth}
LSTM(12)
\end{minipage} & \begin{minipage}[t]{0.10\columnwidth}
LSTM(16)\\
Dense(100)\\
Dense(50)
\end{minipage}
\tabularnewline
\hline
Value prediction loss weight & $5\cdot10^{-3}$ & $5\cdot10^{-3}$ & $5\cdot10^{-3}$ & $5\cdot10^{-3}$ & $5\cdot10^{-3}$\tabularnewline
\hline
Joint Hilbert space size, $2N$ & 200 & 200 & 200 & 400 & 200
\tabularnewline
\hline
SNAP truncation, $\Phi$ & 15 & 10 & 15 & 30 & 7
\tabularnewline
\hline
Time steps, $T$ & 5 & 5 & 8 & 9 & 5
\tabularnewline
\hline
Reward function & Fock &
\begin{minipage}[t]{0.10\columnwidth}
Wigner \\
(1, 10, 100\\
pts avg)
\end{minipage} &
\begin{minipage}[t]{0.10\columnwidth}
Wigner \\
(1, 10, 100\\
pts avg)
\end{minipage} & Stabilizers & Fock
\tabularnewline
\hline
\end{tabular}
\par
\end{centering}
\caption{Simulation parameters and training hyperparameters used for state preparation examples in the main text.\label{hyperparameters}}
\end{table}
\begin{table}
\begin{centering}
\renewcommand{\arraystretch}{1.3}
\begin{tabular}{|c|c|c|c|}
\hline
Target gate & $H$ & $X$ & $\sqrt{H}$
\tabularnewline
\hline
Logical encoding & Fock & Fock & GKP, $\Delta=0.3$
\tabularnewline
\hline
Epochs & $2\cdot10^3$ & $4\cdot10^3$ & $8\cdot10^3$
\tabularnewline
\hline
Episodes per epoch & 500 & 500 & 500
\tabularnewline
\hline
Learning rate schedule & $10^{-3}$ & $10^{-3}$ & $10^{-3}$
\tabularnewline
\hline
Gradient norm clipping & 1 & 1 & 1
\tabularnewline
\hline
Importance ratio clipping & $1\pm0.1$ & $1\pm0.1$ & $1\pm0.1$
\tabularnewline
\hline
\begin{minipage}[t]{0.20\columnwidth}
Policy \& value networks hidden layers
\end{minipage}
& \begin{minipage}[t]{0.10\columnwidth}
LSTM(12)\\
Dense(50)
\end{minipage}
& \begin{minipage}[t]{0.10\columnwidth}
LSTM(12)\\
Dense(50)
\end{minipage} &
\begin{minipage}[t]{0.10\columnwidth}
LSTM(12)\\
Dense(50)
\end{minipage}
\tabularnewline
\hline
Value prediction loss weight & $5\cdot10^{-3}$ & $5\cdot10^{-3}$ & $5\cdot10^{-3}$
\tabularnewline
\hline
Joint Hilbert space size, $2N$ & 200 & 200 & 300
\tabularnewline
\hline
SNAP truncation, $\Phi$ & 15 & 15 & 80
\tabularnewline
\hline
Time steps, $T$ & 4 & 4 & 1
\tabularnewline
\hline
Reward function & Wigner (1pt) & Wigner (1pt) & Wigner (1pt)
\tabularnewline
\hline
\end{tabular}
\par
\end{centering}
\caption{
Simulation parameters and training hyperparameters used for logical gate examples in Appendix E. \label{gates hyperparameters}}
\end{table}
\bibliographystyleSM{apsrev_longbib}
\bibliographySM{SM}
\end{document} |
\begin{document}
\title{Three Simulation Algorithms for Labelled Transition Systems}
\begin{abstract}
Algorithms which compute the coarsest simulation preorder are generally
designed on Kripke structures. Only in a second time they are extended to
labelled transition systems. By doing this, the size of the alphabet
appears in general as a multiplicative factor to both time and space
complexities. Let $Q$ denote the state space, $\rightarrow$ the transition
relation, $\Sigma$ the alphabet and $P_{sim}$ the partition of $Q$ induced
by the coarsest simulation equivalence. In this paper, we propose a base
algorithm which minimizes, since the first stages of its design, the
incidence of the size of the alphabet in both time and space
complexities. This base algorithm, inspired by the one of Paige and Tarjan in
1987 for bisimulation and the one of Ranzato and Tapparo in 2010 for
simulation, is then derived in three versions. One of them has the best bit
space complexity up to now,
$O(|P_{sim}|^2+|{\rightarrow}|.\log|{\rightarrow}|)$,
while another one has the best time complexity up to now,
$O(|P_{sim}|.|{\rightarrow}|)$. Note the absence of the alphabet in these
complexities. A third version happens to be a nice compromise between space
and time since it runs in $O(b.|P_{sim}|.|{\rightarrow}|)$ time, with $b$ a
branching factor generally far below $|P_{sim}|$, and uses
$O(|P_{sim}|^2.\log|P_{sim}|+|{\rightarrow}|.\log|{\rightarrow}|)$ bits.
\end{abstract}
\section{Introduction}
\label{sec:introduction}
Simulation is a behavioral relation between processes \cite{Milner71}. It is
mainly used to tackle the state-explosion problem that arises in \emph{model
checking} \cite{GPP03,ABH+08} and to speed up the test of inclusion of languages
\cite{ACH+10}. It can also be used as a sufficient condition for the inclusion
of languages when this test is undecidable in general \cite{CG11}. The paper
\cite{GPP03} gives a complete state of the art of the notion.
\subsection{Last Ten Years}
\label{sec:lastYears}
Let $T=(Q,\Sigma,\rightarrow)$ be a Labelled Transition System (LTS) with
$Q$ its set of states, $\Sigma$ its alphabet and
$\rightarrow\subseteq Q\times\Sigma\times Q$ its transition relation. A
relation $\mathscr{S}\subseteq Q\times Q$ is a simulation over $T$ if for any
transition $q_1\xrightarrow{a}q'_1$ and any state $q_2\in Q$ such that
$(q_1,q_2)\in \mathscr{S}$, there is a transition $q_2\xrightarrow{a}q'_2$
such that $(q'_1,q'_2)\in \mathscr{S}$. The simulation $\mathscr{S}$ is a
bisimulation if $\mathscr{S}^{-1}$ is also a simulation.
Given any preorder (reflexive and
transitive relation) $\mathscr{R}\subseteq Q\times Q$ the
purpose of this paper is to design efficient algorithms which compute the coarsest
simulation over $T$ included in $\mathscr{R}$.
In the context of Kripke structures, which are transition systems where
only states are labelled, the most efficient algorithms are GPP, the one of
Gentilini, Piazza and Policriti \cite{GPP03} (corrected by van Glabbeek and
Ploeger \cite{GP08}), for the space efficiency, and RT, the one of Ranzato and
Tapparo \cite{RT10}, for the time efficiency. These two algorithms either
use, for GPP, or extend, for RT, HHK the one of Henzinger,
Henzinger and Kopke \cite{HHK95}.
\begin{center}
\begin{tikzpicture}[shorten >=2pt, shorten <=2pt,font=\footnotesize]
\path coordinate [label=right:\textcolor{black}{$q'$}] (q') [fill]
circle (1pt) ;
\path ++(0.2,0) ++(90:1.5cm) +(1,0) coordinate (r') [fill] circle (1pt)
; \path (r') coordinate[label=right:\textcolor{black}{$r'$}] ;
\path (q') ++(90:1.5cm) ++(-3,0) coordinate (r) ++(-.2,0)
node[circle,minimum size=.5cm, label={left:$r\in \remove(q')$}] {};
\path (r) coordinate[label=left:\textcolor{black}{$r$}] [fill] circle
(1pt) ;
\path ++(-0.8,1)
coordinate[label=above:\textcolor{black}{$r''$}] (r'')
[fill] circle (1pt) ;
\path let \p{r}=(r),\p{q'}=(q'.center) in (\x{r},\y{q'}) coordinate
(q'') ; \path (q'') coordinate [label=left:\textcolor{black}{$q$}]
(q) [fill] circle (1pt) ;
\path[every edge/.style={->,dashed,draw},circle,inner sep=2pt, every node/.style={fill=white}]
(q') edge node {$\mathscr{R}$} (r'') edge node
{$\overline{\mathscr{R}}$} (r') (q) edge node {$\mathscr{R}$} (r) ;
\path[->,auto,circle,inner sep=1pt,thick] (r) edge (r') edge (r'')
(q) edge (q') ;
\node [cross out,draw=red,line width=2pt,draw opacity=.70,text
width=1cm] at (r'') {};
\end{tikzpicture}
\end{center}
The starting idea of HHK, see the above figure, is to
consider couples $(q', r')$ that do not belong to $\mathscr{R}$ (thus
$(q',r')$ belongs to $\overline{\mathscr{R}}$ the
complement of $\mathscr{R}$) and to
propagate this knowledge backward by refining $\mathscr{R}$. For each state
$q'$ a set of states, $\remove(q')$, is maintained. This set is
included in the complement of
$\pre(\mathscr{R}(q'))$, the set of states which have at least one outgoing
transition leading to a state related to $q'$ by $\mathscr{R}$.
In the figure above, to illustrate that a state $r$ belongs to
$\remove(q')$ we depict that there is no state $r''$ reachable from $r$ and
such that $(q',r'')$ belongs to $\mathscr{R}$.
For a given state $q'$, $\remove(q')$ is used as
follows: for each couple $(q,r)\in \pre(q')\times\remove(q')$, with
$\pre(q')$ the set of states leading, by a transition, to $q'$, if $(q,r)$
belongs to $\mathscr{R}$ then it is removed and $\remove(q)$ is possibly
updated. The couple $(q,r)$ is safely removed from $\mathscr{R}$ because by
the definition of $r\in\remove(q')$ it is impossible that $(q,r)$ belongs to a
simulation included in $\mathscr{R}$. The algorithm HHK runs in
(remember, for the moment transitions are not labelled) $O(|{\rightarrow}|.|Q|)$ time and uses
$O(|Q|^2.\log|Q|)$ bits for all the $\remove$ sets. Note that in order to
achieve the announced time complexity the authors use a set of counters
which plays the same role as the one introduced by Paige and Tarjan
\cite{PT87} to lower the time complexity for the corresponding bisimulation
problem from $O(|{\rightarrow}|.|Q|)$ to $O(|{\rightarrow}|.\log|Q|)$. In
HHK, the set of counters enables us to lower the time complexity for
the simulation problem from $O(|{\rightarrow}|.|Q|^2)$ to
$O(|{\rightarrow}|.|Q|)$.
If one extends HHK to LTS, where transitions are
labelled, there is a necessity to maintain a $\remove$ set for each couple
state-letter $(q',a)$ because, now, $\remove_a(q')$ is included in the
complement of
$\pre_a(\mathscr{R}(q'))$ and $\pre$ needs to depend on the letters
labelling the transitions. Then, any natural extension of HHK to
LTS uses $O(|\Sigma|.|Q|^2.\log|Q|)$ bits for
all the $\remove$'s.
Let us come back to Kripke structures. The main difference between
HHK on the one hand and GPP and
RT on the other hand is that the last two do not encode the current relation
$\mathscr{R}$ by a binary matrix of size $|Q|^2$ but by a
partition-relation pair: a couple $(P,R)$
with $R$ a binary matrix of size $|P|^2$ and $P$ the partition of $Q$
issued from the equivalence relation $\mathscr{R}\cap\mathscr{R}^{-1}$.
The difficulty of the proofs and the abstract interpretation framework
put aside, RT is thus a direct
reformulation of HHK but with partition-relation pairs instead of
mere relations between states. Note that in order to have sound refinements
of the relation $R$, blocks of $P$ may first be split at each main
iteration of the algorithm. The algorithm
RT maintains for each block $B\in P$ a set $\remove(B)$ included
in the complement of $\pre(\mathscr{R}(B))$, the set of states which have
at least one outgoing transition leading to the set of states related to $B$
by $\mathscr{R}$. The algorithm runs in
$O(|P_{sim}|.|{\rightarrow}|)$ time and uses
$O(|P_{sim}|.|Q|.\log|Q|)$ bits for all the $\remove$'s, with $P_{sim}$ the
partition associated to the coarsest simulation relation included in the
initial preorder $\mathscr{R}_{init}$. In \cite{CRT11}, Crafa and the
authors of RT, reduced this space complexity to
$O(|P_{sim}|.|P_{sp}|.\log|P_{sp}|)$ with $P_{sp}$ a partition whose size
is between these of $P_{sim}$
and $P_{bis}$, the partition associated to the coarsest \underline{bi}simulation
included in $\mathscr{R}_{init}\cap\mathscr{R}^{-1}_{init}$. The goal,
which has not been achieved, was a
bit space complexity of $O(|P_{sim}|^2.\log|P_{sim}|)$.
The bit space complexity of \cite{CRT11} is achieved at the cost of an increase of the time
complexity comparable with $O(|P_{sim}|^2.|{\rightarrow}|)$. The algorithm
GPP uses a partition-relation pair
$(P,R)$ too. It also proceeds by iterative steps of one split of $P$ and one
refinement
of $R$. A split step is done in a more global way than in RT,
then a refinement step uses HHK on an abstract structure whose
states are blocks of $P$. A refinement step is thus done in
$O(|P_{sim}|.|{\rightarrow}|)$ time (remember, here states are blocks
of $P$). As they prove it, there are at most $|P_{sim}|$ refinement
steps. The entire algorithm is thus done in $O(|P_{sim}|^2.|{\rightarrow}|)$
time. Since states are blocks in this use of HHK, the encoding of
all the $\remove$'s uses $O(|P_{sim}|^2.\log|P_{sim}|)$ bits, which has not
been taken into account in the announced bit space complexity of GPP:
$O(|P_{sim}|^2 + |Q|.\log|P_{sim}|)$.
The paper \cite{ABH+08} provides an adaptation for LTS
of RT. It is also a very useful translation of
RT from the context of abstract interpretation to a more classical
algorithmic view on simulations. The algorithm of \cite{ABH+08} runs in
$O(|\Sigma|.|P_{sim}|.|Q|+|P_{sim}|.|{\rightarrow}|)$ time and uses
$O(|\Sigma|.|P_{sim}|.|Q|.\log|Q|)$ bit space.
\subsection{Our Contributions}
\label{sec:our-contributions}
We have mainly focused our attention on the $|\Sigma|$ factor which is
present in both
time and space complexities of the simulation algorithm in
\cite{ABH+08}. The major step was to realize that if the
$\remove_a(B)$ set associated with a block $B\in P$ need to depend on a
letter, a set of blocks not related to $R(B)=\{C\in P\,\big|\,
(B,C)\in R\}$ does not depend on any letter. Therefore, instead of
maintaining $\remove_a(B)$ we maintain $\notRel(B)$ a set of
blocks included in the complement of $R(B)$ and we compute $\remove_a(B)$
only when we need it. Therefore, we do not have to store it. The great by-product
of doing this is that, for each block, we now maintain a set of blocks,
encoded with $O(|P_{sim}|.\log|P_{sim}|)$ bits, and
not a set of states encoded with $O(|Q|.\log|Q|)$ bits. Thus, we also
achieve the main goal of \cite{CRT11}.
In the next two sections we state the preliminaries and clarify our views
regarding the underlying
theory. Then, we propose our base algorithm. In the section
which follows we derive this base algorithm in several versions. The first
one runs in $O(\min(|P_{sim}|,b).|P_{sim}|.|{\rightarrow}|)$ time, with $b$
a branching factor of the underlying LTS, defined in Section \ref{sec:compromise}, and uses
$O(|P_{sim}|^2.\log|P_{sim}|+|{\rightarrow}|.\log|{\rightarrow}|)$ bit
space.
By adding a set of counters, like in \cite{RT10,ABH+08}, we obtain a second
version of the algorithm that runs in $O(|P_{sim}|.|{\rightarrow}|)$ time and uses
$O(|P_{sim}|.|\SL(\rightarrow)|.\log|Q|+|{\rightarrow}|.\log|{\rightarrow}|)$
bit space, with (in common cases):
$|P_{sim}|\leq |Q|\leq |\SL(\rightarrow)|\leq|{\rightarrow}|\leq |\Sigma\times Q|$.
The additional space is used to store the counters and is the price to pay to
obtain the best time complexity. This version of the algorithm becomes the
best one, for LTS, regarding time efficiency.
We then explain why GPP does not have a bit space
complexity of $O(|P_{sim}|^2+|Q|.\log|P_{sim}|)$ but at least of
$O(|P_{sim}|^2.\log|P_{sim}|+|Q|.\log|Q|)$. Then, we propose the third
version of our base algorithm. It runs in
$O(|P_{sim}|^2.|{\rightarrow}|)$ time and
uses $O(|P_{sim}|^2+|{\rightarrow}|.\log|{\rightarrow}|)$ bits. It is the
best one regarding space complexity. Then, we detail the data structures that
we use. We end the paper by some perspectives including a future work on
bisimulation.
\section{Preliminaries}
\label{sec:preliminaries}
Let $Q$ be a set of elements. The number of elements of $Q$ is denoted $|Q|$.
A \emph{binary relation} on $Q$ is a subset
of $Q\times Q$.
In the remainder of this paper, we consider only binary
relations, therefore when we write ``relation'' read ``binary relation''. Let
$\mathscr{R}$ be a relation on $Q$.
For all $q,q'\in Q$, we may write $q\,\mathscr{R}\,q'$, or
$q \mathbin{\tikz[baseline] \draw[dashed,->] (0pt,.5ex) --
node[font=\footnotesize,fill=white,inner sep=2pt] {$\mathscr{R}$} (6ex,.5ex);}
q'$
in the figures, when $(q,q')\in\mathscr{R}$.
We define
$\mathscr{R}(q)\triangleq\{q'\in Q\,\big|\, q\,\mathscr{R}\,q'\}$ for $q\in Q$ and
$\mathscr{R}(X)\triangleq\cup_{q\in X}\mathscr{R}(q)$ for $X\subseteq
Q$. In the figures, we note
$X \mathbin{\tikz[baseline] \draw[dashed,->] (0pt,.7ex) --
node[font=\footnotesize,fill=white,inner sep=2pt] {$\mathscr{R}$} (6ex,.7ex);}
Y$ when there is $(q,q')\in X\times Y$ with $q\,\mathscr{R}\,q'$.
The \emph{domain} of $\mathscr{R}$ is
$\dom(\mathscr{R})\triangleq\{q\in Q\,\big|\,\mathscr{R}(q)\neq\emptyset\}$.
The complement of $\mathscr{R}$ is
$\overline{\mathscr{R}}\triangleq\{(x,y)\in Q\times Q\,\big|\, (x,y)\not\in
\mathscr{R}\}$. Let $\mathscr{S}$ be another relation on $Q$, the
composition of $\mathscr{R}$ by $\mathscr{S}$ is
$\mathscr{S}\mathrel{\circ}\mathscr{R}\triangleq\{(x,y)\in Q\times Q\,\big|\,
y\in\mathscr{S}(\mathscr{R}(x))\}$.
The relation $\mathscr{R}$ is said \emph{reflexive} if for all $q\in Q$, we
have $q\,\mathscr{R}\,q$.
The relation $\mathscr{R}$ is said \emph{reflexive on its domain}
if for all $q\in \dom(\mathscr{R})$, we have $q\,\mathscr{R}\,q$.
The relation $\mathscr{R}$ is said \emph{antisymmetric} if
$q\,\mathscr{R}\,q'$ and $q'\,\mathscr{R}\,q$ implies $q=q'$.
The relation $\mathscr{R}$ is said \emph{transitive} if
$\mathscr{R}\mathrel{\circ}\mathscr{R}\subseteq\mathscr{R}$.
A \emph{preorder} is a reflexive and transitive relation.
Let $X$ be a
set of subsets of $Q$, we note $\cup X\triangleq \cup_{B\in X}B$.
A \emph{partition} of $Q$ is a set of non empty subsets of $Q$, called
\emph{blocks}, that are pairwise disjoint and whose union gives $Q$. A
\emph{partition-relation pair} over $Q$ is a pair $(P,R)$ such that $P$ is
a partition of $Q$ and $R$ is a reflexive relation on $P$. A
partition-relation pair $(P,R)$ is said antisymmetric if its relation $R$ is
antisymmetric. From a partition-relation pair $(P,R)$ over $Q$ we derive a
relation $\mathscr{R}_{(P,R)}$ on $Q$ such that:
$\mathscr{R}_{(P,R)} = \cup_{(B,C)\in R}B\times C$.
\begin{definition}
\label{def:partionable}
Let $\mathscr{R}$ be a relation on a set $Q$ such that $\mathscr{R}$ is
reflexive on its domain.
\begin{itemize}
\item For $q\in Q$, we define
$[q]_\mathscr{R} \triangleq \{q'\in Q \,\big|\, q \,\mathscr{R}\, q'
\wedge q' \,\mathscr{R}\, q\}$,
for $X\subseteq Q$, we define
$[X]_\mathscr{R}\triangleq\cup_{q\in X}[q]_{\mathscr{R}}$.
\item A \emph{block} of $\mathscr{R}$ is a non empty set of states $B$
such that $B=[q]_\mathscr{R}$ for a $q\in Q$.
\item $\mathscr{R}$ is said \emph{block-definable}, or \emph{definable by
blocks} if:
$\forall q,q'\in Q\,.\,(q,q')\in\mathscr{R}\Rightarrow
[q]_\mathscr{R}\times[q']_\mathscr{R}\subseteq\mathscr{R}$.
\end{itemize}
\end{definition}
Let us remark that
a preorder is reflexive and definable by blocks.
The notion of definability by blocks will be useful since intermediate
relations of our algorithms will be block-definable, but not necessarily
preorders, even if we start from a preorder and finish with a preorder too.
\begin{remark}
Let $(P,R)$ be an antisymmetric partition-relation pair over a set $Q$. Then:
$P=\{[q]_{\mathscr{R}_{(P,R)}}\subseteq Q\,\big|\, q\in Q\}$ and
$R=\{([q]_{\mathscr{R}_{(P,R)}},[q']_{\mathscr{R}_{(P,R)}})\subseteq
P\times P\,\big|\,
q\,\mathscr{R}_{(P,R)}\,q'\}$.
\end{remark}
From a reflexive and block-definable relation $\mathscr{R}$ we derive an
antisymmetric partition-relation
pair $(P_{\mathscr{R}}, R_{\mathscr{R}})$ such that $P_{\mathscr{R}} =
\{[q]_\mathscr{R}\subseteq Q\,\big|\, q\in Q\}$ and
$R_{\mathscr{R}}=\{([q]_\mathscr{R},[q']_\mathscr{R})\subseteq
P\times P\,\big|\, q\,\mathscr{R}\,q'\}$.
\begin{remark}
Let $\mathscr{R}$ be a reflexive and block-definable relation on $Q$. Then
$\mathscr{R} = \bigcup_{(B,C)\in R_{\mathscr{R}}} B\times C$.
\end{remark}
The two preceding remarks imply a duality between reflexive and block-definable
relations, and antisymmetric partition-relation pairs. However, the notion
of block-definable relation is somehow more general in the sense that we
require its reflexivity on its domain, not necessarily on the whole state
space $Q$.
Let $T=(Q,\Sigma,\rightarrow)$ be a triple
such that $Q$ is a finite set of elements called \emph{states}, $\Sigma$ is
an \emph{alphabet}, a finite set of elements called \emph{letters} or
\emph{labels}, and
$\rightarrow\subseteq Q\times\Sigma\times Q$ is a \emph{transition
relation} or \emph{set of transitions}. Then, $T$ is called a
\emph{Labelled Transition System (LTS)}. From
$T$, given a letter $a\in\Sigma$, we define the two following relations:
$\xrightarrow{a}\mathbin{\triangleq}\{(q,q')\,\big|\, (q,a,q')\in
\rightarrow\}$ and its reverse
$\pre_{\xrightarrow{a}}\triangleq
\{(q',q)\,\big|\, (q,a,q')\in\rightarrow\}$. When $\rightarrow$ is clear from
the context, we simply note $\pre_a$ instead of
$\pre_{\xrightarrow{a}}$. For $X,Y\subseteq Q$, we note $X\xrightarrow{a}Y$
to express that $X\cap\pre_a(Y)\neq\emptyset$. By abuse of notation, we
also note $q\xrightarrow{a}Y$ for $\{q\}\xrightarrow{a}Y$.
In the complexity analysis of the
algorithms proposed in this paper, a new notion has emerged, that of
\emph{state-letter}. From $T$ we define the set of state-letters
$\SL(\rightarrow)\triangleq\{(q,a)\in Q\times\Sigma\,\big|\,
\exists q'\in Q\;.\;(q,a,q')\in\rightarrow\}$. For $(q,a)\in \SL(\rightarrow)$,
we simply note $q_a$ instead of $(q,a)$.
If $T$ is ``normalized'' (see first paragraph of
Section~\ref{sec:complexity}) we have:
\begin{equation}
\label{eq:StateLetterSmaller}
|Q|\leq |\SL(\rightarrow)|\leq|{\rightarrow}|\leq |\Sigma\times Q|
\end{equation}
It is therefore more interesting to use $\SL(\rightarrow)$ instead of $\Sigma\times
Q$.
The following definition of a simulation happens to be more effective
than the classical one given in the introduction.
\noindent
\begin{minipage}[t]{0.8\linewidth}
\begin{definition}
\label{def:sim}
Let $T=(Q,\Sigma,\rightarrow)$ be a LTS and $\mathscr{S}$ be a relation
on $Q$. The relation $\mathscr{S}$ is a \emph{simulation} over $T$ if:
$ \forall
a\in\Sigma\;.\;\mathscr{S}\mathrel{\circ}\pre_a\subseteq\pre_a\mathrel{\circ}\mathscr{S}$.
For two states $q,q'\in Q$, we say ``$q'$ simulates $q$'' if there is a
simulation $\mathscr{S}$ over $Q$ such that $q\,\mathscr{S}\,q'$.
\end{definition}
\end{minipage}
\begin{minipage}[t]{0.15\linewidth}
\begin{tikzpicture}[baseline=(q3.south),shorten >=2pt,shorten
<=2pt,font=\footnotesize]
\path coordinate (q1) [fill] circle (1pt) (-2,0) coordinate (q2)
[fill] circle (1pt) (-2,1.5) coordinate (q3) [fill] circle (1pt)
(0,1.5) coordinate (q4) [fill] circle (1pt)
(q3)
+(-45:.8) coordinate (q6) [fill] circle (1pt)
;
\path[every edge/.style={->,dashed,draw},circle,inner sep=1pt, every node/.style={fill=white}]
(q2) edge node (S) {$\mathscr{S}$} (q3) (q1) edge node
{$\mathscr{S}$} (q4);
\path[->,auto,circle,inner sep=1pt,thick] (q2) edge node {$a$} (q1)
(q3)edge[shorten >=6pt] node[near start] (base) {$a$} (q4)
(q6)edge[shorten >=3pt] node[near start] {$a$} (q4) ;
\end{tikzpicture}
\end{minipage}
\section{Underlying Theory}
\label{sec:underlyingTh}
The first consequence of the definition of a simulation over a LTS
$T=(Q,\Sigma, \rightarrow)$ is that
states which have an outgoing transition labelled by a letter $a$ can be
simulated only by states which have at least one outgoing transition
labelled by $a$. The next definition and lemma establish that we can restrict
our problem of finding the coarsest simulation inside a preorder to the
search of the coarsest simulation inside a preorder $\mathscr{R}$ that
satisfies:
\begin{equation}
\label{eq:InitRefineRestriction}
\forall a\in\Sigma\;.\;\mathscr{R}(\pre_a(Q))\subseteq\pre_a(Q).
\end{equation}
\begin{definition}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS and
$\mathscr{R}$ be a preorder on $Q$. We
define $\initRefine(\mathscr{R})\subseteq \mathscr{R}$ such that:
\begin{displaymath}
(q,q')\in\initRefine(\mathscr{R}) \Leftrightarrow
(q,q')\in\mathscr{R}\,\wedge\,
\forall a\in\Sigma\;(q\in\pre_a(Q) \Rightarrow q'\in\pre_a(Q)).
\end{displaymath}
\end{definition}
\begin{lemma}
\label{lem:InitRefine}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS and
$\mathscr{U} = \initRefine(\mathscr{R})$ with
$\mathscr{R}$ a preorder on $Q$. Then:
\begin{enumerate}
\item $\mathscr{U}$ is a preorder,
\item for all simulation $\mathscr{S}$ over $T$:
$ \mathscr{S}\, \subseteq \mathscr{R}
\Rightarrow \mathscr{S}\, \subseteq \mathscr{U}$,
\item \label{lem:InitRefine:3}
$\forall X\subseteq Q\,\forall a\in\Sigma\;.\;\mathscr{U}(\pre_a(X))
\subseteq \pre_a(Q)$.
\end{enumerate}
\end{lemma}
\begin{proof}\mbox{}
\begin{enumerate}
\item Since $\mathscr{R}$ is a preorder and thus reflexive, $\mathscr{U}$
is also trivially
reflexive. Now, let us suppose $\mathscr{U}$ is not transitive. There are
three states $q_1,q_2,q_3\in Q$ such
that: $q_1\,\mathscr{U}\,q_2\wedge q_2\,\mathscr{U}\,q_3 \wedge \neg\;
q_1\,\mathscr{U}\,q_3$. From the fact that
$\mathscr{U}\subseteq\mathscr{R}$ and $\mathscr{R}$ is a preorder, we
get $q_1\,\mathscr{R}\,q_3$. With $\neg\; q_1\,\mathscr{U}\,q_3$ and the
definition of $\mathscr{U}$ there is $a\in\Sigma$ such that:
$q_1\in\pre_a(Q)$ and $q_3\not\in\pre_a(Q)$. But $q_1\in\pre_a(Q)$ and
$q_1\,\mathscr{U}\,q_2$ implies $q_2\in\pre_a(Q)$. With
$q_2\,\mathscr{U}\,q_3$ we also get $q_3\in\pre_a(Q)$ which contradicts
$q_3\not\in\pre_a(Q)$.
\item If this is not true there are two states $q_1,q_2\in Q$ such
that: $q_1\,\mathscr{S}\,q_2 \wedge \neg\;q_1\,\mathscr{U}\,q_2$. From
$\mathscr{U}\subseteq\mathscr{R}$ we get $q_1\,\mathscr{R}\,q_2 $. With
$\neg\; q_1\,\mathscr{U}\,q_2$ and the definition of $\mathscr{U}$ there
is $a\in\Sigma$ such that $q_2\not\in\pre_a(Q)$ and
$q_1\in\pre_a(Q)$. With $q_1\,\mathscr{S}\,q_2$ we get $q_2\in
\mathscr{S}\mathrel{\circ} \pre_a (Q)$. With the hypothesis that $\mathscr{S}$ is a
simulation, we get $q_2\in \pre_a\mathrel{\circ} \mathscr{S}(Q)$ and thus
$q_2\in \pre_a(Q)$, since $\mathscr{S}(Q)\subseteq Q$, which contradicts
$q_2\not\in\pre_a(Q)$.
\item This is a direct consequence of the definition of $\mathscr{U}$.
\end{enumerate}
\end{proof}
The main idea to obtain efficient algorithms is to consider relations
between blocks of states and not merely relations between
states. Therefore, we need a characterization of the notion of simulation
expressed over blocks.
\begin{proposition}
\label{prop:blockSim}
Let $T=(Q,\Sigma,\rightarrow)$ be a LTS and
$\mathscr{S}$ be a reflexive and block-definable relation on $Q$. The relation
$\mathscr{S}$ is a simulation over $T$ if and only if:
\begin{displaymath}
\label{eq:I}
\forall a\in\Sigma\; \forall q\in Q\;.\;
\mathscr{S}\mathrel{\circ}\pre_a([q]_{\mathscr{S}})\subseteq \pre_a\mathrel{\circ}\mathscr{S}([q]_{\mathscr{S}}).
\end{displaymath}
\end{proposition}
\begin{proof}
If $\mathscr{S}$ is a simulation then, by definition, we have for any
$X\subseteq Q$: $\forall
a\in\Sigma\;.\;\mathscr{S}\mathrel{\circ}\pre_a(X)\subseteq\pre_a\mathrel{\circ}\mathscr{S}(X)$. This
inclusion is thus also true for $X=[q]_{\mathscr{S}}$. In the other
direction, if $\mathscr{S}$ is reflexive and block-definable then for any
$q\in Q$ we get:
$q\in [q]_{\mathscr{S}}$ and
$\mathscr{S}(q)=\mathscr{S}([q]_{\mathscr{S}})$. We thus have:
\begin{displaymath}
\mathscr{S}\mathrel{\circ}\pre_a(q)\subseteq
\mathscr{S}\mathrel{\circ}\pre_a([q]_{\mathscr{S}})\subseteq
\pre_a\mathrel{\circ}\mathscr{S}([q]_{\mathscr{S}})=
\pre_a\mathrel{\circ}\mathscr{S}(q)
\end{displaymath}
which ends the proof.
\end{proof}
Now, suppose we have a reflexive and block-definable relation
$\mathscr{R}$ and we want to remove from $\mathscr{R}$ all couples $(q,r)$ not
belonging to any simulation included in $\mathscr{R}$. If $\mathscr{R}$ is
not already a simulation, from the last proposition, there are a letter $a$
and a block $B$ of $\mathscr{R}$ such that
$\mathscr{R}\mathrel{\circ}\pre_a(B)\not\subseteq
\pre_a\mathrel{\circ}\mathscr{R}(B)$. But we can assume that $\mathscr{R}$
satisfies \eqref{eq:InitRefineRestriction}. With
$Q=\mathscr{R}(B)\cup\overline{\mathscr{R}}(B)$ we get:
$\mathscr{R}\mathrel{\circ}\pre_a(B)\subseteq
\pre_a(\mathscr{R}(B)\cup\overline{\mathscr{R}}(B))$. This implies the
existence of a non empty set
$\remove\triangleq\pre_a(\overline{\mathscr{R}}(B))\setminus\pre_a(\mathscr{R}(B))$. Let
$r\in\remove$ and $q\in\pre_a(B)$. If $(q,r)\in\mathscr{R}$ we can safely remove
$(q,r)$ from $\mathscr{R}$. Why? Because, if we had $(q,r)\in\mathscr{S}$
with $\mathscr{S}\subseteq\mathscr{R}$ a simulation, with $q\in\pre_a(B)$,
there would be $q'\in B$ such that $q\in\pre_a(q')$ and thus
$r\in\mathscr{S}\mathrel{\circ}\pre_a(q')$. But $\mathscr{S}$ being a simulation, this
implies $r\in\pre_a\mathrel{\circ}\mathscr{S}(q')$ and thus
$r\in\pre_a\mathrel{\circ}\mathscr{R}(B)$ since $\mathscr{S}\subseteq\mathscr{R}$ and
$q'\in B$. This contradicts $r\in\remove$.
To sum up, we can safely remove $(q,r)$ from $\mathscr{R}$. But can we
safely remove $C\times D$ from $\mathscr{R}$ with $C$ the block of
$\mathscr{R}$ containing $q$ and $D$ the block of $\mathscr{R}$ containing
$r$? In general, the answer is no. However, the remainder of this section
gives, and justifies, sufficient conditions to do so.
We begin by the key definition of the paper.
\begin{definition}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS and
$\mathscr{R}$ be a reflexive and block-definable relation on $Q$. A
\emph{refiner} of $\mathscr{R}$ is a triple
$(B,\mathscr{R}_1,\mathscr{R}_2)$ with $\mathscr{R}_1$ and
$\mathscr{R}_2$ two relations on $Q$ such that $\mathscr{R}_1$ is
block-definable, $B$ is a block of $\mathscr{R}_1$,
$\mathscr{R}(B)\subseteq \mathscr{R}_1(B)$
and
\begin{displaymath}
\forall a\in\Sigma\;.\;
[\pre_a(\mathscr{R}_1(B)\cup\mathscr{R}_2(B))]_{ \mathscr{R}}
\cup \mathscr{R}(\pre_a(B))
\subseteq
\pre_a(\mathscr{R}_1(B)\cup \mathscr{R}_2(B))
\end{displaymath}
\end{definition}
Let us fix the intuition for the reader. For the above discussion, we take
$\mathscr{R}_1=\mathscr{R}$ and $\mathscr{R}_2=\overline{\mathscr{R}}$
which allows us to satisfy (under the assumption \eqref{eq:InitRefineRestriction}) all
the conditions of the definition of a refiner. However, if we use
$\mathscr{R}_1$ and $\mathscr{R}_2$ like that, we will obtain algorithms whose
time complexity is $O(|P_{sim}|^2.|{\rightarrow}|)$ for
Kripke structures. To obtain algorithms
in $O(|P_{sim}|.|{\rightarrow}|)$ time, still for Kripke structures, we have to consider in
$\overline{\mathscr{R}}(B)$ only what is needed and thus to keep
$\mathscr{R}_2$ smaller than $\overline{\mathscr{R}}$. The presence of the
relation $\mathscr{R}_1$ is due to the management of the different letters
of the alphabet for LTS.
Note first that, since the last condition in the definition of a refiner is
required for all the letters of the alphabet, the definition is
independent of any particular letter. During a main iteration of the
algorithm, we consider a relevant refiner. At this stage
$\mathscr{R}_1=\mathscr{R}$. Gradually, as we consider the letters involved
in the transitions leading to $\mathscr{R}_2(B)$, $\mathscr{R}$ is refined
and thus $\mathscr{R}(B)$ stays included in $\mathscr{R}_1(B)$ but may
become smaller than $\mathscr{R}_1(B)$.
The first inclusion of the last condition of a refiner,
$[\pre_a(\mathscr{R}_1(B)\cup\mathscr{R}_2(B))]_{ \mathscr{R}}
\subseteq\pre_a(\mathscr{R}_1(B)\cup \mathscr{R}_2(B))$, authorizes to split
the blocks of $P$ either with $\pre_a(\mathscr{R}_1(B))$ or with
$\remove_{a,\mathit{ref}}=\pre_a(\mathscr{R}_2(B))\setminus
\pre_a(\mathscr{R}_1(B))$ like in the next definition. The former induces
algorithms that run in
$O(|P_{sim}|^2.|{\rightarrow}|)$ time. The latter authorizes a run in
$O(|P_{sim}|.|{\rightarrow}|)$ time. Let $\mathscr{R}'$ be the relation
issued from the split. The second inclusion of the last condition of a
refiner,
$\mathscr{R}(\pre_a(B)) \subseteq
\pre_a(\mathscr{R}_1(B)\cup \mathscr{R}_2(B))$,
enables to soundly refine $\mathscr{R}'$ by
$[\pre_a(B)]_{\mathscr{R}'}\times[\remove_{a,\mathit{ref}}]_{\mathscr{R}'}$.
The remainder of the section formalizes this approach.
\begin{definition}
\label{def:refine}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS,
$\mathscr{R}$ be a reflexive and block-definable
relation on $Q$, $\mathit{ref}=(B,\mathscr{R}_1,\mathscr{R}_2)$ be a refiner of
$\mathscr{R}$ and $a\in\Sigma$ be a letter. We define:
\begin{align*}
\remove_{a,\mathit{ref}} &\triangleq
\pre_a(\mathscr{R}_2(B))\setminus \pre_a(\mathscr{R}_1(B))\\[2ex]
\splitDelete_{a,\mathit{ref}}(\mathscr{R}) &\triangleq
\bigcup_{q\in\remove_{a,\mathit{ref}}}
\begin{array}[t]{l}
([q]_{\mathscr{R}}
\setminus\remove_{a,\mathit{ref}})
\times \\[0cm]
([q]_{\mathscr{R}} \cap \remove_{a,\mathit{ref}})
\end{array}\\[2ex]
\splitRefine_{a,\mathit{ref}}(\mathscr{R}) &\triangleq
\mathscr{R} \setminus \splitDelete_{a,\mathit{ref}}(\mathscr{R}) \\[2ex]
\delete_{a,\mathit{ref}}(\mathscr{R}) &\triangleq
\bigcup_{\substack{ q\in \remove_{a,\mathit{ref}}\\q'\in\pre_a(B)}}
\begin{array}[t]{l}
[q']_{\splitRefine_{a,\mathit{ref}}(\mathscr{R})} \times \\[0cm]
[q]_{\splitRefine_{a,\mathit{ref}}(\mathscr{R})}
\end{array}\\[2ex]
\refine_{a,\mathit{ref}}(\mathscr{R}) &\triangleq
\splitRefine_{a,\mathit{ref}}(\mathscr{R}) \setminus
\delete_{a,\mathit{ref}}(\mathscr{R})
\end{align*}
\end{definition}
We should now prove that if a simulation $\mathscr{S}$ is included in
$\mathscr{R}$ it is still included in
$\refine_{a,\mathit{ref}}(\mathscr{R})$. Unfortunately, we do not know how to do
that. However, if, instead of simply asking $\mathscr{S}$ to be included in
$\mathscr{R}$, we ask $\mathscr{R}$ to be \emph{$\mathscr{S}$-stable} (see
next definition),
everything works nicely.
\begin{definition}
\label{def:simStable}
Let $\mathscr{R}$ and $\mathscr{S}$ be two relations on $Q$. The
relation $\mathscr{R}$ is said \emph{$\mathscr{S}$-stable} if
$ \mathscr{S}\mathrel{\circ}\mathscr{R}\subseteq \mathscr{R}$.
\end{definition}
Obviously, if a reflexive relation $\mathscr{R}$ is $\mathscr{S}$-stable
then $\mathscr{S}$ is included in $\mathscr{R}$. Intuitively, the
$\mathscr{S}$-stability of $\mathscr{R}$ is required by the fact that
$\mathscr{R}$ is no longer supposed to be a preorder but since it contains
a transitive simulation (as we will see, the coarsest simulation included
in a preorder is a preorder and thus transitive) it should be transitive
``with'' that simulation.
\begin{theorem}
\label{th:Refine}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS, $\mathscr{R}$ be a reflexive and
block-definable relation on $Q$, $\mathscr{S}$ be a simulation
over $T$, $a\in\Sigma$ be a letter and
$\mathit{ref}=(B,\mathscr{R}_1,\mathscr{R}_2)$ be a
refiner of
$\mathscr{R}$ such that $\mathscr{R}$ and $\mathscr{R}_1$ are
$\mathscr{S}$-stable. Let
$\mathscr{U}=\refine_{a,\mathit{ref}}(\mathscr{R})$. Then, $\mathscr{U}$ is a
reflexive, block-definable and $\mathscr{S}$-stable relation. Furthermore,
we have:
$
[\pre_a(\mathscr{R}_1(B))]_{ \mathscr{U}}
\cup \mathscr{U}(\pre_a(B))
\subseteq
\pre_a(\mathscr{R}_1(B))$.
\end{theorem}
For the proof, we first need a lemma.
\begin{lemma}
\label{lem:ImpSstable}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS and, $\mathscr{R}$ and
$\mathscr{S}$ be two relations on $Q$ such that $\mathscr{S}$ is
a simulation over $T$ and $\mathscr{R}$ is
$\mathscr{S}$-stable. Then:
\begin{displaymath}
\mathscr{S}\mathrel{\circ}\pre_a\mathrel{\circ}\mathscr{R}\subseteq
\pre_a\mathrel{\circ}\mathscr{R}
\end{displaymath}
In other words, $\pre_a\mathrel{\circ}\mathscr{R}$ is $\mathscr{S}$-stable.
\end{lemma}
\begin{proof}
Since $\mathscr{S}$ is a simulation, we have
$\mathscr{S}\mathrel{\circ}\pre_a\subseteq\pre_a\mathrel{\circ}\mathscr{S}$ and thus: 1)
$\mathscr{S}\mathrel{\circ}\pre_a\mathrel{\circ}\mathscr{R}\subseteq\pre_a\mathrel{\circ}\mathscr{S}\mathrel{\circ}\mathscr{R}$. With
the hypothesis that $\mathscr{R}$ is $\mathscr{S}$-stable, we get: 2)
$\pre_a\mathrel{\circ}\mathscr{S}\mathrel{\circ}\mathscr{R}\subseteq\pre_a\mathrel{\circ}\mathscr{R}$. Inclusions
1) and 2) put together imply the claimed property.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{th:Refine}.]
The fact that $\mathscr{U}$ is reflexive and block-definable is an easy
consequence of its definition: from a reflexive and block-definable
relation, $\mathscr{R}$, we split some blocks, then we delete some
relations between \underline{different} blocks (
after $\splitRefine$,
we
have: $[\pre_a(B)]_{\splitRefine_{a,\mathit{ref}}(\mathscr{R})}\cap
[\remove_{a,\mathit{ref}}]_{\splitRefine_{a,\mathit{ref}}(\mathscr{R})}=\emptyset$).
For the $\mathscr{S}$-stability of $\mathscr{U}$, let us first remark
another direct consequence of the definitions of $\remove_{a,\mathit{ref}}$ and
$\splitRefine_{a,\mathit{ref}}$:
\begin{equation}
\label{eq:remove-st-class}
q \in \remove_{a,\mathit{ref}} \Rightarrow
[q]_{\splitRefine_{a,\mathit{ref}}(\mathscr{R})} \subseteq
\remove_{a,\mathit{ref}}
\end{equation}
If $\mathscr{U}$ is not $\mathscr{S}$-stable, there are three states
$q_1, q_2, q_3 \in Q$ such that $q_1\,\mathscr{U}\,q_2 \wedge q_2
\,\mathscr{S}\, q_3 \wedge \neg\; q_1\,\mathscr{U}\,q_3$. We need the
following property:
\begin{equation}
\label{eq:AppImpSstable}
q_3 \in \remove_{a,\mathit{ref}} \Rightarrow
q_2\not\in\pre_a(\mathscr{R}_1(B))
\end{equation}
Suppose $q_3\in\remove_{a,\mathit{ref}}$ and
$q_2\in\pre_a(\mathscr{R}_1(B))$. Since $B$ is a block of $\mathscr{R}_1$
and $\mathscr{R}_1$ is block-definable, for any $q\in B$ we have
$q_3\in\mathscr{S}\mathrel{\circ}\pre_a\mathrel{\circ}\mathscr{R}_1(q)$. With the hypothesis
that $\mathscr{R}_1$ is $\mathscr{S}$-stable, Lemma~\ref{lem:ImpSstable}
implies $q_3\in \pre_a\mathrel{\circ}\mathscr{R}_1(q)$ which contradicts $q_3 \in
\remove_{a,\mathit{ref}}$.
Now, by construction, $\mathscr{U}$ is included in $\mathscr{R}$ and, by
hypothesis, $\mathscr{R}$ is $\mathscr{S}$-stable, then from
$q_1\,\mathscr{U}\,q_2 \wedge q_2 \,\mathscr{S}\, q_3$, and thus
$q_1\,\mathscr{R}\,q_2 \wedge q_2 \,\mathscr{S}\, q_3$, we get:
$q_1\,\mathscr{R}\,q_3$. With $ \neg\; q_1\,\mathscr{U}\,q_3$, we
necessarily have $(q_1,q_3)\in \splitDelete_{a,\mathit{ref}}(\mathscr{R})$ or
$(q_1,q_3)\in \delete_{a,\mathit{ref}}(\mathscr{R})$. Let us consider the two
cases (also depicted in Figure~\ref{fig:splitRefinement}):
\begin{figure}
\caption{The two cases in the proof of the $\mathscr{S}$-stability of $\mathscr{U}$.}
\label{fig:splitRefinement}
\end{figure}
\begin{itemize}
\item $(q_1,q_3)\in \splitDelete_{a,\mathit{ref}}(\mathscr{R})$. This implies the
existence of $q\in \remove_{a,\mathit{ref}}$ such that $q_1,q_3\in
[q]_{\mathscr{R}}$, $q_1\not\in \remove_{a,\mathit{ref}}$ and $q_3\in
\remove_{a,\mathit{ref}}$. Since $\mathscr{R}$ is reflexive and
$\mathscr{S}$-stable, we have $\mathscr{S}\, \subseteq \mathscr{R}$. With
the fact that, by construction, $\mathscr{U}\, \subseteq \mathscr{R}$,
from $q_1\,\mathscr{U}\,q_2 \wedge q_2 \,\mathscr{S}\, q_3$ we get
$q_1\,\mathscr{R}\,q_2 \wedge q_2 \,\mathscr{R}\, q_3$. With $q_1,q_3\in
[q]_{\mathscr{R}}$ and the fact that $\mathscr{R}$ is block-definable we
get: $q_2\in [q]_{\mathscr{R}}$. With $q\in \pre_a(\mathscr{R}_2(B))$ and
the fact that $(B,\mathscr{R}_1,\mathscr{R}_2)$ is a refiner of
$\mathscr{R}$ we have, by definition: $q_2\in \pre_a(\mathscr{R}_1(B)\cup
\mathscr{R}_2(B))$. With \eqref{eq:AppImpSstable} we necessarily have
$q_2\in \pre_a(\mathscr{R}_2(B))\setminus\pre_a(\mathscr{R}_1(B)) =
\remove_{a,\mathit{ref}} $. With $q_1\not\in \remove_{a,\mathit{ref}}$ and $q_1,q_2\in
[q]_{\mathscr{R}}$ this would imply
$(q_1,q_2)\in\splitDelete_{a,\mathit{ref}}(\mathscr{R})$ and would contradict the
fact that $q_1\,\mathscr{U}\,q_2$.
\item $(q_1,q_3)\in \delete_{a,\mathit{ref}}(\mathscr{R})$. This implies the
existence of $q'_1\in \pre_a(B)$ and $q'_3\in \remove_{a,\mathit{ref}}$ such that
$q_1\in [q'_1]_{\splitRefine_{a,\mathit{ref}}(\mathscr{R})}$ and $q_3\in
[q'_3]_{\splitRefine_{a,\mathit{ref}}(\mathscr{R})}$. From
\eqref{eq:remove-st-class} we get $q_3\in \remove_{a,\mathit{ref}}$. From
$q_1\,\mathscr{R}\,q_2$, the fact that $\mathscr{R}$ is block-definable and
$q_1\in [q'_1]_{\splitRefine_{a,\mathit{ref}}(\mathscr{R})}$, thus $q'_1\in
[q_1]_{\mathscr{R}}$ since $\splitRefine_{a,\mathit{ref}}(\mathscr{R}) \subseteq
\mathscr{R}$, we get $q'_1\,\mathscr{R}\,q_2$. With $q'_1\in \pre_a(B)$
we have $q_2\in\mathscr{R}(\pre_a(B))$. With the fact that
$(B,\mathscr{R}_1,\mathscr{R}_2)$ is a refiner of $\mathscr{R}$ we
have, by definition, $q_2\in \pre_a(\mathscr{R}_1(B)\cup
\mathscr{R}_2(B))$. With \eqref{eq:AppImpSstable} we necessarily have
$q_2\in\remove_{a,\mathit{ref}}$. With $q'_1\in \pre_a(B)$ and $q_1\in
[q'_1]_{\splitRefine_{a,\mathit{ref}}(\mathscr{R})}$ this implies $(q_1,q_2)\in
\delete_{a,\mathit{ref}}(\mathscr{R})$ and contradicts the fact that
$q_1\,\mathscr{U}\,q_2$.
\end{itemize}
Both cases lead to a contradiction. The relation $\mathscr{U}$ is thus
$\mathscr{S}$-stable.
Let us now prove the last property:
\begin{itemize}
\item
$[\pre_a(\mathscr{R}_1(B))]_{\mathscr{U}}\subseteq\pre_a(\mathscr{R}_1(B))$. Let
$q\in[\pre_a(\mathscr{R}_1(B))]_{\mathscr{U}}$. There is
$q'\in\pre_a(\mathscr{R}_1(B))$ such that $q\in[q']_{\mathscr{U}}$. Since
$\mathscr{U}\subseteq\mathscr{R}$ and $(B,\mathscr{R}_1,\mathscr{R}_2)$
is a refiner of $\mathscr{R}$ then $q\in\pre_a(\mathscr{R}_1(B)\cup
\mathscr{R}_2(B))$. If $q\not\in\pre_a(\mathscr{R}_1(B))$ then $q\in
\remove_{a,\mathit{ref}}$, which implies
$(q',q)\in\splitDelete_{a,\mathit{ref}}(\mathscr{R})$ and contradicts
$q'\,\mathscr{U}\,q$.
\item $\mathscr{U}(\pre_a(B))\subseteq\pre_a(\mathscr{R}_1(B))$. Let
$q\in\mathscr{U}(\pre_a(B))$. There is $q'\in\pre_a(B)$ such that
$q'\,\mathscr{U}\,q$. Since $\mathscr{U}\subseteq\mathscr{R}$ and
$(B,\mathscr{R}_1,\mathscr{R}_2)$ is a refiner of $\mathscr{R}$
then $q\in\pre_a(\mathscr{R}_1(B)\cup \mathscr{R}_2(B))$. If
$q\not\in\pre_a(\mathscr{R}_1(B))$ then $q\in \remove_{a,\mathit{ref}}$, which
implies $(q',q)\in\delete_{a,\mathit{ref}}(\mathscr{R})$ and contradicts
$q'\,\mathscr{U}\,q$.
\end{itemize}
\end{proof}
\section{Base Algorithm}
\begin{function}[H]
\caption{Split($Remove,P$)\label{func:split}}
$SplitCouples := \emptyset$; $Touched := \emptyset$; $BlocksInRemove := \emptyset$\;
\ForAll{$r\in Remove$} {
$Touched := Touched \cup \{r.\Block\}$\;
}
\ForAll{$C\in Touched $} {
\If {$C\subseteq Remove$}
{$BlocksInRemove := BlocksInRemove\cup\{C\}$\;}
\Else(\ //$C$ must be split)
{
$D := C\cap Remove$; $P := P \cup \{D\}$\;
$BlocksInRemove = BlocksInRemove\cup\{D\}$\;
$C := C\setminus Remove$\;
{\ // Only $C$ is modified, not $C.\Rel$ or $C.\NotRel$}\nllabel{split:l11}\;
$D.\Rel := \Copy(C.\Rel)$\nllabel{split:l12}\;
$D.\NotRel := \Copy(C.\NotRel)$\nllabel{split:l13}\;
\lForAll{$q\in D$} {q.\Block \,:= D}\;
$SplitCouples := SplitCouples \cup \{(C,D)\}$\;
}
}
\ForAll{$(C,D)\in SplitCouples,\,E\in P$\nllabel{split:l16}}
{
\If{$C\in E.\Rel$} {$E.\Rel := E.\Rel \cup
\{D\}$\nllabel{split:l18}\; }
}
\Return{$(P$, $BlocksInRemove$, $SplitCouples)$}
\end{function}
\begin{function}[H]
\caption{Init($T,P_{init},R_{init}$) with $T={(Q,\Sigma,\rightarrow)}$\label{func:init}}
\BlankLine
$P := \Copy(P_{init})$; $S := \emptyset$\nllabel{init:l1}\;
\lForAll{$a\in\Sigma $\nllabel{init:l2}}
{$a.\Remove :=\emptyset$;}
\ForAll{$B\in P$\nllabel{init:l3}}
{
$B.\Rel := \{C \in P \,\big|\, (B,C) \in R_{init}\}$\;
}
\ForAll{$q\xrightarrow{a}q'\in\rightarrow$\nllabel{init:l5}}
{
$a.\Remove := a.\Remove \cup \{q\}$\nllabel{init:l6}\;
}
\ForAll{$a\in \Sigma$\nllabel{init:l7}}
{
$(P,BlocksInRemove,\_) :=\Split(a.\Remove,P)$\nllabel{init:l8}\;
\ForAll{$C\in BlocksInRemove,\, D\in P$\nllabel{init:l9}}
{
\If{$D\not\in BlocksInRemove$}
{
$C.\Rel := C.\Rel \setminus \{D\}$\nllabel{init:l11}\;
}
}
}
\ForAll{$C\in P$ \nllabel{init:l12}}
{
$C.\NotRel :=\cup \{D\in P\,\big|\, D \not\in C.\Rel\}$\;
\lIf{$C.\NotRel \neq \emptyset$}{$S:=S\cup\{C\}$\nllabel{init:l14};}
}
\Return{$(P$, $S)$}
\end{function}
\begin{function}
\caption{Sim($T, P_{init}, R_{init}$) with $T={(Q,\Sigma,\rightarrow)}$}
\label{func:sim}
$ (P, S) := \Init(T,P_{init},R_{init})$\;
\lForAll{$a\in \Sigma $}
{\{$a.\PreB := \emptyset ; a.\Remove :=\emptyset$\nllabel{sim:l2}\}}\;
$alph := \emptyset$\;
\While{$\exists B \in S$\nllabel{sim:l4}} {
$S := S\setminus \{B\}$\nllabel{sim:l5}\;
{ // Assert : $alph = \emptyset\wedge(\forall a\in\Sigma\,.\,a.\PreB = \emptyset\wedge
a.\Remove =\emptyset)$\nllabel{sim:l6}}
\ForAll{$r\xrightarrow{a}(B.\NotRel)
\wedge r \not\in \pre_a(\cup B.\Rel)$\nllabel{sim:l7}}
{
$alph := alph \cup \{a\}$\nllabel{sim:l8}\;
$a.\Remove := a.\Remove \cup \{r\}$\nllabel{sim:l9}\;
}
$B.\NotRel :=\emptyset$\nllabel{sim:l10}\;
\ForAll{$q\xrightarrow{a}B\wedge a\in alph$\nllabel{sim:l11}}
{$a.\PreB := a.\PreB \cup \{q\}$\nllabel{sim:l12}\;}
\ForAll{$a\in alph$ \nllabel{sim:l13}}
{
$(P, BlocksInRemove, SplitCouples) :=
\Split(a.\Remove,P)$\nllabel{sim:l14}\;
\ForAll{$(C,D)\in SplitCouples$\nllabel{sim:l15}}
{ $C.\Rel := C.\Rel
\setminus \{D\}$ ;
$C.\NotRel := C.\NotRel \cup D$\nllabel{sim:l16}\;
$S := S \cup \{C\}$ \nllabel{sim:l17}\;}
\ForAll{
$\begin{array}[c]{ll}
D\in BlocksInRemove, \\
C\in \{q.\Block\in P\,\big|\, q\in a.\PreB\}
\end{array}$\nllabel{sim:l18}
}
{
\If{$D\in C.\Rel$}
{
$C.\Rel := C.\Rel \setminus \{D\}$; $
C.\NotRel := C.\NotRel \cup D$\nllabel{sim:l20}\;
$S := S \cup \{C\}$\nllabel{sim:l21}\;}
}
}
\lForAll{$a\in alph $\nllabel{sim:l22}}{\{$a.\PreB := \emptyset ; a.\Remove
:=\emptyset$\}}\;
$alph := \emptyset$\nllabel{sim:l23}\;
}
$P_{sim} := P$; $R_{sim}:= \{(B,C)\in P\times P\,\big|\, C\in
B.\Rel\}$\;
\Return{$(P_{sim}$, $R_{sim})$}
\end{function}
Given a LTS $T=(Q,\Sigma, \rightarrow)$ and an initial antisymmetric
partition-relation pair $(P_{init},R_{init})$, inducing a preorder
$\mathscr{R}_{init}$, the
algorithm manipulates relevant refiners to
iteratively refine $(P,R)$ initially set to $(P_{init},R_{init})$.
At the end, $(P,R)$ represents $(P_{sim},R_{sim})$ the partition-relation
pair whose induced relation $\mathscr{R}_{sim}$ is the coarsest simulation
included in $\mathscr{R}_{init}$.
The partition $P$ is a set of blocks. To represent $R$, we simply associate
to each block $B\in P$ a set $B.\Rel\subseteq P$ such that
$R=\cup_{B\in P}\{B\}\times B.\Rel$. A block is assimilated with its set of
states. For a given state $q\in Q$, the block of $P$ which contains $q$ is
noted $q.\Block$. We also associate to a block $B$ a set $B.\NotRel$
included in the complement of $\cup B.\Rel$. The refiners will be of
the form: $(B, B\times(\cup B.\Rel),B\times (B.\NotRel))$.
The algorithm is decomposed in three functions: \texttt{Split},
\texttt{Init} and \texttt{Sim}, the main one. The function \texttt{Split}$(Remove,P)$,
used by the two others, splits, possibly, the blocks touched by
$\remove$ and returns the updated partition, the list of blocks included in
$\remove$ and the list of block couples issued
from a split. This last list permits the $\splitRefine_{a,\mathit{ref}}$ of
Definition~\ref{def:refine}
during the \textbf{forall} loop at line~\ref{sim:l15} of \texttt{Sim}.
Note that, even if \texttt{Split} possibly modifies the current
partition-relation pair, thanks to lines \ref{split:l12} and
\ref{split:l16}--\ref{split:l18} it does not modify the induced
relation. This is done out of \texttt{Split}, in \texttt{Sim}.
The main role of function \texttt{Init} is to transform the initial
partition-relation pair to a partition-relation pair whose induced
relation satisfies condition \eqref{eq:InitRefineRestriction}. It also
initializes the set $S$ to those $B$'s whose $B.\NotRel$ is not empty.
After the initialization, the \texttt{Sim} function mainly executes the
following loop. As long as there is a block $B$ whose
$B.\NotRel$ is not empty, all non empty $a.\Remove$ sets are
computed by only one (this is important for the time efficiency) scan of
the transitions leading into $B.\NotRel$. Each of them
corresponds to a $\remove_{a,\mathit{ref}}$ of
Definition~\ref{def:refine}. The relevant $\pre_a(B)$, encoded by
$a.\PreB$, are also computed by only one (idem) scan of
the transitions leading into $\pre_a(B)$. Then, for each letter, with a non empty
$a.\Remove$, a
refinement step is executed with the refiner $(B, B\times(\cup B.\Rel),
B\times (B.\NotRel))$. Note that, during a refinement step, each time a
relation $(C,D)$ is removed from $R$, the content of $D$ is
added to $C.\NotRel$. This is done in order to preserve the second
invariant of Lemma~\ref{lem:mainInv}.
The remainder of the section validates the algorithm.
\begin{lemma}
\label{lem:InitAlgo}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS and $(P_{init}, R_{init})$ be a
partition-relation pair over $Q$ inducing a preorder $\mathscr{R}_{init}$. Let
$(P, \_) = \Init(T,P_{init}, R_{init})$ and
$\mathscr{R}=\cup_{G\in P}G\times(\cup G.\Rel)$.
Then, $\mathscr{R} = \initRefine(\mathscr{R}_{init})$. Furthermore, for all
$G\in P$, we have: $G.\NotRel=Q\setminus \cup G.\Rel$.
\end{lemma}
\begin{proof}
Unless otherwise specified, all line numbers refer to function
\texttt{Init}.
The purpose of the \textbf{forall} loop at line
\ref{init:l3} is to associate to each block
$B\in P$ the initial set of blocks which simulate it:
$B.\Rel = R_{init}(B)$.
In the \textbf{forall} loop at line \ref{init:l5} we
identify $\pre_a(Q)$, encoded by $a.\Remove$, for each letter $a\in \Sigma$. Then, in the
\textbf{forall} loop at line \ref{init:l7}, for each relevant letter
$a\in\Sigma$:
\begin{itemize}
\item We split each block $B\in P$ in two parts, $B\cap \pre_a(Q)$ and
$B\setminus\pre_a(Q)$, and
we update $R$ such that the induced relation of $(P,R)$ stays the
same (lines \ref{split:l11}--\ref{split:l12} and
\ref{split:l16}--\ref{split:l18} of function \texttt{Split}).
\item Now, each block of $P$ is either included in $\pre_a(Q)$ or disjoint
from it. We then delete from $R$ all couples $(C,D)$ such that $C$ is
included in $\pre_a(Q)$ and $D$ is disjoint from $\pre_a(Q)$.
\end{itemize}
At the end $\mathscr{R}$, the induced relation of $(P,R)$, is
$\mathscr{R}_{init}$ where all couples $(q,q')$ such
that $q\in\pre_a(Q)$ and $q'\not\in\pre_a(Q)$ have been deleted. In other
words, $\mathscr{R} = \initRefine(\mathscr{R}_{init})$. Then, the
\textbf{forall} loop at line \ref{init:l12} implies
$C.\NotRel=Q\setminus \cup C.\Rel$ for every block $C\in P$.
\end{proof}
\begin{lemma}
\label{lem:mainInv}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS, $(P_{init},R_{init})$ be an
initial partition-relation pair over $Q$ inducing a preorder
$\mathscr{R}_{init}$ and $\mathscr{S}$ be a simulation over $T$ such that
$\mathscr{S}\subseteq \mathscr{R}_{init}$. Let $\mathscr{R}=\cup_{G\in
P}G\times(\cup G.\Rel)$. Then, the following properties are invariants
of the \texttt{\bf while} loop of function \texttt{Sim}:
\begin{enumerate}
\item $\mathscr{R}$ is a reflexive, block-definable and $\mathscr{S}$-stable
relation,
\item $
\forall G\in P\; \forall c\in \Sigma\;.\;
[\pre_c(\cup G.\Rel\cup G.\NotRel)]_{\mathscr{R}} \cup
\mathscr{R}(\pre_c(G))
\subseteq
\pre_c(\cup G.\Rel\cup G.\NotRel)
$
\end{enumerate}
\end{lemma}
\begin{proof}
The proof is done by an induction on the iterations of the loop. The two
properties are
true just after the initialization. For the first one,
from Lemmas~\ref{lem:InitRefine} and
\ref{lem:InitAlgo}, we deduce that $\mathscr{S}\subseteq\mathscr{R}$,
$\mathscr{R}$ is a preorder and thus reflexive and
block-definable. With the fact that a preorder is, by
definition, transitive we deduce that $\mathscr{S}\subseteq\mathscr{R}$
implies the $\mathscr{S}$-stability of $\mathscr{R}$. For the second one,
this is a direct consequence of item
\ref{lem:InitRefine:3} of Lemma~\ref{lem:InitRefine} and the fact that just after
the initialization:
$\cup G.\Rel\cup G.\NotRel=Q$, see Lemma~\ref{lem:InitAlgo}, for all block
$G\in P$.
Let us consider an iteration of the loop. For the ease of the
demonstration, we prime a variable for its
value before the iteration.
A value during the
iteration is not primed. The two properties are supposed true before the
iteration, we show they are still true after.
Therefore, we assume:
\begin{equation}
\label{eq:mainHypothesis}
\forall G\in P'\; \forall c\in \Sigma\;.\;
\begin{array}[c]{c}
[\pre_c(\cup G.\Rel'\cup G.\NotRel')]_{\mathscr{R}'} \cup\\
\mathscr{R}'(\pre_c(G))\\
\subseteq\\
\pre_c(\cup G.\Rel'\cup G.\NotRel')
\end{array}
\end{equation}
In this proof, all line numbers, if not stated otherwise, refer to
function \texttt{Sim}, and $B$ is the block considered at
line \ref{sim:l4}.
Let $\mathit{ref}=(B,\mathscr{R}_1,\mathscr{R}_2)$ with $\mathscr{R}_1=B\times
(\cup B.\Rel')$, $\mathscr{R}_2=B\times
(B.\NotRel')$. Then, $\mathit{ref}$ is a refiner of $\mathscr{R}'$. This is due
to the following facts:
\begin{itemize}
\item the reflexivity of $\mathscr{R}'$ implies that $B$ is a block of
$\mathscr{R}_1$,
\item $\mathscr{R}'(B) = \mathscr{R}_1(B)\subseteq\mathscr{R}_1(B)$,
\item the block-definability of $\mathscr{R}'$
implies the block-definability of
$\mathscr{R}_1$,
\item from \eqref{eq:mainHypothesis} we have:
\begin{displaymath}
\forall c\in\Sigma\;.\;
[\pre_c(\mathscr{R}_1(B)\cup\mathscr{R}_2(B))]_{ \mathscr{R}'}
\cup \mathscr{R}'(\pre_c(B))
\subseteq
\pre_c(\mathscr{R}_1(B)\cup \mathscr{R}_2(B))
\end{displaymath}
\end{itemize}
Clearly, after the first iteration of the \textbf{forall} loop at line
\ref{sim:l13}, from Theorem~\ref{th:Refine}, we have
$\mathscr{R}=\refine_{a,\mathit{ref}}(\mathscr{R}')$ and
$\mathscr{R}$ is reflexive, block-definable, $\mathscr{S}$-stable and
$\mathscr{R}\subseteq\mathscr{R}'$. Therefore, $\mathit{ref}$ is still a
refiner of $\mathscr{R}$. The same happens for the successive
iterations of the \textbf{forall} loop at line
\ref{sim:l13}. The first property of the current lemma is thus true.
To prove the second property of the lemma, we need two intermediate results.
\begin{equation}
\label{eq:descOfNotBInduc}
\forall F\in P'\setminus\{B\}\ \forall G\in P\;.\; G\subseteq F \Rightarrow
\cup F.\Rel'\cup F.\NotRel'=\cup G.\Rel\cup G.\NotRel
\end{equation}
By an induction on the number of splits from $F$ to $G$. If there has been
no split then $G=F$ and only line \ref{sim:l20} can modify
$G.\Rel$ or $G.\NotRel$, but such that the expression $\cup G.\Rel\cup
G.\NotRel$ stays constant. Suppose the property is true before a split.
If that split does not involve $G$ then, thanks to lines
\ref{split:l16}--\ref{split:l18} in \texttt{Split}, $\cup G.\Rel$ and
$G.\NotRel$ are not modified. If it is a split of
$G$ into $G_1$ and $G_2$, then the split is done such that, function
\texttt{Split} lines \ref{split:l11}--\ref{split:l13}, $\cup G_i.\Rel=\cup
G.\Rel$, $G_i.\NotRel=G.\NotRel$ and, function \texttt{Split} lines
\ref{split:l16}--\ref{split:l18}, $\mathscr{R}$ is not changed. With
the induction hypothesis we get:
$\cup F.\Rel'\cup F.\NotRel' = \cup G_i.\Rel\cup G_i.\NotRel$.
After the split, only lines \ref{sim:l16} and \ref{sim:l20} can modify
$G_i.\Rel$ or $G_i.\NotRel$, but such that the expression $\cup G_i.\Rel\cup
G_i.\NotRel$ stays constant.
\begin{equation}
\label{eq:descOfB}
\forall G\in P\;.\; G\subseteq B \Rightarrow
\cup B.\Rel'=\cup G.\Rel\cup G.\NotRel
\end{equation}
The proof is similar to the previous one except that $B.\NotRel'$ has been
emptied at line~\ref{sim:l10}.
Let $G\in P$ after a given iteration of the \textbf{forall} loop at line
\ref{sim:l13}. There are two cases:
\begin{itemize}
\item There is $F\in P'\setminus\{B\}$ such that $G\subseteq F$.
From \eqref{eq:mainHypothesis} and
\eqref{eq:descOfNotBInduc} we get: $ \forall c\in \Sigma\;.\;
[\pre_c(\cup G.\Rel\cup G.\NotRel)]_{\mathscr{R}'} \cup
\mathscr{R}'(\pre_c(G))
\subseteq
\pre_c(\cup G.\Rel\cup G.\NotRel)
$.
From the fact that $\mathscr{R}\subseteq\mathscr{R}'$ we obtain: $
\forall c\in \Sigma\;.\;
[\pre_c(\cup G.\Rel\cup G.\NotRel)]_{\mathscr{R}} \cup
\mathscr{R}(\pre_c(G))
\subseteq
\pre_c(\cup G.\Rel\cup G.\NotRel)
$.
\item $G\subseteq B$.
We have two sub cases:
\begin{itemize}
\item $c\in alph$. Let $\mathscr{R}_c$ be the value of $\mathscr{R}$
after the iteration of the \textbf{forall} loop at line \ref{sim:l13} with
$a=c$. From what precedes, remember that $\mathit{ref}$ is still a refiner of
$\mathscr{R}$, and $\mathscr{R}$ and $\mathscr{R}_1$ are still
$\mathscr{S}$-stable. Then, from Theorem~\ref{th:Refine} we get $
[\pre_c(\mathscr{R}_1(B))]_{\mathscr{R}_c} \cup
\mathscr{R}_c(\pre_c(B)) \subseteq \pre_c(\mathscr{R}_1(B))$. At the
end of the iteration of the while loop, we obviously have
$\mathscr{R}\subseteq\mathscr{R}_c$. With \eqref{eq:descOfB} and
$G\subseteq B$ we obtain: $ [\pre_c(\cup G.\Rel \cup
G.\NotRel)]_{\mathscr{R}} \cup \mathscr{R}(\pre_c(G)) \subseteq
\pre_c(\cup G.\Rel \cup G.\NotRel)$.
\item $c\not\in alph$. In that case, $c.\Remove=\emptyset$, thus
$\pre_c(B.\NotRel')\subseteq \pre_c(\cup B.\Rel')$. With
\eqref{eq:mainHypothesis} we get: $ [\pre_c(\cup
B.\Rel')]_{\mathscr{R}'} \cup \mathscr{R}'(\pre_c(B)) \subseteq
\pre_c(\cup B.\Rel')$. With \eqref{eq:descOfB},
$\mathscr{R}\subseteq\mathscr{R}'$ and $G\subseteq B$ we obtain: $
[\pre_c(\cup G.\Rel \cup G.\NotRel)]_{\mathscr{R}} \cup
\mathscr{R}(\pre_c(G)) \subseteq \pre_c(\cup G.\Rel \cup G.\NotRel)$.
\end{itemize}
\end{itemize}
\end{proof}
\begin{theorem}
\label{thm:SimIsCorrect}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS and $(P_{init},R_{init})$ be an
initial partition-relation pair over $Q$ inducing a preorder
$\mathscr{R}_{init}$. Function \texttt{Sim} computes the partition-relation pair
$(P_{sim},R_{sim})$ inducing $\mathscr{R}_{sim}$ the maximal
simulation over $T$ contained in $\mathscr{R}_{init}$. Furthermore,
$\mathscr{R}_{sim}$ is a preorder.
\end{theorem}
\begin{proof}
From line \ref{init:l14}
of function \texttt{Init}, lines \ref{sim:l17} and \ref{sim:l21} of
function \texttt{Sim}, a block $G\in P$ is added in $S$ whenever
$G.\NotRel$ is not empty. Furthermore, each time a block $G$ is
withdrawn from $S$, line \ref{sim:l5}, $G.\NotRel$ is emptied, line
\ref{sim:l10}. Therefore, a block $G$ is in $S$ iff $G.\NotRel$
is not empty.
\begin{enumerate}
\item \textbf{Function \texttt{Sim} terminates}. Let
$\mathscr{R}'=\cup_{G\in P}G\times(\cup G.\Rel\cup G.\NotRel)$. At each
iteration of the \texttt{\bf while} loop $\mathscr{R}'$ strictly
decreases (a non-empty $B.\NotRel$ is emptied). Since $\mathscr{R}'$ is
a finite set, the algorithm necessarily terminates.
\item
\textbf{$\mathscr{R}_{sim}$ is a simulation}. The algorithm terminates when $S$ is
empty. From what precedes, at this moment, for all $G\in P$,
$G.\NotRel=\emptyset$. With Lemma~\ref{lem:mainInv} we get:
$ \forall G\in P\; \forall a\in \Sigma\;.\;
\mathscr{R}_{sim}(\pre_a(G))
\subseteq
\pre_a( \mathscr{R}_{sim}(G))$.
With Proposition \ref{prop:blockSim} this means that
$\mathscr{R}_{sim}$ is a simulation since $\mathscr{R}_{sim}$ is
reflexive and block-definable (Lemma~\ref{lem:mainInv}).
\item \textbf{$\mathscr{R}_{sim}$ contains every simulation included in
$\mathscr{R}_{init}$}. From Lemma~\ref{lem:mainInv} we deduce that
for every simulation $\mathscr{S}$ over $T$ included in
$\mathscr{R}_{init}$, $\mathscr{R}_{sim}$ is reflexive and
$\mathscr{S}$-stable. These two properties
imply $\mathscr{S}\subseteq\mathscr{R}_{sim}$.
\item \textbf{$\mathscr{R}_{sim}$ is a preorder}. We have seen that
$\mathscr{R}_{sim}$ is $\mathscr{S}$-stable for any simulation included
in $\mathscr{R}_{init}$. Since $\mathscr{R}_{sim}$ is such a
simulation, $\mathscr{R}_{sim}$ is also $\mathscr{R}_{sim}$-stable and
thus transitive. This relation being reflexive and transitive is a
preorder.
\end{enumerate}
\end{proof}
\section{Complexity}
\label{sec:complexity}
From now on, all space complexities are given in
bits.
Let $X$ be a set of elements, we qualify an encoding of $X$ as
\emph{indexed} if the elements of $X$ are encoded in an array of
$|X|$ slots, one for each element. Therefore, an element of $X$ can be
identified with its index in this array.
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS, an encoding of $T$ is said
\emph{normalized} if the encodings of $Q$, $\Sigma$ and $\rightarrow$ are
indexed, a transition is encoded by the index of its source
state, the index of its label and the index of its destination state, and
if $|Q|$ and $|\Sigma|$ are in $O(|{\rightarrow}|)$.
If $|\Sigma|$ is not in $O(|{\rightarrow}|)$, we can restrict it to its
useful part $\Sigma'=\{a\in\Sigma\,\big|\, \exists q,q'\in
Q\;.\;q\xrightarrow{a}q'\in\rightarrow\}$ whose size is less than
$|{\rightarrow}|$. To do this, we can use hash table techniques, sort
the set $\rightarrow$ with the keys being the letters labelling the transitions,
or more efficiently use a technique similar to the one we used in the algorithm to
distribute a set of transitions relatively to its labels (see, as an
example, the \textbf{forall} loop at line \ref{sim:l7} of
\texttt{Sim}). This is done in $O(|\Sigma|+|{\rightarrow}|)$ time and uses
$O(|\Sigma|.\log|\Sigma|)$ space. We have recently seen that
this may be done in $O(|{\rightarrow}|)$ time, still with
$O(|\Sigma|.\log|\Sigma|)$ space, by using a technique presented
in \cite{Val09}.
If $|Q|$ is not in $O(|{\rightarrow}|)$ this means there are states that
are not involved in any transition. In general, these states are
ignored. Indeed, any state can simulate them and they cannot simulate any
state with an outgoing transition. Therefore, we can restrict $Q$ to its
useful part $\{q\in Q\,\big|\, \exists q'\in Q\;\exists a\in \Sigma\;.\;
q\xrightarrow{a}q'\in\rightarrow \vee \;q'\xrightarrow{a}q\in\rightarrow\}$
whose size is in $O(|{\rightarrow}|)$. This is done like for
$\Sigma$.
\begin{center}
All encodings of LTS in this paper are assumed to be normalized.
\end{center}
We also assume the encoding of the initial partition-relation pair
$(P_{init},R_{init})$ to be such that: the encoding of $P_{init}$ is
indexed, for each block $B\in P$ scanning of the states in $B$ can be
done in $O(|B|)$ time and scanning of $R_{init}(B)$ can be
done in $O(|P_{init}|)$ time. Furthermore, for each state $q\in Q$, we
assume that $q.\Block$ is set to the block of $P_{init}$ to which $q$ belongs.
One difficulty concerning the data structures was to design an efficient
encoding of the $\NotRel$'s avoiding the need to update them each time
a block is split. This led us to design an original encoding of $P$
(encoding which finally happens to be similar to the one designed in
\cite{Val09}).
The first two versions of the algorithm essentially differ
by the implementation of the test $r \not\in \pre_a(\cup B.\Rel)$ at line
\ref{sim:l7}; the overall time complexity of the other parts of the
algorithm being in $O(|P_{sim}|.|{\rightarrow}|)$.
\subsection{Hypothesis}
\label{sec:hypothesis}
In this sub-section, we state the relevant complexity properties of the
data structures we use. We postpone on Section~\ref{sec:DataStruct} the
explanations on how we meet these properties.
During an initialization phase, we set the following:
\begin{itemize}
\item
For each transition $t=q\xrightarrow{a}q'$, we set $t.\Sl=q_a\in \SL(\rightarrow)$ the state-letter
associated with $t$.
\item For each state-letter $q_a\in \SL(\rightarrow)$, we set $q_a.\State=q$ and
$q_a.\Post=\{q\xrightarrow{a}q'\in\rightarrow\,\big|\, q'\in Q\}$ such
that scanning the
transitions in $q_a.\Post$ is done in linear time.
\item For each state $q'\in Q$, we set
$q'.\Pre=\{q\xrightarrow{a}q'\in \rightarrow \,\big|\, q\in Q,
\;a\in\Sigma\}$ such that scanning the transitions of this set is done in
linear time.
\end{itemize}
This initialization phase is done in $O(|{\rightarrow}|)$
time and uses $O(|{\rightarrow}|.\log|{\rightarrow}|)$ space.
The partition $P$ is encoded such that adding a new block is done in
constant amortized time and scanning the states of a block is done in
linear time. The encoding of $P$ uses $O(|P_{sim}|.\log|{\rightarrow}|)$
space. Note that the content of all the blocks uses $O(|Q|.\log|Q|)$
space.
The union $B.\NotRel$ of blocks that do not simulate a given $B\in P$ is
encoded such that resetting to $\emptyset$ is done in constant time and adding the content of a
block is done in constant amortized time (relatively to the number of added
blocks) while scanning the states present
in the union is done in
linear time of the number of states. The encoding of all $\NotRel$'s uses
$O(|P_{sim}|^2.\log|P_{sim}|)$ space.
The set of blocks, $B.\Rel$, that simulate a given $B\in P$ is
encoded such that membership test and removing of a
block are done in constant time while adding of a block is done in constant
amortized time (relatively to the size of $P_{sim}$). The encoding of all
$\Rel$'s is done in $O(|P_{sim}|^2)$ space.
The set $S$ of blocks to be treated by the main loop of the algorithm is
encoded such that the emptiness test and the extraction of one element (arbitrarily
chosen by the data structure) are done in constant
time, and adding an element in $S$ is done in constant amortized
time. The encoding of $S$ uses $O(|P_{sim}|.\log|{\rightarrow}|)$ space.
The sets $alph$, $SplitCouples$, $Touched$ and $BlocksInRemove$ are encoded
such that adding of an element is done in constant time and,
scanning of their elements and resetting to $\emptyset$ are done in linear
time (relatively to the number of elements). The encoding of these sets
uses $O(|{\rightarrow}|.\log|{\rightarrow}|)$ space.
For all $a\in\Sigma$, $a.\PreB$ and $a.\Remove$ are encoded such that
adding of an element is done in constant time,
scanning of their elements and resetting to $\emptyset$ is done in linear
time (relatively to the number of elements). The encoding of all $a.\PreB$
and $a.\Remove$ takes $O(|{\rightarrow}|.\log|{\rightarrow}|)$ space.
Finally, $\Split(Remove,P)$ is done in $O(|Remove|)$ time.
\subsection{Common analysis}
\label{sec:commonParts}
\begin{figure}
\caption{A configuration during an iteration of the while loop}
\label{fig:configIt}
\end{figure}
Figure~\ref{fig:configIt} illustrates the main lemma of this section.
\begin{lemma}
\label{lem:AtMostOnce}
Let $B$ be a block defined at line~\ref{sim:l4} of \emph{\texttt{Sim}}.
During the execution of \emph{\texttt{Sim}}, the following configurations
can happen at most once at line~\ref{sim:l4} (line~\ref{sim:l18} for the
last one):
\begin{enumerate}
\item \label{lem:AtMostOnce:1} $B$ and a block $E$ such that
$E\subseteq B.\NotRel$,
\item \label{lem:AtMostOnce:2} $B$ and a transition
$r\xrightarrow{a}r'$ such that $r'\in B.\NotRel$,
\item \label{lem:AtMostOnce:3} $B$, a block $E\subseteq B.\NotRel$ and a
transition $q\xrightarrow{a}q'$ such that $q'\in B$,
\item \label{lem:AtMostOnce:4} $B$, a block $D$ and a transition $q\xrightarrow{a}q'$
such that $D\subseteq a.\Remove$ (or $D\in BlocksInRemove$) and $q'\in B$.
\end{enumerate}
\end{lemma}
\begin{proof}
\mbox{}
\begin{enumerate}
\item After the initialization, the content of a block can be added into
$B.\NotRel$ only if this block is removed from $\cup B.\Rel$,
lines~\ref{sim:l16} and \ref{sim:l20}. Furthermore, $\cup B.\Rel$ can
only decrease and if $E$ is included in $B.\NotRel$ at
line~\ref{sim:l4}, $B.\NotRel$ is emptied at line~\ref{sim:l10}. From
what precedes, it will not be possible again for $E$ to be included in
$B.\NotRel$ during another iteration of the while loop.
\item[2,3.] Direct consequences of the preceding point.
\item[4.] Let us suppose this can happen twice. Let $B.\Rel'$ be the
value of $B.\Rel$ the first time it happens and $B.Rel''$, $B.\NotRel''$ be the
values of $B.\Rel$ and $B.\NotRel$ the second time it happens. By the same
reasoning as in the first point, we get: $B.\NotRel''\subseteq
\cup B.\Rel'$. Let $r$ be any element of $D$. The first time the
configuration happens, we necessarily have, lines~\ref{sim:l6},
\ref{sim:l7} and \ref{sim:l9}: $r\not\in\pre_a(\cup B.\Rel')$. The
second time the configuration happens we necessarily have:
$r\in\pre_a(B.\NotRel'')\subseteq \pre_a(\cup B.\Rel')$ which
contradicts $r\not\in\pre_a(\cup B.\Rel')$.
\end{enumerate}
\end{proof}
\subsubsection{Time}
\label{sec:CommonTime}
In this sub-section, the $O$ notation refers to time complexity and the
\emph{overall complexity} of a line is the time complexity of
all the executions of that line during the lifetime of the algorithm.
\paragraph{Initialisation}
\label{sec:initialisation}
In this paragraph, we consider the complexity of the initialization. All
line numbers refer to function \texttt{Init}.
Line~\ref{init:l1} essentially corresponds to a copy of $P_{init}$, this is
done in $O(|P_{init}|)$ and thus $O(|P_{sim}|)$.
Line~\ref{init:l2} is done in $O(|\Sigma|)$ and thus in $O(|{\rightarrow}|)$.
The \textbf{forall} loop at line~\ref{init:l3} is done in $O(|P_{init}|^2)$
and thus in $O(|P_{sim}|^2)$.
The \textbf{forall} loop at line~\ref{init:l5} is done in
$O(|{\rightarrow}|)$.
Since we have $\sum_{a\in\Sigma}|a.\Remove|\leq |\SL(\rightarrow)|\leq
|{\rightarrow}|$, the overall complexity of line~\ref{init:l8} is $O(|{\rightarrow}|)$.
At first glance the overall complexity of
lines~\ref{init:l9}--\ref{init:l11} is in $O(|\Sigma|.|P_{sim}|^2)$ but it is
also in $O(|\SL(\rightarrow)|.|P_{sim}|)$, since there are at most $|\SL(\rightarrow)|$ blocks
$C$ concerned by $BlocksInRemove$ and each time it is for a given $a\in \Sigma$,
and thus in $O(|{\rightarrow}|.|P_{sim}|)$.
The \textbf{forall} loop at line~\ref{init:l12} is done in
$O(|P_{sim}|^2)$. From all of that, the complexity of \texttt{Init} is
$O(|{\rightarrow}|.|P_{sim}|)$.
\paragraph{Simulation algorithm}
\label{sec:simulationAlgo}
Thanks to Lemma~\ref{lem:AtMostOnce}, item~\ref{lem:AtMostOnce:1}, the
\textbf{while} loop of function \texttt{Sim} is executed at most
$|P_{sim}|^2$ times since a block $B$ is concerned by the loop only if
$B.\NotRel\neq\emptyset$ and $B.\NotRel$ is made of a union of blocks.
The first two versions of the algorithm differ by the test $r\not\in\pre_a(\cup
B.\Rel)$ at line~\ref{sim:l7}. Therefore, in this paragraph, we do not consider
the overall complexity of this test. We only consider right now the overall
complexity of the scanning of all transitions $r\xrightarrow{a}r'$ such
that $r'\in\pre_a(B.\NotRel)$ and the overall complexity of
lines~\ref{sim:l8}--\ref{sim:l9}. From Lemma~\ref{lem:AtMostOnce}
item~\ref{lem:AtMostOnce:2} it is $O(|P_{sim}|.|{\rightarrow}|)$.
From Lemma~\ref{lem:AtMostOnce}
item~\ref{lem:AtMostOnce:3}, the overall complexity of the
\texttt{forall} loop at line~\ref{sim:l11} is
$O(|P_{sim}|.|{\rightarrow}|)$.
The two preceding paragraphs imply that the overall complexity of
lines~\ref{sim:l22} and \ref{sim:l23} is
$O(|P_{sim}|.|{\rightarrow}|)$ since resetting $a.\PreB$ or $a.\Remove$ is
linear in their sizes and thus less than what has been added in them, which
is less than $O(|P_{sim}|.|{\rightarrow}|)$ (overall complexity of lines
\ref{sim:l12} and \ref{sim:l9}).
The overall complexity of line~\ref{sim:l13} is less than that of
line~\ref{sim:l8} and thus $O(|P_{sim}|.|{\rightarrow}|)$.
The complexity of \texttt{Split($a.\Remove,P$)} is
$O(|a.\Remove|)$. From the time complexity of line~\ref{sim:l9} we get the
overall complexity of line~\ref{sim:l14}: $O(|P_{sim}|.|{\rightarrow}|)$.
There are at most $|P_{sim}|$ couples $(C,D)$ issued from the splits of
blocks. So, the overall complexity of the \textbf{forall} loop at
line~\ref{sim:l15} is $O(|P_{sim}|)$.
From the overall complexity of lines~\ref{sim:l9} and \ref{sim:l12}, the
overall complexity of the calculation of all $D$ and of all $C$ concerned
by line~\ref{sim:l18} is $O(|P_{sim}|.|{\rightarrow}|)$. From
Lemma~\ref{lem:AtMostOnce} item~\ref{lem:AtMostOnce:4}, there is at most
$O(|P_{sim}|.|{\rightarrow}|)$ couples $(C,D)$ which have been involved at
line~\ref{sim:l18}. This implies the overall time complexity of the
\textbf{forall} loop of line~\ref{sim:l18}: $O(|P_{sim}|.|{\rightarrow}|)$.
With what precedes, the test $r\not\in\pre_a(\cup B.\Rel)$ at
line~\ref{sim:l7} being aside, the overall time complexities of the other lines
of the algorithm are all in $O(|P_{sim}|.|{\rightarrow}|)$.
\subsubsection{Space}
\label{sec:CommonSpace}
Apart from the data structures needed to do the test $r\not\in\pre_a(\cup B.\Rel)$ at
line~\ref{sim:l7}, from Section~\ref{sec:hypothesis}, the space complexity
of the algorithm is
$O(|P_{sim}|^2.\log|P_{sim}| +|{\rightarrow}|.\log|{\rightarrow}|)$.
\subsection{The nice compromise}
\label{sec:compromise}
We use a set of state-letters, $SL\subseteq \SL(\rightarrow)$, with the same time and space
complexity properties as those of $alph$. Before line~\ref{sim:l7}
this set is emptied. Then, let us consider a given $r'\in
B.\NotRel$. From $r'$ we
get, in linear time, all $t=r\xrightarrow{a}r'\in r'.\Pre$. If $r_a= t.\Sl$
is already in $SL$ it has already been treated, so we stop there and
consider the next element of
$r'.\Pre$. Otherwise, we have not yet tested whether $r\not\in\pre_a(\cup
B.\Rel)$. To do that, first, we add $r_a$ into $SL$ and then, consider all
$r\xrightarrow{a}r''\in r_a.\Post$. If for none of them $r''.\Block$ is in
$B.\Rel$, which is tested each time in constant time, then
$r\not\in\pre_a(\cup B.\Rel)$. Thanks to the use of $SL$, from
Lemma~\ref{lem:AtMostOnce}, item~\ref{lem:AtMostOnce:1}, a transition
$r\xrightarrow{a}r''\in r_a.\Post$ is considered only once for a given couple
$(B,E)$ of blocks in Figure~\ref{fig:configIt}. Therefore, the overall time
complexity of the test $r\not\in\pre_a(\cup B.\Rel)$ in line~\ref{sim:l7}
is $O(|P_{sim}|^2.|{\rightarrow}|)$.
We can also express the time
complexity in another way. For that, we need to introduce the state-letter
branching factor of a LTS. The \emph{branching factor of a state} is the
number of its outgoing transitions. The \emph{branching factor of a
state-letter} $q_a$ is the number of the outgoing transitions of $q$
labelled by $a$. The \emph{state branching factor} of a LTS is the greatest
branching factor of its states. The \emph{state-letter branching factor} of
a LTS is the greatest branching factor of its state-letters. Let us come
back to the analysis of the complexity of the test
$r\xrightarrow{a}(B.\NotRel) \wedge r \not\in \pre_a(\cup B.\Rel)$. From
Lemma~\ref{lem:AtMostOnce} item~\ref{lem:AtMostOnce:2}, a configuration
such that $r\xrightarrow{a}r'$ is a transition with $r'\in B.\NotRel$
happens only once. From this configuration, and with the use of the set
$SL$ described above, we have to consider at most $b$ transitions
$r\xrightarrow{a}r''\in r_a.\Post$ to test whether $r\not\in\pre_a(\cup
B.\Rel)$.
From what precedes we obtain the following theorem.
\begin{theorem}
\label{th:niceCompromiseVersionComplexities}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS and $(P_{init},R_{init})$ be an
initial partition-relation pair over $Q$ inducing a preorder
$\mathscr{R}_{init}$. The nice compromise version of \texttt{\em Sim} computes the
partition-relation pair $(P_{sim},R_{sim})$ inducing $\mathscr{R}_{sim}$
the maximal simulation over $T$ contained in $\mathscr{R}_{init}$ in:
\begin{itemize}
\item $O(\min(|P_{sim}|,b).|P_{sim}|.|{\rightarrow}|)$ time, and
\item $O(|P_{sim}|^2.\log|P_{sim}|+|{\rightarrow}|.\log|{\rightarrow}|)$
space.
\end{itemize}
with
$b=\max_{q_a\in \SL(\rightarrow)}|
\{q\xrightarrow{a}q'\in\rightarrow\,\big|\, q'\in Q\}|$
the \emph{state-letter branching factor} of $T$.
\end{theorem}
Clearly, the state-letter branching factor of a LTS is
smaller than its state branching factor. For the state-letter branching
factor $b$ in the preceding theorem, we have: $b\leq |Q|$. We also
have $|P_{sim}|\leq |Q|$. But there
is no definitive comparison between $b$ and $|P_{sim}|$. However if one
considers the VLTS Benchmark Suite
(\url{http://cadp.inria.fr/resources/benchmark_bcg.html}), the state
branching factor is rarely more than one hundred even for systems with
millions of states. Furthermore, in the case of deterministic systems, the
state-letter branching factor is 1. Therefore, we consider this version of
\texttt{Sim} as a nice compromise between space and time efficiency.
\subsection{The Time Efficient Version}
\label{sec:timeVersion}
To get an efficient time version of the algorithm, we need counters. To
each block $B\in P$ we associate $B.\RelCount$, an array of counters
indexed on the set of state-letters $\SL(\rightarrow)$ such that:
$B.\RelCount(r_a)=|\{r\xrightarrow{a}r'\in\rightarrow\,\big|\, r'\in \cup
B.\Rel\cup B.\NotRel\}|$. Let $r_a.\Post=\{r\xrightarrow{a}r'\in\rightarrow\,\big|\, r'\in Q\}$.
The initialization consists just of setting
$B.\RelCount(r_a)=|r_a.\Post|$ since at the end of the initialization, for
any $B\in P$, $Q=\cup B.\Rel\cup B.\NotRel$. Therefore, the time complexity
of the initialization of all the counters is
$O(|P_{sim}|.|\SL(\rightarrow)|)$ and thus $O(|P_{sim}|.|{\rightarrow}|)$.
Let us come back to the overall time complexity of line~\ref{sim:l7}. For
each transition $r\xrightarrow{a}r'$ with $r'\in B.\NotRel$,
$B.\RelCount(r_a)$ is decremented, and if after that $B.\RelCount(r_a)=0$
this implies that $r\not\in\pre_a(\cup B.\Rel)$. This means that the
test $r\not\in\pre_a(\cup B.\Rel)$ is done in constant time for each
transition $r\xrightarrow{a}r'$ with $r'\in B.\NotRel$.
Note that when a block is split during the function \texttt{Split} its
array of counters must be copied to the new block. This is done in
$O(|\SL(\rightarrow)|)$. Since during the computation there is at most
$|P_{sim}|$ splits, the overall time complexity of all the copy operations
is $O(|P_{sim}|.|\SL(\rightarrow)|)$ and thus
$O(|P_{sim}|.|{\rightarrow}|)$. We then get the following theorem.
\begin{theorem}
\label{th:timeEfficientVersionComplexities}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS and $(P_{init},R_{init})$ be an
initial partition-relation pair over $Q$ inducing a preorder
$\mathscr{R}_{init}$. The time efficient version of \texttt{\em Sim}
computes the partition-relation pair
$(P_{sim},R_{sim})$ inducing $\mathscr{R}_{sim}$ the maximal
simulation over $T$ contained in $\mathscr{R}_{init}$ in:
\begin{center}
$O(|P_{sim}|.|{\rightarrow}|)$ time and
$O(|P_{sim}|.|\SL(\rightarrow)|.\log|Q|+|{\rightarrow}|.\log|{\rightarrow}|)$ space.
\end{center}
\end{theorem}
\subsection{The Space Efficient Version}
\label{sec:SpaceEfficientVersions}
The algorithm GPP has a time complexity of
$O(|P_{sim}|^2. |{\rightarrow}|)$, for Kripke structures,
but an announced space complexity of $O(|P_{sim}|^2 +
|Q|.\log|P_{sim}|)$. Unfortunately, this space complexity does not
correspond to that of GPP. As announced in the
introduction of the present paper, GPP uses (a modified version of)
HHK. For each state $q'\in Q$, HHK uses an array of
counters, to speed up the algorithm, and a set of states, $\remove(q')$, that
do not lead via a transition to a state simulating $q'$. The counters and
the $\remove$ sets use $O(|Q|^2.\log|Q|)$ bits.
As GPP uses HHK on an abstract structure whose states correspond to
blocks of the current partition, the initial version of GPP uses
$O(|P_{sim}|^2.\log|P_{sim}|)$ bits for the counters and the $\remove$
sets. Then, the authors explain how
to avoid the use of the counters, but do not do the same for the
$\remove$ sets. Therefore their algorithm still uses at least
$O(|P_{sim}|^2.\log|P_{sim}|)$ bits. The $O(|Q|.\log|P_{sim}|)$
part of their announced space complexity comes from the necessity to
memorize for each state $q$ the block to which it belongs ($q.\Block$ in
the present paper). But GPP, like the algorithm in \cite{CRT11}, scan in
linear time the
states belonging to a block. To do that the set of the states of a block is
encoded by a doubly linked list which also enable to remove and to add a
state in a block in constant time. This implies that the size of each pointer of these
lists needs to be sufficient to distinguish the $|Q|$ states: $\log
|Q|$. Since there are $|Q|$ states, GPP, like the algorithm in \cite{CRT11}, needs
at least $|Q|.\log|Q|$ bits. The real space complexity of
GPP is thus $O(|P_{sim}|^2.\log|P_{sim}|+|Q|.\log|Q|)$.
By removing the use of the $\NotRel$'s in our base algorithm we are able to
propose the space efficient version. The time complexities of \texttt{Init} and
\texttt{Split} do not change, but now the overall time
complexity of almost all the lines in the \textbf{while} loop of
\texttt{Sim} becomes $O(|P_{sim}|^2.|{\rightarrow}|)$.
We now present how to avoid, in our nice compromise version of \texttt{Sim},
the use of the $\NotRel$'s in order to lower the space complexity from
$O(|P_{sim}|^2.\log|P_{sim}| +|{\rightarrow}|.\log|{\rightarrow}|)$ to
$O(|P_{sim}|^2+|{\rightarrow}|.\log|{\rightarrow}|)$ while keeping a time
complexity of $O(|P_{sim}|^2.|{\rightarrow}|)$.
The transformation of the algorithm is quite simple: we mainly replace
line~\ref{init:l14} of \texttt{Init} by ``$S:=\Copy(P)$;'',
line~\ref{sim:l7} of \texttt{Sim} by
``\textbf{forall} $r\xrightarrow{a}(\overline{\cup B.\Rel})
\wedge r \not\in \pre_a(\cup B.\Rel)$ \textbf{do}'',
and we remove all other instructions where $\NotRel$ appears.
Just to simplify the proof, we also replace
line~\ref{sim:l17} of \texttt{Sim} by ``$S:=S\cup\{C,D\};$''.
In the remainder of this sub-section, lines refer only to the function
\texttt{Sim}.
\subparagraph{Correctness}
\label{sec:correctness}
Clearly, after the initialization, which puts us under condition
\eqref{eq:InitRefineRestriction}, for any block $B\in P$, $(B, B\times
(\cup B.\Rel),B\times \overline{\cup B.\Rel})$ is a refiner of the
current relation. By noting $B.\NotRel\triangleq\overline{\cup B.\Rel}$,
second item of Lemma~\ref{lem:mainInv} becomes trivial. For the first item,
we follow the same proof. The algorithm terminates since, after the
first time, a block $B$
can be chosen again by line~\ref{sim:l4} only if it is issued from a split
(new line~\ref{sim:l17}) or if a block has been removed from $B.\Rel$
(line~\ref{sim:l21}). Each case can happen at most $|P_{sim}|$ times. Let
$\mathscr{R}_{sim}$ be the relation induced by the result
$(P_{sim},R_{sim})$ of \texttt{Sim}. Like
in the proof of Theorem~\ref{thm:SimIsCorrect}, we use
Lemma~\ref{lem:mainInv} to
deduce that $\mathscr{R}_{sim}$ contains every simulation included in
$\mathscr{R}_{init}$ the relation induced by the initial partition-relation
pair. Now, for a given block $B$ of the last partition, consider the last
time $B$ has been chosen by line~\ref{sim:l4}. After, the execution of the
corresponding iteration of the loop, from Theorem~\ref{th:Refine} we deduce
that $\mathscr{R}(\pre_a(B))\subseteq\pre_a(\cup B.\Rel')$ with $B.\Rel'$
the value of $B.\Rel$ before the iteration. But since
this is the last use of $B$, $B.\Rel$ has not been modified during this
iteration of the while loop. Thus, $B.\Rel=B.\Rel'$. From this moment on,
$B.\Rel$ will not be modified. So we have
$\mathscr{R}_{sim}(\pre_a(B))\subseteq\pre_a(\mathscr{R}_{sim}(B))$ for
all block $B\in P_{sim}$ and all letter $a\in\Sigma$. This defines a
simulation (Proposition~\ref{prop:blockSim}).
\subparagraph{Complexity}
The \textbf{forall} loop at line~\ref{sim:l7} is encoded by the following
lines:
{ \restylealgo{plain}
\linesnotnumbered
\begin{center}
\begin{algorithm}[H]
\ForAll{$q\xrightarrow{a}q'\in \rightarrow$} { \lIf{$q'.\Block\in
B.\Rel$} {$a.\PreRel:=a.\PreRel\cup\{q\}$;} }
\ForAll{$q\xrightarrow{a}q'\in \rightarrow$} {
\If{$q'.\Block\not\in B.\Rel\wedge q\not\in a.\PreRel$} { $alph
:= alph \cup \{a\}$\; $a.\Remove := a.\Remove \cup \{q\}$\; } }
\end{algorithm}
\end{center}
}
In addition, we add ``$a.\PreRel:=\emptyset$;'' to the bodies of
lines \ref{sim:l2} and \ref{sim:l22}. The $a.\PreRel$'s are data structures with the same
complexity properties as those of $a.\PreB$ and $a.\Remove$. For a given
iteration of the \textbf{while} loop the time complexity of these lines is
$O(|{\rightarrow}|)$. Since the number of iterations of the \textbf{while}
loop is in $O(|P_{sim}|^2)$, the overall time complexity of these lines is
$O(|P_{sim}|^2.|{\rightarrow}|)$. This is thus also the overall time complexity
of lines
\ref{sim:l13}, \ref{sim:l14}, \ref{sim:l22} and \ref{sim:l23}, and the
overall time complexity of calculation of all $C$ and $D$ concerned by
line~\ref{sim:l18}. The
overall time complexity of the \textbf{forall} loop at line~\ref{sim:l15}
does not change: $O(|P_{sim}|)$. Consider now Figure~\ref{fig:configIt}. A
transition $q\xrightarrow{a}q'$ with $q'$ in a block $B$ chosen at
line~\ref{sim:l4} is considered only $O(|P_{sim}|)$ times. Knowing that for
each time there are at most $|P_{sim}|$ blocks $D$ in $a.\Remove$ we deduce
the overall time complexity of the \textbf{forall} loop at
line~\ref{sim:l18}: $O(|P_{sim}|^2.|{\rightarrow}|)$.
Note for the attentive reader: continuing the discussion just before
Definition~\ref{def:refine}, in practice, it should be more interesting
to do the split with $\pre_a(\cup B.\Rel)$ instead of $a.\Remove$ and to do
the refinement step with $BlocksInRemove=\{D\in P\,\big|\, D\cap\pre_a(\cup
B.\Rel)=\emptyset\}$. Because, $\pre_a(\cup B.\Rel)$ is supposed to
decrease at each iteration which is not the case for $a.\Remove$ when we no
longer have $B.\NotRel$.
\begin{theorem}
\label{th:spaceEfficientVersionComplexities}
Let $T=(Q,\Sigma, \rightarrow)$ be a LTS and $(P_{init},R_{init})$ be an
initial partition-relation pair over $Q$ inducing a preorder
$\mathscr{R}_{init}$. The space efficient version of
\texttt{\em Sim}
computes the partition-relation pair
$(P_{sim},R_{sim})$ inducing $\mathscr{R}_{sim}$ the maximal forward
simulation on $T$ contained in $\mathscr{R}_{init}$ in:
\begin{center}
$O(|P_{sim}|^2.|{\rightarrow}|)$ time and
$O(|P_{sim}|^2+|{\rightarrow}|.\log|{\rightarrow}|)$ space.
\end{center}
\end{theorem}
In the case of Kripke structures, it seems possible to derive a version of
the algorithm which still works in $O(|P_{sim}|^2.|{\rightarrow}|)$ time
but uses only $O(|P_{sim}|^2+|Q|.\log|P_{sim}|)$ space. The idea is to not
use the option to scan the states of a block in linear time. The split
operation is thus now done in $O(|Q|)$ time and so on. This is just a bit
tedious to do. Moreover, in
practical cases, the problem is not the
$O(|{\rightarrow}|.\log|{\rightarrow}|)$ part of our space complexity
but the
$O(|P_{sim}|^2)$ part.
\section{Data structures}
\label{sec:DataStruct}
In what follows, we use different kinds of data: simple objects, arrays and
lists of objects. The size of the pointers has an importance for bit space
complexity. It should be
enough to differentiate all the considered objects and thus
$O(|{\rightarrow}|)$ for normalized LTS. We call a resizable array an array
which doubles its size when needed. Therefore, adding a new item in this kind
of array is done in constant amortized time.
First, we have to set $t.\Sl$ for each transition $t\in\rightarrow$, $q_a.\State$ and
$q_a.\Post$ for each state-letter $q_a$. To do
that we create a new array of transitions, $Post$, as the result of sorting the set
of transitions with the labels as keys, then with the source states as
keys. We use counting sorts. This means that the two sorts are done in
$O(|{\rightarrow}|)$ time since $Q$ and $\Sigma$ are in
$O(|{\rightarrow}|)$. Counting sorts are
stable. As a result, in $Post$,
transitions are packed by source states and within a pack of transitions
sharing the same source state, there are the sub-packs of transitions sharing
the same label. Then, we scan the elements of $Post$ from the first one to
the last one. For each transition $t=q\xrightarrow{a}q'$, we consider the
couple $(q,a)$ and whenever it changes we create a new state-letter $q_a$
and we set $t.\Sl = q_a$. Then, we set $q_a.\State=q$ and
$q_a.\Range=(idx_{start}, idx_{end})$ with $idx_{start}$ the index in
$Post$ of the first transition from $q$ and labelled by $a$, and
$idx_{end}$ the index in $Post$ of the last transition from $q$ labelled by
$a$. Thanks to the two sorts, $(q_a.\Range, Post)$ provides an
encoding of $q_a.\Post$.
To represent $q'.\Pre$, we just create a new array of transitions, $Pre$, as
the result of sorting, by a counting sort, the set of transitions with
destination states as keys. Then, by scanning this array, we associate to
each state $q'$ the couple $q'.\Range=(idx_{start}, idx_{end})$ with
$idx_{start}$ the index of the first transition in $Pre$ having $q'$ as
destination state and $idx_{end}$ the index of the last transition in $Pre$
having $q'$ as destination state. Therefore, $(q'.\Range, Pre)$ provides an
encoding of $q'.\Pre$. All of this is done in
$O(|{\rightarrow}|)$ time and uses
$O(|{\rightarrow}|.\log|{\rightarrow}|)$ space.
We do not encode the content of a block of $P$ by a doubly-linked list of
states like the other papers because we need a certain stability
property. The problem is the following:
let $C$ be a block; we want to be able to add the content of another block
$D$ in $C.\NotRel$ in constant time, without scanning the content of
$D$. We also want the encoding of $C.\NotRel$ to be in
$O(|P_{sim}|.\log|P_{sim}|)$ space. A first idea is to store in $C.\NotRel$ only the
reference of $D$. But a problem arises when $D$ is split in $D_1$ and
$D_2$: we have to update all the $C.\NotRel$ and replace $D$ by $D_1$ and
$D_2$; this implies an overall time complexity of $O(|P_{sim}|^3)$ since
there may be $|P_{sim}|$ splits. But $O(|P_{sim}|^3)$ is
too much for the time efficient version of our algorithm.
A solution is to use a kind of
family tree. A block of $P$ is a leaf of the tree and is linked to the set
of its states. When a block is split, it becomes an internal node of the
tree and is no more directly linked to the set of
its states but to its two son blocks (which are both linked to their
respective set of states). This solution satisfies all the
requirements since in a binary tree the number of nodes is at most two
times the number of leaves.
However here is a more efficient solution. The set of states $Q$ is copied
in a new array $Q_p$ such that
the set
of states of a block $B\in P$ is arranged in consecutive slots of this array
$Q_p$. Therefore, to memorize the content of $B$, we just have to
memorize $B.\Start$ the starting position and $B.\End$ the ending position of the
corresponding
subarray in $Q_p$. When a block $B$ is split in two sub-blocks
$B_1$ and $B_2$, we just arrange the content of the subarray of $B$ in
$Q_p$ such that the first slots are for $B_1$ and the last slots are
for $B_2$. This way, even after several splits, the set of states
which once corresponded to a block of $P$ will always be in the same
subarray, even if the order of the states is modified. Note that to
do the rearrangement, during a split, of the states of $B$ in $Q_p$ we
need to memorize for a given state $r\in Q$ its position, $r.\PosQp$, in
$Q_p$. See function \texttt{SplitImplementation}.
For a given $C\in P$, $C.\NotRel$ may thus be encoded as a set of couples
$(x,y)$, which once corresponded to blocks of $P$, such that $x$ is the
start of a subarray in $Q_p$ and $y$ is the end of
that subarray. Due to the fact that $B.\NotRel\cap(\cup
B.\Rel)=\emptyset$ and when the content of a block is added in $B.\NotRel$
the block is removed
from $B.\Rel$, all the blocks encoded in $C.\NotRel$ are
different. Therefore, $|C.\NotRel|$ is in $O(|P_{sim}|)$ and thus
the encoding of all the $\NotRel$'s is done in $O(|P_{sim}|^2.\log(|Q|))$
space. The factor $\log(|Q|)$ is due to the fact that in a couple $(x,y)$,
the maximum value for $x$ and $y$ is $|Q|$. However, we want the encoding of
all the $\NotRel$'s to be in $O(|P_{sim}|^2.\log(|P_{sim}|))$. To do that,
remember the family tree mentioned above. The number of past and actual
blocks of $P$ is in $O(|P_{sim}|)$. Therefore, we introduce $N$, a set of
nodes. During the initialization, we associate $B.\Node\in N$, a node, to each
block $B\in P$. The starting and ending position in $Q_p$ corresponding
to a block $B$ is not directly stored in $B$ but in $B.\Node$ via
$B.\Node.\Start$ and $B.\Node.\End$. Therefore, when we want to add the
content of a block $D$ in $C.\NotRel$, in fact we add $D.\Node$ in
$C.\NotRel$. Since $|N|$ is in $O(|P_{sim}|)$, the encoding of all the
$\NotRel$'s is done in $O(|P_{sim}|^2.\log(|P_{sim}|))$ space. Note that the
encoding of all the nodes in $N$ is done in $O(|P_{sim}|.\log(|Q|))$
space. The $O(\log(|Q|))$ factor accounts for the couple $(n.\Start,n.\End)$
for each $n\in N$. The set $N$ and the $\NotRel$'s are encoded by resizable
arrays.
For a given $B\in P$, the set $B.\Rel$ is encoded by a resizable boolean
array. To know whether a block $C$
belongs to $B.\Rel$ we check $B.\Rel[C.\Index]$ with $C.\Index$ the index of
$C$ in the array encoding $P$. The encoding of all $\Rel$'s is therefore
done in $O(|P_{sim}|^2)$ space.
A given block $B\in P$ is encoded in $O(\log(|{\rightarrow}|))$ space since
we just need a constant number of integers, less than $|{\rightarrow}|$, or
pointers for $B.\Index$,
$B.\Node$, $B.\NotRel$, $B.\Rel$, $B.\SplitCount$ (see function
\texttt{SplitImple\-mentation}) and $B.\RelCount$ (for the time efficient
version). Thanks to $Q_p$, $B.\Node.\Start$ and $B.\Node.\End$, scanning of the
states contained in a block $B\in P$ is done in linear time. The set $P$
is encoded as a resizable array of blocks. Therefore, the encoding of
$P$ is done in $O(|P_{sim}|.\log(|{\rightarrow}|))$ space and the encoding
of the contents of the blocks of $P$ is done in $O(|Q|.\log(|Q|))$ space.
The set $S$ is encoded as a list of
blocks (we could have used a resizable array) but we also need to add a
boolean mark to the blocks of $P$ to know whether a given block is already
in $S$. That way, we keep the encoding of $S$ in
$O(|P_{sim}|.\log(|{\rightarrow}|))$ space.
The sets $alph$, $SplitCouples$ and $Touched$ are implemented like $S$: a list
and a binary mark on the respective elements. To reset one of these sets,
we simply scan the list of elements; for each of them we unset the
corresponding mark, then we empty the list. All of this is done in linear
time. The maximum size for $alph$ is $|\Sigma|$, while for $SplitCouples$ and
$Touched$ it is $|P_{sim}|$. Therefore, they are all encoded in
$O(|{\rightarrow}|.\log(|{\rightarrow}|))$ space.
To represent a set $a.\PreB$ or $a.\Remove$ with $a\in\Sigma$ we should not
use a list of states and a binary array indexed on $|Q|$. This would have
implied a total size of $|\Sigma|.|Q|$ for all the letters, which may
exceed $|{\rightarrow}|$. Instead, we use a list of elements of
$\SL(\rightarrow)$ per letter and only one common (for all the letters)
binary array indexed on $\SL(\rightarrow)$. We also use the fact that for a
given $a\in\Sigma$ a state cannot belong to both $a.\PreB$ and
$a.\Remove$ in an iteration of the \textbf{while} loop of \texttt{Sim}. When we need to
add a state $r$ in $a.\Remove$, for example, it is from a transition
$r\xrightarrow{a}r'$ issued from a call of $r'.\Pre$. This call provides
$r_a$ too. Then, we add $r_a$ in the list of $a.\Remove$ and we set the
mark associated with $r_a$, only if this mark is not already set. Cleaning
of $a.\Remove$ is done like cleaning of $alph$ (scanning the
elements and unsetting the associated marks). Note that we store
$r_a$ instead of $r$ in $a.\Remove$, but this is not a problem since
$r_a.\State$ gives us $r$. The encoding of all $a.\PreB$ and $a.\Remove$
is done in $O(|\SL(\rightarrow)|.\log |{\rightarrow}|)$ space and thus
in $O(|{\rightarrow}|.\log |{\rightarrow}|)$ space.
As denoted by the name, function \texttt{SplitImplementation} is an
implementation of function \texttt{Split} taking into account the new way
of encoding the partition. Clearly, a call of
\texttt{SplitImplementation($Remove,P$)} is done in $O(|Remove|)$ time.
\begin{function}[h]
\caption{SplitImplementation($Remove,P$)\label{func:splitImpl}}
$SplitCouples := \emptyset$; $Touched := \emptyset$; $BlocksInRemove := \emptyset$\;
{ // Assert : $\forall C\in P\,.\,C.\SplitCount=0$}\;
{ // When a block is created, all its counters are set to 0.}\;
\ForAll{$r\in Remove$} {
$C:=r.\Block$\;
$Touched := Touched \cup \{C\}$\;
$oldpos:=r.\PosQp$; $newpos:=C.\Node.\Start+C.\SplitCount$\;
$r':=Q_p[newpos]$\;
$Q_p[newpos]:=r$; $Q_p[oldpos]:=r'$\;
$r.\PosQp:=newpos$; $r'.\PosQp:=oldpos$\;
$C.\SplitCount:=C.\SplitCount+1$ \;
}
\ForAll{$C\in Touched $} {
\If {$C.\SplitCount= |C|$}
{
$BlocksInRemove:= BlocksInRemove\cup \{C\}$\;
}
\Else(\ //$C$ must be split)
{
$D:=\Newblock$; $P := P \cup \{D\}$\;
$D.\Node:=\Newnode$; $N := N \cup \{D.\Node\}$\;
$BlocksInRemove:= BlocksInRemove\cup \{D\}$\;
$D.\Node.\Start:=C.\Node.\Start$\;
$D.\Node.\End:=C.\Node.\Start+C.\SplitCount-1$\;
$C.\Node.\Start:=D.\Node.\End+1$\;
$D.\Rel := \Copy(C.\Rel)$\;
$D.\NotRel := \Copy(C.\NotRel)$\;
$SplitCouples := SplitCouples \cup \{(C,D)\}$\;
\lForAll{$pos\in \{D.\Node.\Start,\ldots,D.\Node.\End\}$} {$Q_p[pos].\Block:=D$}\;
}
$C.\SplitCount:=0$\;
}
\ForAll{$(C,D)\in SplitCouples,\,E\in P$}
{
\If{$C\in E.\Rel$} {$E.\Rel := E.\Rel \cup
\{D\}$\; }
}
\Return{$(P$, $BlocksInRemove$, $SplitCouples)$}
\end{function}
\section{Future Works}
\label{sec:future}
In order to simplify the presentation, no practical optimization has been
proposed. This will be done in a future work with the implementation of
the algorithms. For the moment we just recall
an easy theoretical optimization: the coarsest bisimulation relation should
be computed before, and used by the algorithms computing the coarsest
simulation relation. This reduces $\SL(\rightarrow)$, which is really
important for the space complexity of the time efficient version of the
algorithm,
and also reduces the transition relation, which has a positive impact on
the time complexity of all the versions of the algorithm.
Concerning the search of the coarsest bisimulation relation in a LTS, the
framework presented in the present paper can be adapted. We have
recently learned that an algorithm avoiding the effect of the size of the
alphabet in the time and space complexities of the bisimulation problem has
already been presented by Valmari \cite{Val09} in 2009. The approach of Valmari is different.
His splitters (roughly speaking, they play the same role of our refiners
but are
adapted for the bisimulation problem) depend conceptually on letters but he uses two partitions of
the set of transitions, beside the classical one for the states, to avoid
the negative effect of the size of the alphabet.
At first glance, an adaptation of our present work in the case of
bisimulation yields a simpler algorithm than the one of Valmari and,
furthermore, closer to the one of Paige and Tarjan for Kripke structures
\cite{PT87}. This will be made precise in a future paper.
\end{document} |
\begin{document}
\title{Random spatial growth \\ with paralyzing obstacles}
\author{J. van den Berg\footnote{Research funded in part by the Dutch BSIK/BRICKS project.}, \,
Y. Peres\footnote{Research supported in part by NSF grant DMS-0605166.}, \,
V. Sidoravicius\footnote{Partially supported by CNPq, Brazil}\, and
M.E. Vares \footnote{Partially supported by CNPq, Brazil}
\\
{\small CWI and VUA, Microsoft and UC Berkeley, IMPA and CBPF} \\
{\footnotesize email: J.van.den.Berg@cwi.nl;
peres@stat.berkeley.edu; vladas@impa.br; eulalia@cbpf.br} }
\date{}
\maketitle
\begin{abstract}
We study models of spatial growth processes where initially there
are sources of growth (indicated by the colour green) and sources of
a growth-stopping (paralyzing) substance (indicated by red). The
green sources expand and may merge with others (there is no
`inter-green' competition). The red substance remains passive as
long as it is isolated. However, when a green cluster comes in touch
with the red substance, it is immediately invaded by the latter,
stops growing and starts to act as red substance itself. In our main
model space is represented by a graph, of which initially each
vertex is randomly green, red or white (vacant), and the growth of
the green clusters is similar to that in first-passage percolation.
The main issues we investigate are whether the model is well-defined
on an infinite graph (e.g. the $d$-dimensional cubic lattice),
and what can be said
about the distribution of the size of a green cluster just before it
is paralyzed. We show that, if
the initial density of red vertices is positive, and that of white
vertices is sufficiently small, the model is indeed well-defined and
the above distribution has an exponential tail. In fact, we believe
this to be true whenever the initial density of red is positive. \\
This research also led to a relation between invasion percolation and critical
Bernoulli percolation which seems to be of independent interest.
\end{abstract}
\noindent {\it 2000 MSC:} primary 60K35, secondary 60K37, 82B43.
\noindent {\it Key words and phrases:} Growth process,
percolation, invasion percolation.
\begin{section}{Introduction}
\begin{subsection}{Description of the model and the main problems}
Consider the following model where different `objects' (or
`populations') grow simultaneously until they hit a paralyzing
substance, in which case they stop growing and become paralyzing
themselves: Each vertex of a connected, finite (or countably
infinite, locally finite) graph $G= (V,E)$ is initially,
independently of the other vertices, white, red or green with
probabilities $p_w$, $p_r$ and $p_g$ respectively. Each edge of $G$ is
initially closed. By a green cluster we will mean a maximal
connected subgraph of $G$ of which all vertices are green and all
edges are open. We denote the green cluster containing $v$ at time
$t$ by $C_g(v,t)$. (If $v$ is not green at time $t$, then $C_g(v,t)$
is empty). It is clear from the above that initially the only green
clusters are single green vertices. These green clusters can
grow, merge with other green clusters and finally become paralyzed (red) as follows. \\
Whenever an edge $e = \langle v,w \rangle$ is closed and has at least one
green end-vertex, say $v$, it becomes open at rate $1$. Moreover,
immediately after it gets open the following action takes place
instantaneously: If exactly one end-vertex, say $v$, is green and
the other, $w$, is white, $w$ becomes green (and we say, informally,
that the green cluster of $v$ grows by absorbing $w$). If $w$ is
red, then each vertex in the green cluster of $v$ becomes red (and
we say that the green cluster of $v$ becomes paralyzed). Finally, if
both vertices are green, no extra action takes place. (Note that in
this case the two vertices may have been in two different green
clusters right before the opening of $e$, but are now in the same
green cluster).
Note that once an edge is open it remains open, that once a vertex is green it never turns white (but may become red),
and once a vertex is red it remains red.
\noindent
Let us first consider the case where the graph $G$ is finite. In
that case the above process is clearly well-defined and has some
obvious properties, which we will state after introducing the
following terminology. By a configuration (or `site-bond
configuration') we mean an element of $\{0,1\}^E \, \times \,
\{\mbox{ green, red, white } \}^V$, where $0$ and $1$ denote `open'
and `closed' respectively. An `open-bond cluster' (with respect to a
configuration) is a maximal connected subgraph of $G$ of which all
edges are open (for that configuration). We say that it is
non-trivial if it has at least one edge. Note that the earlier
defined `green cluster' is an open-bond cluster of which each vertex
is green. A `red cluster' is defined similarly. We call a
configuration admissible if each non-trivial open-bond cluster is
either a red cluster or a green cluster. Now we are ready to state
the announced simple properties and observations: If $G$ is finite,
the process is a Markov chain on the set of admissible
configurations. The admissible configurations where no vertices are
green or all vertices are green are absorbing, and the chain will
with probability 1 end in one of those configurations. In
particular, if initially there was at least one red vertex, then every
green vertex will eventually become red. Moreover (because initially
all edges were closed) at any time, every non-empty red cluster $\mathcal{C}$
contains exactly one vertex $v$ that was originally red.
We say that this vertex $v$ is `responsible for' the other vertices in
$\mathcal{C}$ becoming red (or, that the vertices in $\mathcal{C}$ became red
`due to' $v$).
\noindent
If $G$ is {\it infinite}, for instance the $d$-dimensional cubic
lattice, the situation is much more problematic, due to the fact
that the range of the interaction is not bounded: an entire
cluster, no matter how large, can change colour instantaneously.
The main questions we address in this paper concerning the above
process, and some other related models, are:
\begin{itemize}
\item {\bf 1.} Does the dynamics exist? This is a nontrivial issue for
such interacting processes on infinite graphs: See for instance,
Aldous' frozen percolation process (\cite{A}), which was shown by
Benjamini and Schramm (1999, private communication) not to exist in
$\Z^2$. For related matters on the non-existence of that process,
see also Remark (i) in Section 3 of \cite{BeT} and the example due
to Antal J\'arai (1999, private communication) which follows it. A
crucial difference between Aldous' model and ours is that in Aldous'
model, clusters freeze only when they are infinite, while we believe that in our model, due to the
positive density of initially red vertices, the green clusters do
not become infinite (see the next item).
A model which has more in common with ours is
the forest-fire model studied in \cite{D}. But again there is a
major difference: in that model there is a uniform lower bound for
the probability that a cluster of interest is `destroyed' before
growing further, and this uniform bound is a crucial ingredient in
the existence proof in \cite{D}. In our model there seems to be no
analog of such a property.
\item{\bf 2.} Is a green cluster
always finite at the moment it becomes red? Does the distribution of its radius (and of its volume)
have an exponential tail?
\item {\bf 3.} Let $w$ be an originally red vertex. Is the set of
originally green vertices $v$ with the property that $w$ is
responsible for $v$ becoming red, finite? Does the distribution of
its volume have an exponential tail?
\end{itemize}
The organization of the paper is as follows. In Subsection 1.2 we
give a partial answer to the questions listed above. In particular,
Theorem \ref{mainthm} states that, for $G = \Z^d$ and $p_w$
sufficiently small, the answers to the above questions are positive.
Our research also led to a new result for invasion percolation (see
Theorem \ref{uni-bound} and Proposition \ref{inv-perc}). In
Subsection 1.3 we explain the notion of `autonomous region' which
plays an important role in this paper. In subsection 1.4 we briefly
discuss some alternative versions of the model. In section 2 we give
a proof of the main result for the special case where $p_w = 0$. It
turns out that that case can be dealt with in a very elegant and
transparent way. It serves as an introduction to the proof of the
more complicated case where $p_w$ is small but positive, which is
given in Section 3. At the end of Section 3 we come briefly back to
the alternative versions of the model discussed in Subsection 1.4.
\end{subsection}
\begin{subsection}{Statement of the main results}
Let $G$ be a connected, countably infinite graph of bounded degree,
and consider the model presented in Subsection 1.1, with parameters
$p_w$, $p_g$ and $p_r$. Our main result, Theorem \ref{mainthm}
below, states, among other things, that under certain conditions the
dynamics is well-defined. The formulation of the condition requires
some additional notation and terminology: By the distance $d(v,w)$
between two vertices $v$ and $w$ of $G$ we mean the length (i.e.
number of edges) of the shortest path from $v$ to $w$. The diameter
of a set of vertices $W$ of $G$ is defined as $\max_{v, w \in W}
d(v,w)$, and $\partial W$ will denote the set of all vertices that
are not in $W$ but have an edge with some vertex in $W$.
The number of elements of a set $W$ will be denoted by
$|W|$. For a finite graph $H$, denote by $|H|$ the number of
vertices in $H$. Let $D$ denote the maximal degree in $G$.
For each vertex $v$ of $G$ and $p \in (0,1)$, let $\xi_v(p)$ denote
the expectation of the volume (i.e. number of vertices) of the
occupied cluster of $v$ in site percolation on $G$ with
parameter $p$. Further, define
$$\xi(p) = \sup_v \xi_v(p).$$
Recall the definition of $C_g(v,t)$ in Subsection 1.1.
We are now ready to state our main results.
\begin{thm}\label{mainthm}
Suppose that
\begin{equation} \label{key}
(D-1) \xi(p_w) < p_r \,.
\end{equation}
We have
\item
{(a)} The dynamics on $G$ is well-defined. With probability 1, at
any time, each red cluster has a unique initially red vertex.
\item (b) For any originally green
vertex $v$, let $C_g(v) = \cup_{t \geq 0} C_g(v,t)$ be the green
cluster of $v$ just before it becomes red. Let $|C_g(v)|$
be the number of vertices of $C_g(v)$. Then, with probability $1$,
$|C_g(v)|$ is finite for each such $v$. Moreover, the distribution of
$|C_g(v)|$ has an exponential tail.
\item (c) If $G$ is a Cayley graph and $w$ is an originally red vertex in $G$,
then the set $D(w)$ consisting of all green vertices that become red
due to $w$ is finite; moreover, the diameter of $D(w)$ has an
exponential tail. (Here, extending the definition given before in
the case of finite $G$, if $v$ is an originally green vertex and $w$
is the (unique a.s.) originally red vertex in the red clusters that
eventually contain $v$, we say that $v$ becomes red due to $w$.)
\item (d) If $G$ is the $d$-dimensional cubic lattice, then the distribution of
$|D(w)|$ also has an exponential tail.
\end{thm}
Note that in the case $p_w = 0$, condition~\eqref{key} of Theorem
\ref{mainthm} is satisfied for every positive $p_r$. For this case
we have, in addition to Theorem \ref{mainthm}, considerably
stronger results. In particular, the
following theorem holds, where we fix $p_w = 0$ and then vary the
parameter $p_r$. In this theorem and its proof, $P_p$ denotes the
ordinary (Bernoulli) bond percolation measure with parameter $p$
and $P_{cr}$ stands for $P_{p_c}$, where $p_c$ denotes the critical
probability for this percolation model. By $B(n)$ we denote the
set of all vertices at (graph) distance $\leq n$ from some
specified vertex $O$. The event that there is an open path from $O$ to $\partial B(n)$ is
denoted by $\{O \leftrightarrow \partial B(n)\}$. Further, the symbol $\approx$ denotes
logarithmic equivalence, i.e., we say for two positive functions
$g(n)$ and $h(n)$ that $g(n) \approx h(n)$ as $n \rightarrow
\infty,$ if
$$\frac{\log h(n)}{\log g(n)} \rightarrow 1, \,\,\, n \rightarrow \infty.$$
Let $W$ be a set of vertices in a graph $G$ with a distinguished vertex $O$.
By the {\em radius} of $W$ we mean the maximal distance from $O$ to a vertex of $W$.
We are now ready to state the following theorem.
\begin{thm} \label{uni-bound}
Let $C_g(\cdot)$ be as in part (b) of Theorem \ref{mainthm}. If $G$
is the square lattice in two dimensions (or the triangular or the
hexagonal lattice), and $p_w = 0$, then
$$P(\mbox{The radius of } C_g(O) \mbox{ is at least } n) \,\, \uparrow f(n), \,\,
\mbox{ as } p_r \downarrow 0,$$
where $f$ is a function satisfying
$$f(n) \approx P_{cr}(O \leftrightarrow \partial B(n)).$$
\end{thm}
Theorem \ref{uni-bound} follows easily from the following
Proposition concerning invasion percolation on the
lattices considered in the theorem. Before we state it, we briefly
recall the invasion percolation model (on these lattices) and some
of its basic properties. (Invasion percolation was introduced by Wilkinson and Willemsen, see \cite{WW}.
For a detailed study of this process
see \cite{LPS}, or the earlier works \cite{CCN}, \cite{Ale} and \cite{J2}).
To each edge $e$ we assign, independent of
the other edges, a random variable $\tau_e$, uniformly distributed
in the interval $(0,1)$. We construct, recursively, a
growing tree. Initially the tree consists only of one vertex, say
$O$. At each step we consider all edges that have exactly one
endpoint in the tree that has been created so far. From these edges
we select the one with smallest $\tau$ value and add it (and its
`external' endpoint) to the tree. Let $\tau(n)$ be the $\tau$ value
of the $n$th edge invaded by this procedure.
For any infinite transitive graph $G$, it is proved in \cite{HPS} that
\begin{equation}
\label{hpseq}
\limsup_{n \rightarrow \infty} \tau(n) = p_c,
\end{equation}
where $p_c$ is the critical probability for bond percolation.
Further, note that, if all $\tau(n) < p_c$, then $O$ belongs to an
infinite cluster on which all $\tau$ values are smaller than
$p_c$. For the graphs in the statement of Theorem \ref{uni-bound}
this latter event has probability $0$. (See \cite{G} for this
classical result and references). Hence, for these lattices,
(a.s.) there is an $n$ with $\tau(n) > p_c$. This, together with
\eqref{hpseq}, implies that (a.s.) $\tau(n)$ achieves its maximum
(and that this maximum is larger than $p_c$). The following
proposition is about the invaded region at the step where this
maximum is achieved. Although this and related regions have been
under consideration before in the literature (see the subsection
`Ponds and outlets' in Stein and Newman (1995)),
this result is, as far as we know, new. \\
{\bf Remark:} The {\em invasion basin\/} of $O$ is defined similarly to the invasion tree,
except that at every step, the edge of minimal $\tau$-value among the edges outside the
current invasion basin
that have {\em at least\/} one endpoint in the basin is added to the basin.
The invasion basin is typically not a tree. It is easy to
see that each edge $e$ in the invasion tree is in the invasion basin,
and the set of sites in the invasion basin
immediately before such an edge $e$ is added to it
is the same as the set of vertices in the invasion tree immediately before $e$ is added.
\begin{prop} \label{inv-perc}
Consider invasion percolation on the square lattice (or the
triangular or the hexagonal lattice) with edge values $\tau_e$. Let
$\hat e$ be the edge with maximal $\tau$ value in the invasion basin
(as explained above). Let $\hat R$ be the radius of the region that
has been invaded up to the step where $\hat e$ is invaded. We have:
\item(a)
\begin{equation*}
P(\hat R > n) \ge P_{cr}(O \leftrightarrow
\partial B(n)) ;
\end{equation*}
\item(b)
\begin{equation}
\label{invperc} P(\hat R > n) \approx P_{cr}(O \leftrightarrow
\partial B(n)), \,\,\, n \rightarrow \infty.
\end{equation}
\end{prop}
\noindent{\bf Remarks:} \\
(a) Proposition \ref{inv-perc} has triggered further research on the comparison of ponds and critical percolation
clusters: see recent refinements and generalizations in \cite{BJV}. \\
(b) The value $\hat R$ above can also be described in the
following, somewhat informal, way. Suppose each edge $e$ is closed
at time $0$ and becomes open at time $\tau_e$ (after which it
remains open). The open cluster of $O$ grows in time. Up to time
$p_c$ it is finite, but at some time larger than $p_c$ it will
become infinite (a.s). The radius of this cluster just before it
becomes infinite is $\hat R$.
\end{subsection}
\begin{subsection}{Description of the model in terms of passage times.
Autonomous regions}
Consider the description of the dynamics in the beginning of this
section, and assume for the moment that the graph is finite. Recall
that an open edge remains open and that a closed edge with at least
one green end-vertex becomes open at rate $1$. This means that if we
assign to each edge $e$ an exponentially distributed (mean $1$)
random variable $\tau(e)$, independent of the other edges (and of
the initial colours of the vertices), the time evolution of the
process can be completely described in terms of the initial colours
of the vertices and the $\tau-$ variables of the edges: Each edge
$e$ remains closed until the time $t$ at which $L_t(e)$ (defined
below) has Lebesgue measure $\tau_e$. (If no such time exists, the
edge remains closed forever). Here $L_t$ is defined by
\begin{equation}
\label{Ldef} L_t(e) = \{s < t \, : \, e \mbox{ has at least one
green end-vertex at time } s\}.
\end{equation}
(Since, once a vertex is green it can change colour only one more
time, $L_t(e)$ is clearly an interval or union of two intervals).
When $e$ becomes open and one of its end-vertices is white or red,
the appropriate action in the description in Section 1.1 is carried
out instantaneously.
In the following this equivalent description of the process turns
out to be very convenient. To illustrate it and to emphasize the
difference with one of the modified models that will be discussed in
Subsection 1.4, we give the following example:
\begin{example}
\label{dynex1}
Consider the graph with vertices denoted by $\{1, 2, 3, 4, 5\}$ and edges $\langle i, i+1 \rangle$, $1 \leq i \leq 4$.
Suppose that the initial colours of the vertices $1, \cdots, 5$ are red, green, white, green, red
respectively, and that the $\tau$ values of the edges $\langle 1, 2 \rangle, \cdots, \langle 4, 5 \rangle$ are
$6$, $3$, $4$ and $2$ respectively. As one can check by following the above description,
the initially green vertex $2$ becomes red at time $5$ due to vertex $5$.
\end{example}
\noindent
Now suppose some finite, but possibly large, graph $G$ is given,
together with initial colours $c(v), v \in V$ and `opening times'
$\tau(e), e \in E$. Further suppose we are only interested in the
time evolution in a small subgraph of $G$, for instance just one
initially green vertex $v$. Do we need to `follow' the process in
the whole graph to reconstruct what happens at $v$? Often this is
not the case. An instructive example is when $v$
is incident to three
edges, $e$, $e'$ and $e''$ with the properties that $\tau(e)$ is
smaller than $\tau(e')$ and $\tau(e'')$, and that the other
end-vertex of $e$, which we denote by $w$, is red. In that case we
know that $v$ is green until time $\tau(e)$ and from then on is red
(which would also happen in the `isolated' graph consisting only of
the vertices $v$ and $w$ and the edge $e$). This holds no matter
what the initial colours of the vertices in $V \setminus \{v,w\}$
and the $\tau$-values of the edges in $E \setminus \{e, e', e''\}$
are. Note that this still holds when we extend $G$ to a bigger graph
(with $c$ and $\tau$-variables) as long as we don't add extra edges
to $v$.
This brings us to the notion of {\it autonomous set}: Let
$H=(V(H),E(H))$ be a finite sub-graph of a graph $G$, and let
$\bar E$ be a finite set of external edges of $H$, i.e. edges in
$G$, which have exactly one vertex in $V(H)$. Assume that we have
given an initial colour assignment $c(v)$ to all $v \in V(H)$ and
opening times $\tau(e)$ to all $e \in E(H) \cup \bar E$. Let $\bar
H$ be the minimal graph containing $H$ as subgraph and $\bar E
\subset E(\bar H)$. We say that $(H,\bar E)$ is {\bf autonomous}
(with respect to $\tau$ and $c$), if for every finite subgraph
$G_0$ of $G$ which has $\bar H$ as a subgraph, the growth process
on $G_0$ starting with a colour pattern and opening times
extending the above given $c$'s and $\tau$'s has, restricted to
$H$, always the same time evolution, i.e. the same evolution as it
would have with $G_0=\bar H$, and which does not depend on colours
at the vertices in $\bar H$ not in $H$. In the simple example
considered in the previous paragraph, the graph with vertices $v$
and $w$, and edge $e$, together with the set of external edges
$\bar E = \{e', e''\}$, is autonomous.
Often, when the identity of $\bar E$ is obvious and the choice of
$c$- and $\tau$- variables is considered to be known, we simply say
that $H$ is autonomous. For this reason we might refer to the
autonomous set as ``autonomous subgraph".
\noindent
Now suppose we have an infinite graph $G$ with given $\tau$- and $c$- variables. If every vertex (and every edge) is
contained in a finite autonomous subgraph of $G$, the infinite-volume time evolution on $G$ can be defined
in an obvious way.
The key of the proof of Theorem \ref{mainthm} is to show that, under the condition in the theorem, these autonomous
subgraphs exist with probability $1$. That is, for almost-all initial colour patterns, and almost-all $\tau$-values
each vertex and edge is contained in a finite autonomous region.
\end{subsection}
\begin{subsection}{Some alternative versions of the model}
There are many modifications or generalizations of our model (which we will sometimes call the
{\it basic model} to distinguish it from these modified versions). Below we mention four of them.
\noindent
(i) In the basic model the $\tau$ variables are exponentially distributed. It is easy to see that if
the initial colours of the vertices are given,
and none of them is white,
the time evolution is essentially determined by the order statistics of the $\tau$ variables. It is
also easy to see that in that case
each edge $e$ becomes open at time $\tau_e$ or remains closed forever.
From such observations it easily follows that, if $p_w = 0$, replacing the exponential distribution of the
$\tau$ variables by some other continuous distribution, leaves the law of the process unchanged, apart from
an obvious time change.
This is not true if $p_w > 0$. However, as one can easily see from its proof, Theorem \ref{mainthm} remains valid
under such replacement of distribution.
\noindent
(ii) Recall that in our basic model an edge $e$ becomes open at the
smallest time $t$ with the property that the subset of times $s < t$
at which $e$ has at least one green end-vertex, has Lebesgue measure
$\tau_e$. A natural modification of this rule is the one where
$e=\langle v, w \rangle$ becomes open at the smallest time $t$ with the
property that $v$ is green throughout the interval $[t - \tau_e, t)$
or $w$ is green throughout the interval $[t - \tau_e, t)$. To
illustrate the difference between the rules, consider again the
graph with $\tau$ values and initial colours in Example
\ref{dynex1}. As can be easily checked, under the modified rule the
vertex $2$ will no longer become red due to vertex $5$ but due to
vertex $1$ (and at time $6$ instead of $5$). It turns out that
Theorem \ref{mainthm} remains valid for this modified model and that its proof
only needs some small modifications.
\noindent
(iii) The third modification is the following model in continuous
space. Consider two homogeneous Poisson point processes $\zeta_G$,
$\zeta_R$ on $\mathbb R^d$, with intensities $\lambda_G=1$,
$\lambda_R \equiv \lambda \in (0, + \infty) $ respectively. The
points of $\zeta_G$ ({\it green}) are interpreted as sources of
growth, and those of $\zeta_R$ ({\it red}) as sources of
``paralyzing poison''. All other elements of $\R^d$ are uncoloured.
From each source in $\zetata_G$ at time zero a green Euclidean sphere
begins to grow with constant speed 1 (of its radius). When two or
more green spheres intersect, they keep growing in the same manner,
but we say that they have become connected (are in the same
connected green component). If a growing green sphere hits a red
region, its {\it entire} connected green component (note that this
is a union of spheres) instantaneously gets red and stops growing.
Analogs of the questions for our basic model in Subsection 1.1, in
particular the existence question, arise naturally, but so far we
have made very little progress. Although at first sight there is
some resemblance with the model studied in \cite{HaM}, the arguments
used there seem not to work here.
\noindent
(iv)
Consider the following change of rule of the previous model (model (iii) above):
When a green sphere hits a red region, {\it only} the centers of all the spheres
of its connected green component become red; the remaining parts of the spheres
disappear (become uncoloured). This change makes the model much easier to handle
(using an invasion procedure resembling the one we will use in Section 2 for the
case $p_w =0$ of our basic model), but also
considerably less natural, and we will not discuss it in more detail.
\end{subsection}
\end{section}
\begin{section}{Proofs for the case $p_w = 0$}
\begin{subsection}{General properties for the case $p_w = 0$}
The case where $p_w = 0$
is considerably easier
than the general case and serves as
a good introduction to the latter.
We
start with some deterministic observations and claims. Let us first
restrict to a finite graph $G$, with given $\tau$-values and
$c$-values. We assume that at least one vertex has initial colour
red, at least one vertex has initial colour green, and no vertex has
initial colour white. Let $x$ be a vertex with initial colour green,
and let $t(x)$ denote the time at which $x$ becomes red. Let $\Pi$
denote the set of all paths of which the starting point is $x$ and
the end-vertex has initial colour red. It is easy to see that
\begin{equation}
\label{lowerbd}
t(x) \geq \min_{\pi \in \Pi} \max_{e \in \pi} \tau(e).
\end{equation}
Indeed, for each $t$ smaller than the r.h.s. of \eqref{lowerbd}
there is a `cut set' of edges that are still closed at time $t$
and `shield' $x$ from all initially red vertices. It is also quite
easy to see that equality holds in \eqref{lowerbd}. The
algorithmic (and inductive) argument below is not the most direct
one but has the advantage that it gives more, namely an elegant
and suitable construction of an autonomous region. This particular
construction will almost immediately lead to a proof of parts (a)
and (b) of Theorem \ref{mainthm} for the case $p_w = 0$. The
`algorithm' is a modification (`stopped' version) of the standard
invasion percolation procedure (starting at $x$) described a few
lines above Proposition \ref{inv-perc}. At each stage of the
procedure we have a tree which is a subgraph of $G$. Initially
this tree consists only of the vertex $x$. At each step we
consider all edges that have exactly one end-vertex in the tree,
also called the {\it external edges} of the tree. Among these
edges we select the one with minimal $\tau$-value and add it (and
its external end-vertex) to the tree. The procedure is stopped as
soon as an initially red vertex is added to the tree. Let us
denote this vertex by $R$, and the final tree given by this
procedure by $T(x)$. Let $\tau^*$ be the maximal $\tau$ value on
this tree, and $e^*$ the edge where this maximum is attained.
Removing this edge from the tree $T(x)$ `splits' the tree in two
parts. Let $T_1^*(x)$ denote the part containing $x$.
\noindent
\begin{claim}
\label{claimpw0}
\noindent
(i) The vertex $R$ is responsible for $x$ becoming red. \\
(ii) $x$ becomes red at time $\tau^*$. That is, $t(x) = \tau^*$.
Moreover, $C_g(x)$ (defined in
part (b) of the Theorem) is the set of vertices of $T_1^*(x)$. \\
(iii). Let $\bar E$ denote the set of all edges of which one
end-vertex is a vertex of $T(x)$, different from $R$, and one
end-vertex is not in $T(x)$. Let $\widehat{T}(x)$ be the graph with
the same vertices as $T(x)$ and with all edges that have both
end-vertices in $T(x)$. Then $(\widehat{T}(x), \bar E)$ is
autonomous (with respect to this coloring).
\end{claim}
\noindent
\begin{proof} (of Claim)
The proof of the Claim is by induction on the number of steps in the
above invasion procedure. If the number of steps is $1$ we are in
the situation that the edge incident to $x$ with minimal $\tau$-
value has a red end-vertex, and the above Claim follows easily.
(Note that this case corresponds with the example in the second
paragraph of Subsection 1.3). Now suppose the number of steps is
larger than $1$. Consider the edge $e^*$ defined above. Let $E^*$
denote the set of external edges, except $e^*$ itself, at the stage
of the procedure immediately before $e^*$ was added. From the
definition of invasion percolation, all edges in $E^*$ have
$\tau$-value larger than $\tau^*$. On the other hand, all edges that
were added after that step have, by definition, $\tau$-value smaller
than $\tau^*$. Therefore the edges in $E^*$ were never added to the
tree. Hence, since $R$ was added after $e^*$ (and was the first red
point added to the tree), it follows that every path in $G$ from $x$
to a red point contains $e^*$ or an edge in $E^*$. Therefore, by
\eqref{lowerbd} we get that
$$t(x) \geq \tau^*.$$
To get the reversed inequality, note the following. Let $y$ denote
the external end-vertex of $e^*$ when $e^*$ was added to the tree.
We already remarked that removing $e^*$ from $T(x)$ `splits'
$T(x)$ in two separate trees, and we denoted the part containing
$x$ by $T_1^*(x)$. Let $T_2^*(x)$ denote the other part. It
follows from the above that $T_2^*(x)$ contains $y$ and $R$. We
will assume that the initial colour of $y$ is green (otherwise the
Claim follows easily). It is easy to see from the above that a
similar invasion procedure as before, but now starting at $y$
instead of $x$, has as its final tree the tree $T_2^*(x)$. By the
induction hypothesis we have that $y$ becomes red at the time
which is equal to the maximal edge value in $T_2^*(x)$ and hence
before time $\tau^*$, and that $R$ is responsible for $y$ becoming
red. Also note that, from the earlier observations, just before
time $\tau^*$ there is an open path from $x$ to the end-vertex
$\neq y$ of $e^*$. Since $e^*$ becomes open at time $\tau^*$ it
follows that $x$ becomes red at time $\tau^*$. Moreover, since $R$
is responsible for $y$ becoming red, it is also responsible for
$x$ becoming red. This (and the earlier made observation that all
external edges $\neq e^*$ of $T_1^*(x)$ have $\tau$ value larger
than $\tau^*$) completes parts (i) and (ii) of the proof of
Claim~\ref{claimpw0}. Similar arguments show part (iii).
\end{proof}
\noindent
Now we are ready to handle the case where $G$ is infinite. If $G$ is
infinite and $p_r > 0$, it is not a priori clear that the process
described in Subsection 1.1 is well-defined. However, the above
invasion procedure and the corresponding Claim now give us the
instrument to define it and to give a proof of Theorem \ref{mainthm}
in this particular case.
\end{subsection}
\begin{subsection}{Proof of Theorem \ref{mainthm} for the case $p_w =
0$} For each green vertex $x$ simply run the invasion procedure
starting from $x$. Since the initial colours and the $\tau$
variables are independent, we have, at each step in the invasion
from $x$, probability $p_r$ of hitting a red vertex (independently
of the previous steps in this invasion). Hence the invasion
procedure starting at $x$ stops with probability $1$, and (by part
(iii) of Claim~\ref{claimpw0}) yields an autonomous region
containing $x$. Since the graph has countably many vertices, this
yields a construction of the process on $G$ and completes the
proof of part (a) of the theorem. Moreover it shows that
Claim~\ref{claimpw0} also holds (a.s.) for $G$. Further, the
number of steps in the invasion procedure from an initially green
vertex clearly has a geometric distribution: the probability that
it is larger than $n$ is $(1-p_r)^n$. Since (by part (ii) of
Claim~\ref{claimpw0}) $|C_g(v)|$ is at most the number of steps in
the invasion procedure,
part (b) of the theorem follows. \\
{\it Proof of part (c)}: For each pair of vertices $x,y$, let
$I(x,y)$ denote the event that $x$ is initially green and that $y$
is initially red and responsible for $x$ becoming red. It follows
immediately from the above that for all vertices $x$ and all $m$
\begin{equation}
\label{Rbd} \sum_{y : d(x,y) \geq m} P(I(x,y)) \, = \, P\left(d(x,
R(x)) \geq m \right) \le (1-p_r)^m.
\end{equation}
Further, using that $G$ is a Cayley graph, the `mass transport
principle' (see e.g. Section 7.1 in \cite{LyP} or \cite{HPS}) gives:
$$ P\left(D(w) \mbox{ has radius } \geq m\right) \leq \sum_{v \,: \, d(v,w) \geq m} \! \! P(I(v,w)) =
\sum_{v \,: \, d(v,w) \geq m} \! \! P(I(w,v)),$$ which by
\eqref{Rbd} is at most $(1-p_r)^m$. This completes the proof of
part (c) of the theorem.
\noindent
{\it Proof of part (d)}. As we will see, this follows from earlier
observations, together with a block argument which is quite
similar to one in percolation theory, due to Kesten (see
\cite{K}). Below we denote the $d-$dimensional cubic lattice
simply by $\Z^d$.
Let, as before, $T(x)$ denote the tree produced by the invasion
procedure starting at $x$. We want to prove exponential decay for
$P(|D(v)| > n)$, where $v$ is an initially red point. Without loss
of generality we take $v = {\bf 0}$. We say that a finite set $W$ of
vertices containing $\bf 0$ is a lattice animal (abbreviated as
l.a.) if for all $w \in W$ there is a path in $\Z^d$ from $\bf 0$ to
$w$ of which every vertex is in $W$. From the definitions (and
since, as we saw in (c), $D(\bf 0)$ is a.s. finite), it is clear
that $D(\bf 0)$ is a lattice animal. Let $L$ be an even integer and
consider the partition of $\mathbb{Z}^d$ into cubes $Q_L (x) :=
[-L/2, L/2 )^d + L x$, $ x \in \Z^d$. We say that $x \in \Z^d$ is
{\it fine} if $Q_L(x) \cap D({\bf 0}) \neq \emptyset$. Let $V_F$
denote the set of all vertices that are fine. Since $D({\bf 0})$ is
a lattice animal, $V_F$ is also a lattice animal. Further, we say
that $x \in \Z^d$ is {\it proper} if $Q_L(x)$ contains a vertex $y$
with $|T(y)| > L/4$, and write $I( x \mbox{ is proper })$ for the
indicator function of the corresponding event. (Here $T(\cdot)$ is
as defined in the invasion procedure earlier in this Section).
Finally, a subset of $\Z^d$ is proper if every element in the set is
proper. It is clear that for every $x \neq \bf 0$, if $x$ is fine,
then $x$ is proper. It is also clear that if $D(\bf 0)$
contains vertices outside $Q_L(\bf 0)$, then $\bf 0$ is also proper.
Recall from Claim~\ref{claimpw0}(iii) that for each tree $T$ in $\Z^d$ and each vertex $y$, the
event $\{T(y) = T\}$ depends only on the $c$ values of the vertices
of $T$ and the $\tau$ values of the edges that have at least one
end-vertex in $T$. From this it easily follows that the process
$\left(I( x \mbox{ is proper }), \, x \in \Z^d\right)$ is
$2$-dependent (see e.g. \cite{G} for this notion). Let $\varepsilon=\varepsilon(L) = \varepsilon(L,d)$ be the probability that a
given vertex is proper. Since, for each $y$, the distribution of
$|T(y)|$ is geometric (and $|Q_L(y)|$ is polynomially bounded in
$L$) it is clear that for fixed $d$
$$\varepsilon(L,d) \rightarrow 0 \mbox{ as } L \rightarrow \infty.$$
The above mentioned $2$-dependence gives that there is a constant $C_1 = C_1(d)$ such that
for every set $W \subset \Z^d$
\begin{equation}
\label{1dp} P(W \mbox{ is proper }) \leq \varepsilon^{\frac{|W|}{C_1}}.
\end{equation}
Finally, we use that there is a constant $C_2 = C_2(d)$ such that
the number of lattice animals of size $m$ is at most $C_2^m$, see~\cite{G}.
Together, the above gives (noting that each l.a. of size $\geq
m$ contains a l.a. of size $m$) that for $n$ large enough (depending on $L$),
\begin{eqnarray}
& & P(|D({\bf 0})| > n) \leq P\left(\exists \mbox{ a proper l.a.
of size } \lceil\frac{n}{|Q_L|}\rceil \right) \\ \nonumber \leq & &
C_2^{\frac{n}{|Q_L|}+1} \, \varepsilon(L)^{\frac{n}{|Q_L| C_1}} \\
\nonumber = & & C_2 \, \left[ \left(C_2
\,\,\varepsilon(L)^{\frac{1}{C_1}}\right)^{1/|Q_L|}\right]^n.
\end{eqnarray}
Taking $L$ so large that $C_2 \,\, \varepsilon(L)^{(1/C_1)} < 1$ completes
the proof of part (d). This completes the proof of Theorem
\ref{mainthm} for the special case where $p_w = 0$.
\end{subsection}
\begin{subsection}{Proof of Proposition \ref{inv-perc} and Theorem \ref{uni-bound}}
We first prove Proposition \ref{inv-perc}. We say that an edge is
$p$-open if $\tau_e < p$. Define $p$-open paths and $p$-open
clusters in the obvious way. To prove the Proposition we will derive
suitable lower and upper bounds for the
l.h.s. of \eqref{invperc} in terms of an expression of the form of its r.h.s. \\
The lower bound is very easy: Since $\tau_{\hat e} > p_c$ (see the
paragraph below \eqref{hpseq}), it follows immediately that (a.s.)
the region which is already invaded at the step where $\hat e$ is
invaded, contains all the vertices of the $p_c$-open cluster
of $O$. Hence the l.h.s of \eqref{invperc} is larger than or equal to the r.h.s.\\
The upper bound is more complicated.
We use the standard percolation notation $\theta(p)$ for the probability that $O$ is in
an infinite $p$-open cluster.\\
Define, for each $p$ and $n$, the following two events:
\begin{eqnarray*}
A_{n,p} = \{\exists \mbox{ a } p \mbox{-closed circuit with
diameter}\geq n &\mbox{in the dual lattice}\\\mbox{ that contains }
O \mbox{ in its interior}\}.
\end{eqnarray*}
$$D_p = \{O \mbox{ belongs to an infinite } p \mbox{-open cluster }\}.$$
Note that $P(D_p) = \theta(p)$ and that if $p_1 < p_2$,
then $D_{p_1} \subset D_{p_2}$ and $A_{n, p_2} \subset A_{n, p_1}$.
Let $\hat \tau = \tau_{\hat e}$.
Let $p'$ be some number between $p_c$ and $1$.
The following observation is straightforward.
\noindent
{\it Observation}\\
(a) If $\hat \tau > p'$ and $\hat R \geq n$, then there is a $p > p'$
such that the event $A_{n,p}$ occurs. \\
(b) If $\hat \tau <p'$, then there is a $p<p'$ such that $D_p$ occurs.
\noindent
Let, for $ p > p_c$, $L(p)$ be the correlation length
(=$L(p,\varepsilon_0)$) as defined in Section 1 in the paper by Kesten
(1987) on scaling relations. (See \cite{K2}). That is, $L(p)$ is
the smallest $n$ such that the probability that there is a
$p$--open horizontal crossing of a given $n \times n$ box is
larger than $1 - \varepsilon_0$. Here $\varepsilon_0$ is an appropriately
(sufficiently small) chosen positive constant. (From this
definition it is clear that $L(p)$ is non-increasing in $p$ on the
interval $(p_c,1]$). It is well-known (see (2.25) in \cite{K2} and
the references preceding that equation) that there are constants
$C_1 > 0$ and $C_2
> 0$ such that for all $p >p_c$ and all $n$,
\begin{equation} \label{ke1}
P_p(A_{n,p}) \leq C_1 \, \exp\left(- \frac{C_2 n}{L(p)}\right).
\end{equation}
Further, Theorem 2 in \cite{K2} says that there is a constant $C_3
> 0$ such that, for all $p > p_c$,
\begin{equation} \label{ke2}
\theta(p) \leq C_3 P_{cr}\left(O \leftrightarrow \partial B(L(p))\right).
\end{equation}
Now take, for $p'$, the supremum of those $p$ for which $L(p) >
n/(C_4 \log n)$, where $C_4$ is a positive constant that will be
appropriately chosen later.
Obviously,
\begin{equation}
\label{obv-eq}
P(\hat R \geq n) \leq P(\hat R \geq n, \, \hat \tau > p')
+ P(\hat \tau < p').
\end{equation}
The first term in the r.h.s of \eqref{obv-eq} is, by Observation
(a) above and the `nesting' property of the events $A_{n,p}$
(stated in the sentence below the definition of these events),
smaller than or equal to
\begin{equation}
\label{obv1} \lim_{p \downarrow p'} P(A_{n,p}) \leq \limsup_{p
\downarrow p'} C_1 \exp(- \frac{C_2 n}{L(p)}) \leq C_1 \exp(- C_2
C_4 \log n),
\end{equation}
where the first inequality follows from \eqref{ke1} and the second inequality from the definition of
$p'$.
The second term of \eqref{obv-eq} is, by Observation (b) and the
`nesting' property of the events $D_p$, smaller than or equal to
\begin{equation}
\label{obv2}
\lim_{p \uparrow p'} \theta(p) \leq \limsup_{p \uparrow p'} C_3 P_{cr}\left(O \leftrightarrow \partial B(L(p))\right)
\leq C_3 P_{cr}\left(O \leftrightarrow \partial B(\frac{n}{C_4 \log n})\right),
\end{equation}
where the first inequality follows from \eqref{ke2} and the second follows by (again) using the
definition of $p'$.
Putting \eqref{obv-eq}, \eqref{obv1} and \eqref{obv2} together we have
\begin{equation} \label{kec}
P(\hat R \geq n)
\leq C_3 P_{cr}\left(O \leftrightarrow \partial B(\frac{n}{C_4 \log n})\right)
+ C_1 \, \exp(- C_2 C_4 \log n).
\end{equation}
It is believed that $P_{cr}(O \leftrightarrow \partial B(n))$ has a
power law behaviour. This has only been proved for site percolation
on the triangular lattice. However, for the percolation models we
are considering, we do know that this function of $n$ has power-law
lower and upper bounds. As a consequence we can choose $C_4$ so
large that the second term in the r.h.s. of \eqref{kec} is (for all
large enough n) smaller than the first term. Finally, it follows
quite easily from RSW arguments (see e.g. Sections 11.7 and 11.8 in \cite{G})
that $P_{cr}\left(O \leftrightarrow
\partial B(n/C_4\log n )\right) \approx P_{cr}\left(O \leftrightarrow
\partial B(n)\right)$. This completes the proof of Proposition
\ref{inv-perc}. $\Box$
\noindent
Now we are ready to prove Theorem \ref{uni-bound}. The invasion
procedure in Subsection 2.1, which was used in the proof of
Theorem \ref{mainthm}, differs from the `ordinary' invasion
percolation model (described in the paragraphs preceding
Proposition \ref{inv-perc}), in that it stops as soon as the growing
tree `hits' a red vertex. There is strictly speaking another
difference: the $\tau$ values in the former case were
exponentially distributed and those in the latter case were
uniformly distributed on $(0,1)$. However, that difference clearly
doesn't matter, and in the rest of this proof we assume the $\tau$
variables to be uniformly distributed on $(0,1)$. Let us call the
former procedure a `stopped' invasion procedure (with parameter
$p_r$), and the latter an ordinary invasion procedure. All these
procedures (the stopped procedures with $p_r$ varying between $0$
and $1$, and the ordinary procedure) can be coupled in the
following natural way: Assign to each vertex $v$, independent of
the others, (and of the $\tau$ variables) a random variable
$\rho(v)$, uniformly distributed on the interval $(0,1)$. When we
now do invasion percolation (w.r.t. the $\tau$ variables) and stop
when we hit a vertex with $\rho$ value smaller than $p_r$, this
corresponds exactly with the above mentioned stopped invasion with
parameter $p_r$. In this coupled setting, the set $C_g(O)$ for the
stopped model with parameter $p_r$ is clearly non-increasing in
$p_r$, and the union of these sets over all the values $p_r>0$ is
exactly the region mentioned in Proposition \ref{inv-perc}. Theorem
\ref{uni-bound} now follows from this proposition. $\Box$
\end{subsection}
\end{section}
\begin{section}{Proof for the case $p_w > 0$}
In this section we prove Theorem \ref{mainthm} for the case $p_w>0$.
Recall that in the
special case where there are no white vertices (see Section 2) there
was an elegant invasion procedure which produced, with probability
$1$, a finite autonomous set containing a given vertex or edge. This
is much more complicated in the general case, when there are white
vertices. We still have a procedure which, if it stops, gives an
autonomous set containing, say, a given vertex $x$. This algorithm
starts as before, with one invasion tree, which initially consists
only of the vertex $x$, and which grows by invading the edge with
minimal $\tau$ value. However, when we hit a `fresh' white vertex
$y$ we have to investigate the `space-time paths from outside' that
have possibly influenced $y$. This is done by starting new invasion
trees in the green vertices on the boundary of the white cluster of
$y$. As before, an invasion tree stops when it invades a red vertex.
In the situation in the previous Section this also marked the end of
the algorithm. But in the current situation it only marks the end of
one invasion tree, while the others keep growing and creating new
invasion trees. In this way the algorithm might go on forever.
However, we show that under the condition in Theorem \ref{mainthm} the
algorithm, which is described more precisely below, does end.
\noindent
The input is a connected graph $G = (V,E)$, the initial colours $c(v), v \in
V$ and the opening times $\tau(e), e \in E$, and the vertex $x$ or
edge $e$ for which we want to find an autonomous region. Here we
only handle the case concerning a vertex $x$ and we assume that $x$
is green; the other cases can be done in a very similar way. For the
moment it suffices to restrict to finite graphs. The algorithm will
produce an autonomous subgraph $H$ and, for some vertices $v$ of
$H$, non-negative numbers $t_g(v)$ and $t_r(v)$, and for some edges
$e$ of $H$ a positive number $t(e)$. Here $t_g(v)$ and $t_r(v)$ will
denote the time at which $v$ becomes green and red, respectively.
The value $t(e)$ will be the time when $e$ becomes open. It will
be clear from the description below that, at each stage of the
algorithm the edges to which a $t$-value has been assigned form a
collection of disjoint trees. Each tree in this collection has one of two labels:
`active' or `paralyzing'.
How these labels are assigned is described in Subsection
\ref{Desc} below.
The collection of active trees is
denoted by $\mathcal{T}_a$ and the collection of paralyzing trees by $\mathcal{T}_p$.
As we will see, new active or paralyzing trees are `created' during
the algorithm, and active trees can merge with each other or
with a paralyzing tree. In the former case the new tree is active, in the latter case
it is paralyzing.
The set of edges which have at least one
end-vertex in an active tree (and not both end-vertices in the same
active tree) is denoted by $\mathcal{E}$. With some abuse of terminology
we say that a vertex is in $\mathcal{T}_a$ if it is a vertex of some tree
in $\mathcal{T}_a$. A similar remark holds w.r.t. $\mathcal{T}_p$.
Apart from the above, we need the following auxiliary variables and structures,
which will be assigned during the algorithm.
The first auxiliary structure we mention here is a set $S$, which
can be interpreted as the set of all initially white vertices that `have been
seen until the current stage' in the algorithm.
We say that a vertex `is registered' if it is in $\mathcal{T}_p$, $\mathcal{T}_a$ or $S$.
Further, to each
edge $e \in \mathcal{E}$ (as introduced above) a value $t_1(e)$ will be
assigned, which can be interpreted as a tentative, possible value
for $t(e)$.
Finally, the following definition will be important:
The {\it white cluster} $C_w(v)$ of a vertex $v$ is defined as the maximal connected subset of $G$ of
which all vertices $y$ have initial colour $c(y) = $ white. (Note that this notion, in contrast with
the notion of green clusters (defined in
Section 1), does not involve the state (open/closed) of the edges.)
The boundary of the white cluster of $v$, denoted by $\partial C_w(v)$, is the set of all vertices that
are not in $C_w(v)$ but have an edge to some vertex in $C_w(v)$.
If $c(v)$ is not white, then $C_w(v)$ and $\partial C_w(v)$ are empty.
\begin{subsection}{Description of the algorithm} \label{Desc}
Using the notions above we are now ready to describe the algorithm. It starts with action 1 below,
followed by an iteration of (some of) the other actions. Recall that $c(x)$ is green.
\noindent
{\bf 1.} {\bf Initialization of some of the variables and structures.} \\
Set $\mathcal{T}_p = \emptyset$, $\mathcal{T}_a = \{\{x\}\}$, and $S = \emptyset$. \\
Set $t_g(x) = 0$, $\mathcal{E} $ as the set of all edges incident to $x$,
and $t_1(e) = \tau(e)$ for all edges $e\in \mathcal{E}$.
\noindent
{\bf 2.} {\bf Selection of minimal external edge.} \\
Remove from $\mathcal{E}$ all edges of which both endpoints are in the same tree of $\mathcal{T}_a$. \\
{\it Comment: such edges can have resulted from some of the actions below} \\
If $\mathcal{E} = \emptyset$, stop.
Otherwise, let $e$ be the edge in $\mathcal{E}$ with minimal $t_1$-value. \\
Write $e = \langle v,y \rangle$ with $v$ in $\mathcal{T}_a$. (This way of
writing is of course not unique if both end-vertices
of $e$ are in $\mathcal{T}_a$ but that doesn't matter). Let $T$ denote the tree in $\mathcal{T}_a$ of which $v$ is a vertex. \\
If $y$ is not in $\mathcal{T}_a$, $\mathcal{T}_p$ or $S$ (that is, $y$ is `fresh') go to 2a, else go to 2b.
\noindent
{\bf 2a. Fresh vertex.} \\
Determine $c(y)$. \\
If $c(y) =$ red, set $t(e) = t_1(e)$ and go to 3a. \\
If $c(y) =$ green, set $t(e) = t_1(e)$ and go to 4. \\
If $c(y) =$ white, go to 6.
\noindent
{\bf 2b. Registered vertex.} \\
Set $t(e) = t_1(e)$. \\
If $y$ is in $\mathcal{T}_p$ go to 3b. \\
If $y$ is in $\mathcal{T}_a$ go to 5. \\
Else go to 7.
\noindent
{\bf 3a. Fresh red.} \\
{\it Comment: This case can be handled in almost the same way as 3b
below and therefore, with an `administrative trick',
we simply turn it into the latter case:} \\
Set $t_r(y) = 0$.
Add to $\mathcal{T}_p$ the tree which consists only of the vertex $y$. \\
Go to 3b.
\noindent
{\bf 3b. Active tree $T$ becomes paralyzed.}
Set $t_r(z) = t(e)$ for all vertices $z$ of $T$. \\
Remove from $\mathcal{E}$ all edges of which one end-vertex is in $T$
and the other end-vertex is not in $\mathcal{T}_a$. Let $T'$ be the tree
in $\mathcal{T}_p$ of which $y$ is a vertex. Replace, in $\mathcal{T}_p$, the
tree $T'$ by that obtained from `glueing together' $T$ and $T'$
via the edge $e$.
Remove $T$ from $\mathcal{T}_a$. \\
Go to 2.
\noindent
{\bf 4. Fresh green. } \\
Set $t_g(y) = 0$. For each edge $e'$ incident to $y$ that was not
yet in $\mathcal{E}$: add $e'$ to $\mathcal{E}$ and set $t_1(e') = \tau(e')$.
Replace, in $\mathcal{T}_a$, the tree $T$ by a new tree obtained from
glueing $y$ to $T$ by the edge $e$. \\
Go to 2.
\noindent
{\bf 5. Two active trees join.} \\
Let $T' \in \mathcal{T}_a$ be the active tree of which $y$ is a vertex.
Replace, in $\mathcal{T}_a$, the trees $T$ and $T'$ by a new tree
obtained from `glueing together' $T$ and $T'$ with the edge $e$. \\
Go to 2.
\noindent
{\bf 6. Fresh white.} \\
Add every vertex of $C_w(y)$ to $S$. \\
For each vertex $z$ in $\partial C_w(y)$ that has $c(z) =$ green and is not
in $\mathcal{T}_a$ or $\mathcal{T}_p$, do the following: \\
Set $t_g(z) = 0$; add the tree $\{z\}$ to $\mathcal{T}_a$; add to $\mathcal{E}$
each edge $e'$ incident to $z$ that is not yet in $\mathcal{E}$, and set
$t_1(e') = \tau(e')$.
\noindent
For each vertex $z$ in $\partial C_w(y)$ that has $c(z) =$ red and is not
in $\mathcal{T}_p$, set $t_r(z) = 0$ and add the tree $\{z\}$ to $\mathcal{T}_p$.\\
Go to 2.
\noindent
{\bf 7. Registered white.} \\
Set $t_g(y) = t(e)$. Replace, in $\mathcal{T}_a$, the tree $T$ by the
tree obtained from $T$ by `glueing' the vertex $y$ to it by the
edge $e$. For each edge $e' = \langle y, z \rangle$ of $y$ that is not
in
$\mathcal{E}$, add it to $\mathcal{E}$ and set $t_1(e')$ as follows: \\
If $z$ is in $\mathcal{T}_p$ but $c(z) \neq$ red, set
\begin{equation}
\label{adjust} t_1(e') = t(e) + \tau(e') -(t_r(z) - t_g(z)),
\end{equation}
else set
$$t_1(e') = t(e) + \tau(e').$$
{\it Comment: The subtracted term in \eqref{adjust} accounts for the time that $e'$ already had
a green end-vertex. See also the Remark at the end of Subsection 3.2} \\
Go to 2.
\noindent
{\bf Remark:} \\
Note that initially there is only one active tree and that new
active trees are only formed in part 6 of the algorithm. Also note
that initially there are no paralyzing trees; these can be formed in
part 6 and in part 3a. Moreover, 3a always leads, via 3b, to the
elimination of an active tree. Now consider the case that $G$ has no
vertices with initial colour white. Then the algorithm never enters
part 6 (nor part 7), so that throughout the algorithm there is
one active tree until a red vertex is `hit'. From such
considerations it is easily seen that in this case the algorithm
reduces to the one described in Section 2.
\end{subsection}
\betagin{subsection}{Correctness of the algorithm}
If $G$ is finite the above algorithm will clearly stop. Moreover, we
claim that if $G$ has at least one vertex with initial colour red,
we have the following situation at the end of the algorithm: The set
of active trees $\mathcal{T}_a$ is empty. The set $\mathcal{T}_p$ contains one
or more trees, and the vertex $x$ is in one of them. Each of these
trees has exactly one vertex with initial colour red, and this
vertex is `responsible' for the other vertices in that tree to
become red. The following pair, $(H, \bar E)$, is autonomous: The
vertices of $H$ are the vertices in $\mathcal{T}_p$ together with all
vertices in $S$. The edges of $H$ are all edges of which both
end-vertices are in the above set. The set $\bar E$ is the set of
all edges of which one end-vertex is a vertex
$v$ of $H$ with $c(v) \neq$ red, and the other end-vertex is not in $H$.
Further, each initially green vertex $v$ of $H$ becomes red at
time $t_r(v)$.
The `correctness' of the above algorithm (that is,
the above claim) can, in principle, be proved by induction,
e.g. on the number of edges. Instead of giving a full proof (which would be extremely tedious)
we present the key ideas/observations ((a) - (d) below) to be used in such proof.
\noindent (a) As in many induction proofs it is useful, or even necessary, (for carrying out
the induction step) to generalize
the statement one wants to prove. In the current situation this generalization is as follows: In the above algorithm,
information is stored in the administration when the vertices involved are `encountered' by
the algorithm. In particular, in action 6 a white cluster and its boundary are
`stored' because a vertex of the white cluster had been encountered (as endpoint of the edge selected in action 2).
The same algorithm still works if at one or more stages of the algorithm such information about a white cluster (and its
boundary) is stored `spontaneously' (that is, without this cluster having been encountered in the sense above).
\noindent
(b) The main observation for doing induction on the number of edges is the following:
Let, among all edges with at least one initially
green endpoint, $\hat e$ be the one with minimal $\tau$ value. Let $\hat x$ and $\hat y$ denote its endpoints.
We may assume that $\hat x$ is initially green. It is clear that the first thing that happens in the `real' growth
process is the opening of $\hat e$ (namely, at time $\tau(\hat e)$). It is also clear that from that moment
on the growth process behaves
as if starting on a graph with one vertex less, namely the graph obtained by `identifying' (or glueing together) $\hat x$
and $\hat y$ (with an obviously assigned colour: green if $c(y)$ is white or green; red if $c(y)$ is red).
\noindent
(c) To carry out the induction step it has to be shown that the algorithm has a property analogous to
that for the real
process described in (b) above. That this is indeed the case, can be seen as follows: As long as $\hat x$ and $\hat y$
are not `registered' in the algorithm, the algorithm behaves the same as it would behave
for the graph obtained after the identification described in (b).
Moreover, one can easily see from the description of the algorithm that immediately after one of these vertices is
registered,
the other one also is, and that they are immediately `attached to each other' (by the edge $\hat e$) in the same
tree.
\noindent
(d) The following side remark must be added to (c) above: Suppose
that $\hat y \in C_w(y)$ in action 6 at some stage of the
algorithm. This cluster $C_w(y)$ could be larger than that in the
graph obtained by identifying $\hat y$ and $\hat x$. This means
that in that step `more information is collected' than in the
situation where $\hat x$ and $\hat y$ would be identified from the
beginning. It is exactly for this issue that the generalized
algorithm (and claim) in (a) was given.
\end{subsection}
\begin{subsection}{Proof of Theorem \ref{mainthm}}
\begin{proof}
It follows, in the same way as in the case $p_w = 0$, that on
an infinite graph the dynamics is well-defined provided the
algorithm stops with probability $1$. We will show that, under the
condition \eqref{key} in the statement of the Theorem, the algorithm
indeed stops. In fact, the arguments we use will give something
stronger, namely Proposition \ref{prop1} below, from which not only
part (a) of Theorem~\ref{mainthm} follows, but which we will also use to prove
part (b), (c) and (d).
\begin{prop}\label{prop1}
Under the condition of Theorem~\ref{mainthm}, we have that, for each $x$, the
above mentioned algorithm stops, and, moreover, the distributions of
the volume and the diameter of the graph $H$ defined above have an
exponential tail.
\end{prop}
\begin{proof} By the $k$th step of the algorithm we mean everything done by the
algorithm between the $k$th and $(k+1)$th time the algorithm
`enters' part 2a in the description in Subsection 3.1. Recall that
we say that a vertex is registered if it is in $\mathcal{T}_a$,
$\mathcal{T}_p$ or $S$. Let $\nu_k$ be the number of registered vertices
at the beginning of step $k$. (In particular, $\nu_1 = 1$.) If the
algorithm is already terminated during step $j$ for some $j < k$,
we set $\nu_k$ equal to the number of registered vertices at the
moment of termination. Further, let $y_k$ denote the `fresh'
vertex (i.e. the vertex $y$ in part 2a of the description in
Subsection 3.1) treated in step $k$ of the algorithm. (In
particular, $y_1$ is the end-vertex of the edge incident to $x$
with minimal $\tau$ value). Let $\eta_k = \nu_{k+1} - \nu_k$.
Further, let $\alpha_k$ denote the net increase of the number of active trees during step $k$ of the algorithm.
If the algorithm is terminated during step $k$, we set $\alpha_k = -1$.
(This choice is somewhat arbitrary; it is simply a suitable choice to ensure that certain statements below
hold for all $k$).
Note that the initial colours of the vertices are independent
random variables, each being white, red or green with probability
$p_w$, $p_r$ and $p_g$ respectively. It is clear from the
algorithm that we may consider the colour of a vertex as `hidden'
until the moment the vertex becomes registered. Let $\mathcal{F}_k$ be
all information obtained by the algorithm until the beginning of
step $k$
(including the identity but not the colour of $y_k$).\\
Let
$N = \min\{n \, : \, 1 + \sum_{k=1}^n \alpha_k = 0\}$.
It is easy to see that if $N$ is finite the algorithm stops during or before step $N$, and the number
of vertices in the above defined graph $H$ is
\begin{equation}
\label{Hbd}
1 + \sum_{k=1}^N \eta_k.
\end{equation}
Note that if $c(y_k)$ is white, the procedure is sent to part 6, and
the newly registered vertices in step $k$ of the algorithm are
exactly the vertices of $C_w(y_k)$ and the not yet registered
vertices of $\partial C_w(y_k)$; moreover, $|\mathcal{T}_a|$ increases
during this step by at most the number of green vertices in
$\partial C_w(y_k)$. We write {\it at most}, because during the
remainder of step $k$ no new active trees are created but already
present active trees may disappear (which happens if the algorithm
enters part 3b before it enters part 2a again).
Similarly, if $c(y_k)$ is red or green, then the only newly registered vertex is $y_k$ itself; moreover,
in the former case
$|\mathcal{T}_a|$ goes down during step $k$ by at least $1$, while in the latter case it goes down or
doesn't change. \\
For every connected set $W$ of vertices with $|W| \ge 2$, the number of vertices in
the boundary of $W$ is at most $(D-1) |W|$; hence, we have
\begin{equation}
\label{etbd}
\eta_k \leq D |C_w(y_k)| + \I_{\{c(y_k) \mbox{ not white}\}}.
\end{equation}
\begin{equation}
\label{albd}
\alpha_k \leq (D-1) |C_w(y_k)| - \I_{\{c(y_k) \mbox{ is red}\}}.
\end{equation}
Note that (since $y_k$ is `fresh') the conditional probability that $c(y_k)$ is red, white
or green, given $\mathcal{F}_k$, is
$p_r$, $p_w$ and $p_g$
respectively. Also note that, by the condition in the Theorem, $p_w < 1/(D-1)$ and hence
(as is well-known and easy to check) there is a $q <1$ such that for all $n$ and all vertices $v$,
\begin{equation}
\label{expcw}
P(|C_w(v)| \geq n) \leq q^n.
\end{equation}
Moreover, it is easy to see that conditioned on $\mathcal{F}_k$, which includes the information that $y_k$ is
a specific vertex, say $y$, the cluster size
$|C_w(y_k)|$ is stochastically smaller than $|C_w(y)|$.
Hence the bound \eqref{expcw} also holds (a.s) if we replace its l.h.s. by $P(|C_w(y_k)| \ge n | \mathcal{F}_k)$.
This, combined with \eqref{etbd}
immediately gives that there is a
$\gamma < 1$ such that for all $k$ and $n$,
\begin{equation}
\label{etdbd}
P(\eta_k \geq n | \mathcal{F}_k) \leq \gamma^n.
\end{equation}
As to the $\alpha$'s, define (compare \eqref{albd}), for every vertex $v$,
\begin{equation}
\label{alvbd}
\alpha(v) = (D-1) |C_w(v)| - \I_{\{c(v) \mbox{ is red}\}}.
\end{equation}
Let $\alpha'(v), \, v \in V$ be independent copies of the $\alpha(v), \, v \in V$.
By a stochastic domination argument similar to the one that led to \eqref{etdbd}, we have
for all vertices $v$, and all
positive integers $k$ and $n$,
\begin{equation}
\label{aldbd}
P(\alpha_k \geq n | \mathcal{F}_k,\, y_k = v) \leq P(\alpha(v) \geq n) = P(\alpha'(v) \geq n).
\end{equation}
And, again by \eqref{expcw}, there is a $\lambda < 1$ such that for all $n$ and $v$
\begin{equation}
\label{alv-xbd}
P(\alpha'(v) \geq n) = P(\alpha(v) \geq n) \leq \lambda^n.
\end{equation}
Further note that, for each vertex $v$, we have $E(|C_w(v)|) = \xi_v(p_w)$.
Hence, condition \eqref{key} in Theorem~\ref{mainthm} says that there is an
$\varepsilon > 0$ such that for all vertices $v$ we have
\begin{equation}
\label{almbd}
E(\alpha'(v)) = E(\alpha(v)) < -\varepsilon.
\end{equation}
\noindent
From \eqref{aldbd} and the definition of the random variables $\alpha'(v), \, v \in V$,
it follows (from stochastic domination) that, for all positive integers $K$,
\begin{equation}
\label{supbd}
P\left(\sum_{k=1}^K \alpha_k \geq 0\right) \leq
\sup^* P\left(\sum_{k=1}^K \alpha'(v_k) \geq 0\right),
\end{equation}
where we use `$*$' to indicate that the supremum is taken over all tuples
of $K$ distinct vertices $v_1, v_2, \dots, v_K$.
\noindent
From \eqref{alv-xbd} and \eqref{almbd}
it follows (by
standard large-deviation upper bounds
for independent random variables)
that there is a $\beta < 1$ such that for all $K$ and all distinct vertices
$v_1, v_2, \dots, v_K$,
\[
P\Bigl(\sum_{k=1}^K \alpha'(v_k) \geq 0\Bigr) \leq \beta^K.
\]
\noindent
From this and \eqref{supbd} it follows that the distribution of $N$
has an exponential tail.
Putting this together with \eqref{etdbd} and \eqref{Hbd} we get that
the number of vertices in $H$ has an exponential tail. Indeed the
event that $1+\sum_{k=1}^N \eta_k \ge n$ is contained in the union
of the events $N \ge an$ and $\sum_{k=1}^{an} \eta_k \ge n$; the
probabilities of these events decay exponentially in $n$ for
suitable $a$.
This completes the proof of Proposition \ref{prop1}. (Note that the diameter of $H$ is
at most its volume, since $H$ is a connected graph).
\end{proof}
\noindent
{\it Parts (a) and (b) of Theorem~\ref{mainthm}} follow immediately
from Proposition~\ref{prop1} (noting that the vertices of $C_g(x)$ belong to $H$). \\
Using Proposition~\ref{prop1}, {\it Parts (c) and (d)} of Theorem~\ref{mainthm} can now be derived in the same way as in the special
case $p_w=0$ in Section 2.
This completes the proof of Theorem \ref{mainthm}.
\end{proof}
\noindent
{\bf Remark:} For the alternative model (i) in Subsection 1.4, the
proof of Theorem \ref{mainthm} is exactly the same. Note that the
proof doesn't use that the $\tau$'s are exponentially distributed;
it applies in the same manner to any continuous
distribution. \\
For the alternative model (ii) the algorithm in
Subsection 3.1
needs a few small adaptations. Apart from this the proof remains practically the same.
\end{subsection}
\end{section}
{\bf Acknowledgments.} Two of the authors (V.S. and M.E.V.) learned
about the continuum model from E.J. Neves. We thank Antal J\'{a}rai
for comments on Proposition \ref{inv-perc} and Chuck Newman for
drawing our attention to the article \cite{StN}. We also thank Ron
Peled and the referees for corrections in the first manuscript.
\begin{thebibliography}{A}
\bibitem[A]{A} D.J. Aldous. The percolation process on a tree
where infinite clusters are frozen. {\it Proc. Camb. Phil. Soc.}
\textbf{128}, 465--477 (2000).
\bibitem[ALE]{Ale} K.S. Alexander. Percolation and minimal spanning forests in infinite graphs.
{\em Ann. Probab.} {\bf 23}, 87--104 (1995).
\bibitem[BeS] {BeS} I. Benjamini and O. Schramm. {\it Private Communication} (1999).
\bibitem[BeT]{BeT} J. van den Berg and B. T\'oth.
A signal-recovery system: asymptotic properties, and construction
of an infinite-volume process. {\em Stoch. Proc. Appl.} {\bf 96},
177--190 (2001).
\bibitem[BJV]{BJV} J. van den Berg, A. J\'arai and B. V\'agv\"olgyi. The size of a pond in $2D$ invasion percolation.
{\em Electr. Comm. Probab.}
{\bf 12}, Paper 39, 411--420 (2007).
\bibitem[CCN]{CCN} J.T. Chayes, L. Chayes, and C.M. Newman. Bernoulli percolation above threshold: an invasion percolation analysis. {\em Ann. Probab.} {\bf 15}, 1272--1287 (1987).
\bibitem[D]{D} M. D\"urre. Existence of multi-dimensional infinite volume self-organized
critical forest-fire models. {\it Electronic J. Probab.} {\bf 11}, paper 22 (2006).
\bibitem[G]{G} G.R. Grimmett. {\it Percolation, second edition}, Springer (1999).
\bibitem[HaM] {HaM} O. H\"aggstr\"om and R. Meester. Nearest Neighbor and Hard Sphere Models in
Continuum Percolation. {\it Random Structures and Algorithms} {\bf 9}, 295--315 (1996).
\bibitem[HPS]{HPS} O. H\"aggstr\"om, Y. Peres and R. Schonmann.
Percolation on transitive graphs as a coalescent process: Relentless merging
followed by simultaneous uniqueness. In {\em Perplexing Problems in Probability} (M. Bramson and R. Durrett, eds.) 69--90. Birkh\"auser, Boston.
\bibitem[J]{J} A. J\'arai. Private communication (1999).
\bibitem[J2]{J2} A. J\'arai. Invasion percolation and the incipient infinite cluster in
2D. {\it Commun. Math. Phys.} {\bf 236}, 311--334 (2003).
\bibitem[K]{K} H. Kesten. Analyticity properties and power law estimates in percolation
theory. {\it Journal of Statistical Physics} {\bf 25}, 717--756
(1981).
\bibitem[K2]{K2} H. Kesten. Scaling relations for 2D percolation. {\it Commun. Math. Phys.}
{\bf 109}, 109--156 (1987).
\bibitem[LyP]{LyP} R. Lyons, Y. Peres. {\it Probability on trees and
networks.} Available at {\tt http://mypage.iu.edu/\textasciitilde rdlyons/}.
\bibitem[LPS]{LPS}
R. Lyons, Y. Peres, Y. and O. Schramm. Minimal spanning forests. {\em Ann. Probab.} {\bf 34}, 1665--1692
(2006).
\bibitem[StN]{StN} D.L. Stein and C.M. Newman. Broken ergodicity and the geometry of rugged
landscapes. {\it Phys. Rev. E} {\bf 51}, 5228--5238.
\bibitem[WW]{WW} D. Wilkinson and J. F. Willemsen. Invasion percolation: a new form of percolation theory.
{\it J. Phys. A} {\bf 16}, 3365--3376 (1983).
\end{thebibliography}
\end{document} |